Diffstat (limited to '.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util')
-rw-r--r--  .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__init__.py           |  49
-rw-r--r--  .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/connection.py         | 149
-rw-r--r--  .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/proxy.py              |  57
-rw-r--r--  .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/queue.py              |  22
-rw-r--r--  .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/request.py            | 137
-rw-r--r--  .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/response.py           | 107
-rw-r--r--  .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/retry.py              | 622
-rw-r--r--  .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssl_.py               | 504
-rw-r--r--  .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py | 159
-rw-r--r--  .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssltransport.py       | 221
-rw-r--r--  .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/timeout.py            | 271
-rw-r--r--  .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/url.py                | 435
-rw-r--r--  .venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/wait.py               | 152
13 files changed, 2885 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__init__.py b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__init__.py
new file mode 100644
index 00000000..4547fc52
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/__init__.py
@@ -0,0 +1,49 @@
+from __future__ import absolute_import
+
+# For backwards compatibility, provide imports that used to be here.
+from .connection import is_connection_dropped
+from .request import SKIP_HEADER, SKIPPABLE_HEADERS, make_headers
+from .response import is_fp_closed
+from .retry import Retry
+from .ssl_ import (
+    ALPN_PROTOCOLS,
+    HAS_SNI,
+    IS_PYOPENSSL,
+    IS_SECURETRANSPORT,
+    PROTOCOL_TLS,
+    SSLContext,
+    assert_fingerprint,
+    resolve_cert_reqs,
+    resolve_ssl_version,
+    ssl_wrap_socket,
+)
+from .timeout import Timeout, current_time
+from .url import Url, get_host, parse_url, split_first
+from .wait import wait_for_read, wait_for_write
+
+__all__ = (
+    "HAS_SNI",
+    "IS_PYOPENSSL",
+    "IS_SECURETRANSPORT",
+    "SSLContext",
+    "PROTOCOL_TLS",
+    "ALPN_PROTOCOLS",
+    "Retry",
+    "Timeout",
+    "Url",
+    "assert_fingerprint",
+    "current_time",
+    "is_connection_dropped",
+    "is_fp_closed",
+    "get_host",
+    "parse_url",
+    "make_headers",
+    "resolve_cert_reqs",
+    "resolve_ssl_version",
+    "split_first",
+    "ssl_wrap_socket",
+    "wait_for_read",
+    "wait_for_write",
+    "SKIP_HEADER",
+    "SKIPPABLE_HEADERS",
+)
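The __init__ module above is purely a backwards-compatibility shim: every name is re-exported from the submodule that actually defines it. A minimal sketch of that import surface, shown against a plain urllib3 install (under pip's vendored copy the prefix would be pip._vendor.urllib3):

    from urllib3.util import Retry, Timeout, make_headers, parse_url

    url = parse_url("https://example.com:8443/search?q=1")
    print(url.scheme, url.host, url.port)  # https example.com 8443
    print(make_headers(keep_alive=True))   # {'connection': 'keep-alive'}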
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/connection.py b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/connection.py
new file mode 100644
index 00000000..6af1138f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/connection.py
@@ -0,0 +1,149 @@
+from __future__ import absolute_import
+
+import socket
+
+from ..contrib import _appengine_environ
+from ..exceptions import LocationParseError
+from ..packages import six
+from .wait import NoWayToWaitForSocketError, wait_for_read
+
+
+def is_connection_dropped(conn):  # Platform-specific
+    """
+    Returns True if the connection is dropped and should be closed.
+
+    :param conn:
+        :class:`http.client.HTTPConnection` object.
+
+    Note: For platforms like AppEngine, this will always return ``False`` to
+    let the platform handle connection recycling transparently for us.
+    """
+    sock = getattr(conn, "sock", False)
+    if sock is False:  # Platform-specific: AppEngine
+        return False
+    if sock is None:  # Connection already closed (such as by httplib).
+        return True
+    try:
+        # Returns True if readable, which here means it's been dropped
+        return wait_for_read(sock, timeout=0.0)
+    except NoWayToWaitForSocketError:  # Platform-specific: AppEngine
+        return False
+
+
+# This function is copied from socket.py in the Python 2.7 standard
+# library test suite. The only addition to its signature is `socket_options`.
+# One additional modification is that we avoid binding to IPv6 servers
+# discovered in DNS if the system doesn't have IPv6 functionality.
+def create_connection(
+    address,
+    timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+    source_address=None,
+    socket_options=None,
+):
+    """Connect to *address* and return the socket object.
+
+    Convenience function.  Connect to *address* (a 2-tuple ``(host,
+    port)``) and return the socket object.  Passing the optional
+    *timeout* parameter will set the timeout on the socket instance
+    before attempting to connect.  If no *timeout* is supplied, the
+    global default timeout setting returned by :func:`socket.getdefaulttimeout`
+    is used.  If *source_address* is set it must be a tuple of (host, port)
+    for the socket to bind as a source address before making the connection.
+    A host of '' or port 0 tells the OS to use the default.
+    """
+
+    host, port = address
+    if host.startswith("["):
+        host = host.strip("[]")
+    err = None
+
+    # Using the value from allowed_gai_family() in the context of getaddrinfo lets
+    # us select whether to work with IPv4 DNS records, IPv6 records, or both.
+    # The original create_connection function always returns all records.
+    family = allowed_gai_family()
+
+    try:
+        host.encode("idna")
+    except UnicodeError:
+        return six.raise_from(
+            LocationParseError(u"'%s', label empty or too long" % host), None
+        )
+
+    for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
+        af, socktype, proto, canonname, sa = res
+        sock = None
+        try:
+            sock = socket.socket(af, socktype, proto)
+
+            # If provided, set socket level options before connecting.
+            _set_socket_options(sock, socket_options)
+
+            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+                sock.settimeout(timeout)
+            if source_address:
+                sock.bind(source_address)
+            sock.connect(sa)
+            return sock
+
+        except socket.error as e:
+            err = e
+            if sock is not None:
+                sock.close()
+                sock = None
+
+    if err is not None:
+        raise err
+
+    raise socket.error("getaddrinfo returns an empty list")
+
+
+def _set_socket_options(sock, options):
+    if options is None:
+        return
+
+    for opt in options:
+        sock.setsockopt(*opt)
+
+
+def allowed_gai_family():
+    """This function is designed to work in the context of
+    getaddrinfo, where family=socket.AF_UNSPEC is the default and
+    will perform a DNS search for both IPv6 and IPv4 records."""
+
+    family = socket.AF_INET
+    if HAS_IPV6:
+        family = socket.AF_UNSPEC
+    return family
+
+
+def _has_ipv6(host):
+    """Returns True if the system can bind an IPv6 address."""
+    sock = None
+    has_ipv6 = False
+
+    # App Engine doesn't support IPV6 sockets and actually has a quota on the
+    # number of sockets that can be used, so just early out here instead of
+    # creating a socket needlessly.
+    # See https://github.com/urllib3/urllib3/issues/1446
+    if _appengine_environ.is_appengine_sandbox():
+        return False
+
+    if socket.has_ipv6:
+        # has_ipv6 returns true if cPython was compiled with IPv6 support.
+        # It does not tell us if the system has IPv6 support enabled. To
+        # determine that we must bind to an IPv6 address.
+        # https://github.com/urllib3/urllib3/pull/611
+        # https://bugs.python.org/issue658327
+        try:
+            sock = socket.socket(socket.AF_INET6)
+            sock.bind((host, 0))
+            has_ipv6 = True
+        except Exception:
+            pass
+
+    if sock:
+        sock.close()
+    return has_ipv6
+
+
+HAS_IPV6 = _has_ipv6("::1")
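A short sketch of create_connection() with socket_options, assuming the module is importable as urllib3.util.connection; each option tuple is unpacked into sock.setsockopt() before the connect, which is how urllib3 enables TCP_NODELAY on new sockets:

    import socket
    from urllib3.util.connection import create_connection

    # Each (level, optname, value) tuple is applied via setsockopt()
    # before connect(), per _set_socket_options() above.
    sock = create_connection(
        ("example.com", 80),
        timeout=5.0,
        socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
    )
    sock.close()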
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/proxy.py b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/proxy.py
new file mode 100644
index 00000000..2199cc7b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/proxy.py
@@ -0,0 +1,57 @@
+from .ssl_ import create_urllib3_context, resolve_cert_reqs, resolve_ssl_version
+
+
+def connection_requires_http_tunnel(
+    proxy_url=None, proxy_config=None, destination_scheme=None
+):
+    """
+    Returns True if the connection requires an HTTP CONNECT through the proxy.
+
+    :param URL proxy_url:
+        URL of the proxy.
+    :param ProxyConfig proxy_config:
+        Proxy configuration from poolmanager.py
+    :param str destination_scheme:
+        The scheme of the destination (e.g. https, http).
+    """
+    # If we're not using a proxy, no way to use a tunnel.
+    if proxy_url is None:
+        return False
+
+    # HTTP destinations never require tunneling, we always forward.
+    if destination_scheme == "http":
+        return False
+
+    # Support for forwarding with HTTPS proxies and HTTPS destinations.
+    if (
+        proxy_url.scheme == "https"
+        and proxy_config
+        and proxy_config.use_forwarding_for_https
+    ):
+        return False
+
+    # Otherwise always use a tunnel.
+    return True
+
+
+def create_proxy_ssl_context(
+    ssl_version, cert_reqs, ca_certs=None, ca_cert_dir=None, ca_cert_data=None
+):
+    """
+    Generates a default proxy ssl context if one hasn't been provided by the
+    user.
+    """
+    ssl_context = create_urllib3_context(
+        ssl_version=resolve_ssl_version(ssl_version),
+        cert_reqs=resolve_cert_reqs(cert_reqs),
+    )
+
+    if (
+        not ca_certs
+        and not ca_cert_dir
+        and not ca_cert_data
+        and hasattr(ssl_context, "load_default_certs")
+    ):
+        ssl_context.load_default_certs()
+
+    return ssl_context
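A sketch of the tunneling decision above, using a hypothetical proxy host and no ProxyConfig (so HTTPS forwarding stays disabled): plain-HTTP destinations are forwarded, HTTPS destinations get a CONNECT tunnel.

    from urllib3.util import parse_url
    from urllib3.util.proxy import connection_requires_http_tunnel

    proxy = parse_url("http://proxy.internal:3128")  # hypothetical proxy
    print(connection_requires_http_tunnel(proxy, None, "http"))   # False: forward
    print(connection_requires_http_tunnel(proxy, None, "https"))  # True: CONNECT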
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/queue.py b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/queue.py
new file mode 100644
index 00000000..41784104
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/queue.py
@@ -0,0 +1,22 @@
+import collections
+
+from ..packages import six
+from ..packages.six.moves import queue
+
+if six.PY2:
+    # Queue is imported for side effects on MS Windows. See issue #229.
+    import Queue as _unused_module_Queue  # noqa: F401
+
+
+class LifoQueue(queue.Queue):
+    def _init(self, _):
+        self.queue = collections.deque()
+
+    def _qsize(self, len=len):
+        return len(self.queue)
+
+    def _put(self, item):
+        self.queue.append(item)
+
+    def _get(self):
+        return self.queue.pop()
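This deque-backed LifoQueue hands back the most recently stored item first; urllib3's connection pools rely on that so recently used sockets get reused while idle ones age out. A quick illustration:

    from urllib3.util.queue import LifoQueue

    q = LifoQueue(maxsize=3)
    for item in ("a", "b", "c"):
        q.put(item)
    print(q.get())  # 'c' -- last in, first out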
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/request.py b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/request.py
new file mode 100644
index 00000000..330766ef
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/request.py
@@ -0,0 +1,137 @@
+from __future__ import absolute_import
+
+from base64 import b64encode
+
+from ..exceptions import UnrewindableBodyError
+from ..packages.six import b, integer_types
+
+# Pass as a value within ``headers`` to skip
+# emitting some HTTP headers that are added automatically.
+# The only headers that are supported are ``Accept-Encoding``,
+# ``Host``, and ``User-Agent``.
+SKIP_HEADER = "@@@SKIP_HEADER@@@"
+SKIPPABLE_HEADERS = frozenset(["accept-encoding", "host", "user-agent"])
+
+ACCEPT_ENCODING = "gzip,deflate"
+
+_FAILEDTELL = object()
+
+
+def make_headers(
+    keep_alive=None,
+    accept_encoding=None,
+    user_agent=None,
+    basic_auth=None,
+    proxy_basic_auth=None,
+    disable_cache=None,
+):
+    """
+    Shortcuts for generating request headers.
+
+    :param keep_alive:
+        If ``True``, adds 'connection: keep-alive' header.
+
+    :param accept_encoding:
+        Can be a boolean, list, or string.
+        ``True`` translates to 'gzip,deflate'.
+        List will get joined by comma.
+        String will be used as provided.
+
+    :param user_agent:
+        String representing the user-agent you want, such as
+        "python-urllib3/0.6"
+
+    :param basic_auth:
+        Colon-separated username:password string for 'authorization: basic ...'
+        auth header.
+
+    :param proxy_basic_auth:
+        Colon-separated username:password string for 'proxy-authorization: basic ...'
+        auth header.
+
+    :param disable_cache:
+        If ``True``, adds 'cache-control: no-cache' header.
+
+    Example::
+
+        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
+        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
+        >>> make_headers(accept_encoding=True)
+        {'accept-encoding': 'gzip,deflate'}
+    """
+    headers = {}
+    if accept_encoding:
+        if isinstance(accept_encoding, str):
+            pass
+        elif isinstance(accept_encoding, list):
+            accept_encoding = ",".join(accept_encoding)
+        else:
+            accept_encoding = ACCEPT_ENCODING
+        headers["accept-encoding"] = accept_encoding
+
+    if user_agent:
+        headers["user-agent"] = user_agent
+
+    if keep_alive:
+        headers["connection"] = "keep-alive"
+
+    if basic_auth:
+        headers["authorization"] = "Basic " + b64encode(b(basic_auth)).decode("utf-8")
+
+    if proxy_basic_auth:
+        headers["proxy-authorization"] = "Basic " + b64encode(
+            b(proxy_basic_auth)
+        ).decode("utf-8")
+
+    if disable_cache:
+        headers["cache-control"] = "no-cache"
+
+    return headers
+
+
+def set_file_position(body, pos):
+    """
+    If a position is provided, move file to that point.
+    Otherwise, we'll attempt to record a position for future use.
+    """
+    if pos is not None:
+        rewind_body(body, pos)
+    elif getattr(body, "tell", None) is not None:
+        try:
+            pos = body.tell()
+        except (IOError, OSError):
+            # This differentiates from None, allowing us to catch
+            # a failed `tell()` later when trying to rewind the body.
+            pos = _FAILEDTELL
+
+    return pos
+
+
+def rewind_body(body, body_pos):
+    """
+    Attempt to rewind body to a certain position.
+    Primarily used for request redirects and retries.
+
+    :param body:
+        File-like object that supports seek.
+
+    :param int body_pos:
+        Position to seek to in file.
+    """
+    body_seek = getattr(body, "seek", None)
+    if body_seek is not None and isinstance(body_pos, integer_types):
+        try:
+            body_seek(body_pos)
+        except (IOError, OSError):
+            raise UnrewindableBodyError(
+                "An error occurred when rewinding request body for redirect/retry."
+            )
+    elif body_pos is _FAILEDTELL:
+        raise UnrewindableBodyError(
+            "Unable to record file position for rewinding "
+            "request body during a redirect/retry."
+        )
+    else:
+        raise ValueError(
+            "body_pos must be of type integer, instead it was %s." % type(body_pos)
+        )
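A sketch covering both halves of this module: the make_headers() shortcuts, and recording a body position with set_file_position() so rewind_body() can seek back before a retry:

    import io
    from urllib3.util.request import make_headers, rewind_body, set_file_position

    print(make_headers(basic_auth="user:pass"))
    # {'authorization': 'Basic dXNlcjpwYXNz'}

    body = io.BytesIO(b"payload")
    pos = set_file_position(body, None)  # records body.tell() == 0
    body.read()                          # simulate the request consuming the body
    rewind_body(body, pos)               # seek back before retrying
    assert body.read() == b"payload"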
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/response.py b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/response.py
new file mode 100644
index 00000000..5ea609cc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/response.py
@@ -0,0 +1,107 @@
+from __future__ import absolute_import
+
+from email.errors import MultipartInvariantViolationDefect, StartBoundaryNotFoundDefect
+
+from ..exceptions import HeaderParsingError
+from ..packages.six.moves import http_client as httplib
+
+
+def is_fp_closed(obj):
+    """
+    Checks whether a given file-like object is closed.
+
+    :param obj:
+        The file-like object to check.
+    """
+
+    try:
+        # Check `isclosed()` first, in case Python3 doesn't set `closed`.
+        # GH Issue #928
+        return obj.isclosed()
+    except AttributeError:
+        pass
+
+    try:
+        # Check via the official file-like-object way.
+        return obj.closed
+    except AttributeError:
+        pass
+
+    try:
+        # Check if the object is a container for another file-like object that
+        # gets released on exhaustion (e.g. HTTPResponse).
+        return obj.fp is None
+    except AttributeError:
+        pass
+
+    raise ValueError("Unable to determine whether fp is closed.")
+
+
+def assert_header_parsing(headers):
+    """
+    Asserts whether all headers have been successfully parsed.
+    Extracts encountered errors from the result of parsing headers.
+
+    Only works on Python 3.
+
+    :param http.client.HTTPMessage headers: Headers to verify.
+
+    :raises urllib3.exceptions.HeaderParsingError:
+        If parsing errors are found.
+    """
+
+    # This will fail silently if we pass in the wrong kind of parameter.
+    # To make debugging easier add an explicit check.
+    if not isinstance(headers, httplib.HTTPMessage):
+        raise TypeError("expected httplib.HTTPMessage, got {0}.".format(type(headers)))
+
+    defects = getattr(headers, "defects", None)
+    get_payload = getattr(headers, "get_payload", None)
+
+    unparsed_data = None
+    if get_payload:
+        # get_payload is actually email.message.Message.get_payload;
+        # we're only interested in the result if it's not a multipart message
+        if not headers.is_multipart():
+            payload = get_payload()
+
+            if isinstance(payload, (bytes, str)):
+                unparsed_data = payload
+    if defects:
+        # httplib is assuming a response body is available
+        # when parsing headers even when httplib only sends
+        # header data to parse_headers(). This results in
+        # defects on multipart responses in particular.
+        # See: https://github.com/urllib3/urllib3/issues/800
+
+        # So we ignore the following defects:
+        # - StartBoundaryNotFoundDefect:
+        #     The claimed start boundary was never found.
+        # - MultipartInvariantViolationDefect:
+        #     A message claimed to be a multipart but no subparts were found.
+        defects = [
+            defect
+            for defect in defects
+            if not isinstance(
+                defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)
+            )
+        ]
+
+    if defects or unparsed_data:
+        raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
+
+
+def is_response_to_head(response):
+    """
+    Checks whether the request that produced this response was a HEAD request.
+    Handles the quirks of AppEngine.
+
+    :param http.client.HTTPResponse response:
+        Response to check if the originating request
+        used 'HEAD' as a method.
+    """
+    # FIXME: Can we do this somehow without accessing private httplib _method?
+    method = response._method
+    if isinstance(method, int):  # Platform-specific: Appengine
+        return method == 3
+    return method.upper() == "HEAD"
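is_fp_closed() probes the three conventions in order (isclosed(), closed, fp); a plain io.BytesIO lacks isclosed() and is answered by the second check:

    import io
    from urllib3.util.response import is_fp_closed

    buf = io.BytesIO(b"data")
    print(is_fp_closed(buf))  # False, via the `closed` attribute
    buf.close()
    print(is_fp_closed(buf))  # True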
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/retry.py b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/retry.py
new file mode 100644
index 00000000..9a1e90d0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/retry.py
@@ -0,0 +1,622 @@
+from __future__ import absolute_import
+
+import email
+import logging
+import re
+import time
+import warnings
+from collections import namedtuple
+from itertools import takewhile
+
+from ..exceptions import (
+    ConnectTimeoutError,
+    InvalidHeader,
+    MaxRetryError,
+    ProtocolError,
+    ProxyError,
+    ReadTimeoutError,
+    ResponseError,
+)
+from ..packages import six
+
+log = logging.getLogger(__name__)
+
+
+# Data structure for representing the metadata of requests that result in a retry.
+RequestHistory = namedtuple(
+    "RequestHistory", ["method", "url", "error", "status", "redirect_location"]
+)
+
+
+# TODO: In v2 we can remove this sentinel and metaclass with deprecated options.
+_Default = object()
+
+
+class _RetryMeta(type):
+    @property
+    def DEFAULT_METHOD_WHITELIST(cls):
+        warnings.warn(
+            "Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
+            "will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
+            DeprecationWarning,
+        )
+        return cls.DEFAULT_ALLOWED_METHODS
+
+    @DEFAULT_METHOD_WHITELIST.setter
+    def DEFAULT_METHOD_WHITELIST(cls, value):
+        warnings.warn(
+            "Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
+            "will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
+            DeprecationWarning,
+        )
+        cls.DEFAULT_ALLOWED_METHODS = value
+
+    @property
+    def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls):
+        warnings.warn(
+            "Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
+            "will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
+            DeprecationWarning,
+        )
+        return cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
+
+    @DEFAULT_REDIRECT_HEADERS_BLACKLIST.setter
+    def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls, value):
+        warnings.warn(
+            "Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
+            "will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
+            DeprecationWarning,
+        )
+        cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = value
+
+    @property
+    def BACKOFF_MAX(cls):
+        warnings.warn(
+            "Using 'Retry.BACKOFF_MAX' is deprecated and "
+            "will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
+            DeprecationWarning,
+        )
+        return cls.DEFAULT_BACKOFF_MAX
+
+    @BACKOFF_MAX.setter
+    def BACKOFF_MAX(cls, value):
+        warnings.warn(
+            "Using 'Retry.BACKOFF_MAX' is deprecated and "
+            "will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
+            DeprecationWarning,
+        )
+        cls.DEFAULT_BACKOFF_MAX = value
+
+
+@six.add_metaclass(_RetryMeta)
+class Retry(object):
+    """Retry configuration.
+
+    Each retry attempt will create a new Retry object with updated values, so
+    they can be safely reused.
+
+    Retries can be defined as a default for a pool::
+
+        retries = Retry(connect=5, read=2, redirect=5)
+        http = PoolManager(retries=retries)
+        response = http.request('GET', 'http://example.com/')
+
+    Or per-request (which overrides the default for the pool)::
+
+        response = http.request('GET', 'http://example.com/', retries=Retry(10))
+
+    Retries can be disabled by passing ``False``::
+
+        response = http.request('GET', 'http://example.com/', retries=False)
+
+    Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
+    retries are disabled, in which case the causing exception will be raised.
+
+    :param int total:
+        Total number of retries to allow. Takes precedence over other counts.
+
+        Set to ``None`` to remove this constraint and fall back on other
+        counts.
+
+        Set to ``0`` to fail on the first retry.
+
+        Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+    :param int connect:
+        How many connection-related errors to retry on.
+
+        These are errors raised before the request is sent to the remote server,
+        which we assume has not triggered the server to process the request.
+
+        Set to ``0`` to fail on the first retry of this type.
+
+    :param int read:
+        How many times to retry on read errors.
+
+        These errors are raised after the request was sent to the server, so the
+        request may have side-effects.
+
+        Set to ``0`` to fail on the first retry of this type.
+
+    :param int redirect:
+        How many redirects to perform. Limit this to avoid infinite redirect
+        loops.
+
+        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
+        308.
+
+        Set to ``0`` to fail on the first retry of this type.
+
+        Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+    :param int status:
+        How many times to retry on bad status codes.
+
+        These are retries made on responses, where status code matches
+        ``status_forcelist``.
+
+        Set to ``0`` to fail on the first retry of this type.
+
+    :param int other:
+        How many times to retry on other errors.
+
+        Other errors are errors that are not connect, read, redirect or status errors.
+        These errors might be raised after the request was sent to the server, so the
+        request might have side-effects.
+
+        Set to ``0`` to fail on the first retry of this type.
+
+        If ``total`` is not set, it's a good idea to set this to 0 to account
+        for unexpected edge cases and avoid infinite retry loops.
+
+    :param iterable allowed_methods:
+        Set of uppercased HTTP method verbs that we should retry on.
+
+        By default, we only retry on methods which are considered to be
+        idempotent (multiple requests with the same parameters end with the
+        same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.
+
+        Set to a ``False`` value to retry on any verb.
+
+        .. warning::
+
+            Previously this parameter was named ``method_whitelist``; that
+            usage is deprecated in v1.26.0 and will be removed in v2.0.
+
+    :param iterable status_forcelist:
+        A set of integer HTTP status codes that we should force a retry on.
+        A retry is initiated if the request method is in ``allowed_methods``
+        and the response status code is in ``status_forcelist``.
+
+        By default, this is disabled with ``None``.
+
+    :param float backoff_factor:
+        A backoff factor to apply between attempts after the second try
+        (most errors are resolved immediately by a second try without a
+        delay). urllib3 will sleep for::
+
+            {backoff factor} * (2 ** ({number of total retries} - 1))
+
+        seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
+        for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
+        than :attr:`Retry.DEFAULT_BACKOFF_MAX`.
+
+        By default, backoff is disabled (set to 0).
+
+    :param bool raise_on_redirect: Whether, if the number of redirects is
+        exhausted, to raise a MaxRetryError, or to return a response with a
+        response code in the 3xx range.
+
+    :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
+        whether we should raise an exception, or return a response,
+        if status falls in ``status_forcelist`` range and retries have
+        been exhausted.
+
+    :param tuple history: The history of the request encountered during
+        each call to :meth:`~Retry.increment`. The list is in the order
+        the requests occurred. Each list item is of class :class:`RequestHistory`.
+
+    :param bool respect_retry_after_header:
+        Whether to respect Retry-After header on status codes defined as
+        :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
+
+    :param iterable remove_headers_on_redirect:
+        Sequence of headers to remove from the request when a response
+        indicating a redirect is returned before firing off the redirected
+        request.
+    """
+
+    #: Default methods to be used for ``allowed_methods``
+    DEFAULT_ALLOWED_METHODS = frozenset(
+        ["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
+    )
+
+    #: Status codes for which we should respect the ``Retry-After`` header
+    RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
+
+    #: Default headers to be used for ``remove_headers_on_redirect``
+    DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(
+        ["Cookie", "Authorization", "Proxy-Authorization"]
+    )
+
+    #: Maximum backoff time.
+    DEFAULT_BACKOFF_MAX = 120
+
+    def __init__(
+        self,
+        total=10,
+        connect=None,
+        read=None,
+        redirect=None,
+        status=None,
+        other=None,
+        allowed_methods=_Default,
+        status_forcelist=None,
+        backoff_factor=0,
+        raise_on_redirect=True,
+        raise_on_status=True,
+        history=None,
+        respect_retry_after_header=True,
+        remove_headers_on_redirect=_Default,
+        # TODO: Deprecated, remove in v2.0
+        method_whitelist=_Default,
+    ):
+
+        if method_whitelist is not _Default:
+            if allowed_methods is not _Default:
+                raise ValueError(
+                    "Using both 'allowed_methods' and "
+                    "'method_whitelist' together is not allowed. "
+                    "Instead only use 'allowed_methods'"
+                )
+            warnings.warn(
+                "Using 'method_whitelist' with Retry is deprecated and "
+                "will be removed in v2.0. Use 'allowed_methods' instead",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            allowed_methods = method_whitelist
+        if allowed_methods is _Default:
+            allowed_methods = self.DEFAULT_ALLOWED_METHODS
+        if remove_headers_on_redirect is _Default:
+            remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
+
+        self.total = total
+        self.connect = connect
+        self.read = read
+        self.status = status
+        self.other = other
+
+        if redirect is False or total is False:
+            redirect = 0
+            raise_on_redirect = False
+
+        self.redirect = redirect
+        self.status_forcelist = status_forcelist or set()
+        self.allowed_methods = allowed_methods
+        self.backoff_factor = backoff_factor
+        self.raise_on_redirect = raise_on_redirect
+        self.raise_on_status = raise_on_status
+        self.history = history or tuple()
+        self.respect_retry_after_header = respect_retry_after_header
+        self.remove_headers_on_redirect = frozenset(
+            [h.lower() for h in remove_headers_on_redirect]
+        )
+
+    def new(self, **kw):
+        params = dict(
+            total=self.total,
+            connect=self.connect,
+            read=self.read,
+            redirect=self.redirect,
+            status=self.status,
+            other=self.other,
+            status_forcelist=self.status_forcelist,
+            backoff_factor=self.backoff_factor,
+            raise_on_redirect=self.raise_on_redirect,
+            raise_on_status=self.raise_on_status,
+            history=self.history,
+            remove_headers_on_redirect=self.remove_headers_on_redirect,
+            respect_retry_after_header=self.respect_retry_after_header,
+        )
+
+        # TODO: If already given in **kw we use what's given to us
+        # If not given we need to figure out what to pass. We decide
+        # based on whether our class has the 'method_whitelist' property
+        # and if so we pass the deprecated 'method_whitelist' otherwise
+        # we use 'allowed_methods'. Remove in v2.0
+        if "method_whitelist" not in kw and "allowed_methods" not in kw:
+            if "method_whitelist" in self.__dict__:
+                warnings.warn(
+                    "Using 'method_whitelist' with Retry is deprecated and "
+                    "will be removed in v2.0. Use 'allowed_methods' instead",
+                    DeprecationWarning,
+                )
+                params["method_whitelist"] = self.allowed_methods
+            else:
+                params["allowed_methods"] = self.allowed_methods
+
+        params.update(kw)
+        return type(self)(**params)
+
+    @classmethod
+    def from_int(cls, retries, redirect=True, default=None):
+        """Backwards-compatibility for the old retries format."""
+        if retries is None:
+            retries = default if default is not None else cls.DEFAULT
+
+        if isinstance(retries, Retry):
+            return retries
+
+        redirect = bool(redirect) and None
+        new_retries = cls(retries, redirect=redirect)
+        log.debug("Converted retries value: %r -> %r", retries, new_retries)
+        return new_retries
+
+    def get_backoff_time(self):
+        """Formula for computing the current backoff
+
+        :rtype: float
+        """
+        # We want to consider only the last consecutive errors sequence (ignoring redirects).
+        consecutive_errors_len = len(
+            list(
+                takewhile(lambda x: x.redirect_location is None, reversed(self.history))
+            )
+        )
+        if consecutive_errors_len <= 1:
+            return 0
+
+        backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
+        return min(self.DEFAULT_BACKOFF_MAX, backoff_value)
+
+    def parse_retry_after(self, retry_after):
+        # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
+        if re.match(r"^\s*[0-9]+\s*$", retry_after):
+            seconds = int(retry_after)
+        else:
+            retry_date_tuple = email.utils.parsedate_tz(retry_after)
+            if retry_date_tuple is None:
+                raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
+            if retry_date_tuple[9] is None:  # Python 2
+                # Assume UTC if no timezone was specified
+                # On Python2.7, parsedate_tz returns None for a timezone offset
+                # instead of 0 if no timezone is given, where mktime_tz treats
+                # a None timezone offset as local time.
+                retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]
+
+            retry_date = email.utils.mktime_tz(retry_date_tuple)
+            seconds = retry_date - time.time()
+
+        if seconds < 0:
+            seconds = 0
+
+        return seconds
+
+    def get_retry_after(self, response):
+        """Get the value of Retry-After in seconds."""
+
+        retry_after = response.headers.get("Retry-After")
+
+        if retry_after is None:
+            return None
+
+        return self.parse_retry_after(retry_after)
+
+    def sleep_for_retry(self, response=None):
+        retry_after = self.get_retry_after(response)
+        if retry_after:
+            time.sleep(retry_after)
+            return True
+
+        return False
+
+    def _sleep_backoff(self):
+        backoff = self.get_backoff_time()
+        if backoff <= 0:
+            return
+        time.sleep(backoff)
+
+    def sleep(self, response=None):
+        """Sleep between retry attempts.
+
+        This method will respect a server's ``Retry-After`` response header
+        and sleep the duration of the time requested. If that is not present, it
+        will use an exponential backoff. By default, the backoff factor is 0 and
+        this method will return immediately.
+        """
+
+        if self.respect_retry_after_header and response:
+            slept = self.sleep_for_retry(response)
+            if slept:
+                return
+
+        self._sleep_backoff()
+
+    def _is_connection_error(self, err):
+        """Errors when we're fairly sure that the server did not receive the
+        request, so it should be safe to retry.
+        """
+        if isinstance(err, ProxyError):
+            err = err.original_error
+        return isinstance(err, ConnectTimeoutError)
+
+    def _is_read_error(self, err):
+        """Errors that occur after the request has been started, so we should
+        assume that the server began processing it.
+        """
+        return isinstance(err, (ReadTimeoutError, ProtocolError))
+
+    def _is_method_retryable(self, method):
+        """Checks if a given HTTP method should be retried upon, depending if
+        it is included in the allowed_methods
+        """
+        # TODO: For now favor if the Retry implementation sets its own method_whitelist
+        # property outside of our constructor to avoid breaking custom implementations.
+        if "method_whitelist" in self.__dict__:
+            warnings.warn(
+                "Using 'method_whitelist' with Retry is deprecated and "
+                "will be removed in v2.0. Use 'allowed_methods' instead",
+                DeprecationWarning,
+            )
+            allowed_methods = self.method_whitelist
+        else:
+            allowed_methods = self.allowed_methods
+
+        if allowed_methods and method.upper() not in allowed_methods:
+            return False
+        return True
+
+    def is_retry(self, method, status_code, has_retry_after=False):
+        """Is this method/status code retryable? (Based on allowlists and control
+        variables such as the number of total retries to allow, whether to
+        respect the Retry-After header, whether this header is present, and
+        whether the returned status code is on the list of status codes to
+        be retried upon in the presence of the aforementioned header)
+        """
+        if not self._is_method_retryable(method):
+            return False
+
+        if self.status_forcelist and status_code in self.status_forcelist:
+            return True
+
+        return (
+            self.total
+            and self.respect_retry_after_header
+            and has_retry_after
+            and (status_code in self.RETRY_AFTER_STATUS_CODES)
+        )
+
+    def is_exhausted(self):
+        """Are we out of retries?"""
+        retry_counts = (
+            self.total,
+            self.connect,
+            self.read,
+            self.redirect,
+            self.status,
+            self.other,
+        )
+        retry_counts = list(filter(None, retry_counts))
+        if not retry_counts:
+            return False
+
+        return min(retry_counts) < 0
+
+    def increment(
+        self,
+        method=None,
+        url=None,
+        response=None,
+        error=None,
+        _pool=None,
+        _stacktrace=None,
+    ):
+        """Return a new Retry object with incremented retry counters.
+
+        :param response: A response object, or None, if the server did not
+            return a response.
+        :type response: :class:`~urllib3.response.HTTPResponse`
+        :param Exception error: An error encountered during the request, or
+            None if the response was received successfully.
+
+        :return: A new ``Retry`` object.
+        """
+        if self.total is False and error:
+            # Disabled, indicate to re-raise the error.
+            raise six.reraise(type(error), error, _stacktrace)
+
+        total = self.total
+        if total is not None:
+            total -= 1
+
+        connect = self.connect
+        read = self.read
+        redirect = self.redirect
+        status_count = self.status
+        other = self.other
+        cause = "unknown"
+        status = None
+        redirect_location = None
+
+        if error and self._is_connection_error(error):
+            # Connect retry?
+            if connect is False:
+                raise six.reraise(type(error), error, _stacktrace)
+            elif connect is not None:
+                connect -= 1
+
+        elif error and self._is_read_error(error):
+            # Read retry?
+            if read is False or not self._is_method_retryable(method):
+                raise six.reraise(type(error), error, _stacktrace)
+            elif read is not None:
+                read -= 1
+
+        elif error:
+            # Other retry?
+            if other is not None:
+                other -= 1
+
+        elif response and response.get_redirect_location():
+            # Redirect retry?
+            if redirect is not None:
+                redirect -= 1
+            cause = "too many redirects"
+            redirect_location = response.get_redirect_location()
+            status = response.status
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the allowed_methods
+            cause = ResponseError.GENERIC_ERROR
+            if response and response.status:
+                if status_count is not None:
+                    status_count -= 1
+                cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
+                status = response.status
+
+        history = self.history + (
+            RequestHistory(method, url, error, status, redirect_location),
+        )
+
+        new_retry = self.new(
+            total=total,
+            connect=connect,
+            read=read,
+            redirect=redirect,
+            status=status_count,
+            other=other,
+            history=history,
+        )
+
+        if new_retry.is_exhausted():
+            raise MaxRetryError(_pool, url, error or ResponseError(cause))
+
+        log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
+
+        return new_retry
+
+    def __repr__(self):
+        return (
+            "{cls.__name__}(total={self.total}, connect={self.connect}, "
+            "read={self.read}, redirect={self.redirect}, status={self.status})"
+        ).format(cls=type(self), self=self)
+
+    def __getattr__(self, item):
+        if item == "method_whitelist":
+            # TODO: Remove this deprecated alias in v2.0
+            warnings.warn(
+                "Using 'method_whitelist' with Retry is deprecated and "
+                "will be removed in v2.0. Use 'allowed_methods' instead",
+                DeprecationWarning,
+            )
+            return self.allowed_methods
+        try:
+            return getattr(super(Retry, self), item)
+        except AttributeError:
+            return getattr(Retry, item)
+
+
+# For backwards compatibility (equivalent to pre-v1.9):
+Retry.DEFAULT = Retry(3)
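A sketch of the backoff schedule described in the class docstring, assuming every increment() records a plain non-redirect failure so the whole history counts as one consecutive error run:

    from urllib3.util.retry import Retry

    retry = Retry(total=5, backoff_factor=0.5)
    for _ in range(4):
        retry = retry.increment(method="GET", url="/")
        print(retry.get_backoff_time())  # 0, then 1.0, 2.0, 4.0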
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssl_.py b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssl_.py
new file mode 100644
index 00000000..0a6a0e06
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssl_.py
@@ -0,0 +1,504 @@
+from __future__ import absolute_import
+
+import hashlib
+import hmac
+import os
+import sys
+import warnings
+from binascii import hexlify, unhexlify
+
+from ..exceptions import (
+    InsecurePlatformWarning,
+    ProxySchemeUnsupported,
+    SNIMissingWarning,
+    SSLError,
+)
+from ..packages import six
+from .url import BRACELESS_IPV6_ADDRZ_RE, IPV4_RE
+
+SSLContext = None
+SSLTransport = None
+HAS_SNI = False
+IS_PYOPENSSL = False
+IS_SECURETRANSPORT = False
+ALPN_PROTOCOLS = ["http/1.1"]
+
+# Maps the length of a digest to a possible hash function producing this digest
+HASHFUNC_MAP = {
+    length: getattr(hashlib, algorithm, None)
+    for length, algorithm in ((32, "md5"), (40, "sha1"), (64, "sha256"))
+}
+
+
+def _const_compare_digest_backport(a, b):
+    """
+    Compare two digests of equal length in constant time.
+
+    The digests must be of type str/bytes.
+    Returns True if the digests match, and False otherwise.
+    """
+    result = abs(len(a) - len(b))
+    for left, right in zip(bytearray(a), bytearray(b)):
+        result |= left ^ right
+    return result == 0
+
+
+_const_compare_digest = getattr(hmac, "compare_digest", _const_compare_digest_backport)
+
+try:  # Test for SSL features
+    import ssl
+    from ssl import CERT_REQUIRED, wrap_socket
+except ImportError:
+    pass
+
+try:
+    from ssl import HAS_SNI  # Has SNI?
+except ImportError:
+    pass
+
+try:
+    from .ssltransport import SSLTransport
+except ImportError:
+    pass
+
+
+try:  # Platform-specific: Python 3.6
+    from ssl import PROTOCOL_TLS
+
+    PROTOCOL_SSLv23 = PROTOCOL_TLS
+except ImportError:
+    try:
+        from ssl import PROTOCOL_SSLv23 as PROTOCOL_TLS
+
+        PROTOCOL_SSLv23 = PROTOCOL_TLS
+    except ImportError:
+        PROTOCOL_SSLv23 = PROTOCOL_TLS = 2
+
+try:
+    from ssl import PROTOCOL_TLS_CLIENT
+except ImportError:
+    PROTOCOL_TLS_CLIENT = PROTOCOL_TLS
+
+
+try:
+    from ssl import OP_NO_COMPRESSION, OP_NO_SSLv2, OP_NO_SSLv3
+except ImportError:
+    OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
+    OP_NO_COMPRESSION = 0x20000
+
+
+try:  # OP_NO_TICKET was added in Python 3.6
+    from ssl import OP_NO_TICKET
+except ImportError:
+    OP_NO_TICKET = 0x4000
+
+
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
+#   security,
+# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
+# - disable NULL authentication, MD5 MACs, DSS, and other
+#   insecure ciphers for security reasons.
+# - NOTE: TLS 1.3 cipher suites are managed through a different interface
+#   not exposed by CPython (yet!) and are enabled by default if they're available.
+DEFAULT_CIPHERS = ":".join(
+    [
+        "ECDHE+AESGCM",
+        "ECDHE+CHACHA20",
+        "DHE+AESGCM",
+        "DHE+CHACHA20",
+        "ECDH+AESGCM",
+        "DH+AESGCM",
+        "ECDH+AES",
+        "DH+AES",
+        "RSA+AESGCM",
+        "RSA+AES",
+        "!aNULL",
+        "!eNULL",
+        "!MD5",
+        "!DSS",
+    ]
+)
+
+try:
+    from ssl import SSLContext  # Modern SSL?
+except ImportError:
+
+    class SSLContext(object):  # Platform-specific: Python 2
+        def __init__(self, protocol_version):
+            self.protocol = protocol_version
+            # Use default values from a real SSLContext
+            self.check_hostname = False
+            self.verify_mode = ssl.CERT_NONE
+            self.ca_certs = None
+            self.options = 0
+            self.certfile = None
+            self.keyfile = None
+            self.ciphers = None
+
+        def load_cert_chain(self, certfile, keyfile):
+            self.certfile = certfile
+            self.keyfile = keyfile
+
+        def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+            self.ca_certs = cafile
+
+            if capath is not None:
+                raise SSLError("CA directories not supported in older Pythons")
+
+            if cadata is not None:
+                raise SSLError("CA data not supported in older Pythons")
+
+        def set_ciphers(self, cipher_suite):
+            self.ciphers = cipher_suite
+
+        def wrap_socket(self, socket, server_hostname=None, server_side=False):
+            warnings.warn(
+                "A true SSLContext object is not available. This prevents "
+                "urllib3 from configuring SSL appropriately and may cause "
+                "certain SSL connections to fail. You can upgrade to a newer "
+                "version of Python to solve this. For more information, see "
+                "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
+                "#ssl-warnings",
+                InsecurePlatformWarning,
+            )
+            kwargs = {
+                "keyfile": self.keyfile,
+                "certfile": self.certfile,
+                "ca_certs": self.ca_certs,
+                "cert_reqs": self.verify_mode,
+                "ssl_version": self.protocol,
+                "server_side": server_side,
+            }
+            return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
+
+
+def assert_fingerprint(cert, fingerprint):
+    """
+    Checks if given fingerprint matches the supplied certificate.
+
+    :param cert:
+        Certificate as bytes object.
+    :param fingerprint:
+        Fingerprint as string of hexdigits, can be interspersed by colons.
+    """
+
+    fingerprint = fingerprint.replace(":", "").lower()
+    digest_length = len(fingerprint)
+    if digest_length not in HASHFUNC_MAP:
+        raise SSLError("Fingerprint of invalid length: {0}".format(fingerprint))
+    hashfunc = HASHFUNC_MAP.get(digest_length)
+    if hashfunc is None:
+        raise SSLError(
+            "Hash function implementation unavailable for fingerprint length: {0}".format(
+                digest_length
+            )
+        )
+
+    # We need encode() here for py32; works on py2 and py33.
+    fingerprint_bytes = unhexlify(fingerprint.encode())
+
+    cert_digest = hashfunc(cert).digest()
+
+    if not _const_compare_digest(cert_digest, fingerprint_bytes):
+        raise SSLError(
+            'Fingerprints did not match. Expected "{0}", got "{1}".'.format(
+                fingerprint, hexlify(cert_digest)
+            )
+        )
+
+
+def resolve_cert_reqs(candidate):
+    """
+    Resolves the argument to a numeric constant, which can be passed to
+    the wrap_socket function/method from the ssl module.
+    Defaults to :data:`ssl.CERT_REQUIRED`.
+    If given a string it is assumed to be the name of the constant in the
+    :mod:`ssl` module or its abbreviation.
+    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
+    If it's neither `None` nor a string we assume it is already the numeric
+    constant which can directly be passed to wrap_socket.
+    """
+    if candidate is None:
+        return CERT_REQUIRED
+
+    if isinstance(candidate, str):
+        res = getattr(ssl, candidate, None)
+        if res is None:
+            res = getattr(ssl, "CERT_" + candidate)
+        return res
+
+    return candidate
+
+
+def resolve_ssl_version(candidate):
+    """
+    Like resolve_cert_reqs(), but resolves the SSL protocol version instead.
+    """
+    if candidate is None:
+        return PROTOCOL_TLS
+
+    if isinstance(candidate, str):
+        res = getattr(ssl, candidate, None)
+        if res is None:
+            res = getattr(ssl, "PROTOCOL_" + candidate)
+        return res
+
+    return candidate
+
+
+def create_urllib3_context(
+    ssl_version=None, cert_reqs=None, options=None, ciphers=None
+):
+    """All arguments have the same meaning as ``ssl_wrap_socket``.
+
+    By default, this function does a lot of the same work that
+    ``ssl.create_default_context`` does on Python 3.4+. It:
+
+    - Disables SSLv2, SSLv3, and compression
+    - Sets a restricted set of server ciphers
+
+    If you wish to enable SSLv3, you can do::
+
+        from pip._vendor.urllib3.util import ssl_
+        context = ssl_.create_urllib3_context()
+        context.options &= ~ssl_.OP_NO_SSLv3
+
+    You can do the same to enable compression (substituting ``COMPRESSION``
+    for ``SSLv3`` in the last line above).
+
+    :param ssl_version:
+        The desired protocol version to use. This will default to
+        PROTOCOL_SSLv23 which will negotiate the highest protocol that both
+        the server and your installation of OpenSSL support.
+    :param cert_reqs:
+        Whether to require the certificate verification. This defaults to
+        ``ssl.CERT_REQUIRED``.
+    :param options:
+        Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
+        ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``, and ``ssl.OP_NO_TICKET``.
+    :param ciphers:
+        Which cipher suites to allow the server to select.
+    :returns:
+        Constructed SSLContext object with specified options
+    :rtype: SSLContext
+    """
+    # PROTOCOL_TLS is deprecated in Python 3.10
+    if not ssl_version or ssl_version == PROTOCOL_TLS:
+        ssl_version = PROTOCOL_TLS_CLIENT
+
+    context = SSLContext(ssl_version)
+
+    context.set_ciphers(ciphers or DEFAULT_CIPHERS)
+
+    # Setting the default here, as we may have no ssl module on import
+    cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
+
+    if options is None:
+        options = 0
+        # SSLv2 is easily broken and is considered harmful and dangerous
+        options |= OP_NO_SSLv2
+        # SSLv3 has several problems and is now dangerous
+        options |= OP_NO_SSLv3
+        # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
+        # (issue #309)
+        options |= OP_NO_COMPRESSION
+        # TLSv1.2 only. Unless set explicitly, do not request tickets.
+        # This may save some bandwidth on wire, and although the ticket is encrypted,
+        # there is a risk associated with it being on wire,
+        # if the server is not rotating its ticketing keys properly.
+        options |= OP_NO_TICKET
+
+    context.options |= options
+
+    # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is
+    # necessary for conditional client cert authentication with TLS 1.3.
+    # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older
+    # versions of Python.  We only enable on Python 3.7.4+ or if certificate
+    # verification is enabled to work around Python issue #37428
+    # See: https://bugs.python.org/issue37428
+    if (cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4)) and getattr(
+        context, "post_handshake_auth", None
+    ) is not None:
+        context.post_handshake_auth = True
+
+    def disable_check_hostname():
+        if (
+            getattr(context, "check_hostname", None) is not None
+        ):  # Platform-specific: Python 3.2
+            # We do our own verification, including fingerprints and alternative
+            # hostnames. So disable it here
+            context.check_hostname = False
+
+    # The order of the below lines setting verify_mode and check_hostname
+    # matter due to safe-guards SSLContext has to prevent an SSLContext with
+    # check_hostname=True, verify_mode=NONE/OPTIONAL. This is made even more
+    # complex because we don't know whether PROTOCOL_TLS_CLIENT will be used
+    # or not so we don't know the initial state of the freshly created SSLContext.
+    if cert_reqs == ssl.CERT_REQUIRED:
+        context.verify_mode = cert_reqs
+        disable_check_hostname()
+    else:
+        disable_check_hostname()
+        context.verify_mode = cert_reqs
+
+    # Enable logging of TLS session keys via defacto standard environment variable
+    # 'SSLKEYLOGFILE', if the feature is available (Python 3.8+). Skip empty values.
+    if hasattr(context, "keylog_filename"):
+        sslkeylogfile = os.environ.get("SSLKEYLOGFILE")
+        if sslkeylogfile:
+            context.keylog_filename = sslkeylogfile
+
+    return context
+
+
+def ssl_wrap_socket(
+    sock,
+    keyfile=None,
+    certfile=None,
+    cert_reqs=None,
+    ca_certs=None,
+    server_hostname=None,
+    ssl_version=None,
+    ciphers=None,
+    ssl_context=None,
+    ca_cert_dir=None,
+    key_password=None,
+    ca_cert_data=None,
+    tls_in_tls=False,
+):
+    """
+    All arguments except for server_hostname, ssl_context, and ca_cert_dir have
+    the same meaning as they do when using :func:`ssl.wrap_socket`.
+
+    :param server_hostname:
+        When SNI is supported, the expected hostname of the certificate
+    :param ssl_context:
+        A pre-made :class:`SSLContext` object. If none is provided, one will
+        be created using :func:`create_urllib3_context`.
+    :param ciphers:
+        A string of ciphers we wish the client to support.
+    :param ca_cert_dir:
+        A directory containing CA certificates in multiple separate files, as
+        supported by OpenSSL's -CApath flag or the capath argument to
+        SSLContext.load_verify_locations().
+    :param key_password:
+        Optional password if the keyfile is encrypted.
+    :param ca_cert_data:
+        Optional string containing CA certificates in PEM format suitable for
+        passing as the cadata parameter to SSLContext.load_verify_locations()
+    :param tls_in_tls:
+        Use SSLTransport to wrap the existing socket.
+    """
+    context = ssl_context
+    if context is None:
+        # Note: This branch of code and all the variables in it are no longer
+        # used by urllib3 itself. We should consider deprecating and removing
+        # this code.
+        context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers)
+
+    if ca_certs or ca_cert_dir or ca_cert_data:
+        try:
+            context.load_verify_locations(ca_certs, ca_cert_dir, ca_cert_data)
+        except (IOError, OSError) as e:
+            raise SSLError(e)
+
+    elif ssl_context is None and hasattr(context, "load_default_certs"):
+        # Try to load OS default certs; works well on Windows (requires Python 3.4+).
+        context.load_default_certs()
+
+    # Detect the goofy case where the keyfile is encrypted and OpenSSL would
+    # ask for the passphrase via the terminal, and error out instead.
+    if keyfile and key_password is None and _is_key_file_encrypted(keyfile):
+        raise SSLError("Client private key is encrypted, password is required")
+
+    if certfile:
+        if key_password is None:
+            context.load_cert_chain(certfile, keyfile)
+        else:
+            context.load_cert_chain(certfile, keyfile, key_password)
+
+    try:
+        if hasattr(context, "set_alpn_protocols"):
+            context.set_alpn_protocols(ALPN_PROTOCOLS)
+    except NotImplementedError:  # Defensive: in CI, we always have set_alpn_protocols
+        pass
+
+    # If we detect that server_hostname is an IP address, then the SNI
+    # extension should not be used, per RFC 3546, Section 3.1.
+    use_sni_hostname = server_hostname and not is_ipaddress(server_hostname)
+    # SecureTransport uses server_hostname in certificate verification.
+    send_sni = (use_sni_hostname and HAS_SNI) or (
+        IS_SECURETRANSPORT and server_hostname
+    )
+    # Do not warn the user if server_hostname is an invalid SNI hostname.
+    if not HAS_SNI and use_sni_hostname:
+        warnings.warn(
+            "An HTTPS request has been made, but the SNI (Server Name "
+            "Indication) extension to TLS is not available on this platform. "
+            "This may cause the server to present an incorrect TLS "
+            "certificate, which can cause validation failures. You can upgrade to "
+            "a newer version of Python to solve this. For more information, see "
+            "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
+            "#ssl-warnings",
+            SNIMissingWarning,
+        )
+
+    if send_sni:
+        ssl_sock = _ssl_wrap_socket_impl(
+            sock, context, tls_in_tls, server_hostname=server_hostname
+        )
+    else:
+        ssl_sock = _ssl_wrap_socket_impl(sock, context, tls_in_tls)
+    return ssl_sock
+
+
+def is_ipaddress(hostname):
+    """Detects whether the hostname given is an IPv4 or IPv6 address.
+    Also detects IPv6 addresses with Zone IDs.
+
+    :param str hostname: Hostname to examine.
+    :return: True if the hostname is an IP address, False otherwise.
+    """
+    if not six.PY2 and isinstance(hostname, bytes):
+        # IDN A-label bytes are ASCII compatible.
+        hostname = hostname.decode("ascii")
+    return bool(IPV4_RE.match(hostname) or BRACELESS_IPV6_ADDRZ_RE.match(hostname))
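+
+# A few illustrative cases (a sketch, not a doctest):
+#
+#     is_ipaddress("10.0.0.1")           # True (IPv4)
+#     is_ipaddress("2001:db8::1%eth0")   # True (IPv6 with zone ID, no brackets)
+#     is_ipaddress("example.com")        # False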
+
+
+def _is_key_file_encrypted(key_file):
+    """Detects if a key file is encrypted or not."""
+    with open(key_file, "r") as f:
+        for line in f:
+            # Look for Proc-Type: 4,ENCRYPTED
+            if "ENCRYPTED" in line:
+                return True
+
+    return False
+
+
+def _ssl_wrap_socket_impl(sock, ssl_context, tls_in_tls, server_hostname=None):
+    if tls_in_tls:
+        if not SSLTransport:
+            # Import error, ssl is not available.
+            raise ProxySchemeUnsupported(
+                "TLS in TLS requires support for the 'ssl' module"
+            )
+
+        SSLTransport._validate_ssl_context_for_tls_in_tls(ssl_context)
+        return SSLTransport(sock, ssl_context, server_hostname)
+
+    if server_hostname:
+        return ssl_context.wrap_socket(sock, server_hostname=server_hostname)
+    else:
+        return ssl_context.wrap_socket(sock)
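+
+
+# A minimal usage sketch (illustrative, not executed): wrap an existing TCP
+# connection, assuming an already-connected socket and a hypothetical CA
+# bundle path.
+#
+#     import socket
+#     tcp_sock = socket.create_connection(("example.com", 443))
+#     ctx = create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
+#     tls_sock = ssl_wrap_socket(
+#         tcp_sock,
+#         ca_certs="/path/to/ca-bundle.pem",  # hypothetical path
+#         server_hostname="example.com",
+#         ssl_context=ctx,
+#     )
+#     tls_sock.sendall(b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")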
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py
new file mode 100644
index 00000000..1dd950c4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py
@@ -0,0 +1,159 @@
+"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
+
+# Note: This file is under the PSF license as the code comes from the python
+# stdlib. http://docs.python.org/3/license.html
+
+import re
+import sys
+
+# ipaddress has been backported to 2.6+ on PyPI.  If it is installed on the
+# system, use it to handle IP address subjectAltNames (support for these was
+# added in Python 3.5); otherwise only do DNS matching.  This allows
+# util.ssl_match_hostname to continue to be used in Python 2.7.
+try:
+    import ipaddress
+except ImportError:
+    ipaddress = None
+
+__version__ = "3.5.0.1"
+
+
+class CertificateError(ValueError):
+    pass
+
+
+def _dnsname_match(dn, hostname, max_wildcards=1):
+    """Matching according to RFC 6125, section 6.4.3
+
+    http://tools.ietf.org/html/rfc6125#section-6.4.3
+    """
+    pats = []
+    if not dn:
+        return False
+
+    # Ported from python3-syntax:
+    # leftmost, *remainder = dn.split(r'.')
+    parts = dn.split(r".")
+    leftmost = parts[0]
+    remainder = parts[1:]
+
+    wildcards = leftmost.count("*")
+    if wildcards > max_wildcards:
+        # Issue #17980: avoid denials of service by refusing more
+        # than one wildcard per fragment.  A survey of established
+        # policy among SSL implementations showed it to be a
+        # reasonable choice.
+        raise CertificateError(
+            "too many wildcards in certificate DNS name: " + repr(dn)
+        )
+
+    # speed up common case w/o wildcards
+    if not wildcards:
+        return dn.lower() == hostname.lower()
+
+    # RFC 6125, section 6.4.3, subitem 1.
+    # The client SHOULD NOT attempt to match a presented identifier in which
+    # the wildcard character comprises a label other than the left-most label.
+    if leftmost == "*":
+        # When '*' is a fragment by itself, it matches a non-empty dotless
+        # fragment.
+        pats.append("[^.]+")
+    elif leftmost.startswith("xn--") or hostname.startswith("xn--"):
+        # RFC 6125, section 6.4.3, subitem 3.
+        # The client SHOULD NOT attempt to match a presented identifier
+        # where the wildcard character is embedded within an A-label or
+        # U-label of an internationalized domain name.
+        pats.append(re.escape(leftmost))
+    else:
+        # Otherwise, '*' matches any dotless string, e.g. www*
+        pats.append(re.escape(leftmost).replace(r"\*", "[^.]*"))
+
+    # add the remaining fragments, ignore any wildcards
+    for frag in remainder:
+        pats.append(re.escape(frag))
+
+    pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE)
+    return pat.match(hostname)
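+
+# Illustrative outcomes of the rules above (a sketch, not a doctest):
+#
+#     _dnsname_match("www.example.com", "WWW.Example.com")  # truthy (exact, case-insensitive)
+#     _dnsname_match("*.example.com", "www.example.com")    # truthy (wildcard label)
+#     _dnsname_match("*.example.com", "a.b.example.com")    # falsy (wildcard spans one label only)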
+
+
+def _to_unicode(obj):
+    if isinstance(obj, str) and sys.version_info < (3,):
+        # ignored flake8 # F821 to support python 2.7 function
+        obj = unicode(obj, encoding="ascii", errors="strict")  # noqa: F821
+    return obj
+
+
+def _ipaddress_match(ipname, host_ip):
+    """Exact matching of IP addresses.
+
+    RFC 6125 explicitly doesn't define an algorithm for this
+    (section 1.7.2 - "Out of Scope").
+    """
+    # OpenSSL may add a trailing newline to a subjectAltName's IP address
+    # Divergence from upstream: ipaddress can't handle byte str
+    ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
+    return ip == host_ip
+
+
+def match_hostname(cert, hostname):
+    """Verify that *cert* (in decoded format as returned by
+    SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
+    rules are followed, but IP addresses are not accepted for *hostname*.
+
+    CertificateError is raised on failure. On success, the function
+    returns nothing.
+    """
+    if not cert:
+        raise ValueError(
+            "empty or no certificate, match_hostname needs a "
+            "SSL socket or SSL context with either "
+            "CERT_OPTIONAL or CERT_REQUIRED"
+        )
+    try:
+        # Divergence from upstream: ipaddress can't handle byte str
+        host_ip = ipaddress.ip_address(_to_unicode(hostname))
+    except (UnicodeError, ValueError):
+        # ValueError: Not an IP address (common case)
+        # UnicodeError: Divergence from upstream: have to deal with ipaddress
+        # not taking byte strings.  Addresses should be all-ASCII, so we
+        # consider the value not an IP address in this case.
+        host_ip = None
+    except AttributeError:
+        # Divergence from upstream: Make ipaddress library optional
+        if ipaddress is None:
+            host_ip = None
+        else:  # Defensive
+            raise
+    dnsnames = []
+    san = cert.get("subjectAltName", ())
+    for key, value in san:
+        if key == "DNS":
+            if host_ip is None and _dnsname_match(value, hostname):
+                return
+            dnsnames.append(value)
+        elif key == "IP Address":
+            if host_ip is not None and _ipaddress_match(value, host_ip):
+                return
+            dnsnames.append(value)
+    if not dnsnames:
+        # The subject is only checked when there is no dNSName entry
+        # in subjectAltName
+        for sub in cert.get("subject", ()):
+            for key, value in sub:
+                # XXX according to RFC 2818, the most specific Common Name
+                # must be used.
+                if key == "commonName":
+                    if _dnsname_match(value, hostname):
+                        return
+                    dnsnames.append(value)
+    if len(dnsnames) > 1:
+        raise CertificateError(
+            "hostname %r "
+            "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames)))
+        )
+    elif len(dnsnames) == 1:
+        raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
+    else:
+        raise CertificateError(
+            "no appropriate commonName or subjectAltName fields were found"
+        )
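+
+
+# A minimal usage sketch with a hand-built cert dict shaped like the return
+# value of SSLSocket.getpeercert() (values illustrative):
+#
+#     cert = {"subjectAltName": (("DNS", "*.example.com"),)}
+#     match_hostname(cert, "www.example.com")  # returns None on success
+#     match_hostname(cert, "www.other.org")    # raises CertificateError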
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssltransport.py b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssltransport.py
new file mode 100644
index 00000000..4a7105d1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/ssltransport.py
@@ -0,0 +1,221 @@
+import io
+import socket
+import ssl
+
+from ..exceptions import ProxySchemeUnsupported
+from ..packages import six
+
+SSL_BLOCKSIZE = 16384
+
+
+class SSLTransport:
+    """
+    The SSLTransport wraps an existing socket and establishes an SSL connection.
+
+    Unlike Python's implementation of SSLSocket, it allows you to chain
+    multiple TLS connections together. It's particularly useful if you need to
+    implement TLS within TLS.
+
+    The class supports most of the socket API operations.
+    """
+
+    @staticmethod
+    def _validate_ssl_context_for_tls_in_tls(ssl_context):
+        """
+        Raises a ProxySchemeUnsupported if the provided ssl_context can't be used
+        for TLS in TLS.
+
+        The only requirement is that the ssl_context provides the 'wrap_bio'
+        method.
+        """
+
+        if not hasattr(ssl_context, "wrap_bio"):
+            if six.PY2:
+                raise ProxySchemeUnsupported(
+                    "TLS in TLS requires SSLContext.wrap_bio() which isn't "
+                    "supported on Python 2"
+                )
+            else:
+                raise ProxySchemeUnsupported(
+                    "TLS in TLS requires SSLContext.wrap_bio() which isn't "
+                    "available on non-native SSLContext"
+                )
+
+    def __init__(
+        self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True
+    ):
+        """
+        Create an SSLTransport around socket using the provided ssl_context.
+        """
+        self.incoming = ssl.MemoryBIO()
+        self.outgoing = ssl.MemoryBIO()
+
+        self.suppress_ragged_eofs = suppress_ragged_eofs
+        self.socket = socket
+
+        self.sslobj = ssl_context.wrap_bio(
+            self.incoming, self.outgoing, server_hostname=server_hostname
+        )
+
+        # Perform initial handshake.
+        self._ssl_io_loop(self.sslobj.do_handshake)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *_):
+        self.close()
+
+    def fileno(self):
+        return self.socket.fileno()
+
+    def read(self, len=1024, buffer=None):
+        return self._wrap_ssl_read(len, buffer)
+
+    def recv(self, len=1024, flags=0):
+        if flags != 0:
+            raise ValueError("non-zero flags not allowed in calls to recv")
+        return self._wrap_ssl_read(len)
+
+    def recv_into(self, buffer, nbytes=None, flags=0):
+        if flags != 0:
+            raise ValueError("non-zero flags not allowed in calls to recv_into")
+        if buffer and (nbytes is None):
+            nbytes = len(buffer)
+        elif nbytes is None:
+            nbytes = 1024
+        return self.read(nbytes, buffer)
+
+    def sendall(self, data, flags=0):
+        if flags != 0:
+            raise ValueError("non-zero flags not allowed in calls to sendall")
+        count = 0
+        with memoryview(data) as view, view.cast("B") as byte_view:
+            amount = len(byte_view)
+            while count < amount:
+                v = self.send(byte_view[count:])
+                count += v
+
+    def send(self, data, flags=0):
+        if flags != 0:
+            raise ValueError("non-zero flags not allowed in calls to send")
+        response = self._ssl_io_loop(self.sslobj.write, data)
+        return response
+
+    def makefile(
+        self, mode="r", buffering=None, encoding=None, errors=None, newline=None
+    ):
+        """
+        Python's http.client uses makefile() and buffered I/O when reading
+        HTTP messages, and we need to support that.
+
+        This is unfortunately a copy-and-paste of socket.py's makefile, with
+        small changes to point at this transport's socket directly.
+        """
+        if not set(mode) <= {"r", "w", "b"}:
+            raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
+
+        writing = "w" in mode
+        reading = "r" in mode or not writing
+        assert reading or writing
+        binary = "b" in mode
+        rawmode = ""
+        if reading:
+            rawmode += "r"
+        if writing:
+            rawmode += "w"
+        raw = socket.SocketIO(self, rawmode)
+        self.socket._io_refs += 1
+        if buffering is None:
+            buffering = -1
+        if buffering < 0:
+            buffering = io.DEFAULT_BUFFER_SIZE
+        if buffering == 0:
+            if not binary:
+                raise ValueError("unbuffered streams must be binary")
+            return raw
+        if reading and writing:
+            buffer = io.BufferedRWPair(raw, raw, buffering)
+        elif reading:
+            buffer = io.BufferedReader(raw, buffering)
+        else:
+            assert writing
+            buffer = io.BufferedWriter(raw, buffering)
+        if binary:
+            return buffer
+        text = io.TextIOWrapper(buffer, encoding, errors, newline)
+        text.mode = mode
+        return text
+
+    def unwrap(self):
+        self._ssl_io_loop(self.sslobj.unwrap)
+
+    def close(self):
+        self.socket.close()
+
+    def getpeercert(self, binary_form=False):
+        return self.sslobj.getpeercert(binary_form)
+
+    def version(self):
+        return self.sslobj.version()
+
+    def cipher(self):
+        return self.sslobj.cipher()
+
+    def selected_alpn_protocol(self):
+        return self.sslobj.selected_alpn_protocol()
+
+    def selected_npn_protocol(self):
+        return self.sslobj.selected_npn_protocol()
+
+    def shared_ciphers(self):
+        return self.sslobj.shared_ciphers()
+
+    def compression(self):
+        return self.sslobj.compression()
+
+    def settimeout(self, value):
+        self.socket.settimeout(value)
+
+    def gettimeout(self):
+        return self.socket.gettimeout()
+
+    def _decref_socketios(self):
+        self.socket._decref_socketios()
+
+    def _wrap_ssl_read(self, len, buffer=None):
+        try:
+            return self._ssl_io_loop(self.sslobj.read, len, buffer)
+        except ssl.SSLError as e:
+            if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:
+                return 0  # EOF
+            else:
+                raise
+
+    def _ssl_io_loop(self, func, *args):
+        """Performs an I/O loop between incoming/outgoing and the socket."""
+        should_loop = True
+        ret = None
+
+        while should_loop:
+            errno = None
+            try:
+                ret = func(*args)
+            except ssl.SSLError as e:
+                if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
+                    # WANT_READ and WANT_WRITE are expected; anything else is not.
+                    raise e
+                errno = e.errno
+
+            buf = self.outgoing.read()
+            self.socket.sendall(buf)
+
+            if errno is None:
+                should_loop = False
+            elif errno == ssl.SSL_ERROR_WANT_READ:
+                buf = self.socket.recv(SSL_BLOCKSIZE)
+                if buf:
+                    self.incoming.write(buf)
+                else:
+                    self.incoming.write_eof()
+        return ret
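+
+
+# A minimal TLS-in-TLS sketch (illustrative): wrap the proxy connection with a
+# regular SSLContext, then run a second handshake for the origin through it.
+# Host names and the HTTP CONNECT exchange are assumed, not shown.
+#
+#     proxy_sock = socket.create_connection(("proxy.example", 8443))
+#     outer = ssl.create_default_context().wrap_socket(
+#         proxy_sock, server_hostname="proxy.example"
+#     )
+#     # ... send the CONNECT request over `outer` here ...
+#     inner_ctx = ssl.create_default_context()
+#     SSLTransport._validate_ssl_context_for_tls_in_tls(inner_ctx)
+#     inner = SSLTransport(outer, inner_ctx, server_hostname="origin.example")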
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/timeout.py b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/timeout.py
new file mode 100644
index 00000000..78e18a62
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/timeout.py
@@ -0,0 +1,271 @@
+from __future__ import absolute_import
+
+import time
+
+# The default socket timeout, used by httplib to indicate that no timeout was
+# specified by the user
+from socket import _GLOBAL_DEFAULT_TIMEOUT, getdefaulttimeout
+
+from ..exceptions import TimeoutStateError
+
+# A sentinel value to indicate that no timeout was specified by the user in
+# urllib3
+_Default = object()
+
+
+# Use time.monotonic if available.
+current_time = getattr(time, "monotonic", time.time)
+
+
+class Timeout(object):
+    """Timeout configuration.
+
+    Timeouts can be defined as a default for a pool:
+
+    .. code-block:: python
+
+       timeout = Timeout(connect=2.0, read=7.0)
+       http = PoolManager(timeout=timeout)
+       response = http.request('GET', 'http://example.com/')
+
+    Or per-request (which overrides the default for the pool):
+
+    .. code-block:: python
+
+       response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
+
+    Timeouts can be disabled by setting all the parameters to ``None``:
+
+    .. code-block:: python
+
+       no_timeout = Timeout(connect=None, read=None)
+       response = http.request('GET', 'http://example.com/', timeout=no_timeout)
+
+
+    :param total:
+        This combines the connect and read timeouts into one; the read timeout
+        will be set to the time leftover from the connect attempt. In the
+        event that both a connect timeout and a total are specified, or a read
+        timeout and a total are specified, the shorter timeout will be applied.
+
+        Defaults to None.
+
+    :type total: int, float, or None
+
+    :param connect:
+        The maximum amount of time (in seconds) to wait for a connection
+        attempt to a server to succeed. Omitting the parameter will default the
+        connect timeout to the system default, probably `the global default
+        timeout in socket.py
+        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+        None will set an infinite timeout for connection attempts.
+
+    :type connect: int, float, or None
+
+    :param read:
+        The maximum amount of time (in seconds) to wait between consecutive
+        read operations for a response from the server. Omitting the parameter
+        will default the read timeout to the system default, probably `the
+        global default timeout in socket.py
+        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+        None will set an infinite timeout.
+
+    :type read: int, float, or None
+
+    .. note::
+
+        Many factors can affect the total amount of time for urllib3 to return
+        an HTTP response.
+
+        For example, Python's DNS resolver does not obey the timeout specified
+        on the socket. Other factors that can affect total request time include
+        high CPU load, high swap, the program running at a low priority level,
+        or other behaviors.
+
+        In addition, the read and total timeouts only measure the time between
+        read operations on the socket connecting the client and the server,
+        not the total amount of time for the request to return a complete
+        response. For most requests, the timeout is raised because the server
+        has not sent the first byte in the specified time. This is not always
+        the case; if a server streams one byte every fifteen seconds, a timeout
+        of 20 seconds will not trigger, even though the request will take
+        several minutes to complete.
+
+        If your goal is to cut off any request after a set amount of wall clock
+        time, consider having a second "watcher" thread to cut off a slow
+        request.
+    """
+
+    #: A sentinel object representing the default timeout value
+    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
+
+    def __init__(self, total=None, connect=_Default, read=_Default):
+        self._connect = self._validate_timeout(connect, "connect")
+        self._read = self._validate_timeout(read, "read")
+        self.total = self._validate_timeout(total, "total")
+        self._start_connect = None
+
+    def __repr__(self):
+        return "%s(connect=%r, read=%r, total=%r)" % (
+            type(self).__name__,
+            self._connect,
+            self._read,
+            self.total,
+        )
+
+    # __str__ provided for backwards compatibility
+    __str__ = __repr__
+
+    @classmethod
+    def resolve_default_timeout(cls, timeout):
+        return getdefaulttimeout() if timeout is cls.DEFAULT_TIMEOUT else timeout
+
+    @classmethod
+    def _validate_timeout(cls, value, name):
+        """Check that a timeout attribute is valid.
+
+        :param value: The timeout value to validate
+        :param name: The name of the timeout attribute to validate. This is
+            used in error messages.
+        :return: The validated and cast version of the given value.
+        :raises ValueError: If it is a numeric value less than or equal to
+            zero, or the type is not an integer, float, or None.
+        """
+        if value is _Default:
+            return cls.DEFAULT_TIMEOUT
+
+        if value is None or value is cls.DEFAULT_TIMEOUT:
+            return value
+
+        if isinstance(value, bool):
+            raise ValueError(
+                "Timeout cannot be a boolean value. It must "
+                "be an int, float or None."
+            )
+        try:
+            float(value)
+        except (TypeError, ValueError):
+            raise ValueError(
+                "Timeout value %s was %s, but it must be an "
+                "int, float or None." % (name, value)
+            )
+
+        try:
+            if value <= 0:
+                raise ValueError(
+                    "Attempted to set %s timeout to %s, but the "
+                    "timeout cannot be set to a value less "
+                    "than or equal to 0." % (name, value)
+                )
+        except TypeError:
+            # Python 3: comparing a non-numeric value raises TypeError
+            raise ValueError(
+                "Timeout value %s was %s, but it must be an "
+                "int, float or None." % (name, value)
+            )
+
+        return value
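+
+    # Illustrative outcomes of the checks above (a sketch, not a doctest):
+    #
+    #     Timeout._validate_timeout(_Default, "connect")  # Timeout.DEFAULT_TIMEOUT
+    #     Timeout._validate_timeout(2.5, "read")          # 2.5
+    #     Timeout._validate_timeout(0, "read")            # ValueError: <= 0
+    #     Timeout._validate_timeout(True, "read")         # ValueError: boolean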
+
+    @classmethod
+    def from_float(cls, timeout):
+        """Create a new Timeout from a legacy timeout value.
+
+        The timeout value used by httplib.py sets the same timeout on the
+        connect() and recv() socket requests. This creates a :class:`Timeout`
+        object that sets the individual timeouts to the ``timeout`` value
+        passed to this function.
+
+        :param timeout: The legacy timeout value.
+        :type timeout: integer, float, sentinel default object, or None
+        :return: Timeout object
+        :rtype: :class:`Timeout`
+        """
+        return Timeout(read=timeout, connect=timeout)
+
+    def clone(self):
+        """Create a copy of the timeout object
+
+        Timeout properties are stored per-pool but each request needs a fresh
+        Timeout object to ensure each one has its own start/stop configured.
+
+        :return: a copy of the timeout object
+        :rtype: :class:`Timeout`
+        """
+        # We can't use copy.deepcopy because that will also create a new object
+        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
+        # detect the user default.
+        return Timeout(connect=self._connect, read=self._read, total=self.total)
+
+    def start_connect(self):
+        """Start the timeout clock, used during a connect() attempt
+
+        :raises urllib3.exceptions.TimeoutStateError: if you attempt
+            to start a timer that has been started already.
+        """
+        if self._start_connect is not None:
+            raise TimeoutStateError("Timeout timer has already been started.")
+        self._start_connect = current_time()
+        return self._start_connect
+
+    def get_connect_duration(self):
+        """Gets the time elapsed since the call to :meth:`start_connect`.
+
+        :return: Elapsed time in seconds.
+        :rtype: float
+        :raises urllib3.exceptions.TimeoutStateError: if you attempt
+            to get duration for a timer that hasn't been started.
+        """
+        if self._start_connect is None:
+            raise TimeoutStateError(
+                "Can't get connect duration for timer that has not started."
+            )
+        return current_time() - self._start_connect
+
+    @property
+    def connect_timeout(self):
+        """Get the value to use when setting a connection timeout.
+
+        This will be a positive float or integer, the value None
+        (never timeout), or the default system timeout.
+
+        :return: Connect timeout.
+        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+        """
+        if self.total is None:
+            return self._connect
+
+        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
+            return self.total
+
+        return min(self._connect, self.total)
+
+    @property
+    def read_timeout(self):
+        """Get the value for the read timeout.
+
+        This assumes some time has elapsed in the connection timeout and
+        computes the read timeout appropriately.
+
+        If self.total is set, the read timeout is dependent on the amount of
+        time taken by the connect timeout. If the connection time has not been
+        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
+        raised.
+
+        :return: Value to use for the read timeout.
+        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
+            has not yet been called on this object.
+        """
+        if (
+            self.total is not None
+            and self.total is not self.DEFAULT_TIMEOUT
+            and self._read is not None
+            and self._read is not self.DEFAULT_TIMEOUT
+        ):
+            # In case the connect timeout has not yet been established.
+            if self._start_connect is None:
+                return self._read
+            return max(0, min(self.total - self.get_connect_duration(), self._read))
+        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
+            return max(0, self.total - self.get_connect_duration())
+        else:
+            return self._read
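+
+
+# A short sketch of how ``total`` interacts with ``connect`` and ``read``
+# (durations illustrative; ``clone()`` gives each request a fresh timer):
+#
+#     t = Timeout(total=10.0, read=7.0).clone()
+#     t.start_connect()
+#     # ... suppose the connect phase takes ~4 seconds ...
+#     t.connect_timeout  # 10.0 (no explicit connect value, so total applies)
+#     t.read_timeout     # min(10.0 - 4.0, 7.0) == 6.0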
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/url.py b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/url.py
new file mode 100644
index 00000000..a960b2f3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/url.py
@@ -0,0 +1,435 @@
+from __future__ import absolute_import
+
+import re
+from collections import namedtuple
+
+from ..exceptions import LocationParseError
+from ..packages import six
+
+url_attrs = ["scheme", "auth", "host", "port", "path", "query", "fragment"]
+
+# We only want to normalize urls with an HTTP(S) scheme.
+# urllib3 infers URLs without a scheme (None) to be http.
+NORMALIZABLE_SCHEMES = ("http", "https", None)
+
+# Almost all of these patterns were derived from the
+# 'rfc3986' module: https://github.com/python-hyper/rfc3986
+PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}")
+SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)")
+URI_RE = re.compile(
+    r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?"
+    r"(?://([^\\/?#]*))?"
+    r"([^?#]*)"
+    r"(?:\?([^#]*))?"
+    r"(?:#(.*))?$",
+    re.UNICODE | re.DOTALL,
+)
+
+IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
+HEX_PAT = "[0-9A-Fa-f]{1,4}"
+LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
+_subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
+_variations = [
+    #                            6( h16 ":" ) ls32
+    "(?:%(hex)s:){6}%(ls32)s",
+    #                       "::" 5( h16 ":" ) ls32
+    "::(?:%(hex)s:){5}%(ls32)s",
+    # [               h16 ] "::" 4( h16 ":" ) ls32
+    "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
+    # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
+    "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
+    # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
+    "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
+    # [ *3( h16 ":" ) h16 ] "::"    h16 ":"   ls32
+    "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
+    # [ *4( h16 ":" ) h16 ] "::"              ls32
+    "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
+    # [ *5( h16 ":" ) h16 ] "::"              h16
+    "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
+    # [ *6( h16 ":" ) h16 ] "::"
+    "(?:(?:%(hex)s:){0,6}%(hex)s)?::",
+]
+
+UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._\-~"
+IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
+ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
+IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
+REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*"
+TARGET_RE = re.compile(r"^(/[^?#]*)(?:\?([^#]*))?(?:#.*)?$")
+
+IPV4_RE = re.compile("^" + IPV4_PAT + "$")
+IPV6_RE = re.compile("^" + IPV6_PAT + "$")
+IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
+BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT[2:-2] + "$")
+ZONE_ID_RE = re.compile("(" + ZONE_ID_PAT + r")\]$")
+
+_HOST_PORT_PAT = ("^(%s|%s|%s)(?::0*?(|0|[1-9][0-9]{0,4}))?$") % (
+    REG_NAME_PAT,
+    IPV4_PAT,
+    IPV6_ADDRZ_PAT,
+)
+_HOST_PORT_RE = re.compile(_HOST_PORT_PAT, re.UNICODE | re.DOTALL)
+
+UNRESERVED_CHARS = set(
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~"
+)
+SUB_DELIM_CHARS = set("!$&'()*+,;=")
+USERINFO_CHARS = UNRESERVED_CHARS | SUB_DELIM_CHARS | {":"}
+PATH_CHARS = USERINFO_CHARS | {"@", "/"}
+QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {"?"}
+
+
+class Url(namedtuple("Url", url_attrs)):
+    """
+    Data structure for representing an HTTP URL. Used as a return value for
+    :func:`parse_url`. Both the scheme and host are normalized as they are
+    both case-insensitive according to RFC 3986.
+    """
+
+    __slots__ = ()
+
+    def __new__(
+        cls,
+        scheme=None,
+        auth=None,
+        host=None,
+        port=None,
+        path=None,
+        query=None,
+        fragment=None,
+    ):
+        if path and not path.startswith("/"):
+            path = "/" + path
+        if scheme is not None:
+            scheme = scheme.lower()
+        return super(Url, cls).__new__(
+            cls, scheme, auth, host, port, path, query, fragment
+        )
+
+    @property
+    def hostname(self):
+        """For backwards-compatibility with urlparse. We're nice like that."""
+        return self.host
+
+    @property
+    def request_uri(self):
+        """Absolute path including the query string."""
+        uri = self.path or "/"
+
+        if self.query is not None:
+            uri += "?" + self.query
+
+        return uri
+
+    @property
+    def netloc(self):
+        """Network location including host and port"""
+        if self.port:
+            return "%s:%d" % (self.host, self.port)
+        return self.host
+
+    @property
+    def url(self):
+        """
+        Convert self into a url
+
+        This function should more or less round-trip with :func:`.parse_url`. The
+        returned url may not be exactly the same as the url passed to
+        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
+        with a blank port will have : removed).
+
+        Example: ::
+
+            >>> U = parse_url('http://google.com/mail/')
+            >>> U.url
+            'http://google.com/mail/'
+            >>> Url('http', 'username:password', 'host.com', 80,
+            ... '/path', 'query', 'fragment').url
+            'http://username:password@host.com:80/path?query#fragment'
+        """
+        scheme, auth, host, port, path, query, fragment = self
+        url = u""
+
+        # We use "is not None" we want things to happen with empty strings (or 0 port)
+        if scheme is not None:
+            url += scheme + u"://"
+        if auth is not None:
+            url += auth + u"@"
+        if host is not None:
+            url += host
+        if port is not None:
+            url += u":" + str(port)
+        if path is not None:
+            url += path
+        if query is not None:
+            url += u"?" + query
+        if fragment is not None:
+            url += u"#" + fragment
+
+        return url
+
+    def __str__(self):
+        return self.url
+
+
+def split_first(s, delims):
+    """
+    .. deprecated:: 1.25
+
+    Given a string and an iterable of delimiters, split on the first found
+    delimiter. Return two split parts and the matched delimiter.
+
+    If not found, then the first part is the full input string.
+
+    Example::
+
+        >>> split_first('foo/bar?baz', '?/=')
+        ('foo', 'bar?baz', '/')
+        >>> split_first('foo/bar?baz', '123')
+        ('foo/bar?baz', '', None)
+
+    Scales linearly with the number of delims. Not ideal for a large number of delims.
+    """
+    min_idx = None
+    min_delim = None
+    for d in delims:
+        idx = s.find(d)
+        if idx < 0:
+            continue
+
+        if min_idx is None or idx < min_idx:
+            min_idx = idx
+            min_delim = d
+
+    if min_idx is None or min_idx < 0:
+        return s, "", None
+
+    return s[:min_idx], s[min_idx + 1 :], min_delim
+
+
+def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"):
+    """Percent-encodes a URI component without reapplying
+    onto an already percent-encoded component.
+    """
+    if component is None:
+        return component
+
+    component = six.ensure_text(component)
+
+    # Normalize existing percent-encoded bytes.
+    # Try to see if the component we're encoding is already percent-encoded
+    # so we can skip all '%' characters but still encode all others.
+    component, percent_encodings = PERCENT_RE.subn(
+        lambda match: match.group(0).upper(), component
+    )
+
+    uri_bytes = component.encode("utf-8", "surrogatepass")
+    is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
+    encoded_component = bytearray()
+
+    for i in range(0, len(uri_bytes)):
+        # Will return a single character bytestring on both Python 2 & 3
+        byte = uri_bytes[i : i + 1]
+        byte_ord = ord(byte)
+        if (is_percent_encoded and byte == b"%") or (
+            byte_ord < 128 and byte.decode() in allowed_chars
+        ):
+            encoded_component += byte
+            continue
+        encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
+
+    return encoded_component.decode(encoding)
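+
+# Example of the behavior above: an existing escape is kept (and uppercased)
+# while the bare space is newly encoded.
+#
+#     _encode_invalid_chars("hello world%2f!", PATH_CHARS)  # 'hello%20world%2F!'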
+
+
+def _remove_path_dot_segments(path):
+    # See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
+    segments = path.split("/")  # Turn the path into a list of segments
+    output = []  # Initialize the variable to use to store output
+
+    for segment in segments:
+        # '.' is the current directory, so ignore it, it is superfluous
+        if segment == ".":
+            continue
+        # Anything other than '..', should be appended to the output
+        elif segment != "..":
+            output.append(segment)
+        # In this case segment == '..', if we can, we should pop the last
+        # element
+        elif output:
+            output.pop()
+
+    # If the path starts with '/' and the output is empty or the first string
+    # is non-empty
+    if path.startswith("/") and (not output or output[0]):
+        output.insert(0, "")
+
+    # If the path starts with '/.' or '/..' ensure we add one more empty
+    # string to add a trailing '/'
+    if path.endswith(("/.", "/..")):
+        output.append("")
+
+    return "/".join(output)
+
+
+def _normalize_host(host, scheme):
+    if host:
+        if isinstance(host, six.binary_type):
+            host = six.ensure_str(host)
+
+        if scheme in NORMALIZABLE_SCHEMES:
+            is_ipv6 = IPV6_ADDRZ_RE.match(host)
+            if is_ipv6:
+                # IPv6 hosts of the form 'a::b%zone' are encoded in a URL as
+                # such per RFC 6874: 'a::b%25zone'. Unquote the ZoneID
+                # separator as necessary to return a valid RFC 4007 scoped IP.
+                match = ZONE_ID_RE.search(host)
+                if match:
+                    start, end = match.span(1)
+                    zone_id = host[start:end]
+
+                    if zone_id.startswith("%25") and zone_id != "%25":
+                        zone_id = zone_id[3:]
+                    else:
+                        zone_id = zone_id[1:]
+                    zone_id = "%" + _encode_invalid_chars(zone_id, UNRESERVED_CHARS)
+                    return host[:start].lower() + zone_id + host[end:]
+                else:
+                    return host.lower()
+            elif not IPV4_RE.match(host):
+                return six.ensure_str(
+                    b".".join([_idna_encode(label) for label in host.split(".")])
+                )
+    return host
+
+
+def _idna_encode(name):
+    if name and any(ord(x) >= 128 for x in name):
+        try:
+            from pip._vendor import idna
+        except ImportError:
+            six.raise_from(
+                LocationParseError("Unable to parse URL without the 'idna' module"),
+                None,
+            )
+        try:
+            return idna.encode(name.lower(), strict=True, std3_rules=True)
+        except idna.IDNAError:
+            six.raise_from(
+                LocationParseError(u"Name '%s' is not a valid IDNA label" % name), None
+            )
+    return name.lower().encode("ascii")
+
+
+def _encode_target(target):
+    """Percent-encodes a request target so that there are no invalid characters"""
+    path, query = TARGET_RE.match(target).groups()
+    target = _encode_invalid_chars(path, PATH_CHARS)
+    query = _encode_invalid_chars(query, QUERY_CHARS)
+    if query is not None:
+        target += "?" + query
+    return target
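+
+# Example: spaces in both path and query are percent-encoded while the target
+# structure is preserved.
+#
+#     _encode_target("/path to/res?q=a b")  # '/path%20to/res?q=a%20b'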
+
+
+def parse_url(url):
+    """
+    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
+    performed to parse incomplete urls. Fields not provided will be None.
+    This parser is RFC 3986 and RFC 6874 compliant.
+
+    The parser logic and helper functions are based heavily on
+    work done in the ``rfc3986`` module.
+
+    :param str url: URL to parse into a :class:`.Url` namedtuple.
+
+    Partly backwards-compatible with :mod:`urlparse`.
+
+    Example::
+
+        >>> parse_url('http://google.com/mail/')
+        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
+        >>> parse_url('google.com:80')
+        Url(scheme=None, host='google.com', port=80, path=None, ...)
+        >>> parse_url('/foo?bar')
+        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
+    """
+    if not url:
+        # Empty
+        return Url()
+
+    source_url = url
+    if not SCHEME_RE.search(url):
+        url = "//" + url
+
+    try:
+        scheme, authority, path, query, fragment = URI_RE.match(url).groups()
+        normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES
+
+        if scheme:
+            scheme = scheme.lower()
+
+        if authority:
+            auth, _, host_port = authority.rpartition("@")
+            auth = auth or None
+            host, port = _HOST_PORT_RE.match(host_port).groups()
+            if auth and normalize_uri:
+                auth = _encode_invalid_chars(auth, USERINFO_CHARS)
+            if port == "":
+                port = None
+        else:
+            auth, host, port = None, None, None
+
+        if port is not None:
+            port = int(port)
+            if not (0 <= port <= 65535):
+                raise LocationParseError(url)
+
+        host = _normalize_host(host, scheme)
+
+        if normalize_uri and path:
+            path = _remove_path_dot_segments(path)
+            path = _encode_invalid_chars(path, PATH_CHARS)
+        if normalize_uri and query:
+            query = _encode_invalid_chars(query, QUERY_CHARS)
+        if normalize_uri and fragment:
+            fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)
+
+    except (ValueError, AttributeError):
+        return six.raise_from(LocationParseError(source_url), None)
+
+    # For the sake of backwards compatibility we put empty
+    # string values for path if there are any defined values
+    # beyond the path in the URL.
+    # TODO: Remove this when we break backwards compatibility.
+    if not path:
+        if query is not None or fragment is not None:
+            path = ""
+        else:
+            path = None
+
+    # Ensure that each part of the URL is a `str` for
+    # backwards compatibility.
+    if isinstance(url, six.text_type):
+        ensure_func = six.ensure_text
+    else:
+        ensure_func = six.ensure_str
+
+    def ensure_type(x):
+        return x if x is None else ensure_func(x)
+
+    return Url(
+        scheme=ensure_type(scheme),
+        auth=ensure_type(auth),
+        host=ensure_type(host),
+        port=port,
+        path=ensure_type(path),
+        query=ensure_type(query),
+        fragment=ensure_type(fragment),
+    )
+
+
+def get_host(url):
+    """
+    Deprecated. Use :func:`parse_url` instead.
+    """
+    p = parse_url(url)
+    return p.scheme or "http", p.hostname, p.port
diff --git a/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/wait.py b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/wait.py
new file mode 100644
index 00000000..21b4590b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/pip/_vendor/urllib3/util/wait.py
@@ -0,0 +1,152 @@
+import errno
+import select
+import sys
+from functools import partial
+
+try:
+    from time import monotonic
+except ImportError:
+    from time import time as monotonic
+
+__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"]
+
+
+class NoWayToWaitForSocketError(Exception):
+    pass
+
+
+# How should we wait on sockets?
+#
+# There are two types of APIs you can use for waiting on sockets: the fancy
+# modern stateful APIs like epoll/kqueue, and the older stateless APIs like
+# select/poll. The stateful APIs are more efficient when you have a lot of
+# sockets to keep track of, because you can set them up once and then use them
+# lots of times. But we only ever want to wait on a single socket at a time
+# and don't want to keep track of state, so the stateless APIs are actually
+# more efficient. So we want to use select() or poll().
+#
+# Now, how do we choose between select() and poll()? On traditional Unixes,
+# select() has a strange calling convention that makes it slow, or fail
+# altogether, for high-numbered file descriptors. The point of poll() is to fix
+# that, so on Unixes, we prefer poll().
+#
+# On Windows, there is no poll() (or at least Python doesn't provide a wrapper
+# for it), but that's OK, because on Windows, select() doesn't have this
+# strange calling convention; plain select() works fine.
+#
+# So: on Windows we use select(), and everywhere else we use poll(). We also
+# fall back to select() in case poll() is somehow broken or missing.
+
+if sys.version_info >= (3, 5):
+    # Modern Python, that retries syscalls by default
+    def _retry_on_intr(fn, timeout):
+        return fn(timeout)
+
+else:
+    # Old and broken Pythons.
+    def _retry_on_intr(fn, timeout):
+        if timeout is None:
+            deadline = float("inf")
+        else:
+            deadline = monotonic() + timeout
+
+        while True:
+            try:
+                return fn(timeout)
+            # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
+            except (OSError, select.error) as e:
+                # 'e.args[0]' incantation works for both OSError and select.error
+                if e.args[0] != errno.EINTR:
+                    raise
+                else:
+                    timeout = deadline - monotonic()
+                    if timeout < 0:
+                        timeout = 0
+                    if timeout == float("inf"):
+                        timeout = None
+                    continue
+
+
+def select_wait_for_socket(sock, read=False, write=False, timeout=None):
+    if not read and not write:
+        raise RuntimeError("must specify at least one of read=True, write=True")
+    rcheck = []
+    wcheck = []
+    if read:
+        rcheck.append(sock)
+    if write:
+        wcheck.append(sock)
+    # When doing a non-blocking connect, most systems signal success by
+    # marking the socket writable. Windows, though, signals success by marking
+    # it as "exceptional". We paper over the difference by checking the write
+    # sockets for both conditions. (The stdlib selectors module does the same
+    # thing.)
+    fn = partial(select.select, rcheck, wcheck, wcheck)
+    rready, wready, xready = _retry_on_intr(fn, timeout)
+    return bool(rready or wready or xready)
+
+
+def poll_wait_for_socket(sock, read=False, write=False, timeout=None):
+    if not read and not write:
+        raise RuntimeError("must specify at least one of read=True, write=True")
+    mask = 0
+    if read:
+        mask |= select.POLLIN
+    if write:
+        mask |= select.POLLOUT
+    poll_obj = select.poll()
+    poll_obj.register(sock, mask)
+
+    # For some reason, poll() takes timeout in milliseconds
+    def do_poll(t):
+        if t is not None:
+            t *= 1000
+        return poll_obj.poll(t)
+
+    return bool(_retry_on_intr(do_poll, timeout))
+
+
+def null_wait_for_socket(*args, **kwargs):
+    raise NoWayToWaitForSocketError("no select-equivalent available")
+
+
+def _have_working_poll():
+    # Apparently some systems have a select.poll that fails as soon as you try
+    # to use it, either due to strange configuration or broken monkeypatching
+    # from libraries like eventlet/greenlet.
+    try:
+        poll_obj = select.poll()
+        _retry_on_intr(poll_obj.poll, 0)
+    except (AttributeError, OSError):
+        return False
+    else:
+        return True
+
+
+def wait_for_socket(*args, **kwargs):
+    # We delay choosing which implementation to use until the first time we're
+    # called. We could do it at import time, but then we might make the wrong
+    # decision if someone goes wild with monkeypatching select.poll after
+    # we're imported.
+    global wait_for_socket
+    if _have_working_poll():
+        wait_for_socket = poll_wait_for_socket
+    elif hasattr(select, "select"):
+        wait_for_socket = select_wait_for_socket
+    else:  # Platform-specific: Appengine.
+        wait_for_socket = null_wait_for_socket
+    return wait_for_socket(*args, **kwargs)
+
+
+def wait_for_read(sock, timeout=None):
+    """Waits for reading to be available on a given socket.
+    Returns True if the socket is readable, or False if the timeout expired.
+    """
+    return wait_for_socket(sock, read=True, timeout=timeout)
+
+
+def wait_for_write(sock, timeout=None):
+    """Waits for writing to be available on a given socket.
+    Returns True if the socket is writable, or False if the timeout expired.
+    """
+    return wait_for_socket(sock, write=True, timeout=timeout)
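+
+
+# A minimal usage sketch with a local socket pair (illustrative):
+#
+#     import socket
+#     a, b = socket.socketpair()
+#     wait_for_read(a, timeout=0.1)  # False: nothing to read yet
+#     b.sendall(b"x")
+#     wait_for_read(a, timeout=0.1)  # True: one byte is waiting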