about summary refs log tree commit diff
path: root/.venv/lib/python3.12/site-packages/networkx/utils
diff options
context:
space:
mode:
Diffstat (limited to '.venv/lib/python3.12/site-packages/networkx/utils')
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/__init__.py8
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/backends.py2501
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/configs.py387
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/decorators.py1237
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/heaps.py340
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/mapped_queue.py297
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/misc.py653
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/random_sequence.py164
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/rcm.py159
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/tests/__init__.py0
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/tests/test__init.py11
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/tests/test_backends.py170
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/tests/test_config.py231
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/tests/test_decorators.py510
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/tests/test_heaps.py131
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/tests/test_mapped_queue.py268
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/tests/test_misc.py268
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/tests/test_random_sequence.py38
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/tests/test_rcm.py63
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/tests/test_unionfind.py55
-rw-r--r--.venv/lib/python3.12/site-packages/networkx/utils/union_find.py106
21 files changed, 7597 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/__init__.py b/.venv/lib/python3.12/site-packages/networkx/utils/__init__.py
new file mode 100644
index 00000000..d6abb178
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/__init__.py
@@ -0,0 +1,8 @@
+from networkx.utils.misc import *
+from networkx.utils.decorators import *
+from networkx.utils.random_sequence import *
+from networkx.utils.union_find import *
+from networkx.utils.rcm import *
+from networkx.utils.heaps import *
+from networkx.utils.configs import *
+from networkx.utils.backends import *
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/backends.py b/.venv/lib/python3.12/site-packages/networkx/utils/backends.py
new file mode 100644
index 00000000..0b41d4c7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/backends.py
@@ -0,0 +1,2501 @@
+"""
+Docs for backend users
+~~~~~~~~~~~~~~~~~~~~~~
+NetworkX utilizes a plugin-dispatch architecture. A valid NetworkX backend
+specifies `entry points
+<https://packaging.python.org/en/latest/specifications/entry-points>`_, named
+``networkx.backends`` and an optional ``networkx.backend_info`` when it is
+installed (not imported). This allows NetworkX to dispatch (redirect) function
+calls to the backend so the execution flows to the designated backend
+implementation. This design enhances flexibility and integration, making
+NetworkX more adaptable and efficient.
+
+NetworkX can dispatch to backends **explicitly** (this requires changing code)
+or **automatically** (this requires setting configuration or environment
+variables). The best way to use a backend depends on the backend, your use
+case, and whether you want to automatically convert to or from backend
+graphs. Automatic conversions of graphs is always opt-in.
+
+To explicitly dispatch to a backend, use the `backend=` keyword argument in a
+dispatchable function. This will convert (and cache by default) input NetworkX
+graphs to backend graphs and call the backend implementation. Another explicit
+way to use a backend is to create a backend graph directly--for example,
+perhaps the backend has its own functions for loading data and creating
+graphs--and pass that graph to a dispatchable function, which will then call
+the backend implementation without converting.
+
+Using automatic dispatch requires setting configuration options. Every NetworkX
+configuration may also be set from an environment variable and are processed at
+the time networkx is imported.  The following configuration variables are
+supported:
+
+* ``nx.config.backend_priority`` (``NETWORKX_BACKEND_PRIORITY`` env var), a
+  list of backends, controls dispatchable functions that don't return graphs
+  such as e.g. ``nx.pagerank``. When one of these functions is called with
+  NetworkX graphs as input, the dispatcher iterates over the backends listed in
+  this backend_priority config and will use the first backend that implements
+  this function. The input NetworkX graphs are converted (and cached by
+  default) to backend graphs. Using this configuration can allow you to use the
+  full flexibility of NetworkX graphs and the performance of backend
+  implementations, but possible downsides are that creating NetworkX graphs,
+  converting to backend graphs, and caching backend graphs may all be
+  expensive.
+
+* ``nx.config.backend_priority.algos`` (``NETWORKX_BACKEND_PRIORITY_ALGOS`` env
+  var), can be used instead of ``nx.config.backend_priority``
+  (``NETWORKX_BACKEND_PRIORITY`` env var) to emphasize that the setting only
+  affects the dispatching of algorithm functions as described above.
+
+* ``nx.config.backend_priority.generators``
+  (``NETWORKX_BACKEND_PRIORITY_GENERATORS`` env var), a list of backends,
+  controls dispatchable functions that return graphs such as
+  nx.from_pandas_edgelist and nx.empty_graph. When one of these functions is
+  called, the first backend listed in this backend_priority config that
+  implements this function will be used and will return a backend graph. When
+  this backend graph is passed to other dispatchable NetworkX functions, it
+  will use the backend implementation if it exists or raise by default unless
+  nx.config.fallback_to_nx is True (default is False). Using this configuration
+  avoids creating NetworkX graphs, which subsequently avoids the need to
+  convert to and cache backend graphs as when using
+  nx.config.backend_priority.algos, but possible downsides are that the backend
+  graph may not behave the same as a NetworkX graph and the backend may not
+  implement all algorithms that you use, which may break your workflow.
+
+* ``nx.config.fallback_to_nx`` (``NETWORKX_FALLBACK_TO_NX`` env var), a boolean
+  (default False), controls what happens when a backend graph is passed to a
+  dispatchable function that is not implemented by that backend. The default
+  behavior when False is to raise. If True, then the backend graph will be
+  converted (and cached by default) to a NetworkX graph and will run with the
+  default NetworkX implementation. Enabling this configuration can allow
+  workflows to complete if the backend does not implement all algorithms used
+  by the workflow, but a possible downside is that it may require converting
+  the input backend graph to a NetworkX graph, which may be expensive. If a
+  backend graph is duck-type compatible as a NetworkX graph, then the backend
+  may choose not to convert to a NetworkX graph and use the incoming graph
+  as-is.
+
+* ``nx.config.cache_converted_graphs`` (``NETWORKX_CACHE_CONVERTED_GRAPHS`` env
+  var), a boolean (default True), controls whether graph conversions are cached
+  to G.__networkx_cache__ or not. Caching can improve performance by avoiding
+  repeated conversions, but it uses more memory.
+
+.. note:: Backends *should* follow the NetworkX backend naming convention. For
+   example, if a backend is named ``parallel`` and specified using
+   ``backend=parallel`` or ``NETWORKX_BACKEND_PRIORITY=parallel``, the package
+   installed is ``nx-parallel``, and we would use ``import nx_parallel`` if we
+   were to import the backend package directly.
+
+Backends are encouraged to document how they recommend to be used and whether
+their graph types are duck-type compatible as NetworkX graphs. If backend
+graphs are NetworkX-compatible and you want your workflow to automatically
+"just work" with a backend--converting and caching if necessary--then use all
+of the above configurations. Automatically converting graphs is opt-in, and
+configuration gives the user control.
+
+Examples:
+---------
+
+Use the ``cugraph`` backend for every algorithm function it supports. This will
+allow for fall back to the default NetworkX implementations for algorithm calls
+not supported by cugraph because graph generator functions are still returning
+NetworkX graphs.
+
+.. code-block:: bash
+
+   bash> NETWORKX_BACKEND_PRIORITY=cugraph python my_networkx_script.py
+
+Explicitly use the ``parallel`` backend for a function call.
+
+.. code-block:: python
+
+    nx.betweenness_centrality(G, k=10, backend="parallel")
+
+Explicitly use the ``parallel`` backend for a function call by passing an
+instance of the backend graph type to the function.
+
+.. code-block:: python
+
+   H = nx_parallel.ParallelGraph(G)
+   nx.betweenness_centrality(H, k=10)
+
+Explicitly use the ``parallel`` backend and pass additional backend-specific
+arguments. Here, ``get_chunks`` is an argument unique to the ``parallel``
+backend.
+
+.. code-block:: python
+
+   nx.betweenness_centrality(G, k=10, backend="parallel", get_chunks=get_chunks)
+
+Automatically dispatch the ``cugraph`` backend for all NetworkX algorithms and
+generators, and allow the backend graph object returned from generators to be
+passed to NetworkX functions the backend does not support.
+
+.. code-block:: bash
+
+   bash> NETWORKX_BACKEND_PRIORITY_ALGOS=cugraph \\
+         NETWORKX_BACKEND_PRIORITY_GENERATORS=cugraph \\
+         NETWORKX_FALLBACK_TO_NX=True \\
+         python my_networkx_script.py
+
+How does this work?
+-------------------
+
+If you've looked at functions in the NetworkX codebase, you might have seen the
+``@nx._dispatchable`` decorator on most of the functions. This decorator allows the NetworkX
+function to dispatch to the corresponding backend function if available. When the decorated
+function is called, it first checks for a backend to run the function, and if no appropriate
+backend is specified or available, it runs the NetworkX version of the function.
+
+Backend Keyword Argument
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+When a decorated function is called with the ``backend`` kwarg provided, it checks
+if the specified backend is installed, and loads it. Next it checks whether to convert
+input graphs by first resolving the backend of each input graph by looking
+for an attribute named ``__networkx_backend__`` that holds the backend name for that
+graph type. If all input graphs backend matches the ``backend`` kwarg, the backend's
+function is called with the original inputs. If any of the input graphs do not match
+the ``backend`` kwarg, they are converted to the backend graph type before calling.
+Exceptions are raised if any step is not possible, e.g. if the backend does not
+implement this function.
+
+Finding a Backend
+^^^^^^^^^^^^^^^^^
+
+When a decorated function is called without a ``backend`` kwarg, it tries to find a
+dispatchable backend function.
+The backend type of each input graph parameter is resolved (using the
+``__networkx_backend__`` attribute) and if they all agree, that backend's function
+is called if possible. Otherwise the backends listed in the config ``backend_priority``
+are considered one at a time in order. If that backend supports the function and
+can convert the input graphs to its backend type, that backend function is called.
+Otherwise the next backend is considered.
+
+During this process, the backends can provide helpful information to the dispatcher
+via helper methods in the backend's interface. Backend methods ``can_run`` and
+``should_run`` are used by the dispatcher to determine whether to use the backend
+function. If the number of nodes is small, it might be faster to run the NetworkX
+version of the function. This is how backends can provide info about whether to run.
+
+Falling Back to NetworkX
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+If none of the backends are appropriate, we "fall back" to the NetworkX function.
+That means we resolve the backends of all input graphs and if all are NetworkX
+graphs we call the NetworkX function. If any are not NetworkX graphs, we raise
+an exception unless the `fallback_to_nx` config is set. If it is, we convert all
+graph types to NetworkX graph types before calling the NetworkX function.
+
+Functions that mutate the graph
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Any function decorated with the option that indicates it mutates the graph goes through
+a slightly different path to automatically find backends. These functions typically
+generate a graph, or add attributes or change the graph structure. The config
+`backend_priority.generators` holds a list of backend names similar to the config
+`backend_priority`. The process is similar for finding a matching backend. Once found,
+the backend function is called and a backend graph is returned (instead of a NetworkX
+graph). You can then use this backend graph in any function supported by the backend.
+And you can use it for functions not supported by the backend if you set the config
+`fallback_to_nx` to allow it to convert the backend graph to a NetworkX graph before
+calling the function.
+
+Optional keyword arguments
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Backends can add optional keyword parameters to NetworkX functions to allow you to
+control aspects of the backend algorithm. Thus the function signatures can be extended
+beyond the NetworkX function signature. For example, the ``parallel`` backend might
+have a parameter to specify how many CPUs to use. These parameters are collected
+by the dispatchable decorator code at the start of the function call and used when
+calling the backend function.
+
+Existing Backends
+^^^^^^^^^^^^^^^^^
+
+NetworkX does not know all the backends that have been created.  In fact, the
+NetworkX library does not need to know that a backend exists for it to work. As
+long as the backend package creates the ``entry_point``, and provides the
+correct interface, it will be called when the user requests it using one of the
+three approaches described above. Some backends have been working with the
+NetworkX developers to ensure smooth operation.
+
+Refer to the :doc:`/backends` section to see a list of available backends known
+to work with the current stable release of NetworkX.
+
+.. _introspect:
+
+Introspection and Logging
+-------------------------
+Introspection techniques aim to demystify dispatching and backend graph conversion behaviors.
+
+The primary way to see what the dispatch machinery is doing is by enabling logging.
+This can help you verify that the backend you specified is being used.
+You can enable NetworkX's backend logger to print to ``sys.stderr`` like this::
+
+    import logging
+    nxl = logging.getLogger("networkx")
+    nxl.addHandler(logging.StreamHandler())
+    nxl.setLevel(logging.DEBUG)
+
+And you can disable it by running this::
+
+    nxl.setLevel(logging.CRITICAL)
+
+Refer to :external+python:mod:`logging` to learn more about the logging facilities in Python.
+
+By looking at the ``.backends`` attribute, you can get the set of all currently
+installed backends that implement a particular function. For example::
+
+    >>> nx.betweenness_centrality.backends  # doctest: +SKIP
+    {'parallel'}
+
+The function docstring will also show which installed backends support it
+along with any backend-specific notes and keyword arguments::
+
+    >>> help(nx.betweenness_centrality)  # doctest: +SKIP
+    ...
+    Backends
+    --------
+    parallel : Parallel backend for NetworkX algorithms
+      The parallel computation is implemented by dividing the nodes into chunks
+      and computing betweenness centrality for each chunk concurrently.
+    ...
+
+The NetworkX documentation website also includes info about trusted backends of NetworkX in function references.
+For example, see :func:`~networkx.algorithms.shortest_paths.weighted.all_pairs_bellman_ford_path_length`.
+
+Introspection capabilities are currently limited, but we are working to improve them.
+We plan to make it easier to answer questions such as:
+
+- What happened (and why)?
+- What *will* happen (and why)?
+- Where was time spent (including conversions)?
+- What is in the cache and how much memory is it using?
+
+Transparency is essential to allow for greater understanding, debug-ability,
+and customization. After all, NetworkX dispatching is extremely flexible and can
+support advanced workflows with multiple backends and fine-tuned configuration,
+but introspection can be helpful by describing *when* and *how* to evolve your workflow
+to meet your needs. If you have suggestions for how to improve introspection, please
+`let us know <https://github.com/networkx/networkx/issues/new>`_!
+
+Docs for backend developers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Creating a custom backend
+-------------------------
+
+1.  Defining a ``BackendInterface`` object:
+
+    Note that the ``BackendInterface`` doesn't need to be a class. It can be an
+    instance of a class, or a module as well. You can define the following methods or
+    functions in your backend's ``BackendInterface`` object.:
+
+    1. ``convert_from_nx`` and ``convert_to_nx`` methods or functions are required for
+       backend dispatching to work. The arguments to ``convert_from_nx`` are:
+
+       - ``G`` : NetworkX Graph
+       - ``edge_attrs`` : dict, optional
+            Dictionary mapping edge attributes to default values if missing in ``G``.
+            If None, then no edge attributes will be converted and default may be 1.
+       - ``node_attrs``: dict, optional
+            Dictionary mapping node attributes to default values if missing in ``G``.
+            If None, then no node attributes will be converted.
+       - ``preserve_edge_attrs`` : bool
+            Whether to preserve all edge attributes.
+       - ``preserve_node_attrs`` : bool
+            Whether to preserve all node attributes.
+       - ``preserve_graph_attrs`` : bool
+            Whether to preserve all graph attributes.
+       - ``preserve_all_attrs`` : bool
+            Whether to preserve all graph, node, and edge attributes.
+       - ``name`` : str
+            The name of the algorithm.
+       - ``graph_name`` : str
+            The name of the graph argument being converted.
+
+    2. ``can_run`` (Optional):
+          If your backend only partially implements an algorithm, you can define
+          a ``can_run(name, args, kwargs)`` function in your ``BackendInterface`` object that
+          returns True or False indicating whether the backend can run the algorithm with
+          the given arguments or not. Instead of a boolean you can also return a string
+          message to inform the user why that algorithm can't be run.
+
+    3. ``should_run`` (Optional):
+          A backend may also define ``should_run(name, args, kwargs)``
+          that is similar to ``can_run``, but answers whether the backend *should* be run.
+          ``should_run`` is only run when performing backend graph conversions. Like
+          ``can_run``, it receives the original arguments so it can decide whether it
+          should be run by inspecting the arguments. ``can_run`` runs before
+          ``should_run``, so ``should_run`` may assume ``can_run`` is True. If not
+          implemented by the backend, ``can_run`` and ``should_run`` are assumed to
+          always return True if the backend implements the algorithm.
+
+    4. ``on_start_tests`` (Optional):
+          A special ``on_start_tests(items)`` function may be defined by the backend.
+          It will be called with the list of NetworkX tests discovered. Each item
+          is a test object that can be marked as xfail if the backend does not support
+          the test using ``item.add_marker(pytest.mark.xfail(reason=...))``.
+
+2.  Adding entry points
+
+    To be discoverable by NetworkX, your package must register an
+    `entry-point <https://packaging.python.org/en/latest/specifications/entry-points>`_
+    ``networkx.backends`` in the package's metadata, with a `key pointing to your
+    dispatch object <https://packaging.python.org/en/latest/guides/creating-and-discovering-plugins/#using-package-metadata>`_ .
+    For example, if you are using ``setuptools`` to manage your backend package,
+    you can `add the following to your pyproject.toml file <https://setuptools.pypa.io/en/latest/userguide/entry_point.html>`_::
+
+        [project.entry-points."networkx.backends"]
+        backend_name = "your_backend_interface_object"
+
+    You can also add the ``backend_info`` entry-point. It points towards the ``get_info``
+    function that returns all the backend information, which is then used to build the
+    "Additional Backend Implementation" box at the end of algorithm's documentation
+    page. Note that the `get_info` function shouldn't import your backend package.::
+
+        [project.entry-points."networkx.backend_info"]
+        backend_name = "your_get_info_function"
+
+    The ``get_info`` should return a dictionary with following key-value pairs:
+        - ``backend_name`` : str or None
+            It is the name passed in the ``backend`` kwarg.
+        - ``project`` : str or None
+            The name of your backend project.
+        - ``package`` : str or None
+            The name of your backend package.
+        - ``url`` : str or None
+            This is the url to either your backend's codebase or documentation, and
+            will be displayed as a hyperlink to the ``backend_name``, in the
+            "Additional backend implementations" section.
+        - ``short_summary`` : str or None
+            One line summary of your backend which will be displayed in the
+            "Additional backend implementations" section.
+        - ``default_config`` : dict
+            A dictionary mapping the backend config parameter names to their default values.
+            This is used to automatically initialize the default configs for all the
+            installed backends at the time of networkx's import.
+
+            .. seealso:: `~networkx.utils.configs.Config`
+
+        - ``functions`` : dict or None
+            A dictionary mapping function names to a dictionary of information
+            about the function. The information can include the following keys:
+
+            - ``url`` : str or None
+              The url to ``function``'s source code or documentation.
+            - ``additional_docs`` : str or None
+              A short description or note about the backend function's
+              implementation.
+            - ``additional_parameters`` : dict or None
+              A dictionary mapping additional parameters headers to their
+              short descriptions. For example::
+
+                  "additional_parameters": {
+                      'param1 : str, function (default = "chunks")' : "...",
+                      'param2 : int' : "...",
+                  }
+
+            If any of these keys are not present, the corresponding information
+            will not be displayed in the "Additional backend implementations"
+            section on NetworkX docs website.
+
+        Note that your backend's docs would appear on the official NetworkX docs only
+        if your backend is a trusted backend of NetworkX, and is present in the
+        `.circleci/config.yml` and `.github/workflows/deploy-docs.yml` files in the
+        NetworkX repository.
+
+3.  Defining a Backend Graph class
+
+    The backend must create an object with an attribute ``__networkx_backend__`` that holds
+    a string with the entry point name::
+
+        class BackendGraph:
+            __networkx_backend__ = "backend_name"
+            ...
+
+    A backend graph instance may have a ``G.__networkx_cache__`` dict to enable
+    caching, and care should be taken to clear the cache when appropriate.
+
+Testing the Custom backend
+--------------------------
+
+To test your custom backend, you can run the NetworkX test suite on your backend.
+This also ensures that the custom backend is compatible with NetworkX's API.
+The following steps will help you run the tests:
+
+1. Setting Backend Environment Variables:
+    - ``NETWORKX_TEST_BACKEND`` : Setting this to your backend's ``backend_name`` will
+      let NetworkX's dispatch machinery to automatically convert a regular NetworkX
+      ``Graph``, ``DiGraph``, ``MultiGraph``, etc. to their backend equivalents, using
+      ``your_backend_interface_object.convert_from_nx(G, ...)`` function.
+    - ``NETWORKX_FALLBACK_TO_NX`` (default=False) : Setting this variable to `True` will
+      instruct tests to use a NetworkX ``Graph`` for algorithms not implemented by your
+      custom backend. Setting this to `False` will only run the tests for algorithms
+      implemented by your custom backend and tests for other algorithms will ``xfail``.
+
+2. Running Tests:
+    You can invoke NetworkX tests for your custom backend with the following commands::
+
+        NETWORKX_TEST_BACKEND=<backend_name>
+        NETWORKX_FALLBACK_TO_NX=True # or False
+        pytest --pyargs networkx
+
+How tests are run
+------------------
+
+1. While dispatching to the backend implementation the ``_convert_and_call`` function
+   is used and while testing the ``_convert_and_call_for_tests`` function is used.
+   Other than testing it also checks for functions that return numpy scalars, and
+   for functions that return graphs it runs the backend implementation and the
+   networkx implementation and then converts the backend graph into a NetworkX graph
+   and then compares them, and returns the networkx graph. This can be regarded as
+   (pragmatic) technical debt. We may replace these checks in the future.
+
+2. Conversions while running tests:
+    - Convert NetworkX graphs using ``<your_backend_interface_object>.convert_from_nx(G, ...)`` into
+      the backend graph.
+    - Pass the backend graph objects to the backend implementation of the algorithm.
+    - Convert the result back to a form expected by NetworkX tests using
+      ``<your_backend_interface_object>.convert_to_nx(result, ...)``.
+    - For nx_loopback, the graph is copied using the dispatchable metadata
+
+3. Dispatchable algorithms that are not implemented by the backend
+   will cause a ``pytest.xfail``, when the ``NETWORKX_FALLBACK_TO_NX``
+   environment variable is set to ``False``, giving some indication that
+   not all tests are running, while avoiding causing an explicit failure.
+"""
+
+import inspect
+import itertools
+import logging
+import os
+import warnings
+from functools import partial
+from importlib.metadata import entry_points
+
+import networkx as nx
+
+from .configs import BackendPriorities, Config, NetworkXConfig
+from .decorators import argmap
+
+__all__ = ["_dispatchable"]
+
+_logger = logging.getLogger(__name__)
+
+
+def _do_nothing():
+    """This does nothing at all, yet it helps turn `_dispatchable` into functions."""
+
+
+def _get_backends(group, *, load_and_call=False):
+    """
+    Retrieve NetworkX ``backends`` and ``backend_info`` from the entry points.
+
+    Parameters
+    -----------
+    group : str
+        The entry_point to be retrieved.
+    load_and_call : bool, optional
+        If True, load and call the backend. Defaults to False.
+
+    Returns
+    --------
+    dict
+        A dictionary mapping backend names to their respective backend objects.
+
+    Notes
+    ------
+    If a backend is defined more than once, a warning is issued.
+    The `nx_loopback` backend is removed if it exists, as it is only available during testing.
+    A warning is displayed if an error occurs while loading a backend.
+    """
+    items = entry_points(group=group)
+    rv = {}
+    for ep in items:
+        if ep.name in rv:
+            warnings.warn(
+                f"networkx backend defined more than once: {ep.name}",
+                RuntimeWarning,
+                stacklevel=2,
+            )
+        elif load_and_call:
+            try:
+                rv[ep.name] = ep.load()()
+            except Exception as exc:
+                warnings.warn(
+                    f"Error encountered when loading info for backend {ep.name}: {exc}",
+                    RuntimeWarning,
+                    stacklevel=2,
+                )
+        else:
+            rv[ep.name] = ep
+    rv.pop("nx_loopback", None)
+    return rv
+
+
# Note: "networkx" will be in `backend_info`, but not `backends` or `config.backends`.
# It is valid to use "networkx" as backend argument and in `config.backend_priority`.
# We may make "networkx" a "proper" backend and have it in `backends` and `config.backends`.
backends = _get_backends("networkx.backends")  # backend name -> (unloaded) entry point
backend_info = {}  # fill backend_info after networkx is imported in __init__.py

# Load and cache backends on-demand
_loaded_backends = {}  # type: ignore[var-annotated]  # backend name -> loaded interface
_registered_algorithms = {}  # presumably function name -> dispatchable wrapper; filled elsewhere
+
+
+# Get default configuration from environment variables at import time
+def _comma_sep_to_list(string):
+    return [stripped for x in string.strip().split(",") if (stripped := x.strip())]
+
+
def _set_configs_from_environment():
    """Initialize ``config.backend_priority``, load backend_info and config.

    This gets default values from environment variables (see ``nx.config`` for details).
    This function is run at the very end of importing networkx. It is run at this time
    to avoid loading backend_info before the rest of networkx is imported in case a
    backend uses networkx for its backend_info (e.g. subclassing the Config class.)

    Returns
    -------
    NetworkXConfig
        The fully initialized configuration object for the caller to install.
    """
    # backend_info is defined above as empty dict. Fill it after import finishes.
    backend_info.update(_get_backends("networkx.backend_info", load_and_call=True))
    # Backends registered under "networkx.backends" but without a
    # "networkx.backend_info" entry point still get an (empty) info dict.
    backend_info.update(
        (backend, {}) for backend in backends.keys() - backend_info.keys()
    )

    # set up config based on backend_info and environment
    config = NetworkXConfig(
        backend_priority=BackendPriorities(
            algos=[],
            generators=[],
        ),
        # One Config per installed backend, seeded from its "default_config"
        # (already-Config values are used as-is).
        backends=Config(
            **{
                backend: (
                    cfg
                    if isinstance(cfg := info["default_config"], Config)
                    else Config(**cfg)
                )
                if "default_config" in info
                else Config()
                for backend, info in backend_info.items()
            }
        ),
        # NOTE(review): `bool()` of an env-var *string* is True for any non-empty
        # value, so e.g. NETWORKX_CACHE_CONVERTED_GRAPHS=False still enables
        # caching; only unset/empty is falsy. Same for NETWORKX_FALLBACK_TO_NX.
        # Confirm this is the intended parsing.
        cache_converted_graphs=bool(
            os.environ.get("NETWORKX_CACHE_CONVERTED_GRAPHS", True)
        ),
        fallback_to_nx=bool(os.environ.get("NETWORKX_FALLBACK_TO_NX", False)),
        warnings_to_ignore={
            x.strip()
            for x in os.environ.get("NETWORKX_WARNINGS_TO_IGNORE", "").split(",")
            if x.strip()
        },
    )
    backend_info["networkx"] = {}
    type(config.backends).__doc__ = "All installed NetworkX backends and their configs."

    # NETWORKX_BACKEND_PRIORITY is the same as NETWORKX_BACKEND_PRIORITY_ALGOS
    # key[26:] strips the "NETWORKX_BACKEND_PRIORITY_" prefix (26 chars).
    priorities = {
        key[26:].lower(): val
        for key, val in os.environ.items()
        if key.startswith("NETWORKX_BACKEND_PRIORITY_")
    }
    backend_priority = config.backend_priority
    # NETWORKX_AUTOMATIC_BACKENDS is honored as a fallback spelling of
    # NETWORKX_BACKEND_PRIORITY -- presumably for backward compatibility.
    backend_priority.algos = (
        _comma_sep_to_list(priorities.pop("algos"))
        if "algos" in priorities
        else _comma_sep_to_list(
            os.environ.get(
                "NETWORKX_BACKEND_PRIORITY",
                os.environ.get("NETWORKX_AUTOMATIC_BACKENDS", ""),
            )
        )
    )
    backend_priority.generators = _comma_sep_to_list(priorities.pop("generators", ""))
    # Any remaining NETWORKX_BACKEND_PRIORITY_<NAME> vars become per-name entries.
    for key in sorted(priorities):
        backend_priority[key] = _comma_sep_to_list(priorities[key])

    return config
+
+
+def _always_run(name, args, kwargs):
+    return True
+
+
+def _load_backend(backend_name):
+    if backend_name in _loaded_backends:
+        return _loaded_backends[backend_name]
+    if backend_name not in backends:
+        raise ImportError(f"'{backend_name}' backend is not installed")
+    rv = _loaded_backends[backend_name] = backends[backend_name].load()
+    if not hasattr(rv, "can_run"):
+        rv.can_run = _always_run
+    if not hasattr(rv, "should_run"):
+        rv.should_run = _always_run
+    return rv
+
+
+class _dispatchable:
+    _is_testing = False
+
+    class _fallback_to_nx:
+        """Class property that returns ``nx.config.fallback_to_nx``."""
+
+        # Descriptor: any read of `_dispatchable._fallback_to_nx` (on the class
+        # or an instance) emits a DeprecationWarning and forwards to the live
+        # config value, so the old attribute keeps working during deprecation.
+        def __get__(self, instance, owner=None):
+            warnings.warn(
+                "`_dispatchable._fallback_to_nx` is deprecated and will be removed "
+                "in NetworkX v3.5. Use `nx.config.fallback_to_nx` instead.",
+                category=DeprecationWarning,
+                stacklevel=2,
+            )
+            return nx.config.fallback_to_nx
+
+    # Note that chaining `@classmethod` and `@property` was removed in Python 3.13
+    # Instantiating the descriptor class replaces the class with a class-level
+    # attribute of the same name, giving property-like access on the class itself.
+    _fallback_to_nx = _fallback_to_nx()  # type: ignore[assignment,misc]
+
+    def __new__(
+        cls,
+        func=None,
+        *,
+        name=None,
+        graphs="G",
+        edge_attrs=None,
+        node_attrs=None,
+        preserve_edge_attrs=False,
+        preserve_node_attrs=False,
+        preserve_graph_attrs=False,
+        preserve_all_attrs=False,
+        mutates_input=False,
+        returns_graph=False,
+    ):
+        """A decorator function that is used to redirect the execution of ``func``
+        function to its backend implementation.
+
+        This decorator function dispatches to
+        a different backend implementation based on the input graph types, and it also
+        manages all the ``backend_kwargs``. Usage can be any of the following decorator
+        forms:
+
+        - ``@_dispatchable``
+        - ``@_dispatchable()``
+        - ``@_dispatchable(name="override_name")``
+        - ``@_dispatchable(graphs="graph_var_name")``
+        - ``@_dispatchable(edge_attrs="weight")``
+        - ``@_dispatchable(graphs={"G": 0, "H": 1}, edge_attrs={"weight": "default"})``
+            with 0 and 1 giving the position in the signature function for graph
+            objects. When ``edge_attrs`` is a dict, keys are keyword names and values
+            are defaults.
+
+        Parameters
+        ----------
+        func : callable, optional
+            The function to be decorated. If ``func`` is not provided, returns a
+            partial object that can be used to decorate a function later. If ``func``
+            is provided, returns a new callable object that dispatches to a backend
+            algorithm based on input graph types.
+
+        name : str, optional
+            The name of the algorithm to use for dispatching. If not provided,
+            the name of ``func`` will be used. ``name`` is useful to avoid name
+            conflicts, as all dispatched algorithms live in a single namespace.
+            For example, ``tournament.is_strongly_connected`` had a name conflict
+            with the standard ``nx.is_strongly_connected``, so we used
+            ``@_dispatchable(name="tournament_is_strongly_connected")``.
+
+        graphs : str or dict or None, default "G"
+            If a string, the parameter name of the graph, which must be the first
+            argument of the wrapped function. If more than one graph is required
+            for the algorithm (or if the graph is not the first argument), provide
+            a dict keyed to argument names with argument position as values for each
+            graph argument. For example, ``@_dispatchable(graphs={"G": 0, "auxiliary?": 4})``
+            indicates the 0th parameter ``G`` of the function is a required graph,
+            and the 4th parameter ``auxiliary?`` is an optional graph.
+            To indicate that an argument is a list of graphs, do ``"[graphs]"``.
+            Use ``graphs=None``, if *no* arguments are NetworkX graphs such as for
+            graph generators, readers, and conversion functions.
+
+        edge_attrs : str or dict, optional
+            ``edge_attrs`` holds information about edge attribute arguments
+            and default values for those edge attributes.
+            If a string, ``edge_attrs`` holds the function argument name that
+            indicates a single edge attribute to include in the converted graph.
+            The default value for this attribute is 1. To indicate that an argument
+            is a list of attributes (all with default value 1), use e.g. ``"[attrs]"``.
+            If a dict, ``edge_attrs`` holds a dict keyed by argument names, with
+            values that are either the default value or, if a string, the argument
+            name that indicates the default value.
+
+        node_attrs : str or dict, optional
+            Like ``edge_attrs``, but for node attributes.
+
+        preserve_edge_attrs : bool or str or dict, optional
+            For bool, whether to preserve all edge attributes.
+            For str, the parameter name that may indicate (with ``True`` or a
+            callable argument) whether all edge attributes should be preserved
+            when converting.
+            For dict of ``{graph_name: {attr: default}}``, indicate pre-determined
+            edge attributes (and defaults) to preserve for input graphs.
+
+        preserve_node_attrs : bool or str or dict, optional
+            Like ``preserve_edge_attrs``, but for node attributes.
+
+        preserve_graph_attrs : bool or set
+            For bool, whether to preserve all graph attributes.
+            For set, which input graph arguments to preserve graph attributes.
+
+        preserve_all_attrs : bool
+            Whether to preserve all edge, node and graph attributes.
+            This overrides all the other preserve_*_attrs.
+
+        mutates_input : bool or dict, default False
+            For bool, whether the function mutates an input graph argument.
+            For dict of ``{arg_name: arg_pos}``, arguments that indicate whether an
+            input graph will be mutated, and ``arg_name`` may begin with ``"not "``
+            to negate the logic (for example, this is used by ``copy=`` arguments).
+            By default, dispatching doesn't convert input graphs to a different
+            backend for functions that mutate input graphs.
+
+        returns_graph : bool, default False
+            Whether the function can return or yield a graph object. By default,
+            dispatching doesn't convert input graphs to a different backend for
+            functions that return graphs.
+        """
+        # Called as `@_dispatchable(...)` with keyword options only: return a
+        # decorator that will receive the function on the next call.
+        if func is None:
+            return partial(
+                _dispatchable,
+                name=name,
+                graphs=graphs,
+                edge_attrs=edge_attrs,
+                node_attrs=node_attrs,
+                preserve_edge_attrs=preserve_edge_attrs,
+                preserve_node_attrs=preserve_node_attrs,
+                preserve_graph_attrs=preserve_graph_attrs,
+                preserve_all_attrs=preserve_all_attrs,
+                mutates_input=mutates_input,
+                returns_graph=returns_graph,
+            )
+        # Guard against the common mistake `@_dispatchable("some_name")`.
+        if isinstance(func, str):
+            raise TypeError("'name' and 'graphs' must be passed by keyword") from None
+        # If name not provided, use the name of the function
+        if name is None:
+            name = func.__name__
+
+        # `object.__new__` (not `cls(...)`) so this __new__ is not re-entered.
+        self = object.__new__(cls)
+
+        # standard function-wrapping stuff
+        # __annotations__ not used
+        self.__name__ = func.__name__
+        # self.__doc__ = func.__doc__  # __doc__ handled as cached property
+        self.__defaults__ = func.__defaults__
+        # We "magically" add `backend=` keyword argument to allow backend to be specified
+        if func.__kwdefaults__:
+            self.__kwdefaults__ = {**func.__kwdefaults__, "backend": None}
+        else:
+            self.__kwdefaults__ = {"backend": None}
+        self.__module__ = func.__module__
+        self.__qualname__ = func.__qualname__
+        self.__dict__.update(func.__dict__)
+        self.__wrapped__ = func
+
+        # Supplement docstring with backend info; compute and cache when needed
+        self._orig_doc = func.__doc__
+        self._cached_doc = None
+
+        self.orig_func = func
+        self.name = name
+        self.edge_attrs = edge_attrs
+        self.node_attrs = node_attrs
+        self.preserve_edge_attrs = preserve_edge_attrs or preserve_all_attrs
+        self.preserve_node_attrs = preserve_node_attrs or preserve_all_attrs
+        self.preserve_graph_attrs = preserve_graph_attrs or preserve_all_attrs
+        self.mutates_input = mutates_input
+        # Keep `returns_graph` private for now, b/c we may extend info on return types
+        self._returns_graph = returns_graph
+
+        # Validate option types eagerly so misuse fails at decoration time,
+        # not at the first dispatched call.
+        if edge_attrs is not None and not isinstance(edge_attrs, str | dict):
+            raise TypeError(
+                f"Bad type for edge_attrs: {type(edge_attrs)}. Expected str or dict."
+            ) from None
+        if node_attrs is not None and not isinstance(node_attrs, str | dict):
+            raise TypeError(
+                f"Bad type for node_attrs: {type(node_attrs)}. Expected str or dict."
+            ) from None
+        if not isinstance(self.preserve_edge_attrs, bool | str | dict):
+            raise TypeError(
+                f"Bad type for preserve_edge_attrs: {type(self.preserve_edge_attrs)}."
+                " Expected bool, str, or dict."
+            ) from None
+        if not isinstance(self.preserve_node_attrs, bool | str | dict):
+            raise TypeError(
+                f"Bad type for preserve_node_attrs: {type(self.preserve_node_attrs)}."
+                " Expected bool, str, or dict."
+            ) from None
+        if not isinstance(self.preserve_graph_attrs, bool | set):
+            raise TypeError(
+                f"Bad type for preserve_graph_attrs: {type(self.preserve_graph_attrs)}."
+                " Expected bool or set."
+            ) from None
+        if not isinstance(self.mutates_input, bool | dict):
+            raise TypeError(
+                f"Bad type for mutates_input: {type(self.mutates_input)}."
+                " Expected bool or dict."
+            ) from None
+        if not isinstance(self._returns_graph, bool):
+            raise TypeError(
+                f"Bad type for returns_graph: {type(self._returns_graph)}."
+                " Expected bool."
+            ) from None
+
+        # Normalize `graphs` to a dict of {arg_name: position} (or leave None).
+        if isinstance(graphs, str):
+            graphs = {graphs: 0}
+        elif graphs is None:
+            pass
+        elif not isinstance(graphs, dict):
+            raise TypeError(
+                f"Bad type for graphs: {type(graphs)}. Expected str or dict."
+            ) from None
+        elif len(graphs) == 0:
+            raise KeyError("'graphs' must contain at least one variable name") from None
+
+        # This dict comprehension is complicated for better performance; equivalent shown below.
+        self.optional_graphs = set()
+        self.list_graphs = set()
+        if graphs is None:
+            self.graphs = {}
+        else:
+            self.graphs = {
+                self.optional_graphs.add(val := k[:-1]) or val
+                if (last := k[-1]) == "?"
+                else self.list_graphs.add(val := k[1:-1]) or val
+                if last == "]"
+                else k: v
+                for k, v in graphs.items()
+            }
+        # The above is equivalent to:
+        # self.optional_graphs = {k[:-1] for k in graphs if k[-1] == "?"}
+        # self.list_graphs = {k[1:-1] for k in graphs if k[-1] == "]"}
+        # self.graphs = {k[:-1] if k[-1] == "?" else k: v for k, v in graphs.items()}
+
+        # Compute and cache the signature on-demand
+        self._sig = None
+
+        # Which backends implement this function?
+        self.backends = {
+            backend
+            for backend, info in backend_info.items()
+            if "functions" in info and name in info["functions"]
+        }
+
+        # Each dispatch name may only be registered once process-wide.
+        if name in _registered_algorithms:
+            raise KeyError(
+                f"Algorithm already exists in dispatch registry: {name}"
+            ) from None
+        # Use the magic of `argmap` to turn `self` into a function. This does result
+        # in small additional overhead compared to calling `_dispatchable` directly,
+        # but `argmap` has the magical property that it can stack with other `argmap`
+        # decorators "for free". Being a function is better for REPRs and type-checkers.
+        self = argmap(_do_nothing)(self)
+        _registered_algorithms[name] = self
+        return self
+
+    @property
+    def __doc__(self):
+        """If the cached documentation exists, it is returned.
+        Otherwise, the documentation is generated using _make_doc() method,
+        cached, and then returned."""
+
+        if (rv := self._cached_doc) is not None:
+            return rv
+        rv = self._cached_doc = self._make_doc()
+        return rv
+
+    @__doc__.setter
+    def __doc__(self, val):
+        """Sets the original documentation to the given value and resets the
+        cached documentation."""
+
+        self._orig_doc = val
+        self._cached_doc = None
+
+    @property
+    def __signature__(self):
+        """Return the signature of the original function, with the addition of
+        the `backend` and `backend_kwargs` parameters."""
+
+        if self._sig is None:
+            sig = inspect.signature(self.orig_func)
+            # `backend` is now a reserved argument used by dispatching.
+            # assert "backend" not in sig.parameters
+            if not any(
+                p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()
+            ):
+                sig = sig.replace(
+                    parameters=[
+                        *sig.parameters.values(),
+                        inspect.Parameter(
+                            "backend", inspect.Parameter.KEYWORD_ONLY, default=None
+                        ),
+                        inspect.Parameter(
+                            "backend_kwargs", inspect.Parameter.VAR_KEYWORD
+                        ),
+                    ]
+                )
+            else:
+                *parameters, var_keyword = sig.parameters.values()
+                sig = sig.replace(
+                    parameters=[
+                        *parameters,
+                        inspect.Parameter(
+                            "backend", inspect.Parameter.KEYWORD_ONLY, default=None
+                        ),
+                        var_keyword,
+                    ]
+                )
+            self._sig = sig
+        return self._sig
+
+    def __call__(self, /, *args, backend=None, **kwargs):
+        """Returns the result of the original function, or the backend function if
+        the backend is specified and that backend implements `func`."""
+
+        if not backends:
+            # Fast path if no backends are installed
+            if backend is not None and backend != "networkx":
+                raise ImportError(f"'{backend}' backend is not installed")
+            return self.orig_func(*args, **kwargs)
+
+        # Use `backend_name` in this function instead of `backend`.
+        # This is purely for aesthetics and to make it easier to search for this
+        # variable since "backend" is used in many comments and log/error messages.
+        backend_name = backend
+        if backend_name is not None and backend_name not in backend_info:
+            raise ImportError(f"'{backend_name}' backend is not installed")
+
+        graphs_resolved = {}
+        for gname, pos in self.graphs.items():
+            if pos < len(args):
+                if gname in kwargs:
+                    raise TypeError(f"{self.name}() got multiple values for {gname!r}")
+                graph = args[pos]
+            elif gname in kwargs:
+                graph = kwargs[gname]
+            elif gname not in self.optional_graphs:
+                raise TypeError(
+                    f"{self.name}() missing required graph argument: {gname}"
+                )
+            else:
+                continue
+            if graph is None:
+                if gname not in self.optional_graphs:
+                    raise TypeError(
+                        f"{self.name}() required graph argument {gname!r} is None; must be a graph"
+                    )
+            else:
+                graphs_resolved[gname] = graph
+
+        # Alternative to the above that does not check duplicated args or missing required graphs.
+        # graphs_resolved = {
+        #     gname: graph
+        #     for gname, pos in self.graphs.items()
+        #     if (graph := args[pos] if pos < len(args) else kwargs.get(gname)) is not None
+        # }
+
+        # Check if any graph comes from a backend
+        if self.list_graphs:
+            # Make sure we don't lose values by consuming an iterator
+            args = list(args)
+            for gname in self.list_graphs & graphs_resolved.keys():
+                list_of_graphs = list(graphs_resolved[gname])
+                graphs_resolved[gname] = list_of_graphs
+                if gname in kwargs:
+                    kwargs[gname] = list_of_graphs
+                else:
+                    args[self.graphs[gname]] = list_of_graphs
+
+            graph_backend_names = {
+                getattr(g, "__networkx_backend__", None)
+                for gname, g in graphs_resolved.items()
+                if gname not in self.list_graphs
+            }
+            for gname in self.list_graphs & graphs_resolved.keys():
+                graph_backend_names.update(
+                    getattr(g, "__networkx_backend__", None)
+                    for g in graphs_resolved[gname]
+                )
+        else:
+            graph_backend_names = {
+                getattr(g, "__networkx_backend__", None)
+                for g in graphs_resolved.values()
+            }
+
+        backend_priority = nx.config.backend_priority.get(
+            self.name,
+            nx.config.backend_priority.generators
+            if self._returns_graph
+            else nx.config.backend_priority.algos,
+        )
+        if self._is_testing and backend_priority and backend_name is None:
+            # Special path if we are running networkx tests with a backend.
+            # This even runs for (and handles) functions that mutate input graphs.
+            return self._convert_and_call_for_tests(
+                backend_priority[0],
+                args,
+                kwargs,
+                fallback_to_nx=nx.config.fallback_to_nx,
+            )
+
+        graph_backend_names.discard(None)
+        if backend_name is not None:
+            # Must run with the given backend.
+            # `can_run` only used for better log and error messages.
+            # Check `mutates_input` for logging, not behavior.
+            blurb = (
+                "No other backends will be attempted, because the backend was "
+                f"specified with the `backend='{backend_name}'` keyword argument."
+            )
+            extra_message = (
+                f"'{backend_name}' backend raised NotImplementedError when calling "
+                f"`{self.name}'. {blurb}"
+            )
+            if not graph_backend_names or graph_backend_names == {backend_name}:
+                # All graphs are backend graphs--no need to convert!
+                if self._can_backend_run(backend_name, args, kwargs):
+                    return self._call_with_backend(
+                        backend_name, args, kwargs, extra_message=extra_message
+                    )
+                if self._does_backend_have(backend_name):
+                    extra = " for the given arguments"
+                else:
+                    extra = ""
+                raise NotImplementedError(
+                    f"`{self.name}' is not implemented by '{backend_name}' backend"
+                    f"{extra}. {blurb}"
+                )
+            if self._can_convert(backend_name, graph_backend_names):
+                if self._can_backend_run(backend_name, args, kwargs):
+                    if self._will_call_mutate_input(args, kwargs):
+                        _logger.debug(
+                            "`%s' will mutate an input graph. This prevents automatic conversion "
+                            "to, and use of, backends listed in `nx.config.backend_priority`. "
+                            "Using backend specified by the "
+                            "`backend='%s'` keyword argument. This may change behavior by not "
+                            "mutating inputs.",
+                            self.name,
+                            backend_name,
+                        )
+                        mutations = []
+                    else:
+                        mutations = None
+                    rv = self._convert_and_call(
+                        backend_name,
+                        graph_backend_names,
+                        args,
+                        kwargs,
+                        extra_message=extra_message,
+                        mutations=mutations,
+                    )
+                    if mutations:
+                        for cache, key in mutations:
+                            # If the call mutates inputs, then remove all inputs gotten
+                            # from cache. We do this after all conversions (and call) so
+                            # that a graph can be gotten from a cache multiple times.
+                            cache.pop(key, None)
+                    return rv
+                if self._does_backend_have(backend_name):
+                    extra = " for the given arguments"
+                else:
+                    extra = ""
+                raise NotImplementedError(
+                    f"`{self.name}' is not implemented by '{backend_name}' backend"
+                    f"{extra}. {blurb}"
+                )
+            if len(graph_backend_names) == 1:
+                maybe_s = ""
+                graph_backend_names = f"'{next(iter(graph_backend_names))}'"
+            else:
+                maybe_s = "s"
+            raise TypeError(
+                f"`{self.name}' is unable to convert graph from backend{maybe_s} "
+                f"{graph_backend_names} to '{backend_name}' backend, which was "
+                f"specified with the `backend='{backend_name}'` keyword argument. "
+                f"{blurb}"
+            )
+
+        if self._will_call_mutate_input(args, kwargs):
+            # The current behavior for functions that mutate input graphs:
+            #
+            # 1. If backend is specified by `backend=` keyword, use it (done above).
+            # 2. If inputs are from one backend, try to use it.
+            # 3. If all input graphs are instances of `nx.Graph`, then run with the
+            #    default "networkx" implementation.
+            #
+            # Do not automatically convert if a call will mutate inputs, because doing
+            # so would change behavior. Hence, we should fail if there are multiple input
+            # backends or if the input backend does not implement the function. However,
+            # we offer a way for backends to circumvent this if they do not implement
+            # this function: we will fall back to the default "networkx" implementation
+            # without using conversions if all input graphs are subclasses of `nx.Graph`.
+            blurb = (
+                "conversions between backends (if configured) will not be attempted, "
+                "because this may change behavior. You may specify a backend to use "
+                "by passing e.g. `backend='networkx'` keyword, but this may also "
+                "change behavior by not mutating inputs."
+            )
+            fallback_blurb = (
+                "This call will mutate inputs, so fall back to 'networkx' "
+                "backend (without converting) since all input graphs are "
+                "instances of nx.Graph and are hopefully compatible.",
+            )
+            if len(graph_backend_names) == 1:
+                [backend_name] = graph_backend_names
+                msg_template = (
+                    f"Backend '{backend_name}' does not implement `{self.name}'%s. "
+                    f"This call will mutate an input, so automatic {blurb}"
+                )
+                # `can_run` is only used for better log and error messages
+                try:
+                    if self._can_backend_run(backend_name, args, kwargs):
+                        return self._call_with_backend(
+                            backend_name,
+                            args,
+                            kwargs,
+                            extra_message=msg_template % " with these arguments",
+                        )
+                except NotImplementedError as exc:
+                    if all(isinstance(g, nx.Graph) for g in graphs_resolved.values()):
+                        _logger.debug(
+                            "Backend '%s' raised when calling `%s': %s. %s",
+                            backend_name,
+                            self.name,
+                            exc,
+                            fallback_blurb,
+                        )
+                    else:
+                        raise
+                else:
+                    if nx.config.fallback_to_nx and all(
+                        # Consider dropping the `isinstance` check here to allow
+                        # duck-type graphs, but let's wait for a backend to ask us.
+                        isinstance(g, nx.Graph)
+                        for g in graphs_resolved.values()
+                    ):
+                        # Log that we are falling back to networkx
+                        _logger.debug(
+                            "Backend '%s' can't run `%s'. %s",
+                            backend_name,
+                            self.name,
+                            fallback_blurb,
+                        )
+                    else:
+                        if self._does_backend_have(backend_name):
+                            extra = " with these arguments"
+                        else:
+                            extra = ""
+                        raise NotImplementedError(msg_template % extra)
+            elif nx.config.fallback_to_nx and all(
+                # Consider dropping the `isinstance` check here to allow
+                # duck-type graphs, but let's wait for a backend to ask us.
+                isinstance(g, nx.Graph)
+                for g in graphs_resolved.values()
+            ):
+                # Log that we are falling back to networkx
+                _logger.debug(
+                    "`%s' was called with inputs from multiple backends: %s. %s",
+                    self.name,
+                    graph_backend_names,
+                    fallback_blurb,
+                )
+            else:
+                raise RuntimeError(
+                    f"`{self.name}' will mutate an input, but it was called with inputs "
+                    f"from multiple backends: {graph_backend_names}. Automatic {blurb}"
+                )
+            # At this point, no backends are available to handle the call with
+            # the input graph types, but if the input graphs are compatible
+            # nx.Graph instances, fall back to networkx without converting.
+            return self.orig_func(*args, **kwargs)
+
+        # We may generalize fallback configuration as e.g. `nx.config.backend_fallback`
+        if nx.config.fallback_to_nx or not graph_backend_names:
+            # Use "networkx" by default if there are no inputs from backends.
+            # For example, graph generators should probably return NetworkX graphs
+            # instead of raising NotImplementedError.
+            backend_fallback = ["networkx"]
+        else:
+            backend_fallback = []
+
+        # ##########################
+        # # How this behaves today #
+        # ##########################
+        #
+        # The prose below describes the implementation and a *possible* way to
+        # generalize "networkx" as "just another backend". The code is structured
+        # to perhaps someday support backend-to-backend conversions (including
+        # simply passing objects from one backend directly to another backend;
+        # the dispatch machinery does not necessarily need to perform conversions),
+        # but since backend-to-backend matching is not yet supported, the following
+        # code is merely a convenient way to implement dispatch behaviors that have
+        # been carefully developed since NetworkX 3.0 and to include falling back
+        # to the default NetworkX implementation.
+        #
+        # The current behavior for functions that don't mutate input graphs:
+        #
+        # 1. If backend is specified by `backend=` keyword, use it (done above).
+        # 2. If input is from a backend other than "networkx", try to use it.
+        #    - Note: if present, "networkx" graphs will be converted to the backend.
+        # 3. If input is from "networkx" (or no backend), try to use backends from
+        #    `backend_priority` before running with the default "networkx" implementation.
+        # 4. If configured, "fall back" and run with the default "networkx" implementation.
+        #
+        # ################################################
+        # # How this is implemented and may work someday #
+        # ################################################
+        #
+        # Let's determine the order of backends we should try according
+        # to `backend_priority`, `backend_fallback`, and input backends.
+        # There are two† dimensions of priorities to consider:
+        #   backend_priority > unspecified > backend_fallback
+        # and
+        #   backend of an input > not a backend of an input
+        # These are combined to form five groups of priorities as such:
+        #
+        #                    input   ~input
+        #                  +-------+-------+
+        # backend_priority |   1   |   2   |
+        #      unspecified |   3   |  N/A  | (if only 1)
+        # backend_fallback |   4   |   5   |
+        #                  +-------+-------+
+        #
+        # This matches the behaviors we developed in versions 3.0 to 3.2, it
+        # ought to cover virtually all use cases we expect, and I (@eriknw) don't
+        # think it can be done any simpler (although it can be generalized further
+        # and made to be more complicated to capture 100% of *possible* use cases).
+        # Some observations:
+        #
+        #   1. If an input is in `backend_priority`, it will be used before trying a
+        #      backend that is higher priority in `backend_priority` and not an input.
+        #   2. To prioritize converting from one backend to another even if both implement
+        #      a function, list one in `backend_priority` and one in `backend_fallback`.
+        #   3. To disable conversions, set `backend_priority` and `backend_fallback` to [].
+        #
+        # †: There is actually a third dimension of priorities:
+        #        should_run == True > should_run == False
+        #    Backends with `can_run == True` and `should_run == False` are tried last.
+        #
+        seen = set()
+        group1 = []  # In backend_priority, and an input
+        group2 = []  # In backend_priority, but not an input
+        for name in backend_priority:
+            if name in seen:
+                continue
+            seen.add(name)
+            if name in graph_backend_names:
+                group1.append(name)
+            else:
+                group2.append(name)
+        group4 = []  # In backend_fallback, and an input
+        group5 = []  # In backend_fallback, but not an input
+        for name in backend_fallback:
+            if name in seen:
+                continue
+            seen.add(name)
+            if name in graph_backend_names:
+                group4.append(name)
+            else:
+                group5.append(name)
+        # An input, but not in backend_priority or backend_fallback.
+        group3 = graph_backend_names - seen
+        if len(group3) > 1:
+            # `group3` backends are not configured for automatic conversion or fallback.
+            # There are at least two issues if this group contains multiple backends:
+            #
+            #   1. How should we prioritize them? We have no good way to break ties.
+            #      Although we could arbitrarily choose alphabetical or left-most,
+            #      let's follow the Zen of Python and refuse the temptation to guess.
+            #   2. We probably shouldn't automatically convert to these backends,
+            #      because we are not configured to do so.
+            #
+            # (2) is important to allow disabling all conversions by setting both
+            # `nx.config.backend_priority` and `nx.config.backend_fallback` to [].
+            #
+            # If there is a single backend in `group3`, then giving it priority over
+            # the fallback backends is what is generally expected. For example, this
+            # allows input graphs of `backend_fallback` backends (such as "networkx")
+            # to be converted to, and run with, the unspecified backend.
+            _logger.debug(
+                "Call to `%s' has inputs from multiple backends, %s, that "
+                "have no priority set in `nx.config.backend_priority`, "
+                "so automatic conversions to "
+                "these backends will not be attempted.",
+                self.name,
+                group3,
+            )
+            group3 = ()
+
+        try_order = list(itertools.chain(group1, group2, group3, group4, group5))
+        if len(try_order) > 1:
+            # Should we consider adding an option for more verbose logging?
+            # For example, we could explain the order of `try_order` in detail.
+            _logger.debug(
+                "Call to `%s' has inputs from %s backends, and will try to use "
+                "backends in the following order: %s",
+                self.name,
+                graph_backend_names or "no",
+                try_order,
+            )
+        backends_to_try_again = []
+        for is_not_first, backend_name in enumerate(try_order):
+            if is_not_first:
+                _logger.debug("Trying next backend: '%s'", backend_name)
+            try:
+                if not graph_backend_names or graph_backend_names == {backend_name}:
+                    if self._can_backend_run(backend_name, args, kwargs):
+                        return self._call_with_backend(backend_name, args, kwargs)
+                elif self._can_convert(
+                    backend_name, graph_backend_names
+                ) and self._can_backend_run(backend_name, args, kwargs):
+                    if self._should_backend_run(backend_name, args, kwargs):
+                        rv = self._convert_and_call(
+                            backend_name, graph_backend_names, args, kwargs
+                        )
+                        if (
+                            self._returns_graph
+                            and graph_backend_names
+                            and backend_name not in graph_backend_names
+                        ):
+                            # If the function has graph inputs and graph output, we try
+                            # to make it so the backend of the return type will match the
+                            # backend of the input types. In case this is not possible,
+                            # let's tell the user that the backend of the return graph
+                            # has changed. Perhaps we could try to convert back, but
+                            # "fallback" backends for graph generators should typically
+                            # be compatible with NetworkX graphs.
+                            _logger.debug(
+                                "Call to `%s' is returning a graph from a different "
+                                "backend! It has inputs from %s backends, but ran with "
+                                "'%s' backend and is returning graph from '%s' backend",
+                                self.name,
+                                graph_backend_names,
+                                backend_name,
+                                backend_name,
+                            )
+                        return rv
+                    # `should_run` is False, but `can_run` is True, so try again later
+                    backends_to_try_again.append(backend_name)
+            except NotImplementedError as exc:
+                _logger.debug(
+                    "Backend '%s' raised when calling `%s': %s",
+                    backend_name,
+                    self.name,
+                    exc,
+                )
+
+        # We are about to fail. Let's try backends with can_run=True and should_run=False.
+        # This is unlikely to help today since we try to run with "networkx" before this.
+        for backend_name in backends_to_try_again:
+            _logger.debug(
+                "Trying backend: '%s' (ignoring `should_run=False`)", backend_name
+            )
+            try:
+                rv = self._convert_and_call(
+                    backend_name, graph_backend_names, args, kwargs
+                )
+                if (
+                    self._returns_graph
+                    and graph_backend_names
+                    and backend_name not in graph_backend_names
+                ):
+                    _logger.debug(
+                        "Call to `%s' is returning a graph from a different "
+                        "backend! It has inputs from %s backends, but ran with "
+                        "'%s' backend and is returning graph from '%s' backend",
+                        self.name,
+                        graph_backend_names,
+                        backend_name,
+                        backend_name,
+                    )
+                return rv
+            except NotImplementedError as exc:
+                _logger.debug(
+                    "Backend '%s' raised when calling `%s': %s",
+                    backend_name,
+                    self.name,
+                    exc,
+                )
+        # As a final effort, we could try to convert and run with `group3` backends
+        # that we discarded when `len(group3) > 1`, but let's not consider doing
+        # so until there is a reasonable request for it.
+
+        if len(unspecified_backends := graph_backend_names - seen) > 1:
+            raise TypeError(
+                f"Unable to convert inputs from {graph_backend_names} backends and "
+                f"run `{self.name}'. NetworkX is configured to automatically convert "
+                f"to {try_order} backends. To remedy this, you may enable automatic "
+                f"conversion to {unspecified_backends} backends by adding them to "
+                "`nx.config.backend_priority`, or you "
+                "may specify a backend to use with the `backend=` keyword argument."
+            )
+        raise NotImplementedError(
+            f"`{self.name}' is not implemented by {try_order} backends. To remedy "
+            "this, you may enable automatic conversion to more backends (including "
+            "'networkx') by adding them to `nx.config.backend_priority`, "
+            "or you may specify a backend to use with "
+            "the `backend=` keyword argument."
+        )
+
+    def _will_call_mutate_input(self, args, kwargs):
+        return (mutates_input := self.mutates_input) and (
+            mutates_input is True
+            or any(
+                # If `mutates_input` begins with "not ", then assume the argument is bool,
+                # otherwise treat it as a node or edge attribute if it's not None.
+                not (
+                    args[arg_pos]
+                    if len(args) > arg_pos
+                    # This assumes that e.g. `copy=True` is the default
+                    else kwargs.get(arg_name[4:], True)
+                )
+                if arg_name.startswith("not ")
+                else (args[arg_pos] if len(args) > arg_pos else kwargs.get(arg_name))
+                is not None
+                for arg_name, arg_pos in mutates_input.items()
+            )
+        )
+
+    def _can_convert(self, backend_name, graph_backend_names):
+        # Backend-to-backend conversion not supported yet.
+        # We can only convert to and from networkx.
+        rv = backend_name == "networkx" or graph_backend_names.issubset(
+            {"networkx", backend_name}
+        )
+        if not rv:
+            _logger.debug(
+                "Unable to convert from %s backends to '%s' backend",
+                graph_backend_names,
+                backend_name,
+            )
+        return rv
+
+    def _does_backend_have(self, backend_name):
+        """Does the specified backend have this algorithm?"""
+        if backend_name == "networkx":
+            return True
+        # Inspect the backend; don't trust metadata used to create `self.backends`
+        backend = _load_backend(backend_name)
+        return hasattr(backend, self.name)
+
+    def _can_backend_run(self, backend_name, args, kwargs):
+        """Can the specified backend run this algorithm with these arguments?"""
+        if backend_name == "networkx":
+            return True
+        backend = _load_backend(backend_name)
+        # `backend.can_run` and `backend.should_run` may return strings that describe
+        # why they can't or shouldn't be run.
+        if not hasattr(backend, self.name):
+            _logger.debug(
+                "Backend '%s' does not implement `%s'", backend_name, self.name
+            )
+            return False
+        can_run = backend.can_run(self.name, args, kwargs)
+        if isinstance(can_run, str) or not can_run:
+            reason = f", because: {can_run}" if isinstance(can_run, str) else ""
+            _logger.debug(
+                "Backend '%s' can't run `%s` with arguments: %s%s",
+                backend_name,
+                self.name,
+                _LazyArgsRepr(self, args, kwargs),
+                reason,
+            )
+            return False
+        return True
+
+    def _should_backend_run(self, backend_name, args, kwargs):
+        """Should the specified backend run this algorithm with these arguments?
+
+        Note that this does not check ``backend.can_run``.
+        """
+        # `backend.can_run` and `backend.should_run` may return strings that describe
+        # why they can't or shouldn't be run.
+        if backend_name == "networkx":
+            return True
+        backend = _load_backend(backend_name)
+        should_run = backend.should_run(self.name, args, kwargs)
+        if isinstance(should_run, str) or not should_run:
+            reason = f", because: {should_run}" if isinstance(should_run, str) else ""
+            _logger.debug(
+                "Backend '%s' shouldn't run `%s` with arguments: %s%s",
+                backend_name,
+                self.name,
+                _LazyArgsRepr(self, args, kwargs),
+                reason,
+            )
+            return False
+        return True
+
    def _convert_arguments(self, backend_name, args, kwargs, *, use_cache, mutations):
        """Convert graph arguments to the specified backend.

        The call's arguments are bound to ``self.__signature__`` to resolve
        which edge/node attributes (and whether graph attributes) must be
        preserved by the conversion, then every graph argument in
        ``self.graphs`` that is not already from ``backend_name`` is converted
        via ``_convert_graph``.

        Parameters
        ----------
        backend_name : str
            Backend to convert graph arguments to.
        args : tuple
        kwargs : dict
        use_cache : bool
            Whether ``_convert_graph`` may read from / write to each graph's
            ``__networkx_cache__``.
        mutations : list or None
            Passed through to ``_convert_graph``; when not None the call may
            mutate its inputs (see ``_convert_and_call``), which affects
            whether cached conversions may be saved.

        Returns
        -------
        args tuple and kwargs dict
        """
        bound = self.__signature__.bind(*args, **kwargs)
        bound.apply_defaults()
        if not self.graphs:
            # No graph arguments to convert; just drop the `backend=` kwarg.
            bound_kwargs = bound.kwargs
            del bound_kwargs["backend"]
            return bound.args, bound_kwargs
        if backend_name == "networkx":
            # `backend_interface.convert_to_nx` preserves everything
            preserve_edge_attrs = preserve_node_attrs = preserve_graph_attrs = True
        else:
            preserve_edge_attrs = self.preserve_edge_attrs
            preserve_node_attrs = self.preserve_node_attrs
            preserve_graph_attrs = self.preserve_graph_attrs
            edge_attrs = self.edge_attrs
            node_attrs = self.node_attrs
        # Convert graphs into backend graph-like object
        # Include the edge and/or node labels if provided to the algorithm
        if preserve_edge_attrs is False:
            # e.g. `preserve_edge_attrs=False`
            pass
        elif preserve_edge_attrs is True:
            # e.g. `preserve_edge_attrs=True`
            edge_attrs = None
        elif isinstance(preserve_edge_attrs, str):
            if bound.arguments[preserve_edge_attrs] is True or callable(
                bound.arguments[preserve_edge_attrs]
            ):
                # e.g. `preserve_edge_attrs="attr"` and `func(attr=True)`
                # e.g. `preserve_edge_attrs="attr"` and `func(attr=myfunc)`
                preserve_edge_attrs = True
                edge_attrs = None
            elif bound.arguments[preserve_edge_attrs] is False and (
                isinstance(edge_attrs, str)
                and edge_attrs == preserve_edge_attrs
                or isinstance(edge_attrs, dict)
                and preserve_edge_attrs in edge_attrs
            ):
                # e.g. `preserve_edge_attrs="attr"` and `func(attr=False)`
                # Treat `False` argument as meaning "preserve_edge_data=False"
                # and not `False` as the edge attribute to use.
                preserve_edge_attrs = False
                edge_attrs = None
            else:
                # e.g. `preserve_edge_attrs="attr"` and `func(attr="weight")`
                preserve_edge_attrs = False
        # Else: e.g. `preserve_edge_attrs={"G": {"weight": 1}}`

        if edge_attrs is None:
            # May have been set to None above b/c all attributes are preserved
            pass
        elif isinstance(edge_attrs, str):
            if edge_attrs[0] == "[":
                # e.g. `edge_attrs="[edge_attributes]"` (argument of list of attributes)
                # e.g. `func(edge_attributes=["foo", "bar"])`
                edge_attrs = {
                    edge_attr: 1 for edge_attr in bound.arguments[edge_attrs[1:-1]]
                }
            elif callable(bound.arguments[edge_attrs]):
                # e.g. `edge_attrs="weight"` and `func(weight=myfunc)`
                preserve_edge_attrs = True
                edge_attrs = None
            elif bound.arguments[edge_attrs] is not None:
                # e.g. `edge_attrs="weight"` and `func(weight="foo")` (default of 1)
                edge_attrs = {bound.arguments[edge_attrs]: 1}
            elif self.name == "to_numpy_array" and hasattr(
                bound.arguments["dtype"], "names"
            ):
                # Custom handling: attributes may be obtained from `dtype`
                edge_attrs = {
                    edge_attr: 1 for edge_attr in bound.arguments["dtype"].names
                }
            else:
                # e.g. `edge_attrs="weight"` and `func(weight=None)`
                edge_attrs = None
        else:
            # e.g. `edge_attrs={"attr": "default"}` and `func(attr="foo", default=7)`
            # e.g. `edge_attrs={"attr": 0}` and `func(attr="foo")`
            edge_attrs = {
                edge_attr: bound.arguments.get(val, 1) if isinstance(val, str) else val
                for key, val in edge_attrs.items()
                if (edge_attr := bound.arguments[key]) is not None
            }

        if preserve_node_attrs is False:
            # e.g. `preserve_node_attrs=False`
            pass
        elif preserve_node_attrs is True:
            # e.g. `preserve_node_attrs=True`
            node_attrs = None
        elif isinstance(preserve_node_attrs, str):
            if bound.arguments[preserve_node_attrs] is True or callable(
                bound.arguments[preserve_node_attrs]
            ):
                # e.g. `preserve_node_attrs="attr"` and `func(attr=True)`
                # e.g. `preserve_node_attrs="attr"` and `func(attr=myfunc)`
                preserve_node_attrs = True
                node_attrs = None
            elif bound.arguments[preserve_node_attrs] is False and (
                isinstance(node_attrs, str)
                and node_attrs == preserve_node_attrs
                or isinstance(node_attrs, dict)
                and preserve_node_attrs in node_attrs
            ):
                # e.g. `preserve_node_attrs="attr"` and `func(attr=False)`
                # Treat `False` argument as meaning "preserve_node_data=False"
                # and not `False` as the node attribute to use. Is this used?
                preserve_node_attrs = False
                node_attrs = None
            else:
                # e.g. `preserve_node_attrs="attr"` and `func(attr="weight")`
                preserve_node_attrs = False
        # Else: e.g. `preserve_node_attrs={"G": {"pos": None}}`

        if node_attrs is None:
            # May have been set to None above b/c all attributes are preserved
            pass
        elif isinstance(node_attrs, str):
            if node_attrs[0] == "[":
                # e.g. `node_attrs="[node_attributes]"` (argument of list of attributes)
                # e.g. `func(node_attributes=["foo", "bar"])`
                node_attrs = {
                    node_attr: None for node_attr in bound.arguments[node_attrs[1:-1]]
                }
            elif callable(bound.arguments[node_attrs]):
                # e.g. `node_attrs="weight"` and `func(weight=myfunc)`
                preserve_node_attrs = True
                node_attrs = None
            elif bound.arguments[node_attrs] is not None:
                # e.g. `node_attrs="weight"` and `func(weight="foo")`
                node_attrs = {bound.arguments[node_attrs]: None}
            else:
                # e.g. `node_attrs="weight"` and `func(weight=None)`
                node_attrs = None
        else:
            # e.g. `node_attrs={"attr": "default"}` and `func(attr="foo", default=7)`
            # e.g. `node_attrs={"attr": 0}` and `func(attr="foo")`
            node_attrs = {
                node_attr: bound.arguments.get(val) if isinstance(val, str) else val
                for key, val in node_attrs.items()
                if (node_attr := bound.arguments[key]) is not None
            }

        # It should be safe to assume that we either have networkx graphs or backend graphs.
        # Future work: allow conversions between backends.
        for gname in self.graphs:
            if gname in self.list_graphs:
                # This argument is a list of graphs; convert each element that
                # is not already from `backend_name`.
                bound.arguments[gname] = [
                    self._convert_graph(
                        backend_name,
                        g,
                        edge_attrs=edge_attrs,
                        node_attrs=node_attrs,
                        preserve_edge_attrs=preserve_edge_attrs,
                        preserve_node_attrs=preserve_node_attrs,
                        preserve_graph_attrs=preserve_graph_attrs,
                        graph_name=gname,
                        use_cache=use_cache,
                        mutations=mutations,
                    )
                    if getattr(g, "__networkx_backend__", "networkx") != backend_name
                    else g
                    for g in bound.arguments[gname]
                ]
            else:
                graph = bound.arguments[gname]
                if graph is None:
                    if gname in self.optional_graphs:
                        continue
                    raise TypeError(
                        f"Missing required graph argument `{gname}` in {self.name} function"
                    )
                # Resolve per-graph overrides when the preserve_* values are
                # dicts/sets keyed by graph argument name.
                if isinstance(preserve_edge_attrs, dict):
                    preserve_edges = False
                    edges = preserve_edge_attrs.get(gname, edge_attrs)
                else:
                    preserve_edges = preserve_edge_attrs
                    edges = edge_attrs
                if isinstance(preserve_node_attrs, dict):
                    preserve_nodes = False
                    nodes = preserve_node_attrs.get(gname, node_attrs)
                else:
                    preserve_nodes = preserve_node_attrs
                    nodes = node_attrs
                if isinstance(preserve_graph_attrs, set):
                    preserve_graph = gname in preserve_graph_attrs
                else:
                    preserve_graph = preserve_graph_attrs
                if getattr(graph, "__networkx_backend__", "networkx") != backend_name:
                    bound.arguments[gname] = self._convert_graph(
                        backend_name,
                        graph,
                        edge_attrs=edges,
                        node_attrs=nodes,
                        preserve_edge_attrs=preserve_edges,
                        preserve_node_attrs=preserve_nodes,
                        preserve_graph_attrs=preserve_graph,
                        graph_name=gname,
                        use_cache=use_cache,
                        mutations=mutations,
                    )
        bound_kwargs = bound.kwargs
        del bound_kwargs["backend"]
        return bound.args, bound_kwargs
+
    def _convert_graph(
        self,
        backend_name,
        graph,
        *,
        edge_attrs,
        node_attrs,
        preserve_edge_attrs,
        preserve_node_attrs,
        preserve_graph_attrs,
        graph_name,
        use_cache,
        mutations,
    ):
        """Convert a single graph argument to ``backend_name``.

        When ``use_cache`` is true and ``graph`` has a ``__networkx_cache__``,
        a previously converted graph may be returned from the cache (emitting
        a warning unless "cache" is in ``nx.config.warnings_to_ignore``).
        Fresh conversions are saved to the cache only when ``mutations`` is
        None, since a call that mutates its input would invalidate the cached
        conversion.
        """
        if (
            use_cache
            and (nx_cache := getattr(graph, "__networkx_cache__", None)) is not None
        ):
            cache = nx_cache.setdefault("backends", {}).setdefault(backend_name, {})
            key = _get_cache_key(
                edge_attrs=edge_attrs,
                node_attrs=node_attrs,
                preserve_edge_attrs=preserve_edge_attrs,
                preserve_node_attrs=preserve_node_attrs,
                preserve_graph_attrs=preserve_graph_attrs,
            )
            compat_key, rv = _get_from_cache(cache, key, mutations=mutations)
            if rv is not None:
                # Cache hit: warn (unless suppressed) that a cached conversion
                # is being used, since manual mutation of the original graph
                # would make the cache inconsistent.
                if "cache" not in nx.config.warnings_to_ignore:
                    warnings.warn(
                        "Note: conversions to backend graphs are saved to cache "
                        "(`G.__networkx_cache__` on the original graph) by default."
                        "\n\nThis warning means the cached graph is being used "
                        f"for the {backend_name!r} backend in the "
                        f"call to {self.name}.\n\nFor the cache to be consistent "
                        "(i.e., correct), the input graph must not have been "
                        "manually mutated since the cached graph was created. "
                        "Examples of manually mutating the graph data structures "
                        "resulting in an inconsistent cache include:\n\n"
                        "    >>> G[u][v][key] = val\n\n"
                        "and\n\n"
                        "    >>> for u, v, d in G.edges(data=True):\n"
                        "    ...     d[key] = val\n\n"
                        "Using methods such as `G.add_edge(u, v, weight=val)` "
                        "will correctly clear the cache to keep it consistent. "
                        "You may also use `G.__networkx_cache__.clear()` to "
                        "manually clear the cache, or set `G.__networkx_cache__` "
                        "to None to disable caching for G. Enable or disable caching "
                        "globally via `nx.config.cache_converted_graphs` config.\n\n"
                        "To disable this warning:\n\n"
                        '    >>> nx.config.warnings_to_ignore.add("cache")\n'
                    )
                _logger.debug(
                    "Using cached converted graph (from '%s' to '%s' backend) "
                    "in call to `%s' for '%s' argument",
                    getattr(graph, "__networkx_backend__", None),
                    backend_name,
                    self.name,
                    graph_name,
                )
                return rv

        if backend_name == "networkx":
            # Perhaps we should check that "__networkx_backend__" attribute exists
            # and return the original object if not.
            if not hasattr(graph, "__networkx_backend__"):
                _logger.debug(
                    "Unable to convert input to 'networkx' backend in call to `%s' for "
                    "'%s argument, because it is not from a backend (i.e., it does not "
                    "have `G.__networkx_backend__` attribute). Using the original "
                    "object: %s",
                    self.name,
                    graph_name,
                    graph,
                )
                # This may fail, but let it fail in the networkx function
                return graph
            backend = _load_backend(graph.__networkx_backend__)
            rv = backend.convert_to_nx(graph)
        else:
            backend = _load_backend(backend_name)
            rv = backend.convert_from_nx(
                graph,
                edge_attrs=edge_attrs,
                node_attrs=node_attrs,
                preserve_edge_attrs=preserve_edge_attrs,
                preserve_node_attrs=preserve_node_attrs,
                # Always preserve graph attrs when we are caching b/c this should be
                # cheap and may help prevent extra (unnecessary) conversions. Because
                # we do this, we don't need `preserve_graph_attrs` in the cache key.
                preserve_graph_attrs=preserve_graph_attrs or use_cache,
                name=self.name,
                graph_name=graph_name,
            )
        # Only cache the conversion when the call won't mutate its inputs.
        if use_cache and nx_cache is not None and mutations is None:
            _set_to_cache(cache, key, rv)
            _logger.debug(
                "Caching converted graph (from '%s' to '%s' backend) "
                "in call to `%s' for '%s' argument",
                getattr(graph, "__networkx_backend__", None),
                backend_name,
                self.name,
                graph_name,
            )

        return rv
+
+    def _call_with_backend(self, backend_name, args, kwargs, *, extra_message=None):
+        """Call this dispatchable function with a backend without converting inputs."""
+        if backend_name == "networkx":
+            return self.orig_func(*args, **kwargs)
+        backend = _load_backend(backend_name)
+        _logger.debug(
+            "Using backend '%s' for call to `%s' with arguments: %s",
+            backend_name,
+            self.name,
+            _LazyArgsRepr(self, args, kwargs),
+        )
+        try:
+            return getattr(backend, self.name)(*args, **kwargs)
+        except NotImplementedError as exc:
+            if extra_message is not None:
+                _logger.debug(
+                    "Backend '%s' raised when calling `%s': %s",
+                    backend_name,
+                    self.name,
+                    exc,
+                )
+                raise NotImplementedError(extra_message) from exc
+            raise
+
    def _convert_and_call(
        self,
        backend_name,
        input_backend_names,
        args,
        kwargs,
        *,
        extra_message=None,
        mutations=None,
    ):
        """Call this dispatchable function with a backend after converting inputs.

        Graph arguments are converted via ``_convert_arguments`` (honoring the
        ``nx.config.cache_converted_graphs`` setting), then the backend's (or,
        for "networkx", the original) function is invoked with the converted
        arguments. Returns whatever that function returns.

        Parameters
        ----------
        backend_name : str
        input_backend_names : set[str]
        args : arguments tuple
        kwargs : keywords dict
        extra_message : str, optional
            Additional message to log if NotImplementedError is raised by backend.
        mutations : list, optional
            Used to clear objects gotten from cache if inputs will be mutated.
        """
        if backend_name == "networkx":
            func = self.orig_func
        else:
            backend = _load_backend(backend_name)
            func = getattr(backend, self.name)
        # Log which input backends (other than the target) require conversion;
        # format differs for one vs. several backends.
        other_backend_names = input_backend_names - {backend_name}
        _logger.debug(
            "Converting input graphs from %s backend%s to '%s' backend for call to `%s'",
            other_backend_names
            if len(other_backend_names) > 1
            else f"'{next(iter(other_backend_names))}'",
            "s" if len(other_backend_names) > 1 else "",
            backend_name,
            self.name,
        )
        try:
            converted_args, converted_kwargs = self._convert_arguments(
                backend_name,
                args,
                kwargs,
                use_cache=nx.config.cache_converted_graphs,
                mutations=mutations,
            )
        except NotImplementedError as exc:
            # Only log the exception if we are adding an extra message
            # because we don't want to lose any information.
            _logger.debug(
                "Failed to convert graphs from %s to '%s' backend for call to `%s'"
                + ("" if extra_message is None else ": %s"),
                input_backend_names,
                backend_name,
                self.name,
                *(() if extra_message is None else (exc,)),
            )
            if extra_message is not None:
                raise NotImplementedError(extra_message) from exc
            raise
        if backend_name != "networkx":
            _logger.debug(
                "Using backend '%s' for call to `%s' with arguments: %s",
                backend_name,
                self.name,
                _LazyArgsRepr(self, converted_args, converted_kwargs),
            )
        try:
            return func(*converted_args, **converted_kwargs)
        except NotImplementedError as exc:
            # As above: re-raise with `extra_message` when provided so the
            # final error is more informative; otherwise propagate unchanged.
            if extra_message is not None:
                _logger.debug(
                    "Backend '%s' raised when calling `%s': %s",
                    backend_name,
                    self.name,
                    exc,
                )
                raise NotImplementedError(extra_message) from exc
            raise
+
+    def _convert_and_call_for_tests(
+        self, backend_name, args, kwargs, *, fallback_to_nx=False
+    ):
+        """Call this dispatchable function with a backend; for use with testing."""
+        backend = _load_backend(backend_name)
+        if not self._can_backend_run(backend_name, args, kwargs):
+            if fallback_to_nx or not self.graphs:
+                if fallback_to_nx:
+                    _logger.debug(
+                        "Falling back to use 'networkx' instead of '%s' backend "
+                        "for call to `%s' with arguments: %s",
+                        backend_name,
+                        self.name,
+                        _LazyArgsRepr(self, args, kwargs),
+                    )
+                return self.orig_func(*args, **kwargs)
+
+            import pytest
+
+            msg = f"'{self.name}' not implemented by {backend_name}"
+            if hasattr(backend, self.name):
+                msg += " with the given arguments"
+            pytest.xfail(msg)
+
+        from collections.abc import Iterable, Iterator, Mapping
+        from copy import copy, deepcopy
+        from io import BufferedReader, BytesIO, StringIO, TextIOWrapper
+        from itertools import tee
+        from random import Random
+
+        import numpy as np
+        from numpy.random import Generator, RandomState
+        from scipy.sparse import sparray
+
+        # We sometimes compare the backend result to the original result,
+        # so we need two sets of arguments. We tee iterators and copy
+        # random state so that they may be used twice.
+        if not args:
+            args1 = args2 = args
+        else:
+            args1, args2 = zip(
+                *(
+                    (arg, deepcopy(arg))
+                    if isinstance(arg, RandomState)
+                    else (arg, copy(arg))
+                    if isinstance(arg, BytesIO | StringIO | Random | Generator)
+                    else tee(arg)
+                    if isinstance(arg, Iterator)
+                    and not isinstance(arg, BufferedReader | TextIOWrapper)
+                    else (arg, arg)
+                    for arg in args
+                )
+            )
+        if not kwargs:
+            kwargs1 = kwargs2 = kwargs
+        else:
+            kwargs1, kwargs2 = zip(
+                *(
+                    ((k, v), (k, deepcopy(v)))
+                    if isinstance(v, RandomState)
+                    else ((k, v), (k, copy(v)))
+                    if isinstance(v, BytesIO | StringIO | Random | Generator)
+                    else ((k, (teed := tee(v))[0]), (k, teed[1]))
+                    if isinstance(v, Iterator)
+                    and not isinstance(v, BufferedReader | TextIOWrapper)
+                    else ((k, v), (k, v))
+                    for k, v in kwargs.items()
+                )
+            )
+            kwargs1 = dict(kwargs1)
+            kwargs2 = dict(kwargs2)
+        try:
+            converted_args, converted_kwargs = self._convert_arguments(
+                backend_name, args1, kwargs1, use_cache=False, mutations=None
+            )
+            _logger.debug(
+                "Using backend '%s' for call to `%s' with arguments: %s",
+                backend_name,
+                self.name,
+                _LazyArgsRepr(self, converted_args, converted_kwargs),
+            )
+            result = getattr(backend, self.name)(*converted_args, **converted_kwargs)
+        except NotImplementedError as exc:
+            if fallback_to_nx:
+                _logger.debug(
+                    "Graph conversion failed; falling back to use 'networkx' instead "
+                    "of '%s' backend for call to `%s'",
+                    backend_name,
+                    self.name,
+                )
+                return self.orig_func(*args2, **kwargs2)
+            import pytest
+
+            pytest.xfail(
+                exc.args[0] if exc.args else f"{self.name} raised {type(exc).__name__}"
+            )
+        # Verify that `self._returns_graph` is correct. This compares the return type
+        # to the type expected from `self._returns_graph`. This handles tuple and list
+        # return types, but *does not* catch functions that yield graphs.
+        if (
+            self._returns_graph
+            != (
+                isinstance(result, nx.Graph)
+                or hasattr(result, "__networkx_backend__")
+                or isinstance(result, tuple | list)
+                and any(
+                    isinstance(x, nx.Graph) or hasattr(x, "__networkx_backend__")
+                    for x in result
+                )
+            )
+            and not (
+                # May return Graph or None
+                self.name in {"check_planarity", "check_planarity_recursive"}
+                and any(x is None for x in result)
+            )
+            and not (
+                # May return Graph or dict
+                self.name in {"held_karp_ascent"}
+                and any(isinstance(x, dict) for x in result)
+            )
+            and self.name
+            not in {
+                # yields graphs
+                "all_triads",
+                "general_k_edge_subgraphs",
+                # yields graphs or arrays
+                "nonisomorphic_trees",
+            }
+        ):
+            raise RuntimeError(f"`returns_graph` is incorrect for {self.name}")
+
+        def check_result(val, depth=0):
+            if isinstance(val, np.number):
+                raise RuntimeError(
+                    f"{self.name} returned a numpy scalar {val} ({type(val)}, depth={depth})"
+                )
+            if isinstance(val, np.ndarray | sparray):
+                return
+            if isinstance(val, nx.Graph):
+                check_result(val._node, depth=depth + 1)
+                check_result(val._adj, depth=depth + 1)
+                return
+            if isinstance(val, Iterator):
+                raise NotImplementedError
+            if isinstance(val, Iterable) and not isinstance(val, str):
+                for x in val:
+                    check_result(x, depth=depth + 1)
+            if isinstance(val, Mapping):
+                for x in val.values():
+                    check_result(x, depth=depth + 1)
+
+        def check_iterator(it):
+            for val in it:
+                try:
+                    check_result(val)
+                except RuntimeError as exc:
+                    raise RuntimeError(
+                        f"{self.name} returned a numpy scalar {val} ({type(val)})"
+                    ) from exc
+                yield val
+
+        if self.name in {"from_edgelist"}:
+            # numpy scalars are explicitly given as values in some tests
+            pass
+        elif isinstance(result, Iterator):
+            result = check_iterator(result)
+        else:
+            try:
+                check_result(result)
+            except RuntimeError as exc:
+                raise RuntimeError(
+                    f"{self.name} returned a numpy scalar {result} ({type(result)})"
+                ) from exc
+            check_result(result)
+
+        if self.name in {
+            "edmonds_karp",
+            "barycenter",
+            "contracted_edge",
+            "contracted_nodes",
+            "stochastic_graph",
+            "relabel_nodes",
+            "maximum_branching",
+            "incremental_closeness_centrality",
+            "minimal_branching",
+            "minimum_spanning_arborescence",
+            "recursive_simple_cycles",
+            "connected_double_edge_swap",
+        }:
+            # Special-case algorithms that mutate input graphs
+            bound = self.__signature__.bind(*converted_args, **converted_kwargs)
+            bound.apply_defaults()
+            bound2 = self.__signature__.bind(*args2, **kwargs2)
+            bound2.apply_defaults()
+            if self.name in {
+                "minimal_branching",
+                "minimum_spanning_arborescence",
+                "recursive_simple_cycles",
+                "connected_double_edge_swap",
+            }:
+                G1 = backend.convert_to_nx(bound.arguments["G"])
+                G2 = bound2.arguments["G"]
+                G2._adj = G1._adj
+                if G2.is_directed():
+                    G2._pred = G1._pred
+                nx._clear_cache(G2)
+            elif self.name == "edmonds_karp":
+                R1 = backend.convert_to_nx(bound.arguments["residual"])
+                R2 = bound2.arguments["residual"]
+                if R1 is not None and R2 is not None:
+                    for k, v in R1.edges.items():
+                        R2.edges[k]["flow"] = v["flow"]
+                    R2.graph.update(R1.graph)
+                    nx._clear_cache(R2)
+            elif self.name == "barycenter" and bound.arguments["attr"] is not None:
+                G1 = backend.convert_to_nx(bound.arguments["G"])
+                G2 = bound2.arguments["G"]
+                attr = bound.arguments["attr"]
+                for k, v in G1.nodes.items():
+                    G2.nodes[k][attr] = v[attr]
+                nx._clear_cache(G2)
+            elif (
+                self.name in {"contracted_nodes", "contracted_edge"}
+                and not bound.arguments["copy"]
+            ):
+                # Edges and nodes changed; node "contraction" and edge "weight" attrs
+                G1 = backend.convert_to_nx(bound.arguments["G"])
+                G2 = bound2.arguments["G"]
+                G2.__dict__.update(G1.__dict__)
+                nx._clear_cache(G2)
+            elif self.name == "stochastic_graph" and not bound.arguments["copy"]:
+                G1 = backend.convert_to_nx(bound.arguments["G"])
+                G2 = bound2.arguments["G"]
+                for k, v in G1.edges.items():
+                    G2.edges[k]["weight"] = v["weight"]
+                nx._clear_cache(G2)
+            elif (
+                self.name == "relabel_nodes"
+                and not bound.arguments["copy"]
+                or self.name in {"incremental_closeness_centrality"}
+            ):
+                G1 = backend.convert_to_nx(bound.arguments["G"])
+                G2 = bound2.arguments["G"]
+                if G1 is G2:
+                    return G2
+                G2._node.clear()
+                G2._node.update(G1._node)
+                G2._adj.clear()
+                G2._adj.update(G1._adj)
+                if hasattr(G1, "_pred") and hasattr(G2, "_pred"):
+                    G2._pred.clear()
+                    G2._pred.update(G1._pred)
+                if hasattr(G1, "_succ") and hasattr(G2, "_succ"):
+                    G2._succ.clear()
+                    G2._succ.update(G1._succ)
+                nx._clear_cache(G2)
+                if self.name == "relabel_nodes":
+                    return G2
+            return backend.convert_to_nx(result)
+
+        converted_result = backend.convert_to_nx(result)
+        if isinstance(converted_result, nx.Graph) and self.name not in {
+            "boykov_kolmogorov",
+            "preflow_push",
+            "quotient_graph",
+            "shortest_augmenting_path",
+            "spectral_graph_forge",
+            # We don't handle tempfile.NamedTemporaryFile arguments
+            "read_gml",
+            "read_graph6",
+            "read_sparse6",
+            # We don't handle io.BufferedReader or io.TextIOWrapper arguments
+            "bipartite_read_edgelist",
+            "read_adjlist",
+            "read_edgelist",
+            "read_graphml",
+            "read_multiline_adjlist",
+            "read_pajek",
+            "from_pydot",
+            "pydot_read_dot",
+            "agraph_read_dot",
+            # graph comparison fails b/c of nan values
+            "read_gexf",
+        }:
+            # For graph return types (e.g. generators), we compare that results are
+            # the same between the backend and networkx, then return the original
+            # networkx result so the iteration order will be consistent in tests.
+            G = self.orig_func(*args2, **kwargs2)
+            if not nx.utils.graphs_equal(G, converted_result):
+                assert G.number_of_nodes() == converted_result.number_of_nodes()
+                assert G.number_of_edges() == converted_result.number_of_edges()
+                assert G.graph == converted_result.graph
+                assert G.nodes == converted_result.nodes
+                assert G.adj == converted_result.adj
+                assert type(G) is type(converted_result)
+                raise AssertionError("Graphs are not equal")
+            return G
+        return converted_result
+
+    def _make_doc(self):
+        """Generate the backends section at the end for functions having an alternate
+        backend implementation(s) using the `backend_info` entry-point."""
+
+        if not self.backends:
+            return self._orig_doc
+        lines = [
+            "Backends",
+            "--------",
+        ]
+        for backend in sorted(self.backends):
+            info = backend_info[backend]
+            if "short_summary" in info:
+                lines.append(f"{backend} : {info['short_summary']}")
+            else:
+                lines.append(backend)
+            if "functions" not in info or self.name not in info["functions"]:
+                lines.append("")
+                continue
+
+            func_info = info["functions"][self.name]
+
+            # Renaming extra_docstring to additional_docs
+            if func_docs := (
+                func_info.get("additional_docs") or func_info.get("extra_docstring")
+            ):
+                lines.extend(
+                    f"  {line}" if line else line for line in func_docs.split("\n")
+                )
+                add_gap = True
+            else:
+                add_gap = False
+
+            # Renaming extra_parameters to additional_parameters
+            if extra_parameters := (
+                func_info.get("extra_parameters")
+                or func_info.get("additional_parameters")
+            ):
+                if add_gap:
+                    lines.append("")
+                lines.append("  Additional parameters:")
+                for param in sorted(extra_parameters):
+                    lines.append(f"    {param}")
+                    if desc := extra_parameters[param]:
+                        lines.append(f"      {desc}")
+                    lines.append("")
+            else:
+                lines.append("")
+
+            if func_url := func_info.get("url"):
+                lines.append(f"[`Source <{func_url}>`_]")
+                lines.append("")
+
+        lines.pop()  # Remove last empty line
+        to_add = "\n    ".join(lines)
+        if not self._orig_doc:
+            return f"The original docstring for {self.name} was empty.\n\n    {to_add}"
+        return f"{self._orig_doc.rstrip()}\n\n    {to_add}"
+
+    def __reduce__(self):
+        """Allow this object to be serialized with pickle.
+
+        This uses the global registry `_registered_algorithms` to deserialize.
+        """
+        return _restore_dispatchable, (self.name,)
+
+
def _restore_dispatchable(name):
    """Pickle helper: return the wrapped function registered under ``name``."""
    registered = _registered_algorithms[name]
    return registered.__wrapped__
+
+
+def _get_cache_key(
+    *,
+    edge_attrs,
+    node_attrs,
+    preserve_edge_attrs,
+    preserve_node_attrs,
+    preserve_graph_attrs,
+):
+    """Return key used by networkx caching given arguments for ``convert_from_nx``."""
+    # edge_attrs: dict | None
+    # node_attrs: dict | None
+    # preserve_edge_attrs: bool (False if edge_attrs is not None)
+    # preserve_node_attrs: bool (False if node_attrs is not None)
+    return (
+        frozenset(edge_attrs.items())
+        if edge_attrs is not None
+        else preserve_edge_attrs,
+        frozenset(node_attrs.items())
+        if node_attrs is not None
+        else preserve_node_attrs,
+    )
+
+
+def _get_from_cache(cache, key, *, backend_name=None, mutations=None):
+    """Search the networkx cache for a graph that is compatible with ``key``.
+
+    Parameters
+    ----------
+    cache : dict
+        If ``backend_name`` is given, then this is treated as ``G.__networkx_cache__``,
+        but if ``backend_name`` is None, then this is treated as the resolved inner
+        cache such as ``G.__networkx_cache__["backends"][backend_name]``.
+    key : tuple
+        Cache key from ``_get_cache_key``.
+    backend_name : str, optional
+        Name of the backend to control how ``cache`` is interpreted.
+    mutations : list, optional
+        Used internally to clear objects gotten from cache if inputs will be mutated.
+
+    Returns
+    -------
+    tuple or None
+        The key of the compatible graph found in the cache.
+    graph or None
+        A compatible graph or None.
+    """
+    if backend_name is not None:
+        cache = cache.get("backends", {}).get(backend_name, {})
+    if not cache:
+        return None, None
+
+    # Do a simple search for a cached graph with compatible data.
+    # For example, if we need a single attribute, then it's okay
+    # to use a cached graph that preserved all attributes.
+    # This looks for an exact match first.
+    edge_key, node_key = key
+    for compat_key in itertools.product(
+        (edge_key, True) if edge_key is not True else (True,),
+        (node_key, True) if node_key is not True else (True,),
+    ):
+        if (rv := cache.get(compat_key)) is not None:
+            if mutations is not None:
+                # Remove this item from the cache (after all conversions) if
+                # the call to this dispatchable function will mutate an input.
+                mutations.append((cache, compat_key))
+            return compat_key, rv
+    if edge_key is not True and node_key is not True:
+        # Iterate over the items in `cache` to see if any are compatible.
+        # For example, if no edge attributes are needed, then a graph
+        # with any edge attribute will suffice. We use the same logic
+        # below (but switched) to clear unnecessary items from the cache.
+        # Use `list(cache.items())` to be thread-safe.
+        for (ekey, nkey), graph in list(cache.items()):
+            if edge_key is False or ekey is True:
+                pass  # Cache works for edge data!
+            elif edge_key is True or ekey is False or not edge_key.issubset(ekey):
+                continue  # Cache missing required edge data; does not work
+            if node_key is False or nkey is True:
+                pass  # Cache works for node data!
+            elif node_key is True or nkey is False or not node_key.issubset(nkey):
+                continue  # Cache missing required node data; does not work
+            if mutations is not None:
+                # Remove this item from the cache (after all conversions) if
+                # the call to this dispatchable function will mutate an input.
+                mutations.append((cache, (ekey, nkey)))
+            return (ekey, nkey), graph
+    return None, None
+
+
+def _set_to_cache(cache, key, graph, *, backend_name=None):
+    """Set a backend graph to the cache, and remove unnecessary cached items.
+
+    Parameters
+    ----------
+    cache : dict
+        If ``backend_name`` is given, then this is treated as ``G.__networkx_cache__``,
+        but if ``backend_name`` is None, then this is treated as the resolved inner
+        cache such as ``G.__networkx_cache__["backends"][backend_name]``.
+    key : tuple
+        Cache key from ``_get_cache_key``.
+    graph : graph
+    backend_name : str, optional
+        Name of the backend to control how ``cache`` is interpreted.
+
+    Returns
+    -------
+    dict
+        The items that were removed from the cache.
+    """
+    if backend_name is not None:
+        cache = cache.setdefault("backends", {}).setdefault(backend_name, {})
+    # Remove old cached items that are no longer necessary since they
+    # are dominated/subsumed/outdated by what was just calculated.
+    # This uses the same logic as above, but with keys switched.
+    # Also, don't update the cache here if the call will mutate an input.
+    removed = {}
+    edge_key, node_key = key
+    cache[key] = graph  # Set at beginning to be thread-safe
+    for cur_key in list(cache):
+        if cur_key == key:
+            continue
+        ekey, nkey = cur_key
+        if ekey is False or edge_key is True:
+            pass
+        elif ekey is True or edge_key is False or not ekey.issubset(edge_key):
+            continue
+        if nkey is False or node_key is True:
+            pass
+        elif nkey is True or node_key is False or not nkey.issubset(node_key):
+            continue
+        # Use pop instead of del to try to be thread-safe
+        if (graph := cache.pop(cur_key, None)) is not None:
+            removed[cur_key] = graph
+    return removed
+
+
+class _LazyArgsRepr:
+    """Simple wrapper to display arguments of dispatchable functions in logging calls."""
+
+    def __init__(self, func, args, kwargs):
+        self.func = func
+        self.args = args
+        self.kwargs = kwargs
+        self.value = None
+
+    def __repr__(self):
+        if self.value is None:
+            bound = self.func.__signature__.bind_partial(*self.args, **self.kwargs)
+            inner = ", ".join(f"{key}={val!r}" for key, val in bound.arguments.items())
+            self.value = f"({inner})"
+        return self.value
+
+
if os.environ.get("_NETWORKX_BUILDING_DOCS_"):
    # When building docs with Sphinx, use the original function with the
    # dispatched __doc__, b/c Sphinx renders normal Python functions better.
    # This doesn't show e.g. `*, backend=None, **backend_kwargs` in the
    # signatures, which is probably okay. It does allow the docstring to be
    # updated based on the installed backends.
    _orig_dispatchable = _dispatchable

    # Shadow `_dispatchable` with a decorator that runs the real dispatch
    # machinery only to harvest the generated docstring, then returns the
    # plain, undecorated function for Sphinx to introspect.
    def _dispatchable(func=None, **kwargs):  # type: ignore[no-redef]
        if func is None:
            # Called with keyword arguments only: return a partial decorator.
            return partial(_dispatchable, **kwargs)
        dispatched_func = _orig_dispatchable(func, **kwargs)
        func.__doc__ = dispatched_func.__doc__
        return func

    # Mirror the class's constructor doc and signature (minus `cls`) onto the
    # shim so `help(_dispatchable)` still looks right during the docs build.
    _dispatchable.__doc__ = _orig_dispatchable.__new__.__doc__  # type: ignore[method-assign,assignment]
    _sig = inspect.signature(_orig_dispatchable.__new__)
    _dispatchable.__signature__ = _sig.replace(  # type: ignore[method-assign,assignment]
        parameters=[v for k, v in _sig.parameters.items() if k != "cls"]
    )
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/configs.py b/.venv/lib/python3.12/site-packages/networkx/utils/configs.py
new file mode 100644
index 00000000..24c80f88
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/configs.py
@@ -0,0 +1,387 @@
+import collections
+import os
+import typing
+import warnings
+from dataclasses import dataclass
+
+__all__ = ["Config"]
+
+
@dataclass(init=False, eq=False, slots=True, kw_only=True, match_args=False)
class Config:
    """The base class for NetworkX configuration.

    There are two ways to use this to create configurations. The recommended way
    is to subclass ``Config`` with docs and annotations.

    >>> class MyConfig(Config):
    ...     '''Breakfast!'''
    ...
    ...     eggs: int
    ...     spam: int
    ...
    ...     def _on_setattr(self, key, value):
    ...         assert isinstance(value, int) and value >= 0
    ...         return value
    >>> cfg = MyConfig(eggs=1, spam=5)

    Another way is to simply pass the initial configuration as keyword arguments to
    the ``Config`` instance:

    >>> cfg1 = Config(eggs=1, spam=5)
    >>> cfg1
    Config(eggs=1, spam=5)

    Once defined, config items may be modified, but can't be added or deleted by default.
    ``Config`` is a ``Mapping``, and can get and set configs via attributes or brackets:

    >>> cfg.eggs = 2
    >>> cfg.eggs
    2
    >>> cfg["spam"] = 42
    >>> cfg["spam"]
    42

    For convenience, it can also set configs within a context with the "with" statement:

    >>> with cfg(spam=3):
    ...     print("spam (in context):", cfg.spam)
    spam (in context): 3
    >>> print("spam (after context):", cfg.spam)
    spam (after context): 42

    Subclasses may also define ``_on_setattr`` (as done in the example above)
    to ensure the value being assigned is valid:

    >>> cfg.spam = -1
    Traceback (most recent call last):
        ...
    AssertionError

    If a more flexible configuration object is needed that allows adding and deleting
    configurations, then pass ``strict=False`` when defining the subclass:

    >>> class FlexibleConfig(Config, strict=False):
    ...     default_greeting: str = "Hello"
    >>> flexcfg = FlexibleConfig()
    >>> flexcfg.name = "Mr. Anderson"
    >>> flexcfg
    FlexibleConfig(default_greeting='Hello', name='Mr. Anderson')
    """

    def __init_subclass__(cls, strict=True):
        # Subclasses opt out of strict mode via `class Sub(Config, strict=False)`;
        # `_strict` is consumed by `__new__` and the attribute dunders below.
        cls._strict = strict

    def __new__(cls, **kwargs):
        # Note: `cls` is rebound below; the class is re-created as a dataclass
        # at instantiation time so field annotations become dataclass fields.
        orig_class = cls
        if cls is Config:
            # Enable the "simple" case of accepting config definition as keywords
            cls = type(
                cls.__name__,
                (cls,),
                {"__annotations__": {key: typing.Any for key in kwargs}},
            )
        cls = dataclass(
            eq=False,
            repr=cls._strict,
            slots=cls._strict,
            kw_only=True,
            match_args=False,
        )(cls)
        if not cls._strict:
            cls.__repr__ = _flexible_repr
        cls._orig_class = orig_class  # Save original class so we can pickle
        cls._prev = None  # Stage previous configs to enable use as context manager
        cls._context_stack = []  # Stack of previous configs when used as context
        instance = object.__new__(cls)
        instance.__init__(**kwargs)
        return instance

    def _on_setattr(self, key, value):
        """Process config value and check whether it is valid. Useful for subclasses."""
        return value

    def _on_delattr(self, key):
        """Callback for when a config item is being deleted. Useful for subclasses."""

    # Control behavior of attributes
    def __dir__(self):
        return self.__dataclass_fields__.keys()

    def __setattr__(self, key, value):
        # Strict configs may only assign to declared fields; values pass
        # through `_on_setattr` for validation/normalization first.
        if self._strict and key not in self.__dataclass_fields__:
            raise AttributeError(f"Invalid config name: {key!r}")
        value = self._on_setattr(key, value)
        object.__setattr__(self, key, value)
        # Any direct mutation invalidates staged context-manager state.
        self.__class__._prev = None

    def __delattr__(self, key):
        if self._strict:
            raise TypeError(
                f"Configuration items can't be deleted (can't delete {key!r})."
            )
        self._on_delattr(key)
        object.__delattr__(self, key)
        # Any direct mutation invalidates staged context-manager state.
        self.__class__._prev = None

    # Be a `collection.abc.Collection`
    # (strict configs enumerate dataclass fields; flexible ones use __dict__)
    def __contains__(self, key):
        return (
            key in self.__dataclass_fields__ if self._strict else key in self.__dict__
        )

    def __iter__(self):
        return iter(self.__dataclass_fields__ if self._strict else self.__dict__)

    def __len__(self):
        return len(self.__dataclass_fields__ if self._strict else self.__dict__)

    def __reversed__(self):
        return reversed(self.__dataclass_fields__ if self._strict else self.__dict__)

    # Add dunder methods for `collections.abc.Mapping`
    def __getitem__(self, key):
        try:
            return getattr(self, key)
        except AttributeError as err:
            raise KeyError(*err.args) from None

    def __setitem__(self, key, value):
        try:
            self.__setattr__(key, value)
        except AttributeError as err:
            raise KeyError(*err.args) from None

    def __delitem__(self, key):
        try:
            self.__delattr__(key)
        except AttributeError as err:
            raise KeyError(*err.args) from None

    _ipython_key_completions_ = __dir__  # config["<TAB>

    # Go ahead and make it a `collections.abc.Mapping`
    def get(self, key, default=None):
        return getattr(self, key, default)

    def items(self):
        return collections.abc.ItemsView(self)

    def keys(self):
        return collections.abc.KeysView(self)

    def values(self):
        return collections.abc.ValuesView(self)

    # dataclass can define __eq__ for us, but do it here so it works after pickling
    def __eq__(self, other):
        if not isinstance(other, Config):
            return NotImplemented
        return self._orig_class == other._orig_class and self.items() == other.items()

    # Make pickle work
    def __reduce__(self):
        # Pickle via the user-defined class (`_orig_class`), not the dynamic
        # dataclass created in `__new__`, which is not importable by name.
        return self._deserialize, (self._orig_class, dict(self))

    @staticmethod
    def _deserialize(cls, kwargs):
        return cls(**kwargs)

    # Allow to be used as context manager
    def __call__(self, **kwargs):
        # Validate all new values first so a failure leaves config unchanged,
        # then stage the previous values in `_prev` for `__enter__` to pick up.
        kwargs = {key: self._on_setattr(key, val) for key, val in kwargs.items()}
        prev = dict(self)
        for key, val in kwargs.items():
            setattr(self, key, val)
        self.__class__._prev = prev
        return self

    def __enter__(self):
        if self.__class__._prev is None:
            raise RuntimeError(
                "Config being used as a context manager without config items being set. "
                "Set config items via keyword arguments when calling the config object. "
                "For example, using config as a context manager should be like:\n\n"
                '    >>> with cfg(breakfast="spam"):\n'
                "    ...     ...  # Do stuff\n"
            )
        # Move staged previous values onto the stack; supports nested contexts.
        self.__class__._context_stack.append(self.__class__._prev)
        self.__class__._prev = None
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the values saved when entering this (possibly nested) context.
        prev = self.__class__._context_stack.pop()
        for key, val in prev.items():
            setattr(self, key, val)
+
+
+def _flexible_repr(self):
+    return (
+        f"{self.__class__.__qualname__}("
+        + ", ".join(f"{key}={val!r}" for key, val in self.__dict__.items())
+        + ")"
+    )
+
+
# Register `Config` as a virtual subclass of `Mapping`. Explicit registration
# is needed b/c `Mapping.__subclasshook__` returns `NotImplemented`, so duck
# typing alone would not make `isinstance(cfg, Mapping)` checks succeed.
collections.abc.Mapping.register(Config)
+
+
class BackendPriorities(Config, strict=False):
    """Configuration to control automatic conversion to and calling of backends.

    Priority is given to backends listed earlier.

    Parameters
    ----------
    algos : list of backend names
        This controls "algorithms" such as ``nx.pagerank`` that don't return a graph.
    generators : list of backend names
        This controls "generators" such as ``nx.from_pandas_edgelist`` that return a graph.
    kwargs : variadic keyword arguments of function name to list of backend names
        This allows each function to be configured separately and will override the config
        in ``algos`` or ``generators`` if present. The dispatchable function name may be
        gotten from the ``.name`` attribute such as ``nx.pagerank.name`` (it's typically
        the same as the name of the function).
    """

    algos: list[str]
    generators: list[str]

    def _on_setattr(self, key, value):
        # Validate that `key` names a config slot or dispatchable function and
        # that `value` is a list of known backend names.
        from .backends import _registered_algorithms, backend_info

        if key not in {"algos", "generators"} and key not in _registered_algorithms:
            raise AttributeError(
                f"Invalid config name: {key!r}. Expected 'algos', 'generators', or a name "
                "of a dispatchable function (e.g. `.name` attribute of the function)."
            )
        is_backend_list = isinstance(value, list) and all(
            isinstance(name, str) for name in value
        )
        if not is_backend_list:
            raise TypeError(
                f"{key!r} config must be a list of backend names; got {value!r}"
            )
        unknown = {name for name in value if name not in backend_info}
        if unknown:
            unknown = ", ".join(map(repr, sorted(unknown)))
            raise ValueError(f"Unknown backend when setting {key!r}: {unknown}")
        return value

    def _on_delattr(self, key):
        # `algos` and `generators` are permanent; other (per-function) entries
        # may be freely deleted since this config is non-strict.
        if key == "algos" or key == "generators":
            raise TypeError(f"{key!r} configuration item can't be deleted.")
+
+
+class NetworkXConfig(Config):
+    """Configuration for NetworkX that controls behaviors such as how to use backends.
+
+    Attribute and bracket notation are supported for getting and setting configurations::
+
+        >>> nx.config.backend_priority == nx.config["backend_priority"]
+        True
+
+    Parameters
+    ----------
+    backend_priority : list of backend names or dict or BackendPriorities
+        Enable automatic conversion of graphs to backend graphs for functions
+        implemented by the backend. Priority is given to backends listed earlier.
+        This is a nested configuration with keys ``algos``, ``generators``, and,
+        optionally, function names. Setting this value to a list of backend names
+        will set ``nx.config.backend_priority.algos``. For more information, see
+        ``help(nx.config.backend_priority)``. Default is empty list.
+
+    backends : Config mapping of backend names to backend Config
+        The keys of the Config mapping are names of all installed NetworkX backends,
+        and the values are their configurations as Config mappings.
+
+    cache_converted_graphs : bool
+        If True, then save converted graphs to the cache of the input graph. Graph
+        conversion may occur when automatically using a backend from `backend_priority`
+        or when using the `backend=` keyword argument to a function call. Caching can
+        improve performance by avoiding repeated conversions, but it uses more memory.
+        Care should be taken to not manually mutate a graph that has cached graphs; for
+        example, ``G[u][v][k] = val`` changes the graph, but does not clear the cache.
+        Using methods such as ``G.add_edge(u, v, weight=val)`` will clear the cache to
+        keep it consistent. ``G.__networkx_cache__.clear()`` manually clears the cache.
+        Default is True.
+
+    fallback_to_nx : bool
+        If True, then "fall back" and run with the default "networkx" implementation
+        for dispatchable functions not implemented by backends of input graphs. When a
+        backend graph is passed to a dispatchable function, the default behavior is to
+        use the implementation from that backend if possible and raise if not. Enabling
+        ``fallback_to_nx`` makes the networkx implementation the fallback to use instead
+        of raising, and will convert the backend graph to a networkx-compatible graph.
+        Default is False.
+
+    warnings_to_ignore : set of strings
+        Control which warnings from NetworkX are not emitted. Valid elements:
+
+        - `"cache"`: when a cached value is used from ``G.__networkx_cache__``.
+
+    Notes
+    -----
+    Environment variables may be used to control some default configurations:
+
+    - ``NETWORKX_BACKEND_PRIORITY``: set ``backend_priority.algos`` from comma-separated names.
+    - ``NETWORKX_CACHE_CONVERTED_GRAPHS``: set ``cache_converted_graphs`` to True if nonempty.
+    - ``NETWORKX_FALLBACK_TO_NX``: set ``fallback_to_nx`` to True if nonempty.
+    - ``NETWORKX_WARNINGS_TO_IGNORE``: set `warnings_to_ignore` from comma-separated names.
+
+    and can be used for finer control of ``backend_priority`` such as:
+
+    - ``NETWORKX_BACKEND_PRIORITY_ALGOS``: same as ``NETWORKX_BACKEND_PRIORITY`` to set ``backend_priority.algos``.
+
+    This is a global configuration. Use with caution when using from multiple threads.
+    """
+
+    backend_priority: BackendPriorities
+    backends: Config
+    cache_converted_graphs: bool
+    fallback_to_nx: bool
+    warnings_to_ignore: set[str]
+
+    def _on_setattr(self, key, value):
+        from .backends import backend_info
+
+        if key == "backend_priority":
+            if isinstance(value, list):
+                getattr(self, key).algos = value
+                value = getattr(self, key)
+            elif isinstance(value, dict):
+                kwargs = value
+                value = BackendPriorities(algos=[], generators=[])
+                for key, val in kwargs.items():
+                    setattr(value, key, val)
+            elif not isinstance(value, BackendPriorities):
+                raise TypeError(
+                    f"{key!r} config must be a dict of lists of backend names; got {value!r}"
+                )
+        elif key == "backends":
+            if not (
+                isinstance(value, Config)
+                and all(isinstance(key, str) for key in value)
+                and all(isinstance(val, Config) for val in value.values())
+            ):
+                raise TypeError(
+                    f"{key!r} config must be a Config of backend configs; got {value!r}"
+                )
+            if missing := {x for x in value if x not in backend_info}:
+                missing = ", ".join(map(repr, sorted(missing)))
+                raise ValueError(f"Unknown backend when setting {key!r}: {missing}")
+        elif key in {"cache_converted_graphs", "fallback_to_nx"}:
+            if not isinstance(value, bool):
+                raise TypeError(f"{key!r} config must be True or False; got {value!r}")
+        elif key == "warnings_to_ignore":
+            if not (isinstance(value, set) and all(isinstance(x, str) for x in value)):
+                raise TypeError(
+                    f"{key!r} config must be a set of warning names; got {value!r}"
+                )
+            known_warnings = {"cache"}
+            if missing := {x for x in value if x not in known_warnings}:
+                missing = ", ".join(map(repr, sorted(missing)))
+                raise ValueError(
+                    f"Unknown warning when setting {key!r}: {missing}. Valid entries: "
+                    + ", ".join(sorted(known_warnings))
+                )
+        return value
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/decorators.py b/.venv/lib/python3.12/site-packages/networkx/utils/decorators.py
new file mode 100644
index 00000000..36ae9be2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/decorators.py
@@ -0,0 +1,1237 @@
+import bz2
+import collections
+import gzip
+import inspect
+import itertools
+import re
+import warnings
+from collections import defaultdict
+from contextlib import contextmanager
+from functools import wraps
+from inspect import Parameter, signature
+from os.path import splitext
+from pathlib import Path
+
+import networkx as nx
+from networkx.utils import create_py_random_state, create_random_state
+
+__all__ = [
+    "not_implemented_for",
+    "open_file",
+    "nodes_or_number",
+    "np_random_state",
+    "py_random_state",
+    "argmap",
+]
+
+
+def not_implemented_for(*graph_types):
+    """Decorator to mark algorithms as not implemented
+
+    Parameters
+    ----------
+    graph_types : container of strings
+        Entries must be one of "directed", "undirected", "multigraph", or "graph".
+
+    Returns
+    -------
+    _require : function
+        The decorated function.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If the input graph is of one of the specified ``graph_types``.
+
+    Notes
+    -----
+    Multiple types are joined logically with "and".
+    For "or" use multiple @not_implemented_for() lines.
+
+    Examples
+    --------
+    Decorate functions like this::
+
+       @not_implemented_for("directed")
+       def sp_function(G):
+           pass
+
+
+       # rule out MultiDiGraph
+       @not_implemented_for("directed", "multigraph")
+       def sp_np_function(G):
+           pass
+
+
+       # rule out all except DiGraph
+       @not_implemented_for("undirected")
+       @not_implemented_for("multigraph")
+       def sp_np_function(G):
+           pass
+    """
+    if ("directed" in graph_types) and ("undirected" in graph_types):
+        raise ValueError("Function not implemented on directed AND undirected graphs?")
+    if ("multigraph" in graph_types) and ("graph" in graph_types):
+        raise ValueError("Function not implemented on graph AND multigraphs?")
+    if not set(graph_types) < {"directed", "undirected", "multigraph", "graph"}:
+        raise KeyError(
+            "use one or more of directed, undirected, multigraph, graph.  "
+            f"You used {graph_types}"
+        )
+
+    # 3-way logic: True if "directed" input, False if "undirected" input, else None
+    dval = ("directed" in graph_types) or "undirected" not in graph_types and None
+    mval = ("multigraph" in graph_types) or "graph" not in graph_types and None
+    errmsg = f"not implemented for {' '.join(graph_types)} type"
+
+    def _not_implemented_for(g):
+        if (mval is None or mval == g.is_multigraph()) and (
+            dval is None or dval == g.is_directed()
+        ):
+            raise nx.NetworkXNotImplemented(errmsg)
+
+        return g
+
+    return argmap(_not_implemented_for, 0)
+
+
+# To handle new extensions, define a function accepting a `path` and `mode`.
+# Then add the extension to _dispatch_dict.
+fopeners = {
+    ".gz": gzip.open,
+    ".gzip": gzip.open,
+    ".bz2": bz2.BZ2File,
+}
+_dispatch_dict = defaultdict(lambda: open, **fopeners)
+
+
+def open_file(path_arg, mode="r"):
+    """Decorator to ensure clean opening and closing of files.
+
+    Parameters
+    ----------
+    path_arg : string or int
+        Name or index of the argument that is a path.
+
+    mode : str
+        String for opening mode.
+
+    Returns
+    -------
+    _open_file : function
+        Function which cleanly executes the io.
+
+    Examples
+    --------
+    Decorate functions like this::
+
+       @open_file(0, "r")
+       def read_function(pathname):
+           pass
+
+
+       @open_file(1, "w")
+       def write_function(G, pathname):
+           pass
+
+
+       @open_file(1, "w")
+       def write_function(G, pathname="graph.dot"):
+           pass
+
+
+       @open_file("pathname", "w")
+       def write_function(G, pathname="graph.dot"):
+           pass
+
+
+       @open_file("path", "w+")
+       def another_function(arg, **kwargs):
+           path = kwargs["path"]
+           pass
+
+    Notes
+    -----
+    Note that this decorator solves the problem when a path argument is
+    specified as a string, but it does not handle the situation when the
+    function wants to accept a default of None (and then handle it).
+
+    Here is an example of how to handle this case::
+
+      @open_file("path")
+      def some_function(arg1, arg2, path=None):
+          if path is None:
+              fobj = tempfile.NamedTemporaryFile(delete=False)
+          else:
+              # `path` could have been a string or file object or something
+              # similar. In any event, the decorator has given us a file object
+              # and it will close it for us, if it should.
+              fobj = path
+
+          try:
+              fobj.write("blah")
+          finally:
+              if path is None:
+                  fobj.close()
+
+    Normally, we'd want to use "with" to ensure that fobj gets closed.
+    However, the decorator will make `path` a file object for us,
+    and using "with" would undesirably close that file object.
+    Instead, we use a try block, as shown above.
+    When we exit the function, fobj will be closed, if it should be, by the decorator.
+    """
+
+    def _open_file(path):
+        # Now we have the path_arg. There are two types of input to consider:
+        #   1) string representing a path that should be opened
+        #   2) an already opened file object
+        if isinstance(path, str):
+            ext = splitext(path)[1]
+        elif isinstance(path, Path):
+            # path is a pathlib reference to a filename
+            ext = path.suffix
+            path = str(path)
+        else:
+            # could be None, or a file handle, in which case the algorithm will deal with it
+            return path, lambda: None
+
+        fobj = _dispatch_dict[ext](path, mode=mode)
+        return fobj, lambda: fobj.close()
+
+    return argmap(_open_file, path_arg, try_finally=True)
+
+
+def nodes_or_number(which_args):
+    """Decorator to allow number of nodes or container of nodes.
+
+    With this decorator, the specified argument can be either a number or a container
+    of nodes. If it is a number, the nodes used are `range(n)`.
+    This allows `nx.complete_graph(50)` in place of `nx.complete_graph(list(range(50)))`.
+    And it also allows `nx.complete_graph(any_list_of_nodes)`.
+
+    Parameters
+    ----------
+    which_args : string or int or sequence of strings or ints
+        If string, the name of the argument to be treated.
+        If int, the index of the argument to be treated.
+        If more than one node argument is allowed, can be a list of locations.
+
+    Returns
+    -------
+    _nodes_or_numbers : function
+        Function which replaces int args with ranges.
+
+    Examples
+    --------
+    Decorate functions like this::
+
+       @nodes_or_number("nodes")
+       def empty_graph(nodes):
+           # nodes is converted to a list of nodes
+
+       @nodes_or_number(0)
+       def empty_graph(nodes):
+           # nodes is converted to a list of nodes
+
+       @nodes_or_number(["m1", "m2"])
+       def grid_2d_graph(m1, m2, periodic=False):
+           # m1 and m2 are each converted to a list of nodes
+
+       @nodes_or_number([0, 1])
+       def grid_2d_graph(m1, m2, periodic=False):
+           # m1 and m2 are each converted to a list of nodes
+
+       @nodes_or_number(1)
+       def full_rary_tree(r, n):
+           # presumably r is a number. It is not handled by this decorator.
+           # n is converted to a list of nodes
+    """
+
+    def _nodes_or_number(n):
+        try:
+            nodes = list(range(n))
+        except TypeError:
+            nodes = tuple(n)
+        else:
+            if n < 0:
+                raise nx.NetworkXError(f"Negative number of nodes not valid: {n}")
+        return (n, nodes)
+
+    try:
+        iter_wa = iter(which_args)
+    except TypeError:
+        iter_wa = (which_args,)
+
+    return argmap(_nodes_or_number, *iter_wa)
+
+
+def np_random_state(random_state_argument):
+    """Decorator to generate a numpy RandomState or Generator instance.
+
+    The decorator processes the argument indicated by `random_state_argument`
+    using :func:`nx.utils.create_random_state`.
+    The argument value can be a seed (integer), or a `numpy.random.RandomState`
+    or `numpy.random.Generator` instance or (`None` or `numpy.random`).
+    The latter two options use the global random number generator for `numpy.random`.
+
+    The returned instance is a `numpy.random.RandomState` or `numpy.random.Generator`.
+
+    Parameters
+    ----------
+    random_state_argument : string or int
+        The name or index of the argument to be converted
+        to a `numpy.random.RandomState` instance.
+
+    Returns
+    -------
+    _random_state : function
+        Function whose random_state keyword argument is a RandomState instance.
+
+    Examples
+    --------
+    Decorate functions like this::
+
+       @np_random_state("seed")
+       def random_float(seed=None):
+           return seed.rand()
+
+
+       @np_random_state(0)
+       def random_float(rng=None):
+           return rng.rand()
+
+
+       @np_random_state(1)
+       def random_array(dims, random_state=1):
+           return random_state.rand(*dims)
+
+    See Also
+    --------
+    py_random_state
+    """
+    return argmap(create_random_state, random_state_argument)
+
+
+def py_random_state(random_state_argument):
+    """Decorator to generate a random.Random instance (or equiv).
+
+    This decorator processes `random_state_argument` using
+    :func:`nx.utils.create_py_random_state`.
+    The input value can be a seed (integer), or a random number generator::
+
+        If int, return a random.Random instance set with seed=int.
+        If random.Random instance, return it.
+        If None or the `random` package, return the global random number
+        generator used by `random`.
+        If np.random package, or the default numpy RandomState instance,
+        return the default numpy random number generator wrapped in a
+        `PythonRandomViaNumpyBits` class.
+        If np.random.Generator instance, return it wrapped in a
+        `PythonRandomViaNumpyBits` class.
+
+        # Legacy options
+        If np.random.RandomState instance, return it wrapped in a
+        `PythonRandomInterface` class.
+        If a `PythonRandomInterface` instance, return it
+
+    Parameters
+    ----------
+    random_state_argument : string or int
+        The name of the argument or the index of the argument in args that is
+        to be converted to the random.Random instance or numpy.random.RandomState
+        instance that mimics basic methods of random.Random.
+
+    Returns
+    -------
+    _random_state : function
+        Function whose random_state_argument is converted to a Random instance.
+
+    Examples
+    --------
+    Decorate functions like this::
+
+       @py_random_state("random_state")
+       def random_float(random_state=None):
+           return random_state.rand()
+
+
+       @py_random_state(0)
+       def random_float(rng=None):
+           return rng.rand()
+
+
+       @py_random_state(1)
+       def random_array(dims, seed=12345):
+           return seed.rand(*dims)
+
+    See Also
+    --------
+    np_random_state
+    """
+
+    return argmap(create_py_random_state, random_state_argument)
+
+
+class argmap:
+    """A decorator to apply a map to arguments before calling the function
+
+    This class provides a decorator that maps (transforms) arguments of the function
+    before the function is called. Thus for example, we have similar code
+    in many functions to determine whether an argument is the number of nodes
+    to be created, or a list of nodes to be handled. The decorator provides
+    the code to accept either -- transforming the indicated argument into a
+    list of nodes before the actual function is called.
+
+    This decorator class allows us to process single or multiple arguments.
+    The arguments to be processed can be specified by string, naming the argument,
+    or by index, specifying the item in the args list.
+
+    Parameters
+    ----------
+    func : callable
+        The function to apply to arguments
+
+    *args : iterable of (int, str or tuple)
+        A list of parameters, specified either as strings (their names), ints
+        (numerical indices) or tuples, which may contain ints, strings, and
+        (recursively) tuples. Each indicates which parameters the decorator
+        should map. Tuples indicate that the map function takes (and returns)
+        multiple parameters in the same order and nested structure as indicated
+        here.
+
+    try_finally : bool (default: False)
+        When True, wrap the function call in a try-finally block with code
+        for the finally block created by `func`. This is used when the map
+        function constructs an object (like a file handle) that requires
+        post-processing (like closing).
+
+        Note: try_finally decorators cannot be used to decorate generator
+        functions.
+
+    Examples
+    --------
+    Most of these examples use `@argmap(...)` to apply the decorator to
+    the function defined on the next line.
+    In the NetworkX codebase however, `argmap` is used within a function to
+    construct a decorator. That is, the decorator defines a mapping function
+    and then uses `argmap` to build and return a decorated function.
+    A simple example is a decorator that specifies which currency to report money.
+    The decorator (named `convert_to`) would be used like::
+
+        @convert_to("US_Dollars", "income")
+        def show_me_the_money(name, income):
+            print(f"{name} : {income}")
+
+    And the code to create the decorator might be::
+
+        def convert_to(currency, which_arg):
+            def _convert(amount):
+                if amount.currency != currency:
+                    amount = amount.to_currency(currency)
+                return amount
+
+            return argmap(_convert, which_arg)
+
+    Despite this common idiom for argmap, most of the following examples
+    use the `@argmap(...)` idiom to save space.
+
+    Here's an example use of argmap to sum the elements of two of the functions
+    arguments. The decorated function::
+
+        @argmap(sum, "xlist", "zlist")
+        def foo(xlist, y, zlist):
+            return xlist - y + zlist
+
+    is syntactic sugar for::
+
+        def foo(xlist, y, zlist):
+            x = sum(xlist)
+            z = sum(zlist)
+            return x - y + z
+
+    and is equivalent to (using argument indexes)::
+
+        @argmap(sum, "xlist", 2)
+        def foo(xlist, y, zlist):
+            return xlist - y + zlist
+
+    or::
+
+        @argmap(sum, "zlist", 0)
+        def foo(xlist, y, zlist):
+            return xlist - y + zlist
+
+    Transforming functions can be applied to multiple arguments, such as::
+
+        def swap(x, y):
+            return y, x
+
+        # the 2-tuple tells argmap that the map `swap` has 2 inputs/outputs.
+        @argmap(swap, ("a", "b"))
+        def foo(a, b, c):
+            return a / b * c
+
+    is equivalent to::
+
+        def foo(a, b, c):
+            a, b = swap(a, b)
+            return a / b * c
+
+    More generally, the applied arguments can be nested tuples of strings or ints.
+    The syntax `@argmap(some_func, ("a", ("b", "c")))` would expect `some_func` to
+    accept 2 inputs with the second expected to be a 2-tuple. It should then return
+    2 outputs with the second a 2-tuple. The return values would replace inputs "a",
+    "b" and "c" respectively. Similarly for `@argmap(some_func, (0, ("b", 2)))`.
+
+    Also, note that an index larger than the number of named parameters is allowed
+    for variadic functions. For example::
+
+        def double(a):
+            return 2 * a
+
+
+        @argmap(double, 3)
+        def overflow(a, *args):
+            return a, args
+
+
+        print(overflow(1, 2, 3, 4, 5, 6))  # output is 1, (2, 3, 8, 5, 6)
+
+    **Try Finally**
+
+    Additionally, this `argmap` class can be used to create a decorator that
+    initiates a try...finally block. The decorator must be written to return
+    both the transformed argument and a closing function.
+    This feature was included to enable the `open_file` decorator which might
+    need to close the file or not depending on whether it had to open that file.
+    This feature uses the keyword-only `try_finally` argument to `@argmap`.
+
+    For example this map opens a file and then makes sure it is closed::
+
+        def open_file(fn):
+            f = open(fn)
+            return f, lambda: f.close()
+
+    The decorator applies that to the function `foo`::
+
+        @argmap(open_file, "file", try_finally=True)
+        def foo(file):
+            print(file.read())
+
+    is syntactic sugar for::
+
+        def foo(file):
+            file, close_file = open_file(file)
+            try:
+                print(file.read())
+            finally:
+                close_file()
+
+    and is equivalent to (using indexes)::
+
+        @argmap(open_file, 0, try_finally=True)
+        def foo(file):
+            print(file.read())
+
+    Here's an example of the try_finally feature used to create a decorator::
+
+        def my_closing_decorator(which_arg):
+            def _opener(path):
+                if path is None:
+                    path = open(path)
+                    fclose = path.close
+                else:
+                    # assume `path` handles the closing
+                    fclose = lambda: None
+                return path, fclose
+
+            return argmap(_opener, which_arg, try_finally=True)
+
+    which can then be used as::
+
+        @my_closing_decorator("file")
+        def fancy_reader(file=None):
+            # this code doesn't need to worry about closing the file
+            print(file.read())
+
+    Decorators with try_finally = True cannot be used with generator functions,
+    because the `finally` block is evaluated before the generator is exhausted::
+
+        @argmap(open_file, "file", try_finally=True)
+        def file_to_lines(file):
+            for line in file.readlines():
+                yield line
+
+    is equivalent to::
+
+        def file_to_lines_wrapped(file):
+            for line in file.readlines():
+                yield line
+
+
+        def file_to_lines_wrapper(file):
+            try:
+                file = open_file(file)
+                return file_to_lines_wrapped(file)
+            finally:
+                file.close()
+
+    which behaves similarly to::
+
+        def file_to_lines_whoops(file):
+            file = open_file(file)
+            file.close()
+            for line in file.readlines():
+                yield line
+
+    because the `finally` block of `file_to_lines_wrapper` is executed before
+    the caller has a chance to exhaust the iterator.
+
+    Notes
+    -----
+    An object of this class is callable and intended to be used when
+    defining a decorator. Generally, a decorator takes a function as input
+    and constructs a function as output. Specifically, an `argmap` object
+    returns the input function decorated/wrapped so that specified arguments
+    are mapped (transformed) to new values before the decorated function is called.
+
+    As an overview, the argmap object returns a new function with all the
+    dunder values of the original function (like `__doc__`, `__name__`, etc).
+    Code for this decorated function is built based on the original function's
+    signature. It starts by mapping the input arguments to potentially new
+    values. Then it calls the decorated function with these new values in place
+    of the indicated arguments that have been mapped. The return value of the
+    original function is then returned. This new function is the function that
+    is actually called by the user.
+
+    Three additional features are provided.
+        1) The code is lazily compiled. That is, the new function is returned
+        as an object without the code compiled, but with all information
+        needed so it can be compiled upon its first invocation. This saves
+        time on import at the cost of additional time on the first call of
+        the function. Subsequent calls are then just as fast as normal.
+
+        2) If the "try_finally" keyword-only argument is True, a try block
+        follows each mapped argument, matched on the other side of the wrapped
+        call, by a finally block closing that mapping.  We expect func to return
+        a 2-tuple: the mapped value and a function to be called in the finally
+        clause.  This feature was included so the `open_file` decorator could
+        provide a file handle to the decorated function and close the file handle
+        after the function call. It even keeps track of whether to close the file
+        handle or not based on whether it had to open the file or the input was
+        already open. So, the decorated function does not need to include any
+        code to open or close files.
+
+        3) The maps applied can process multiple arguments. For example,
+        you could swap two arguments using a mapping, or transform
+        them to their sum and their difference. This was included to allow
+        a decorator in the `quality.py` module that checks that an input
+        `partition` is a valid partition of the nodes of the input graph `G`.
+        In this example, the map has inputs `(G, partition)`. After checking
+        for a valid partition, the map either raises an exception or leaves
+        the inputs unchanged. Thus many functions that make this check can
+        use the decorator rather than copy the checking code into each function.
+        More complicated nested argument structures are described below.
+
+    The remaining notes describe the code structure and methods for this
+    class in broad terms to aid in understanding how to use it.
+
+    Instantiating an `argmap` object simply stores the mapping function and
+    the input identifiers of which arguments to map. The resulting decorator
+    is ready to use this map to decorate any function. Calling that object
+    (`argmap.__call__`, but usually done via `@my_decorator`) a lazily
+    compiled thin wrapper of the decorated function is constructed,
+    wrapped with the necessary function dunder attributes like `__doc__`
+    and `__name__`. That thinly wrapped function is returned as the
+    decorated function. When that decorated function is called, the thin
+    wrapper of code calls `argmap._lazy_compile` which compiles the decorated
+    function (using `argmap.compile`) and replaces the code of the thin
+    wrapper with the newly compiled code. This saves the compilation step
+    every import of networkx, at the cost of compiling upon the first call
+    to the decorated function.
+
+    When the decorated function is compiled, the code is recursively assembled
+    using the `argmap.assemble` method. The recursive nature is needed in
+    case of nested decorators. The result of the assembly is a number of
+    useful objects.
+
+      sig : the function signature of the original decorated function as
+          constructed by :func:`argmap.signature`. This is constructed
+          using `inspect.signature` but enhanced with attribute
+          strings `sig_def` and `sig_call`, and other information
+          specific to mapping arguments of this function.
+          This information is used to construct a string of code defining
+          the new decorated function.
+
+      wrapped_name : a unique internally used name constructed by argmap
+          for the decorated function.
+
+      functions : a dict of the functions used inside the code of this
+          decorated function, to be used as `globals` in `exec`.
+          This dict is recursively updated to allow for nested decorating.
+
+      mapblock : code (as a list of strings) to map the incoming argument
+          values to their mapped values.
+
+      finallys : code (as a list of strings) to provide the possibly nested
+          set of finally clauses if needed.
+
+      mutable_args : a bool indicating whether the `sig.args` tuple should be
+          converted to a list so mutation can occur.
+
+    After this recursive assembly process, the `argmap.compile` method
+    constructs code (as strings) to convert the tuple `sig.args` to a list
+    if needed. It joins the defining code with appropriate indents and
+    compiles the result.  Finally, this code is evaluated and the original
+    wrapper's implementation is replaced with the compiled version (see
+    `argmap._lazy_compile` for more details).
+
+    Other `argmap` methods include `_name` and `_count` which allow internally
+    generated names to be unique within a python session.
+    The methods `_flatten` and `_indent` process the nested lists of strings
+    into properly indented python code ready to be compiled.
+
+    More complicated nested tuples of arguments also allowed though
+    usually not used. For the simple 2 argument case, the argmap
+    input ("a", "b") implies the mapping function will take 2 arguments
+    and return a 2-tuple of mapped values. A more complicated example
+    with argmap input `("a", ("b", "c"))` requires the mapping function
+    take 2 inputs, with the second being a 2-tuple. It then must output
+    the 3 mapped values in the same nested structure `(newa, (newb, newc))`.
+    This level of generality is not often needed, but was convenient
+    to implement when handling the multiple arguments.
+
+    See Also
+    --------
+    not_implemented_for
+    open_file
+    nodes_or_number
+    py_random_state
+    networkx.algorithms.community.quality.require_partition
+
+    """
+
+    def __init__(self, func, *args, try_finally=False):
+        self._func = func
+        self._args = args
+        self._finally = try_finally
+
+    @staticmethod
+    def _lazy_compile(func):
+        """Compile the source of a wrapped function
+
+        Assemble and compile the decorated function, and intrusively replace its
+        code with the compiled version's.  The thinly wrapped function becomes
+        the decorated function.
+
+        Parameters
+        ----------
+        func : callable
+            A function returned by argmap.__call__ which is in the process
+            of being called for the first time.
+
+        Returns
+        -------
+        func : callable
+            The same function, with a new __code__ object.
+
+        Notes
+        -----
+        It was observed in NetworkX issue #4732 [1] that the import time of
+        NetworkX was significantly bloated by the use of decorators: over half
+        of the import time was being spent decorating functions.  This was
+        somewhat improved by a change made to the `decorator` library, at the
+        cost of a relatively heavy-weight call to `inspect.Signature.bind`
+        for each call to the decorated function.
+
+        The workaround we arrived at is to do minimal work at the time of
+        decoration.  When the decorated function is called for the first time,
+        we compile a function with the same function signature as the wrapped
+        function.  The resulting decorated function is faster than one made by
+        the `decorator` library, so that the overhead of the first call is
+        'paid off' after a small number of calls.
+
+        References
+        ----------
+
+        [1] https://github.com/networkx/networkx/issues/4732
+
+        """
+        real_func = func.__argmap__.compile(func.__wrapped__)
+        func.__code__ = real_func.__code__
+        func.__globals__.update(real_func.__globals__)
+        func.__dict__.update(real_func.__dict__)
+        return func
+
    def __call__(self, f):
        """Construct a lazily decorated wrapper of f.

        The decorated function will be compiled when it is called for the first time,
        and it will replace its own __code__ object so subsequent calls are fast.

        Parameters
        ----------
        f : callable
            A function to be decorated.

        Returns
        -------
        func : callable
            The decorated function.

        See Also
        --------
        argmap._lazy_compile
        """

        # NOTE: because this def lives in the body of class `argmap`, the
        # keyword-only parameter `__wrapper` is privately name-mangled to
        # `_argmap__wrapper` (see the __kwdefaults__ assignment below).
        def func(*args, __wrapper=None, **kwargs):
            return argmap._lazy_compile(__wrapper)(*args, **kwargs)

        # standard function-wrapping stuff
        func.__name__ = f.__name__
        func.__doc__ = f.__doc__
        func.__defaults__ = f.__defaults__
        func.__kwdefaults__.update(f.__kwdefaults__ or {})
        func.__module__ = f.__module__
        func.__qualname__ = f.__qualname__
        func.__dict__.update(f.__dict__)
        func.__wrapped__ = f

        # now that we've wrapped f, we may have picked up some __dict__ or
        # __kwdefaults__ items that were set by a previous argmap.  Thus, we set
        # these values after those update() calls.

        # If we attempt to access func from within itself, that happens through
        # a closure -- which trips an error when we replace func.__code__.  The
        # standard workaround for functions which can't see themselves is to use
        # a Y-combinator, as we do here.
        func.__kwdefaults__["_argmap__wrapper"] = func

        # this self-reference is here because functools.wraps preserves
        # everything in __dict__, and we don't want to mistake a non-argmap
        # wrapper for an argmap wrapper
        func.__self__ = func

        # this is used to variously call self.assemble and self.compile
        func.__argmap__ = self

        # Propagate (or detect) generator-ness so the try_finally check below
        # also works when stacking multiple argmap decorators.
        if hasattr(f, "__argmap__"):
            func.__is_generator = f.__is_generator
        else:
            func.__is_generator = inspect.isgeneratorfunction(f)

        # A generator's body runs after the wrapper returns, so a finally
        # block in the wrapper could not protect it; reject the combination.
        if self._finally and func.__is_generator:
            raise nx.NetworkXError("argmap cannot decorate generators with try_finally")

        return func
+
+    __count = 0
+
+    @classmethod
+    def _count(cls):
+        """Maintain a globally-unique identifier for function names and "file" names
+
+        Note that this counter is a class method reporting a class variable
+        so the count is unique within a Python session. It could differ from
+        session to session for a specific decorator depending on the order
+        that the decorators are created. But that doesn't disrupt `argmap`.
+
+        This is used in two places: to construct unique variable names
+        in the `_name` method and to construct unique fictitious filenames
+        in the `_compile` method.
+
+        Returns
+        -------
+        count : int
+            An integer unique to this Python session (simply counts from zero)
+        """
+        cls.__count += 1
+        return cls.__count
+
+    _bad_chars = re.compile("[^a-zA-Z0-9_]")
+
+    @classmethod
+    def _name(cls, f):
+        """Mangle the name of a function to be unique but somewhat human-readable
+
+        The names are unique within a Python session and set using `_count`.
+
+        Parameters
+        ----------
+        f : str or object
+
+        Returns
+        -------
+        name : str
+            The mangled version of `f.__name__` (if `f.__name__` exists) or `f`
+
+        """
+        f = f.__name__ if hasattr(f, "__name__") else f
+        fname = re.sub(cls._bad_chars, "_", f)
+        return f"argmap_{fname}_{cls._count()}"
+
+    def compile(self, f):
+        """Compile the decorated function.
+
+        Called once for a given decorated function -- collects the code from all
+        argmap decorators in the stack, and compiles the decorated function.
+
+        Much of the work done here uses the `assemble` method to allow recursive
+        treatment of multiple argmap decorators on a single decorated function.
+        That flattens the argmap decorators, collects the source code to construct
+        a single decorated function, then compiles/executes/returns that function.
+
+        The source code for the decorated function is stored as an attribute
+        `_code` on the function object itself.
+
+        Note that Python's `compile` function requires a filename, but this
+        code is constructed without a file, so a fictitious filename is used
+        to describe where the function comes from. The name is something like:
+        "argmap compilation 4".
+
+        Parameters
+        ----------
+        f : callable
+            The function to be decorated
+
+        Returns
+        -------
+        func : callable
+            The decorated file
+
+        """
+        sig, wrapped_name, functions, mapblock, finallys, mutable_args = self.assemble(
+            f
+        )
+
+        call = f"{sig.call_sig.format(wrapped_name)}#"
+        mut_args = f"{sig.args} = list({sig.args})" if mutable_args else ""
+        body = argmap._indent(sig.def_sig, mut_args, mapblock, call, finallys)
+        code = "\n".join(body)
+
+        locl = {}
+        globl = dict(functions.values())
+        filename = f"{self.__class__} compilation {self._count()}"
+        compiled = compile(code, filename, "exec")
+        exec(compiled, globl, locl)
+        func = locl[sig.name]
+        func._code = code
+        return func
+
    def assemble(self, f):
        """Collects components of the source for the decorated function wrapping f.

        If `f` has multiple argmap decorators, we recursively assemble the stack of
        decorators into a single flattened function.

        This method is part of the `compile` method's process yet separated
        from that method to allow recursive processing. The outputs are
        strings, dictionaries and lists that collect needed info to
        flatten any nested argmap-decoration.

        Parameters
        ----------
        f : callable
            The function to be decorated.  If f is argmapped, we assemble it.

        Returns
        -------
        sig : argmap.Signature
            The function signature as an `argmap.Signature` object.
        wrapped_name : str
            The mangled name used to represent the wrapped function in the code
            being assembled.
        functions : dict
            A dictionary mapping id(g) -> (mangled_name(g), g) for functions g
            referred to in the code being assembled. These need to be present
            in the ``globals`` scope of ``exec`` when defining the decorated
            function.
        mapblock : list of lists and/or strings
            Code that implements mapping of parameters including any try blocks
            if needed. This code will precede the decorated function call.
        finallys : list of lists and/or strings
            Code that implements the finally blocks to post-process the
            arguments (usually close any files if needed) after the
            decorated function is called.
        mutable_args : bool
            True if the decorator needs to modify positional arguments
            via their indices. The compile method then turns the argument
            tuple into a list so that the arguments can be modified.
        """

        # first, we check if f is already argmapped -- if that's the case,
        # build up the function recursively.
        # > mapblock is generally a list of function calls of the sort
        #     arg = func(arg)
        # in addition to some try-blocks if needed.
        # > finallys is a recursive list of finally blocks of the sort
        #         finally:
        #             close_func_1()
        #     finally:
        #         close_func_2()
        # > functions is a dict of functions used in the scope of our decorated
        # function. It will be used to construct globals used in compilation.
        # We make functions[id(f)] = name_of_f, f to ensure that a given
        # function is stored and named exactly once even if called by
        # nested decorators.
        # `f.__self__ is f` is the marker set by argmap.__call__ that
        # distinguishes an argmap wrapper from other wrapped functions.
        if hasattr(f, "__argmap__") and f.__self__ is f:
            (
                sig,
                wrapped_name,
                functions,
                mapblock,
                finallys,
                mutable_args,
            ) = f.__argmap__.assemble(f.__wrapped__)
            functions = dict(functions)  # shallow-copy just in case
        else:
            # Base case: f is a plain function; start with empty code blocks.
            sig = self.signature(f)
            wrapped_name = self._name(f)
            mapblock, finallys = [], []
            functions = {id(f): (wrapped_name, f)}
            mutable_args = False

        # Register this decorator's mapping function (once per identity).
        if id(self._func) in functions:
            fname, _ = functions[id(self._func)]
        else:
            fname, _ = functions[id(self._func)] = self._name(self._func), self._func

        # this is a bit complicated -- we can call functions with a variety of
        # nested arguments, so long as their input and output are tuples with
        # the same nested structure. e.g. ("a", "b") maps arguments a and b.
        # A more complicated nesting like (0, (3, 4)) maps arguments 0, 3, 4
        # expecting the mapping to output new values in the same nested shape.
        # The ability to argmap multiple arguments was necessary for
        # the decorator `nx.algorithms.community.quality.require_partition`, and
        # while we're not taking full advantage of the ability to handle
        # multiply-nested tuples, it was convenient to implement this in
        # generality because the recursive call to `get_name` is necessary in
        # any case.
        # Arguments already targeted by this decorator; a repeat is an error.
        applied = set()

        def get_name(arg, first=True):
            # Translate an argument spec (name, index, or nested tuple of
            # those) into the Python expression that refers to it inside the
            # generated wrapper.
            nonlocal mutable_args
            if isinstance(arg, tuple):
                # Nested tuple: map each piece, parenthesizing every level
                # except the outermost.
                name = ", ".join(get_name(x, False) for x in arg)
                return name if first else f"({name})"
            if arg in applied:
                raise nx.NetworkXError(f"argument {arg} is specified multiple times")
            applied.add(arg)
            if arg in sig.names:
                # A declared parameter, referenced by name or by index.
                return sig.names[arg]
            elif isinstance(arg, str):
                # Not a declared parameter: only reachable through **kwargs.
                if sig.kwargs is None:
                    raise nx.NetworkXError(
                        f"name {arg} is not a named parameter and this function doesn't have kwargs"
                    )
                return f"{sig.kwargs}[{arg!r}]"
            else:
                # Integer index past the declared parameters: index into
                # *args, which compile() must first convert to a list.
                if sig.args is None:
                    raise nx.NetworkXError(
                        f"index {arg} not a parameter index and this function doesn't have args"
                    )
                mutable_args = True
                return f"{sig.args}[{arg - sig.n_positional}]"

        if self._finally:
            # here's where we handle try_finally decorators.  Such a decorator
            # returns a mapped argument and a function to be called in a
            # finally block.  This feature was required by the open_file
            # decorator.  The below generates the code
            #
            # name, final = func(name)                   #<--append to mapblock
            # try:                                       #<--append to mapblock
            #     ... more argmapping and try blocks
            #     return WRAPPED_FUNCTION(...)
            #     ... more finally blocks
            # finally:                                   #<--prepend to finallys
            #     final()                                #<--prepend to finallys
            #
            # The "#" suffixes mark dedents for argmap._indent; nesting the
            # previous `finallys` list keeps each layer inside the one above.
            for a in self._args:
                name = get_name(a)
                final = self._name(name)
                mapblock.append(f"{name}, {final} = {fname}({name})")
                mapblock.append("try:")
                finallys = ["finally:", f"{final}()#", "#", finallys]
        else:
            mapblock.extend(
                f"{name} = {fname}({name})" for name in map(get_name, self._args)
            )

        return sig, wrapped_name, functions, mapblock, finallys, mutable_args
+
    @classmethod
    def signature(cls, f):
        r"""Construct a Signature object describing `f`

        Compute a Signature so that we can write a function wrapping f with
        the same signature and call-type.

        Parameters
        ----------
        f : callable
            A function to be decorated

        Returns
        -------
        sig : argmap.Signature
            The Signature of f

        Notes
        -----
        The Signature is a namedtuple with names:

            name : a unique version of the name of the decorated function
            signature : the inspect.signature of the decorated function
            def_sig : a string used as code to define the new function
            call_sig : a string used as code to call the decorated function
            names : a dict keyed by argument name and index to the argument's name
            n_positional : the number of positional arguments in the signature
            args : the name of the VAR_POSITIONAL argument if any, i.e. \*theseargs
            kwargs : the name of the VAR_KEYWORDS argument if any, i.e. \*\*kwargs

        These named attributes of the signature are used in `assemble` and `compile`
        to construct a string of source code for the decorated function.

        """
        sig = inspect.signature(f, follow_wrapped=False)
        def_sig = []
        call_sig = []
        names = {}

        kind = None
        args = None
        kwargs = None
        npos = 0
        for i, param in enumerate(sig.parameters.values()):
            # parameters can be position-only, keyword-or-position, keyword-only
            # in any combination, but only in the order as above.  we do edge
            # detection to add the appropriate punctuation
            prev = kind
            kind = param.kind
            if prev == param.POSITIONAL_ONLY != kind:
                # the last token was position-only, but this one isn't
                def_sig.append("/")
            # The chained comparison below unrolls to:
            #   prev != VAR_POSITIONAL  (a bare *args already separates)
            #   and prev != KEYWORD_ONLY  (the "*" was already emitted)
            #   and kind == KEYWORD_ONLY
            #   (the final `!= VAR_POSITIONAL` is then redundant).
            if (
                param.VAR_POSITIONAL
                != prev
                != param.KEYWORD_ONLY
                == kind
                != param.VAR_POSITIONAL
            ):
                # param is the first keyword-only arg and isn't starred
                def_sig.append("*")

            # star arguments as appropriate
            if kind == param.VAR_POSITIONAL:
                name = "*" + param.name
                args = param.name
                count = 0
            elif kind == param.VAR_KEYWORD:
                name = "**" + param.name
                kwargs = param.name
                count = 0
            else:
                # Ordinary parameter: reachable both by name and by index.
                names[i] = names[param.name] = param.name
                name = param.name
                count = 1

            # assign to keyword-only args in the function call
            if kind == param.KEYWORD_ONLY:
                call_sig.append(f"{name} = {name}")
            else:
                # npos counts parameters that may be passed positionally
                # (the VAR_* parameters contribute 0 via `count`).
                npos += count
                call_sig.append(name)

            def_sig.append(name)

        fname = cls._name(f)
        def_sig = f'def {fname}({", ".join(def_sig)}):'

        # call_sig is a template: `{}` is filled with the wrapped function's
        # mangled name by compile().
        call_sig = f"return {{}}({', '.join(call_sig)})"

        return cls.Signature(fname, sig, def_sig, call_sig, names, npos, args, kwargs)
+
    # Bundles everything `assemble` and `compile` need to know about a
    # wrapped function's call signature; see `argmap.signature` for the
    # meaning of each field.
    Signature = collections.namedtuple(
        "Signature",
        [
            "name",
            "signature",
            "def_sig",
            "call_sig",
            "names",
            "n_positional",
            "args",
            "kwargs",
        ],
    )
+
+    @staticmethod
+    def _flatten(nestlist, visited):
+        """flattens a recursive list of lists that doesn't have cyclic references
+
+        Parameters
+        ----------
+        nestlist : iterable
+            A recursive list of objects to be flattened into a single iterable
+
+        visited : set
+            A set of object ids which have been walked -- initialize with an
+            empty set
+
+        Yields
+        ------
+        Non-list objects contained in nestlist
+
+        """
+        for thing in nestlist:
+            if isinstance(thing, list):
+                if id(thing) in visited:
+                    raise ValueError("A cycle was found in nestlist.  Be a tree.")
+                else:
+                    visited.add(id(thing))
+                yield from argmap._flatten(thing, visited)
+            else:
+                yield thing
+
+    _tabs = " " * 64
+
+    @staticmethod
+    def _indent(*lines):
+        """Indent list of code lines to make executable Python code
+
+        Indents a tree-recursive list of strings, following the rule that one
+        space is added to the tab after a line that ends in a colon, and one is
+        removed after a line that ends in an hashmark.
+
+        Parameters
+        ----------
+        *lines : lists and/or strings
+            A recursive list of strings to be assembled into properly indented
+            code.
+
+        Returns
+        -------
+        code : str
+
+        Examples
+        --------
+
+            argmap._indent(*["try:", "try:", "pass#", "finally:", "pass#", "#",
+                             "finally:", "pass#"])
+
+        renders to
+
+            '''try:
+             try:
+              pass#
+             finally:
+              pass#
+             #
+            finally:
+             pass#'''
+        """
+        depth = 0
+        for line in argmap._flatten(lines, set()):
+            yield f"{argmap._tabs[:depth]}{line}"
+            depth += (line[-1:] == ":") - (line[-1:] == "#")
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/heaps.py b/.venv/lib/python3.12/site-packages/networkx/utils/heaps.py
new file mode 100644
index 00000000..3db27906
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/heaps.py
@@ -0,0 +1,340 @@
+"""
+Min-heaps.
+"""
+
+from heapq import heappop, heappush
+from itertools import count
+
+import networkx as nx
+
+__all__ = ["MinHeap", "PairingHeap", "BinaryHeap"]
+
+
class MinHeap:
    """Base class for min-heaps.

    A MinHeap keeps a collection of key-value pairs ordered by value and
    supports four operations: query the minimum pair (`min`), remove the
    minimum pair (`pop`), look up a key's value (`get`), and insert a pair
    or decrease an existing value (`insert`).  Those four operations are
    abstract here and must be supplied by concrete subclasses.
    """

    class _Item:
        """A key-value pair used by subclasses to represent heap entries."""

        __slots__ = ("key", "value")

        def __init__(self, key, value):
            self.key = key
            self.value = value

        def __repr__(self):
            return repr((self.key, self.value))

    def __init__(self):
        """Create an empty min-heap."""
        # Maps each key currently in the heap to its entry; subclasses
        # decide what the entry looks like (raw value, node object, ...).
        self._dict = {}

    def min(self):
        """Return, without removing, the key-value pair with minimum value.

        Returns
        -------
        key, value : tuple
            The key-value pair with the minimum value in the heap.

        Raises
        ------
        NetworkXError
            If the heap is empty.
        """
        raise NotImplementedError

    def pop(self):
        """Remove and return the key-value pair with minimum value.

        Returns
        -------
        key, value : tuple
            The key-value pair with the minimum value in the heap.

        Raises
        ------
        NetworkXError
            If the heap is empty.
        """
        raise NotImplementedError

    def get(self, key, default=None):
        """Return the value associated with `key`, or `default` if absent.

        Parameters
        ----------
        key : hashable object
            The key to be looked up.

        default : object
            Default value to return if the key is not present in the heap.
            Default value: None.

        Returns
        -------
        value : object.
            The value associated with the key.
        """
        raise NotImplementedError

    def insert(self, key, value, allow_increase=False):
        """Insert a new key-value pair, or change an existing pair's value.

        Parameters
        ----------
        key : hashable object
            The key.

        value : object comparable with existing values.
            The value.

        allow_increase : bool
            Whether the value is allowed to increase. If False, attempts to
            increase an existing value have no effect. Default value: False.

        Returns
        -------
        decreased : bool
            True if a pair is inserted or the existing value is decreased.
        """
        raise NotImplementedError

    def __bool__(self):
        """Return True if the heap holds at least one pair."""
        return bool(self._dict)

    # Python 2 spelling of __bool__; kept for backward compatibility.
    __nonzero__ = __bool__

    def __len__(self):
        """Return the number of key-value pairs in the heap."""
        return len(self._dict)

    def __contains__(self, key):
        """Return True if `key` is present in the heap.

        Parameters
        ----------
        key : any hashable object.
            The key to be looked up.
        """
        return key in self._dict
+
+
class PairingHeap(MinHeap):
    """A pairing heap."""

    class _Node(MinHeap._Item):
        """A node in a pairing heap.

        A tree in a pairing heap is stored using the left-child, right-sibling
        representation.
        """

        __slots__ = ("left", "next", "prev", "parent")

        def __init__(self, key, value):
            super().__init__(key, value)
            # The leftmost child.
            self.left = None
            # The next sibling.
            self.next = None
            # The previous sibling.
            self.prev = None
            # The parent.
            self.parent = None

    def __init__(self):
        """Initialize a pairing heap."""
        super().__init__()
        # Root of the heap-ordered tree; None when the heap is empty.
        self._root = None

    def min(self):
        # The root of a pairing heap always holds the minimum pair.
        if self._root is None:
            raise nx.NetworkXError("heap is empty.")
        return (self._root.key, self._root.value)

    def pop(self):
        if self._root is None:
            raise nx.NetworkXError("heap is empty.")
        min_node = self._root
        # Merging the root's children yields the new root.
        self._root = self._merge_children(self._root)
        del self._dict[min_node.key]
        return (min_node.key, min_node.value)

    def get(self, key, default=None):
        node = self._dict.get(key)
        return node.value if node is not None else default

    def insert(self, key, value, allow_increase=False):
        node = self._dict.get(key)
        root = self._root
        if node is not None:
            if value < node.value:
                # Decrease-key: if the heap order with the parent is broken,
                # detach the subtree and re-link it with the root.
                node.value = value
                if node is not root and value < node.parent.value:
                    self._cut(node)
                    self._root = self._link(root, node)
                return True
            elif allow_increase and value > node.value:
                node.value = value
                child = self._merge_children(node)
                # Nonstandard step: Link the merged subtree with the root. See
                # below for the standard step.
                if child is not None:
                    self._root = self._link(self._root, child)
                # Standard step: Perform a decrease followed by a pop as if the
                # value were the smallest in the heap. Then insert the new
                # value into the heap.
                # if node is not root:
                #     self._cut(node)
                #     if child is not None:
                #         root = self._link(root, child)
                #     self._root = self._link(root, node)
                # else:
                #     self._root = (self._link(node, child)
                #                   if child is not None else node)
            # Increases (and no-op updates) never report a decrease.
            return False
        else:
            # Insert a new key.
            node = self._Node(key, value)
            self._dict[key] = node
            self._root = self._link(root, node) if root is not None else node
            return True

    def _link(self, root, other):
        """Link two nodes, making the one with the smaller value the parent of
        the other.
        """
        if other.value < root.value:
            root, other = other, root
        # Prepend `other` to `root`'s child list.
        next = root.left
        other.next = next
        if next is not None:
            next.prev = other
        other.prev = None
        root.left = other
        other.parent = root
        return root

    def _merge_children(self, root):
        """Merge the subtrees of the root using the standard two-pass method.
        The resulting subtree is detached from the root.
        """
        node = root.left
        root.left = None
        if node is not None:
            link = self._link
            # Pass 1: Merge pairs of consecutive subtrees from left to right.
            # At the end of the pass, only the prev pointers of the resulting
            # subtrees have meaningful values. The other pointers will be fixed
            # in pass 2.
            prev = None
            while True:
                next = node.next
                if next is None:
                    node.prev = prev
                    break
                next_next = next.next
                node = link(node, next)
                node.prev = prev
                prev = node
                if next_next is None:
                    break
                node = next_next
            # Pass 2: Successively merge the subtrees produced by pass 1 from
            # right to left with the rightmost one.
            prev = node.prev
            while prev is not None:
                prev_prev = prev.prev
                node = link(prev, node)
                prev = prev_prev
            # Now node can become the new root. It has no parent nor siblings.
            node.prev = None
            node.next = None
            node.parent = None
        return node

    def _cut(self, node):
        """Cut a node from its parent."""
        # Unsplice `node` from its sibling list (or its parent's child slot).
        prev = node.prev
        next = node.next
        if prev is not None:
            prev.next = next
        else:
            node.parent.left = next
        node.prev = None
        if next is not None:
            next.prev = prev
            node.next = None
        node.parent = None
+
+
class BinaryHeap(MinHeap):
    """A binary heap with lazy deletion of outdated entries.

    Updating a key leaves its old (value, tiebreaker, key) triple in the
    heap; `min` and `pop` simply discard any triple whose value no longer
    matches ``self._dict[key]``.
    """

    def __init__(self):
        """Create an empty binary heap."""
        super().__init__()
        # Heap of (value, tiebreaker, key) triples; may hold stale entries.
        self._heap = []
        # Monotonic counter used as a tiebreaker between equal values, so
        # keys themselves are never compared.
        self._count = count()

    def min(self):
        if not self._dict:
            raise nx.NetworkXError("heap is empty")
        # Discard stale triples until the heap top agrees with _dict.
        entry = self._heap[0]
        while entry[2] not in self._dict or self._dict[entry[2]] != entry[0]:
            heappop(self._heap)
            entry = self._heap[0]
        return (entry[2], entry[0])

    def pop(self):
        if not self._dict:
            raise nx.NetworkXError("heap is empty")
        # Keep popping until a non-stale triple surfaces.
        while True:
            value, _, key = heappop(self._heap)
            if key in self._dict and self._dict[key] == value:
                break
        del self._dict[key]
        return (key, value)

    def get(self, key, default=None):
        return self._dict.get(key, default)

    def insert(self, key, value, allow_increase=False):
        if key not in self._dict:
            self._dict[key] = value
            heappush(self._heap, (value, next(self._count), key))
            return True
        old_value = self._dict[key]
        decreased = value < old_value
        if decreased or (allow_increase and value > old_value):
            # There is no efficient way to locate the old triple inside the
            # heap, so push a fresh one and let min()/pop() skip the stale
            # leftover when it reaches the top.
            self._dict[key] = value
            heappush(self._heap, (value, next(self._count), key))
        return decreased
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/mapped_queue.py b/.venv/lib/python3.12/site-packages/networkx/utils/mapped_queue.py
new file mode 100644
index 00000000..0dcea368
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/mapped_queue.py
@@ -0,0 +1,297 @@
+"""Priority queue class with updatable priorities."""
+
+import heapq
+
+__all__ = ["MappedQueue"]
+
+
+class _HeapElement:
+    """This proxy class separates the heap element from its priority.
+
+    The idea is that using a 2-tuple (priority, element) works
+    for sorting, but not for dict lookup because priorities are
+    often floating point values so round-off can mess up equality.
+
+    So, we need inequalities to look at the priority (for sorting)
+    and equality (and hash) to look at the element to enable
+    updates to the priority.
+
+    Unfortunately, this class can be tricky to work with if you forget that
+    `__lt__` compares the priority while `__eq__` compares the element.
+    In `greedy_modularity_communities()` the following code is
+    used to check that two _HeapElements differ in either element or priority:
+
+        if d_oldmax != row_max or d_oldmax.priority != row_max.priority:
+
+    If the priorities are the same, this implementation uses the element
+    as a tiebreaker. This provides compatibility with older systems that
+    use tuples to combine priority and elements.
+    """
+
+    __slots__ = ["priority", "element", "_hash"]
+
+    def __init__(self, priority, element):
+        self.priority = priority
+        self.element = element
+        self._hash = hash(element)
+
+    def __lt__(self, other):
+        try:
+            other_priority = other.priority
+        except AttributeError:
+            return self.priority < other
+        # assume comparing to another _HeapElement
+        if self.priority == other_priority:
+            try:
+                return self.element < other.element
+            except TypeError as err:
+                raise TypeError(
+                    "Consider using a tuple, with a priority value that can be compared."
+                )
+        return self.priority < other_priority
+
+    def __gt__(self, other):
+        try:
+            other_priority = other.priority
+        except AttributeError:
+            return self.priority > other
+        # assume comparing to another _HeapElement
+        if self.priority == other_priority:
+            try:
+                return self.element > other.element
+            except TypeError as err:
+                raise TypeError(
+                    "Consider using a tuple, with a priority value that can be compared."
+                )
+        return self.priority > other_priority
+
+    def __eq__(self, other):
+        try:
+            return self.element == other.element
+        except AttributeError:
+            return self.element == other
+
+    def __hash__(self):
+        return self._hash
+
+    def __getitem__(self, indx):
+        return self.priority if indx == 0 else self.element[indx - 1]
+
+    def __iter__(self):
+        yield self.priority
+        try:
+            yield from self.element
+        except TypeError:
+            yield self.element
+
+    def __repr__(self):
+        return f"_HeapElement({self.priority}, {self.element})"
+
+
class MappedQueue:
    """The MappedQueue class implements a min-heap with removal and update-priority.

    The min heap uses heapq as well as custom written _siftup and _siftdown
    methods to allow the heap positions to be tracked by an additional dict
    keyed by element to position. The smallest element can be popped in O(1) time,
    new elements can be pushed in O(log n) time, and any element can be removed
    or updated in O(log n) time. The queue cannot contain duplicate elements
    and an attempt to push an element already in the queue will have no effect.

    MappedQueue complements the heapq package from the python standard
    library. While MappedQueue is designed for maximum compatibility with
    heapq, it adds element removal, lookup, and priority update.

    Parameters
    ----------
    data : dict or iterable

    Examples
    --------

    A `MappedQueue` can be created empty, or optionally, given a dictionary
    of initial elements and priorities.  The methods `push`, `pop`,
    `remove`, and `update` operate on the queue.

    >>> colors_nm = {"red": 665, "blue": 470, "green": 550}
    >>> q = MappedQueue(colors_nm)
    >>> q.remove("red")
    >>> q.update("green", "violet", 400)
    >>> q.push("indigo", 425)
    True
    >>> [q.pop().element for i in range(len(q.heap))]
    ['violet', 'indigo', 'blue']

    A `MappedQueue` can also be initialized with a list or other iterable. The priority is assumed
    to be the sort order of the items in the list.

    >>> q = MappedQueue([916, 50, 4609, 493, 237])
    >>> q.remove(493)
    >>> q.update(237, 1117)
    >>> [q.pop() for i in range(len(q.heap))]
    [50, 916, 1117, 4609]

    An exception is raised if the elements are not comparable.

    >>> q = MappedQueue([100, "a"])
    Traceback (most recent call last):
    ...
    TypeError: '<' not supported between instances of 'int' and 'str'

    To avoid the exception, use a dictionary to assign priorities to the elements.

    >>> q = MappedQueue({100: 0, "a": 1})

    References
    ----------
    .. [1] Cormen, T. H., Leiserson, C. E., Rivest, R. L., & Stein, C. (2001).
       Introduction to algorithms second edition.
    .. [2] Knuth, D. E. (1997). The art of computer programming (Vol. 3).
       Pearson Education.
    """

    def __init__(self, data=None):
        """Priority queue class with updatable priorities."""
        # A dict input pairs each element with a priority via _HeapElement;
        # any other iterable is heapified directly using element sort order.
        if data is None:
            self.heap = []
        elif isinstance(data, dict):
            self.heap = [_HeapElement(v, k) for k, v in data.items()]
        else:
            self.heap = list(data)
        # position maps each element to its index in self.heap; every
        # mutating method below keeps it in sync with the heap.
        self.position = {}
        self._heapify()

    def _heapify(self):
        """Restore heap invariant and recalculate map."""
        heapq.heapify(self.heap)
        self.position = {elt: pos for pos, elt in enumerate(self.heap)}
        # Duplicates would silently collapse in the position dict, breaking
        # the element->index invariant, so detect that case explicitly.
        if len(self.heap) != len(self.position):
            raise AssertionError("Heap contains duplicate elements")

    def __len__(self):
        return len(self.heap)

    def push(self, elt, priority=None):
        """Add an element to the queue.

        Returns True if `elt` was added, False if it was already present.
        """
        if priority is not None:
            elt = _HeapElement(priority, elt)
        # If element is already in queue, do nothing
        if elt in self.position:
            return False
        # Add element to heap and dict
        pos = len(self.heap)
        self.heap.append(elt)
        self.position[elt] = pos
        # Restore invariant by sifting down
        self._siftdown(0, pos)
        return True

    def pop(self):
        """Remove and return the smallest element in the queue.

        Raises IndexError (via ``self.heap[0]``) if the queue is empty.
        """
        # Remove smallest element
        elt = self.heap[0]
        del self.position[elt]
        # If elt is last item, remove and return
        if len(self.heap) == 1:
            self.heap.pop()
            return elt
        # Replace root with last element
        last = self.heap.pop()
        self.heap[0] = last
        self.position[last] = 0
        # Restore invariant by sifting up
        self._siftup(0)
        # Return smallest element
        return elt

    def update(self, elt, new, priority=None):
        """Replace an element in the queue with a new one.

        If `priority` is given, `new` is wrapped in a _HeapElement with it.
        NOTE(review): assumes `new` is not already in the queue; a duplicate
        would corrupt the position map — confirm with callers.
        """
        if priority is not None:
            new = _HeapElement(priority, new)
        # Replace
        pos = self.position[elt]
        self.heap[pos] = new
        del self.position[elt]
        self.position[new] = pos
        # Restore invariant by sifting up
        self._siftup(pos)

    def remove(self, elt):
        """Remove an element from the queue.

        Raises KeyError if `elt` is not in the queue.
        """
        # Find and remove element
        try:
            pos = self.position[elt]
            del self.position[elt]
        except KeyError:
            # Not in queue
            raise
        # If elt is last item, remove and return
        if pos == len(self.heap) - 1:
            self.heap.pop()
            return
        # Replace elt with last element
        last = self.heap.pop()
        self.heap[pos] = last
        self.position[last] = pos
        # Restore invariant by sifting up
        self._siftup(pos)

    def _siftup(self, pos):
        """Move smaller child up until hitting a leaf.

        Built to mimic code for heapq._siftup
        only updating position dict too.

        Like heapq._siftup, this also bubbles the displaced item back up
        toward the root afterwards, so it restores the invariant whether the
        item at `pos` is too large or too small for its position.
        """
        heap, position = self.heap, self.position
        end_pos = len(heap)
        startpos = pos
        newitem = heap[pos]
        # Shift up the smaller child until hitting a leaf
        child_pos = (pos << 1) + 1  # start with leftmost child position
        while child_pos < end_pos:
            # Set child_pos to index of smaller child.
            child = heap[child_pos]
            right_pos = child_pos + 1
            if right_pos < end_pos:
                right = heap[right_pos]
                if not child < right:
                    child = right
                    child_pos = right_pos
            # Move the smaller child up.
            heap[pos] = child
            position[child] = pos
            pos = child_pos
            child_pos = (pos << 1) + 1
        # pos is a leaf position. Put newitem there, and bubble it up
        # to its final resting place (by sifting its parents down).
        while pos > 0:
            parent_pos = (pos - 1) >> 1
            parent = heap[parent_pos]
            if not newitem < parent:
                break
            heap[pos] = parent
            position[parent] = pos
            pos = parent_pos
        heap[pos] = newitem
        position[newitem] = pos

    def _siftdown(self, start_pos, pos):
        """Restore invariant. keep swapping with parent until smaller.

        Built to mimic code for heapq._siftdown
        only updating position dict too.
        """
        heap, position = self.heap, self.position
        newitem = heap[pos]
        # Follow the path to the root, moving parents down until finding a place
        # newitem fits.
        while pos > start_pos:
            parent_pos = (pos - 1) >> 1
            parent = heap[parent_pos]
            if not newitem < parent:
                break
            heap[pos] = parent
            position[parent] = pos
            pos = parent_pos
        heap[pos] = newitem
        position[newitem] = pos
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/misc.py b/.venv/lib/python3.12/site-packages/networkx/utils/misc.py
new file mode 100644
index 00000000..b42d8908
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/misc.py
@@ -0,0 +1,653 @@
+"""
+Miscellaneous Helpers for NetworkX.
+
+These are not imported into the base networkx namespace but
+can be accessed, for example, as
+
+>>> import networkx
+>>> networkx.utils.make_list_of_ints({1, 2, 3})
+[1, 2, 3]
+>>> networkx.utils.arbitrary_element({5, 1, 7})  # doctest: +SKIP
+1
+"""
+
+import random
+import sys
+import uuid
+import warnings
+from collections import defaultdict, deque
+from collections.abc import Iterable, Iterator, Sized
+from itertools import chain, tee
+
+import networkx as nx
+
+__all__ = [
+    "flatten",
+    "make_list_of_ints",
+    "dict_to_numpy_array",
+    "arbitrary_element",
+    "pairwise",
+    "groups",
+    "create_random_state",
+    "create_py_random_state",
+    "PythonRandomInterface",
+    "PythonRandomViaNumpyBits",
+    "nodes_equal",
+    "edges_equal",
+    "graphs_equal",
+    "_clear_cache",
+]
+
+
+# some cookbook stuff
+# used in deciding whether something is a bunch of nodes, edges, etc.
+# see G.add_nodes and others in Graph Class in networkx/base.py
+
+
+def flatten(obj, result=None):
+    """Return flattened version of (possibly nested) iterable object."""
+    if not isinstance(obj, Iterable | Sized) or isinstance(obj, str):
+        return obj
+    if result is None:
+        result = []
+    for item in obj:
+        if not isinstance(item, Iterable | Sized) or isinstance(item, str):
+            result.append(item)
+        else:
+            flatten(item, result)
+    return tuple(result)
+
+
def make_list_of_ints(sequence):
    """Return list of ints from sequence of integral numbers.

    All elements of the sequence must satisfy int(element) == element
    or a ValueError is raised. Sequence is iterated through once.

    If sequence is a list, the non-int values are replaced with ints.
    So, no new list is created
    """

    def _as_int(value):
        # Convert a single integral number, raising NetworkXError otherwise.
        errmsg = f"sequence is not all integers: {value}"
        try:
            converted = int(value)
        except ValueError:
            raise nx.NetworkXError(errmsg) from None
        if converted != value:
            raise nx.NetworkXError(errmsg)
        return converted

    if not isinstance(sequence, list):
        return [_as_int(v) for v in sequence]
    # Original sequence is a list... replace non-int entries in place.
    for pos, value in enumerate(sequence):
        if not isinstance(value, int):
            sequence[pos] = _as_int(value)
    return sequence
+
+
def dict_to_numpy_array(d, mapping=None):
    """Convert a dictionary of dictionaries to a numpy array
    with optional mapping."""
    # Prefer the 2d (dict-of-dicts) interpretation; fall back to 1d when the
    # inner values are not dict-like. Without a mapping the failure surfaces
    # as AttributeError (v.keys()); with one, as TypeError (d[k1][k2]).
    try:
        return _dict_to_numpy_array2(d, mapping)
    except (AttributeError, TypeError):
        return _dict_to_numpy_array1(d, mapping)
+
+
+def _dict_to_numpy_array2(d, mapping=None):
+    """Convert a dictionary of dictionaries to a 2d numpy array
+    with optional mapping.
+
+    """
+    import numpy as np
+
+    if mapping is None:
+        s = set(d.keys())
+        for k, v in d.items():
+            s.update(v.keys())
+        mapping = dict(zip(s, range(len(s))))
+    n = len(mapping)
+    a = np.zeros((n, n))
+    for k1, i in mapping.items():
+        for k2, j in mapping.items():
+            try:
+                a[i, j] = d[k1][k2]
+            except KeyError:
+                pass
+    return a
+
+
+def _dict_to_numpy_array1(d, mapping=None):
+    """Convert a dictionary of numbers to a 1d numpy array with optional mapping."""
+    import numpy as np
+
+    if mapping is None:
+        s = set(d.keys())
+        mapping = dict(zip(s, range(len(s))))
+    n = len(mapping)
+    a = np.zeros(n)
+    for k1, i in mapping.items():
+        i = mapping[k1]
+        a[i] = d[k1]
+    return a
+
+
def arbitrary_element(iterable):
    """Returns an arbitrary element of `iterable` without removing it.

    This is most useful for "peeking" at an arbitrary element of a set,
    but works for any list, dictionary, string, etc., as well. The result
    of ``next(iter(iterable))`` is returned, so this is *not* a random
    choice: for an ordered collection, repeated calls return the same
    element.

    Parameters
    ----------
    iterable : `abc.collections.Iterable` instance
        Any object that implements ``__iter__``, e.g. set, dict, list, tuple,
        etc.

    Returns
    -------
    The object that results from ``next(iter(iterable))``

    Raises
    ------
    ValueError
        If `iterable` is an iterator (because the current implementation of
        this function would consume an element from the iterator).

    Examples
    --------
    >>> nx.utils.arbitrary_element([1, 2, 3])
    1
    >>> nx.utils.arbitrary_element("hello")
    'h'

    :exc:`ValueError` is raised if `iterable` is an iterator:

    >>> nx.utils.arbitrary_element(iter([1, 2, 3]))
    Traceback (most recent call last):
        ...
    ValueError: cannot return an arbitrary item from an iterator
    """
    # An iterator would lose the returned element; refuse it outright.
    if isinstance(iterable, Iterator):
        raise ValueError("cannot return an arbitrary item from an iterator")
    return next(iter(iterable))
+
+
+# Recipe from the itertools documentation.
def pairwise(iterable, cyclic=False):
    "s -> (s0, s1), (s1, s2), (s2, s3), ..."
    left, right = tee(iterable)
    # Advance the second copy by one; remember its head for the cyclic case.
    head = next(right, None)
    if cyclic is True:
        # Close the cycle by pairing the final element with the first.
        return zip(left, chain(right, (head,)))
    return zip(left, right)
+
+
def groups(many_to_one):
    """Converts a many-to-one mapping into a one-to-many mapping.

    `many_to_one` must be a dictionary whose keys and values are all
    :term:`hashable`.

    The return value is a dictionary mapping each value of `many_to_one`
    to the set of keys of `many_to_one` that have that value.

    Examples
    --------
    >>> from networkx.utils import groups
    >>> many_to_one = {"a": 1, "b": 1, "c": 2, "d": 3, "e": 3}
    >>> groups(many_to_one)  # doctest: +SKIP
    {1: {'a', 'b'}, 2: {'c'}, 3: {'e', 'd'}}
    """
    one_to_many = {}
    for key, value in many_to_one.items():
        one_to_many.setdefault(value, set()).add(key)
    return one_to_many
+
+
+def create_random_state(random_state=None):
+    """Returns a numpy.random.RandomState or numpy.random.Generator instance
+    depending on input.
+
+    Parameters
+    ----------
+    random_state : int or NumPy RandomState or Generator instance, optional (default=None)
+        If int, return a numpy.random.RandomState instance set with seed=int.
+        if `numpy.random.RandomState` instance, return it.
+        if `numpy.random.Generator` instance, return it.
+        if None or numpy.random, return the global random number generator used
+        by numpy.random.
+    """
+    import numpy as np
+
+    if random_state is None or random_state is np.random:
+        return np.random.mtrand._rand
+    if isinstance(random_state, np.random.RandomState):
+        return random_state
+    if isinstance(random_state, int):
+        return np.random.RandomState(random_state)
+    if isinstance(random_state, np.random.Generator):
+        return random_state
+    msg = (
+        f"{random_state} cannot be used to create a numpy.random.RandomState or\n"
+        "numpy.random.Generator instance"
+    )
+    raise ValueError(msg)
+
+
class PythonRandomViaNumpyBits(random.Random):
    """Provide the random.random algorithms using a numpy.random bit generator

    The intent is to allow people to contribute code that uses Python's random
    library, but still allow users to provide a single easily controlled random
    bit-stream for all work with NetworkX. This implementation is based on helpful
    comments and code from Robert Kern on NumPy's GitHub Issue #24458.

    This implementation supersedes that of `PythonRandomInterface` which rewrote
    methods to account for subtle differences in API between `random` and
    `numpy.random`. Instead this subclasses `random.Random` and overwrites
    the methods `random`, `getrandbits`, `getstate`, `setstate` and `seed`.
    It makes them use the rng values from an input numpy `RandomState` or `Generator`.
    Those few methods allow the rest of the `random.Random` methods to provide
    the API interface of `random.random` while using randomness generated by
    a numpy generator.
    """

    def __init__(self, rng=None):
        # rng: a numpy RandomState or Generator; defaults to numpy's global
        # RandomState when None.
        try:
            import numpy as np
        except ImportError:
            msg = "numpy not found, only random.random available."
            warnings.warn(msg, ImportWarning)
        # NOTE(review): if numpy is unavailable, `np` is unbound and the
        # `rng is None` branch below raises NameError — confirm intended.

        if rng is None:
            self._rng = np.random.mtrand._rand
        else:
            self._rng = rng

        # gauss_next is part of random.Random's nominally public state
        # (consumed by the inherited gauss()); initialize it here since we
        # deliberately do not call super().__init__() (it would seed).
        self.gauss_next = None

    def random(self):
        """Get the next random number in the range 0.0 <= X < 1.0."""
        return self._rng.random()

    def getrandbits(self, k):
        """getrandbits(k) -> x.  Generates an int with k random bits."""
        if k < 0:
            raise ValueError("number of bits must be non-negative")
        numbytes = (k + 7) // 8  # bits / 8 and rounded up
        x = int.from_bytes(self._rng.bytes(numbytes), "big")
        return x >> (numbytes * 8 - k)  # trim excess bits

    def getstate(self):
        # Delegate state handling entirely to the wrapped numpy rng.
        return self._rng.__getstate__()

    def setstate(self, state):
        self._rng.__setstate__(state)

    def seed(self, *args, **kwds):
        "Do nothing override method."
        # Reseeding must go through the numpy generator that owns the stream.
        raise NotImplementedError("seed() not implemented in PythonRandomViaNumpyBits")
+
+
+##################################################################
class PythonRandomInterface:
    """PythonRandomInterface is included for backward compatibility
    New code should use PythonRandomViaNumpyBits instead.

    Wraps a numpy random generator (RandomState or Generator) behind a
    subset of the `random.Random` method names, translating argument
    conventions where the two APIs differ.
    """

    def __init__(self, rng=None):
        # rng: a numpy RandomState or Generator; defaults to numpy's global
        # RandomState when None.
        try:
            import numpy as np
        except ImportError:
            msg = "numpy not found, only random.random available."
            warnings.warn(msg, ImportWarning)
        # NOTE(review): if numpy is unavailable, `np` is unbound and the
        # `rng is None` branch below raises NameError — confirm intended.

        if rng is None:
            self._rng = np.random.mtrand._rand
        else:
            self._rng = rng

    def random(self):
        return self._rng.random()

    def uniform(self, a, b):
        return a + (b - a) * self._rng.random()

    def randrange(self, a, b=None):
        import numpy as np

        if b is None:
            a, b = 0, a
        # numpy integer draws are bounded by int64; larger ranges are
        # delegated to the bit-stream wrapper, which has no such limit.
        if b > 9223372036854775807:  # from np.iinfo(np.int64).max
            tmp_rng = PythonRandomViaNumpyBits(self._rng)
            return tmp_rng.randrange(a, b)

        # Generator and RandomState spell "random integer" differently;
        # both calls below exclude the upper bound, matching randrange.
        if isinstance(self._rng, np.random.Generator):
            return self._rng.integers(a, b)
        return self._rng.randint(a, b)

    # NOTE: the numpy implementations of `choice` don't support strings, so
    # this cannot be replaced with self._rng.choice
    def choice(self, seq):
        import numpy as np

        if isinstance(self._rng, np.random.Generator):
            idx = self._rng.integers(0, len(seq))
        else:
            idx = self._rng.randint(0, len(seq))
        return seq[idx]

    def gauss(self, mu, sigma):
        return self._rng.normal(mu, sigma)

    def shuffle(self, seq):
        # Shuffles seq in place, like random.shuffle.
        return self._rng.shuffle(seq)

    #    Some methods don't match API for numpy RandomState.
    #    Commented out versions are not used by NetworkX

    def sample(self, seq, k):
        # NOTE(review): returns a numpy array, not the list that
        # random.sample returns — confirm callers accept that.
        return self._rng.choice(list(seq), size=(k,), replace=False)

    def randint(self, a, b):
        import numpy as np

        # numpy integer draws are bounded by int64; see randrange above.
        if b > 9223372036854775807:  # from np.iinfo(np.int64).max
            tmp_rng = PythonRandomViaNumpyBits(self._rng)
            return tmp_rng.randint(a, b)

        # b + 1 makes the upper bound inclusive, matching random.randint.
        if isinstance(self._rng, np.random.Generator):
            return self._rng.integers(a, b + 1)
        return self._rng.randint(a, b + 1)

    #    exponential as expovariate with 1/argument,
    def expovariate(self, scale):
        return self._rng.exponential(1 / scale)

    #    pareto as paretovariate with 1/argument,
    def paretovariate(self, shape):
        return self._rng.pareto(shape)
+
+
+#    weibull as weibullvariate multiplied by beta,
+#    def weibullvariate(self, alpha, beta):
+#        return self._rng.weibull(alpha) * beta
+#
+#    def triangular(self, low, high, mode):
+#        return self._rng.triangular(low, mode, high)
+#
+#    def choices(self, seq, weights=None, cum_weights=None, k=1):
+#        return self._rng.choice(seq
+
+
def create_py_random_state(random_state=None):
    """Returns a random.Random instance depending on input.

    Parameters
    ----------
    random_state : int or random number generator or None (default=None)
        - If int, return a `random.Random` instance set with seed=int.
        - If `random.Random` instance, return it.
        - If None or the `random` package, return the global random number
          generator used by `random`.
        - If an `np.random.Generator` instance, or the `np.random` package, or
          the global numpy random number generator, then return it
          wrapped in a `PythonRandomViaNumpyBits` class.
        - If a `PythonRandomViaNumpyBits` instance, return it.
        - If a `PythonRandomInterface` instance, return it.
        - If a `np.random.RandomState` instance and not the global numpy default,
          return it wrapped in `PythonRandomInterface` for backward bit-stream
          matching with legacy code.

    Notes
    -----
    - A diagram intending to illustrate the relationships behind our support
      for numpy random numbers is called
      `NetworkX Numpy Random Numbers <https://excalidraw.com/#room=b5303f2b03d3af7ccc6a,e5ZDIWdWWCTTsg8OqoRvPA>`_.
    - More discussion about this support also appears in
      `gh-6869#comment <https://github.com/networkx/networkx/pull/6869#issuecomment-1944799534>`_.
    - The wrapper classes make numpy bit generators mimic Python's `random`
      API (including arbitrarily large random integers).
      `PythonRandomViaNumpyBits` is usually what you want and is always used
      for `np.Generator` instances; `PythonRandomInterface` is kept only so
      random numbers produced by NetworkX 3.2 or earlier can be recreated,
      and is used only for a (non-default) pre-seeded `np.RandomState`.
    """
    # Pure-Python inputs are resolved without touching numpy at all.
    if random_state is None or random_state is random:
        return random._inst
    if isinstance(random_state, random.Random):
        return random_state
    if isinstance(random_state, int):
        return random.Random(random_state)

    try:
        import numpy as np
    except ImportError:
        np = None

    if np is not None:
        # Already-wrapped generators pass through untouched.
        if isinstance(random_state, PythonRandomInterface | PythonRandomViaNumpyBits):
            return random_state
        if isinstance(random_state, np.random.Generator):
            return PythonRandomViaNumpyBits(random_state)
        if random_state is np.random:
            return PythonRandomViaNumpyBits(np.random.mtrand._rand)

        if isinstance(random_state, np.random.RandomState):
            if random_state is np.random.mtrand._rand:
                return PythonRandomViaNumpyBits(random_state)
            # Only need older interface if specially constructed RandomState used
            return PythonRandomInterface(random_state)

    msg = f"{random_state} cannot be used to generate a random.Random instance"
    raise ValueError(msg)
+
+
def nodes_equal(nodes1, nodes2):
    """Check if nodes are equal.

    Equality here means equal as Python objects.
    Node data must match if included.
    The order of nodes is not relevant.

    Parameters
    ----------
    nodes1, nodes2 : iterables of nodes, or (node, datadict) tuples

    Returns
    -------
    bool
        True if nodes are equal, False otherwise.
    """
    seq1 = list(nodes1)
    seq2 = list(nodes2)
    try:
        # Interpret entries as (node, datadict) pairs when possible.
        return dict(seq1) == dict(seq2)
    except (ValueError, TypeError):
        # Plain nodes: compare as (order-insensitive) key sets.
        return dict.fromkeys(seq1) == dict.fromkeys(seq2)
+
+
def edges_equal(edges1, edges2):
    """Check if edges are equal.

    Equality here means equal as Python objects.
    Edge data must match if included.
    The order of the edges is not relevant.

    Parameters
    ----------
    edges1, edges2 : iterables of with u, v nodes as
        edge tuples (u, v), or
        edge tuples with data dicts (u, v, d), or
        edge tuples with keys and data dicts (u, v, k, d)

    Returns
    -------
    bool
        True if edges are equal, False otherwise.
    """
    from collections import defaultdict

    def _build(edges):
        # Build an undirected adjacency {u: {v: [data, ...]}} plus the true
        # edge count. Parallel edges accumulate their data tuples in a list.
        adj = defaultdict(dict)
        count = 0
        for e in edges:
            count += 1
            u, v = e[0], e[1]
            data = [e[2:]]
            if v in adj[u]:
                data = adj[u][v] + data
            adj[u][v] = data
            adj[v][u] = data
        return adj, count

    d1, c1 = _build(edges1)
    d2, c2 = _build(edges2)
    # Bug fix: the previous enumerate-based counting could not distinguish an
    # empty edge list from a single edge (both produced a final index of 0),
    # so e.g. edges_equal([], [(1, 2)]) incorrectly returned True.
    if c1 != c2:
        return False
    # Can check one direction because the totals are the same.
    for n, nbrdict in d1.items():
        for nbr, datalist in nbrdict.items():
            if n not in d2:
                return False
            if nbr not in d2[n]:
                return False
            d2datalist = d2[n][nbr]
            # Compare data multiplicities to handle parallel edges.
            for data in datalist:
                if datalist.count(data) != d2datalist.count(data):
                    return False
    return True
+
+
def graphs_equal(graph1, graph2):
    """Check if graphs are equal.

    Equality here means equal as Python objects (not isomorphism).
    Node, edge and graph data must match.

    Parameters
    ----------
    graph1, graph2 : graph

    Returns
    -------
    bool
        True if graphs are equal, False otherwise.
    """
    if graph1.adj != graph2.adj:
        return False
    if graph1.nodes != graph2.nodes:
        return False
    return graph1.graph == graph2.graph
+
+
+def _clear_cache(G):
+    """Clear the cache of a graph (currently stores converted graphs).
+
+    Caching is controlled via ``nx.config.cache_converted_graphs`` configuration.
+    """
+    if cache := getattr(G, "__networkx_cache__", None):
+        cache.clear()
+
+
def check_create_using(create_using, *, directed=None, multigraph=None, default=None):
    """Assert that create_using has good properties

    This checks for desired directedness and multi-edge properties.
    It returns `create_using` unless that is `None` when it returns
    the optionally specified default value.

    Parameters
    ----------
    create_using : None, graph class or instance
        The input value of create_using for a function.
    directed : None or bool
        Whether to check `create_using.is_directed() == directed`.
        If None, do not assert directedness.
    multigraph : None or bool
        Whether to check `create_using.is_multigraph() == multigraph`.
        If None, do not assert multi-edge property.
    default : None or graph class
        The graph class to return if create_using is None.

    Returns
    -------
    create_using : graph class or instance
        The provided graph class or instance, or if None, the `default` value.

    Raises
    ------
    NetworkXError
        When `create_using` doesn't match the properties specified by `directed`
        or `multigraph` parameters.
    """
    if default is None:
        default = nx.Graph
    G = default if create_using is None else create_using

    # Graph classes need an explicit (unused) self argument; instances don't.
    if isinstance(G, type):
        G_directed = G.is_directed(None)
        G_multigraph = G.is_multigraph(None)
    else:
        G_directed = G.is_directed()
        G_multigraph = G.is_multigraph()

    if directed is not None:
        if directed and not G_directed:
            raise nx.NetworkXError("create_using must be directed")
        if not directed and G_directed:
            raise nx.NetworkXError("create_using must not be directed")

    if multigraph is not None:
        if multigraph and not G_multigraph:
            raise nx.NetworkXError("create_using must be a multi-graph")
        if not multigraph and G_multigraph:
            raise nx.NetworkXError("create_using must not be a multi-graph")
    return G
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/random_sequence.py b/.venv/lib/python3.12/site-packages/networkx/utils/random_sequence.py
new file mode 100644
index 00000000..20a7b5e0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/random_sequence.py
@@ -0,0 +1,164 @@
+"""
+Utilities for generating random numbers, random sequences, and
+random selections.
+"""
+
+import networkx as nx
+from networkx.utils import py_random_state
+
+__all__ = [
+    "powerlaw_sequence",
+    "zipf_rv",
+    "cumulative_distribution",
+    "discrete_sequence",
+    "random_weighted_sample",
+    "weighted_choice",
+]
+
+
+# These helpers for choosing random sequences from distributions
+# use Python's random module
+# https://docs.python.org/3/library/random.html
+
+
+@py_random_state(2)
+def powerlaw_sequence(n, exponent=2.0, seed=None):
+    """
+    Return sample sequence of length n from a power law distribution.
+    """
+    return [seed.paretovariate(exponent - 1) for i in range(n)]
+
+
+@py_random_state(2)
+def zipf_rv(alpha, xmin=1, seed=None):
+    r"""Returns a random value chosen from the Zipf distribution.
+
+    The return value is an integer drawn from the probability distribution
+
+    .. math::
+
+        p(x)=\frac{x^{-\alpha}}{\zeta(\alpha, x_{\min})},
+
+    where $\zeta(\alpha, x_{\min})$ is the Hurwitz zeta function.
+
+    Parameters
+    ----------
+    alpha : float
+      Exponent value of the distribution
+    xmin : int
+      Minimum value
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness<randomness>`.
+
+    Returns
+    -------
+    x : int
+      Random value from Zipf distribution
+
+    Raises
+    ------
+    ValueError:
+      If xmin < 1 or
+      If alpha <= 1
+
+    Notes
+    -----
+    The rejection algorithm generates random values for the power-law
+    distribution in uniformly bounded expected time dependent on
+    parameters.  See [1]_ for details on its operation.
+
+    Examples
+    --------
+    >>> nx.utils.zipf_rv(alpha=2, xmin=3, seed=42)
+    8
+
+    References
+    ----------
+    .. [1] Luc Devroye, Non-Uniform Random Variate Generation,
+       Springer-Verlag, New York, 1986.
+    """
+    if xmin < 1:
+        raise ValueError("xmin < 1")
+    if alpha <= 1:
+        raise ValueError("a <= 1.0")
+    a1 = alpha - 1.0
+    b = 2**a1
+    while True:
+        u = 1.0 - seed.random()  # u in (0,1]
+        v = seed.random()  # v in [0,1)
+        x = int(xmin * u ** -(1.0 / a1))
+        t = (1.0 + (1.0 / x)) ** a1
+        if v * x * (t - 1.0) / (b - 1.0) <= t / b:
+            break
+    return x
+
+
+def cumulative_distribution(distribution):
+    """Returns normalized cumulative distribution from discrete distribution."""
+
+    cdf = [0.0]
+    psum = sum(distribution)
+    for i in range(len(distribution)):
+        cdf.append(cdf[i] + distribution[i] / psum)
+    return cdf
+
+
+@py_random_state(3)
+def discrete_sequence(n, distribution=None, cdistribution=None, seed=None):
+    """
+    Return sample sequence of length n from a given discrete distribution
+    or discrete cumulative distribution.
+
+    One of the following must be specified.
+
+    distribution = histogram of values, will be normalized
+
+    cdistribution = normalized discrete cumulative distribution
+
+    """
+    import bisect
+
+    if cdistribution is not None:
+        cdf = cdistribution
+    elif distribution is not None:
+        cdf = cumulative_distribution(distribution)
+    else:
+        raise nx.NetworkXError(
+            "discrete_sequence: distribution or cdistribution missing"
+        )
+
+    # get a uniform random number
+    inputseq = [seed.random() for i in range(n)]
+
+    # choose from CDF
+    seq = [bisect.bisect_left(cdf, s) - 1 for s in inputseq]
+    return seq
+
+
+@py_random_state(2)
+def random_weighted_sample(mapping, k, seed=None):
+    """Returns k items without replacement from a weighted sample.
+
+    The input is a dictionary of items with weights as values.
+    """
+    if k > len(mapping):
+        raise ValueError("sample larger than population")
+    sample = set()
+    while len(sample) < k:
+        sample.add(weighted_choice(mapping, seed))
+    return list(sample)
+
+
+@py_random_state(1)
+def weighted_choice(mapping, seed=None):
+    """Returns a single element from a weighted sample.
+
+    The input is a dictionary of items with weights as values.
+    """
+    # use roulette method
+    rnd = seed.random() * sum(mapping.values())
+    for k, w in mapping.items():
+        rnd -= w
+        if rnd < 0:
+            return k
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/rcm.py b/.venv/lib/python3.12/site-packages/networkx/utils/rcm.py
new file mode 100644
index 00000000..e7366fff
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/rcm.py
@@ -0,0 +1,159 @@
+"""
+Cuthill-McKee ordering of graph nodes to produce sparse matrices
+"""
+
+from collections import deque
+from operator import itemgetter
+
+import networkx as nx
+
+from ..utils import arbitrary_element
+
+__all__ = ["cuthill_mckee_ordering", "reverse_cuthill_mckee_ordering"]
+
+
+def cuthill_mckee_ordering(G, heuristic=None):
+    """Generate an ordering (permutation) of the graph nodes to make
+    a sparse matrix.
+
+    Uses the Cuthill-McKee heuristic (based on breadth-first search) [1]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    heuristic : function, optional
+      Function to choose starting node for RCM algorithm.  If None
+      a node from a pseudo-peripheral pair is used.  A user-defined function
+      can be supplied that takes a graph object and returns a single node.
+
+    Returns
+    -------
+    nodes : generator
+       Generator of nodes in Cuthill-McKee ordering.
+
+    Examples
+    --------
+    >>> from networkx.utils import cuthill_mckee_ordering
+    >>> G = nx.path_graph(4)
+    >>> rcm = list(cuthill_mckee_ordering(G))
+    >>> A = nx.adjacency_matrix(G, nodelist=rcm)
+
+    Smallest degree node as heuristic function:
+
+    >>> def smallest_degree(G):
+    ...     return min(G, key=G.degree)
+    >>> rcm = list(cuthill_mckee_ordering(G, heuristic=smallest_degree))
+
+
+    See Also
+    --------
+    reverse_cuthill_mckee_ordering
+
+    Notes
+    -----
+    The optimal solution to the bandwidth reduction problem is NP-complete [2]_.
+
+
+    References
+    ----------
+    .. [1] E. Cuthill and J. McKee.
+       Reducing the bandwidth of sparse symmetric matrices,
+       In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
+       http://doi.acm.org/10.1145/800195.805928
+    .. [2]  Steven S. Skiena. 1997. The Algorithm Design Manual.
+       Springer-Verlag New York, Inc., New York, NY, USA.
+    """
+    for c in nx.connected_components(G):
+        yield from connected_cuthill_mckee_ordering(G.subgraph(c), heuristic)
+
+
+def reverse_cuthill_mckee_ordering(G, heuristic=None):
+    """Generate an ordering (permutation) of the graph nodes to make
+    a sparse matrix.
+
+    Uses the reverse Cuthill-McKee heuristic (based on breadth-first search)
+    [1]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    heuristic : function, optional
+      Function to choose starting node for RCM algorithm.  If None
+      a node from a pseudo-peripheral pair is used.  A user-defined function
+      can be supplied that takes a graph object and returns a single node.
+
+    Returns
+    -------
+    nodes : generator
+       Generator of nodes in reverse Cuthill-McKee ordering.
+
+    Examples
+    --------
+    >>> from networkx.utils import reverse_cuthill_mckee_ordering
+    >>> G = nx.path_graph(4)
+    >>> rcm = list(reverse_cuthill_mckee_ordering(G))
+    >>> A = nx.adjacency_matrix(G, nodelist=rcm)
+
+    Smallest degree node as heuristic function:
+
+    >>> def smallest_degree(G):
+    ...     return min(G, key=G.degree)
+    >>> rcm = list(reverse_cuthill_mckee_ordering(G, heuristic=smallest_degree))
+
+
+    See Also
+    --------
+    cuthill_mckee_ordering
+
+    Notes
+    -----
+    The optimal solution to the bandwidth reduction problem is NP-complete [2]_.
+
+    References
+    ----------
+    .. [1] E. Cuthill and J. McKee.
+       Reducing the bandwidth of sparse symmetric matrices,
+       In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
+       http://doi.acm.org/10.1145/800195.805928
+    .. [2]  Steven S. Skiena. 1997. The Algorithm Design Manual.
+       Springer-Verlag New York, Inc., New York, NY, USA.
+    """
+    return reversed(list(cuthill_mckee_ordering(G, heuristic=heuristic)))
+
+
+def connected_cuthill_mckee_ordering(G, heuristic=None):
+    # the cuthill mckee algorithm for connected graphs
+    if heuristic is None:
+        start = pseudo_peripheral_node(G)
+    else:
+        start = heuristic(G)
+    visited = {start}
+    queue = deque([start])
+    while queue:
+        parent = queue.popleft()
+        yield parent
+        nd = sorted(G.degree(set(G[parent]) - visited), key=itemgetter(1))
+        children = [n for n, d in nd]
+        visited.update(children)
+        queue.extend(children)
+
+
+def pseudo_peripheral_node(G):
+    # helper for cuthill-mckee to find a node in a "pseudo peripheral pair"
+    # to use as good starting node
+    u = arbitrary_element(G)
+    lp = 0
+    v = u
+    while True:
+        spl = dict(nx.shortest_path_length(G, v))
+        l = max(spl.values())
+        if l <= lp:
+            break
+        lp = l
+        farthest = (n for n, dist in spl.items() if dist == l)
+        v, deg = min(G.degree(farthest), key=itemgetter(1))
+    return v
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test__init.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test__init.py
new file mode 100644
index 00000000..ecbcce36
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test__init.py
@@ -0,0 +1,11 @@
+import pytest
+
+
+def test_utils_namespace():
+    """Ensure objects are not unintentionally exposed in utils namespace."""
+    with pytest.raises(ImportError):
+        from networkx.utils import nx
+    with pytest.raises(ImportError):
+        from networkx.utils import sys
+    with pytest.raises(ImportError):
+        from networkx.utils import defaultdict, deque
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_backends.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_backends.py
new file mode 100644
index 00000000..ad006f00
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_backends.py
@@ -0,0 +1,170 @@
+import pickle
+
+import pytest
+
+import networkx as nx
+
+sp = pytest.importorskip("scipy")
+pytest.importorskip("numpy")
+
+
+def test_dispatch_kwds_vs_args():
+    G = nx.path_graph(4)
+    nx.pagerank(G)
+    nx.pagerank(G=G)
+    with pytest.raises(TypeError):
+        nx.pagerank()
+
+
+def test_pickle():
+    count = 0
+    for name, func in nx.utils.backends._registered_algorithms.items():
+        pickled = pickle.dumps(func.__wrapped__)
+        assert pickle.loads(pickled) is func.__wrapped__
+        try:
+            # Some functions can't be pickled, but it's not b/c of _dispatchable
+            pickled = pickle.dumps(func)
+        except pickle.PicklingError:
+            continue
+        assert pickle.loads(pickled) is func
+        count += 1
+    assert count > 0
+    assert pickle.loads(pickle.dumps(nx.inverse_line_graph)) is nx.inverse_line_graph
+
+
+@pytest.mark.skipif(
+    "not nx.config.backend_priority.algos "
+    "or nx.config.backend_priority.algos[0] != 'nx_loopback'"
+)
+def test_graph_converter_needs_backend():
+    # When testing, `nx.from_scipy_sparse_array` will *always* call the backend
+    # implementation if it's implemented. If `backend=` isn't given, then the result
+    # will be converted back to NetworkX via `convert_to_nx`.
+    # If not testing, then calling `nx.from_scipy_sparse_array` w/o `backend=` will
+    # always call the original version. `backend=` is *required* to call the backend.
+    from networkx.classes.tests.dispatch_interface import (
+        LoopbackBackendInterface,
+        LoopbackGraph,
+    )
+
+    A = sp.sparse.coo_array([[0, 3, 2], [3, 0, 1], [2, 1, 0]])
+
+    side_effects = []
+
+    def from_scipy_sparse_array(self, *args, **kwargs):
+        side_effects.append(1)  # Just to prove this was called
+        return self.convert_from_nx(
+            self.__getattr__("from_scipy_sparse_array")(*args, **kwargs),
+            preserve_edge_attrs=True,
+            preserve_node_attrs=True,
+            preserve_graph_attrs=True,
+        )
+
+    @staticmethod
+    def convert_to_nx(obj, *, name=None):
+        if type(obj) is nx.Graph:
+            return obj
+        return nx.Graph(obj)
+
+    # *This mutates LoopbackBackendInterface!*
+    orig_convert_to_nx = LoopbackBackendInterface.convert_to_nx
+    LoopbackBackendInterface.convert_to_nx = convert_to_nx
+    LoopbackBackendInterface.from_scipy_sparse_array = from_scipy_sparse_array
+
+    try:
+        assert side_effects == []
+        assert type(nx.from_scipy_sparse_array(A)) is nx.Graph
+        assert side_effects == [1]
+        assert (
+            type(nx.from_scipy_sparse_array(A, backend="nx_loopback")) is LoopbackGraph
+        )
+        assert side_effects == [1, 1]
+        # backend="networkx" is default implementation
+        assert type(nx.from_scipy_sparse_array(A, backend="networkx")) is nx.Graph
+        assert side_effects == [1, 1]
+    finally:
+        LoopbackBackendInterface.convert_to_nx = staticmethod(orig_convert_to_nx)
+        del LoopbackBackendInterface.from_scipy_sparse_array
+    with pytest.raises(ImportError, match="backend is not installed"):
+        nx.from_scipy_sparse_array(A, backend="bad-backend-name")
+
+
+@pytest.mark.skipif(
+    "not nx.config.backend_priority.algos "
+    "or nx.config.backend_priority.algos[0] != 'nx_loopback'"
+)
+def test_networkx_backend():
+    """Test using `backend="networkx"` in a dispatchable function."""
+    # (Implementing this test is harder than it should be)
+    from networkx.classes.tests.dispatch_interface import (
+        LoopbackBackendInterface,
+        LoopbackGraph,
+    )
+
+    G = LoopbackGraph()
+    G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4)])
+
+    @staticmethod
+    def convert_to_nx(obj, *, name=None):
+        if isinstance(obj, LoopbackGraph):
+            new_graph = nx.Graph()
+            new_graph.__dict__.update(obj.__dict__)
+            return new_graph
+        return obj
+
+    # *This mutates LoopbackBackendInterface!*
+    # This uses the same trick as in the previous test.
+    orig_convert_to_nx = LoopbackBackendInterface.convert_to_nx
+    LoopbackBackendInterface.convert_to_nx = convert_to_nx
+    try:
+        G2 = nx.ego_graph(G, 0, backend="networkx")
+        assert type(G2) is nx.Graph
+    finally:
+        LoopbackBackendInterface.convert_to_nx = staticmethod(orig_convert_to_nx)
+
+
+def test_dispatchable_are_functions():
+    assert type(nx.pagerank) is type(nx.pagerank.orig_func)
+
+
+@pytest.mark.skipif("not nx.utils.backends.backends")
+def test_mixing_backend_graphs():
+    from networkx.classes.tests import dispatch_interface
+
+    G = nx.Graph()
+    G.add_edge(1, 2)
+    G.add_edge(2, 3)
+    H = nx.Graph()
+    H.add_edge(2, 3)
+    rv = nx.intersection(G, H)
+    assert set(nx.intersection(G, H)) == {2, 3}
+    G2 = dispatch_interface.convert(G)
+    H2 = dispatch_interface.convert(H)
+    if "nx_loopback" in nx.config.backend_priority:
+        # Auto-convert
+        assert set(nx.intersection(G2, H)) == {2, 3}
+        assert set(nx.intersection(G, H2)) == {2, 3}
+    elif not nx.config.backend_priority and "nx_loopback" not in nx.config.backends:
+        # G2 and H2 are backend objects for a backend that is not registered!
+        with pytest.raises(ImportError, match="backend is not installed"):
+            nx.intersection(G2, H)
+        with pytest.raises(ImportError, match="backend is not installed"):
+            nx.intersection(G, H2)
+    # It would be nice to test passing graphs from *different* backends,
+    # but we are not set up to do this yet.
+
+
+def test_bad_backend_name():
+    """Using `backend=` raises with unknown backend even if there are no backends."""
+    with pytest.raises(
+        ImportError, match="'this_backend_does_not_exist' backend is not installed"
+    ):
+        nx.null_graph(backend="this_backend_does_not_exist")
+
+
+def test_fallback_to_nx():
+    with pytest.warns(DeprecationWarning, match="_fallback_to_nx"):
+        # Check as class property
+        assert nx._dispatchable._fallback_to_nx == nx.config.fallback_to_nx
+        # Check as instance property
+        assert nx.pagerank.__wrapped__._fallback_to_nx == nx.config.fallback_to_nx
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_config.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_config.py
new file mode 100644
index 00000000..7416b0ac
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_config.py
@@ -0,0 +1,231 @@
+import collections
+import pickle
+
+import pytest
+
+import networkx as nx
+from networkx.utils.configs import BackendPriorities, Config
+
+
+# Define this at module level so we can test pickling
+class ExampleConfig(Config):
+    """Example configuration."""
+
+    x: int
+    y: str
+
+    def _on_setattr(self, key, value):
+        if key == "x" and value <= 0:
+            raise ValueError("x must be positive")
+        if key == "y" and not isinstance(value, str):
+            raise TypeError("y must be a str")
+        return value
+
+
+class EmptyConfig(Config):
+    pass
+
+
+@pytest.mark.parametrize("cfg", [EmptyConfig(), Config()])
+def test_config_empty(cfg):
+    assert dir(cfg) == []
+    with pytest.raises(AttributeError):
+        cfg.x = 1
+    with pytest.raises(KeyError):
+        cfg["x"] = 1
+    with pytest.raises(AttributeError):
+        cfg.x
+    with pytest.raises(KeyError):
+        cfg["x"]
+    assert len(cfg) == 0
+    assert "x" not in cfg
+    assert cfg == cfg
+    assert cfg.get("x", 2) == 2
+    assert set(cfg.keys()) == set()
+    assert set(cfg.values()) == set()
+    assert set(cfg.items()) == set()
+    cfg2 = pickle.loads(pickle.dumps(cfg))
+    assert cfg == cfg2
+    assert isinstance(cfg, collections.abc.Collection)
+    assert isinstance(cfg, collections.abc.Mapping)
+
+
+def test_config_subclass():
+    with pytest.raises(TypeError, match="missing 2 required keyword-only"):
+        ExampleConfig()
+    with pytest.raises(ValueError, match="x must be positive"):
+        ExampleConfig(x=0, y="foo")
+    with pytest.raises(TypeError, match="unexpected keyword"):
+        ExampleConfig(x=1, y="foo", z="bad config")
+    with pytest.raises(TypeError, match="unexpected keyword"):
+        EmptyConfig(z="bad config")
+    cfg = ExampleConfig(x=1, y="foo")
+    assert cfg.x == 1
+    assert cfg["x"] == 1
+    assert cfg["y"] == "foo"
+    assert cfg.y == "foo"
+    assert "x" in cfg
+    assert "y" in cfg
+    assert "z" not in cfg
+    assert len(cfg) == 2
+    assert set(iter(cfg)) == {"x", "y"}
+    assert set(cfg.keys()) == {"x", "y"}
+    assert set(cfg.values()) == {1, "foo"}
+    assert set(cfg.items()) == {("x", 1), ("y", "foo")}
+    assert dir(cfg) == ["x", "y"]
+    cfg.x = 2
+    cfg["y"] = "bar"
+    assert cfg["x"] == 2
+    assert cfg.y == "bar"
+    with pytest.raises(TypeError, match="can't be deleted"):
+        del cfg.x
+    with pytest.raises(TypeError, match="can't be deleted"):
+        del cfg["y"]
+    assert cfg.x == 2
+    assert cfg == cfg
+    assert cfg == ExampleConfig(x=2, y="bar")
+    assert cfg != ExampleConfig(x=3, y="baz")
+    assert cfg != Config(x=2, y="bar")
+    with pytest.raises(TypeError, match="y must be a str"):
+        cfg["y"] = 5
+    with pytest.raises(ValueError, match="x must be positive"):
+        cfg.x = -5
+    assert cfg.get("x", 10) == 2
+    with pytest.raises(AttributeError):
+        cfg.z = 5
+    with pytest.raises(KeyError):
+        cfg["z"] = 5
+    with pytest.raises(AttributeError):
+        cfg.z
+    with pytest.raises(KeyError):
+        cfg["z"]
+    cfg2 = pickle.loads(pickle.dumps(cfg))
+    assert cfg == cfg2
+    assert cfg.__doc__ == "Example configuration."
+    assert cfg2.__doc__ == "Example configuration."
+
+
+def test_config_defaults():
+    class DefaultConfig(Config):
+        x: int = 0
+        y: int
+
+    cfg = DefaultConfig(y=1)
+    assert cfg.x == 0
+    cfg = DefaultConfig(x=2, y=1)
+    assert cfg.x == 2
+
+
+def test_nxconfig():
+    assert isinstance(nx.config.backend_priority, BackendPriorities)
+    assert isinstance(nx.config.backend_priority.algos, list)
+    assert isinstance(nx.config.backends, Config)
+    with pytest.raises(TypeError, match="must be a list of backend names"):
+        nx.config.backend_priority.algos = "nx_loopback"
+    with pytest.raises(ValueError, match="Unknown backend when setting"):
+        nx.config.backend_priority.algos = ["this_almost_certainly_is_not_a_backend"]
+    with pytest.raises(TypeError, match="must be a Config of backend configs"):
+        nx.config.backends = {}
+    with pytest.raises(TypeError, match="must be a Config of backend configs"):
+        nx.config.backends = Config(plausible_backend_name={})
+    with pytest.raises(ValueError, match="Unknown backend when setting"):
+        nx.config.backends = Config(this_almost_certainly_is_not_a_backend=Config())
+    with pytest.raises(TypeError, match="must be True or False"):
+        nx.config.cache_converted_graphs = "bad value"
+    with pytest.raises(TypeError, match="must be a set of "):
+        nx.config.warnings_to_ignore = 7
+    with pytest.raises(ValueError, match="Unknown warning "):
+        nx.config.warnings_to_ignore = {"bad value"}
+
+
+def test_not_strict():
+    class FlexibleConfig(Config, strict=False):
+        x: int
+
+    cfg = FlexibleConfig(x=1)
+    assert "_strict" not in cfg
+    assert len(cfg) == 1
+    assert list(cfg) == ["x"]
+    assert list(cfg.keys()) == ["x"]
+    assert list(cfg.values()) == [1]
+    assert list(cfg.items()) == [("x", 1)]
+    assert cfg.x == 1
+    assert cfg["x"] == 1
+    assert "x" in cfg
+    assert hasattr(cfg, "x")
+    assert "FlexibleConfig(x=1)" in repr(cfg)
+    assert cfg == FlexibleConfig(x=1)
+    del cfg.x
+    assert "FlexibleConfig()" in repr(cfg)
+    assert len(cfg) == 0
+    assert not hasattr(cfg, "x")
+    assert "x" not in cfg
+    assert not hasattr(cfg, "y")
+    assert "y" not in cfg
+    cfg.y = 2
+    assert len(cfg) == 1
+    assert list(cfg) == ["y"]
+    assert list(cfg.keys()) == ["y"]
+    assert list(cfg.values()) == [2]
+    assert list(cfg.items()) == [("y", 2)]
+    assert cfg.y == 2
+    assert cfg["y"] == 2
+    assert hasattr(cfg, "y")
+    assert "y" in cfg
+    del cfg["y"]
+    assert len(cfg) == 0
+    assert list(cfg) == []
+    with pytest.raises(AttributeError, match="y"):
+        del cfg.y
+    with pytest.raises(KeyError, match="y"):
+        del cfg["y"]
+    with pytest.raises(TypeError, match="missing 1 required keyword-only"):
+        FlexibleConfig()
+    # Be strict when first creating the config object
+    with pytest.raises(TypeError, match="unexpected keyword argument 'y'"):
+        FlexibleConfig(x=1, y=2)
+
+    class FlexibleConfigWithDefault(Config, strict=False):
+        x: int = 0
+
+    assert FlexibleConfigWithDefault().x == 0
+    assert FlexibleConfigWithDefault(x=1)["x"] == 1
+
+
+def test_context():
+    cfg = Config(x=1)
+    with cfg(x=2) as c:
+        assert c.x == 2
+        c.x = 3
+        assert cfg.x == 3
+    assert cfg.x == 1
+
+    with cfg(x=2) as c:
+        assert c == cfg
+        assert cfg.x == 2
+        with cfg(x=3) as c2:
+            assert c2 == cfg
+            assert cfg.x == 3
+            with pytest.raises(RuntimeError, match="context manager without"):
+                with cfg as c3:  # Forgot to call `cfg(...)`
+                    pass
+            assert cfg.x == 3
+        assert cfg.x == 2
+    assert cfg.x == 1
+
+    c = cfg(x=4)  # Not yet as context (not recommended, but possible)
+    assert c == cfg
+    assert cfg.x == 4
+    # Cheat by looking at internal data; context stack should only grow with __enter__
+    assert cfg._prev is not None
+    assert cfg._context_stack == []
+    with c:
+        assert c == cfg
+        assert cfg.x == 4
+    assert cfg.x == 1
+    # Cheat again; there was no preceding `cfg(...)` call this time
+    assert cfg._prev is None
+    with pytest.raises(RuntimeError, match="context manager without"):
+        with cfg:
+            pass
+    assert cfg.x == 1
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_decorators.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_decorators.py
new file mode 100644
index 00000000..0a4aeabf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_decorators.py
@@ -0,0 +1,510 @@
+import os
+import pathlib
+import random
+import tempfile
+
+import pytest
+
+import networkx as nx
+from networkx.utils.decorators import (
+    argmap,
+    not_implemented_for,
+    np_random_state,
+    open_file,
+    py_random_state,
+)
+from networkx.utils.misc import PythonRandomInterface, PythonRandomViaNumpyBits
+
+
+def test_not_implemented_decorator():
+    @not_implemented_for("directed")
+    def test_d(G):
+        pass
+
+    test_d(nx.Graph())
+    with pytest.raises(nx.NetworkXNotImplemented):
+        test_d(nx.DiGraph())
+
+    @not_implemented_for("undirected")
+    def test_u(G):
+        pass
+
+    test_u(nx.DiGraph())
+    with pytest.raises(nx.NetworkXNotImplemented):
+        test_u(nx.Graph())
+
+    @not_implemented_for("multigraph")
+    def test_m(G):
+        pass
+
+    test_m(nx.Graph())
+    with pytest.raises(nx.NetworkXNotImplemented):
+        test_m(nx.MultiGraph())
+
+    @not_implemented_for("graph")
+    def test_g(G):
+        pass
+
+    test_g(nx.MultiGraph())
+    with pytest.raises(nx.NetworkXNotImplemented):
+        test_g(nx.Graph())
+
+    # not MultiDiGraph  (multiple arguments => AND)
+    @not_implemented_for("directed", "multigraph")
+    def test_not_md(G):
+        pass
+
+    test_not_md(nx.Graph())
+    test_not_md(nx.DiGraph())
+    test_not_md(nx.MultiGraph())
+    with pytest.raises(nx.NetworkXNotImplemented):
+        test_not_md(nx.MultiDiGraph())
+
+    # Graph only      (multiple decorators =>  OR)
+    @not_implemented_for("directed")
+    @not_implemented_for("multigraph")
+    def test_graph_only(G):
+        pass
+
+    test_graph_only(nx.Graph())
+    with pytest.raises(nx.NetworkXNotImplemented):
+        test_graph_only(nx.DiGraph())
+    with pytest.raises(nx.NetworkXNotImplemented):
+        test_graph_only(nx.MultiGraph())
+    with pytest.raises(nx.NetworkXNotImplemented):
+        test_graph_only(nx.MultiDiGraph())
+
+    with pytest.raises(ValueError):
+        not_implemented_for("directed", "undirected")
+
+    with pytest.raises(ValueError):
+        not_implemented_for("multigraph", "graph")
+
+
+def test_not_implemented_decorator_key():
+    with pytest.raises(KeyError):
+
+        @not_implemented_for("foo")
+        def test1(G):
+            pass
+
+        test1(nx.Graph())
+
+
+def test_not_implemented_decorator_raise():
+    with pytest.raises(nx.NetworkXNotImplemented):
+
+        @not_implemented_for("graph")
+        def test1(G):
+            pass
+
+        test1(nx.Graph())
+
+
+class TestOpenFileDecorator:
+    def setup_method(self):
+        self.text = ["Blah... ", "BLAH ", "BLAH!!!!"]
+        self.fobj = tempfile.NamedTemporaryFile("wb+", delete=False)
+        self.name = self.fobj.name
+
+    def teardown_method(self):
+        self.fobj.close()
+        os.unlink(self.name)
+
+    def write(self, path):
+        for text in self.text:
+            path.write(text.encode("ascii"))
+
+    @open_file(1, "r")
+    def read(self, path):
+        return path.readlines()[0]
+
+    @staticmethod
+    @open_file(0, "wb")
+    def writer_arg0(path):
+        path.write(b"demo")
+
+    @open_file(1, "wb+")
+    def writer_arg1(self, path):
+        self.write(path)
+
+    @open_file(2, "wb")
+    def writer_arg2default(self, x, path=None):
+        if path is None:
+            with tempfile.NamedTemporaryFile("wb+") as fh:
+                self.write(fh)
+        else:
+            self.write(path)
+
+    @open_file(4, "wb")
+    def writer_arg4default(self, x, y, other="hello", path=None, **kwargs):
+        if path is None:
+            with tempfile.NamedTemporaryFile("wb+") as fh:
+                self.write(fh)
+        else:
+            self.write(path)
+
+    @open_file("path", "wb")
+    def writer_kwarg(self, **kwargs):
+        path = kwargs.get("path", None)
+        if path is None:
+            with tempfile.NamedTemporaryFile("wb+") as fh:
+                self.write(fh)
+        else:
+            self.write(path)
+
+    def test_writer_arg0_str(self):
+        self.writer_arg0(self.name)
+
+    def test_writer_arg0_fobj(self):
+        self.writer_arg0(self.fobj)
+
+    def test_writer_arg0_pathlib(self):
+        self.writer_arg0(pathlib.Path(self.name))
+
+    def test_writer_arg1_str(self):
+        self.writer_arg1(self.name)
+        assert self.read(self.name) == "".join(self.text)
+
+    def test_writer_arg1_fobj(self):
+        self.writer_arg1(self.fobj)
+        assert not self.fobj.closed
+        self.fobj.close()
+        assert self.read(self.name) == "".join(self.text)
+
+    def test_writer_arg2default_str(self):
+        self.writer_arg2default(0, path=None)
+        self.writer_arg2default(0, path=self.name)
+        assert self.read(self.name) == "".join(self.text)
+
+    def test_writer_arg2default_fobj(self):
+        self.writer_arg2default(0, path=self.fobj)
+        assert not self.fobj.closed
+        self.fobj.close()
+        assert self.read(self.name) == "".join(self.text)
+
+    def test_writer_arg2default_fobj_path_none(self):
+        self.writer_arg2default(0, path=None)
+
+    def test_writer_arg4default_fobj(self):
+        self.writer_arg4default(0, 1, dog="dog", other="other")
+        self.writer_arg4default(0, 1, dog="dog", other="other", path=self.name)
+        assert self.read(self.name) == "".join(self.text)
+
+    def test_writer_kwarg_str(self):
+        self.writer_kwarg(path=self.name)
+        assert self.read(self.name) == "".join(self.text)
+
+    def test_writer_kwarg_fobj(self):
+        self.writer_kwarg(path=self.fobj)
+        self.fobj.close()
+        assert self.read(self.name) == "".join(self.text)
+
+    def test_writer_kwarg_path_none(self):
+        self.writer_kwarg(path=None)
+
+
class TestRandomState:
    """Tests for the ``np_random_state`` / ``py_random_state`` decorators.

    NOTE: statement order is significant throughout -- every draw advances
    the generator's state, so expected values are recomputed by reseeding
    immediately before each comparison.
    """

    @classmethod
    def setup_class(cls):
        # Bind numpy globally so the decorated methods below can reference it;
        # skips the whole class when numpy is unavailable.
        global np
        np = pytest.importorskip("numpy")

    @np_random_state(1)
    def instantiate_np_random_state(self, random_state):
        # Decorator converts positional arg 1 into a numpy random source.
        allowed = (np.random.RandomState, np.random.Generator)
        assert isinstance(random_state, allowed)
        return random_state.random()

    @py_random_state(1)
    def instantiate_py_random_state(self, random_state):
        # Decorator converts positional arg 1 into a Python-style random source.
        allowed = (random.Random, PythonRandomInterface, PythonRandomViaNumpyBits)
        assert isinstance(random_state, allowed)
        return random_state.random()

    def test_random_state_None(self):
        """seed=None must use the global numpy / stdlib random state."""
        np.random.seed(42)
        rv = np.random.random()
        np.random.seed(42)
        assert rv == self.instantiate_np_random_state(None)

        random.seed(42)
        rv = random.random()
        random.seed(42)
        assert rv == self.instantiate_py_random_state(None)

    def test_random_state_np_random(self):
        """Passing the np.random module itself uses the global numpy state."""
        np.random.seed(42)
        rv = np.random.random()
        np.random.seed(42)
        assert rv == self.instantiate_np_random_state(np.random)
        np.random.seed(42)
        assert rv == self.instantiate_py_random_state(np.random)

    def test_random_state_int(self):
        """An int seed builds a fresh generator and leaves global state alone."""
        np.random.seed(42)
        np_rv = np.random.random()
        random.seed(42)
        py_rv = random.random()

        np.random.seed(42)
        seed = 1
        rval = self.instantiate_np_random_state(seed)
        rval_expected = np.random.RandomState(seed).rand()
        assert rval == rval_expected
        # test that global seed wasn't changed in function
        assert np_rv == np.random.random()

        random.seed(42)
        rval = self.instantiate_py_random_state(seed)
        rval_expected = random.Random(seed).random()
        assert rval == rval_expected
        # test that global seed wasn't changed in function
        assert py_rv == random.random()

    def test_random_state_np_random_Generator(self):
        """A numpy Generator is accepted by both decorators and not reseeded."""
        np.random.seed(42)
        np_rv = np.random.random()
        np.random.seed(42)
        seed = 1

        rng = np.random.default_rng(seed)
        rval = self.instantiate_np_random_state(rng)
        rval_expected = np.random.default_rng(seed).random()
        assert rval == rval_expected

        # The first draw was consumed above, so compare against draw #2.
        rval = self.instantiate_py_random_state(rng)
        rval_expected = np.random.default_rng(seed).random(size=2)[1]
        assert rval == rval_expected
        # test that global seed wasn't changed in function
        assert np_rv == np.random.random()

    def test_random_state_np_random_RandomState(self):
        """A numpy RandomState is accepted by both decorators and not reseeded."""
        np.random.seed(42)
        np_rv = np.random.random()
        np.random.seed(42)
        seed = 1

        rng = np.random.RandomState(seed)
        rval = self.instantiate_np_random_state(rng)
        rval_expected = np.random.RandomState(seed).random()
        assert rval == rval_expected

        # The first draw was consumed above, so compare against draw #2.
        rval = self.instantiate_py_random_state(rng)
        rval_expected = np.random.RandomState(seed).random(size=2)[1]
        assert rval == rval_expected
        # test that global seed wasn't changed in function
        assert np_rv == np.random.random()

    def test_random_state_py_random(self):
        """random.Random works with py_random_state but not np_random_state."""
        seed = 1
        rng = random.Random(seed)
        rv = self.instantiate_py_random_state(rng)
        assert rv == random.Random(seed).random()

        pytest.raises(ValueError, self.instantiate_np_random_state, rng)
+
+
def test_random_state_string_arg_index():
    """np_random_state must reject a string that names no parameter."""
    with pytest.raises(nx.NetworkXError):

        @np_random_state("a")
        def decorated(rs):
            pass

        # Never reached: the decorator raises at decoration time above.
        decorated(1)
+
+
def test_py_random_state_string_arg_index():
    """py_random_state must reject a string that names no parameter."""
    with pytest.raises(nx.NetworkXError):

        @py_random_state("a")
        def decorated(rs):
            pass

        # Never reached: the decorator raises at decoration time above.
        decorated(1)
+
+
def test_random_state_invalid_arg_index():
    """np_random_state must reject an index beyond the parameter list."""
    with pytest.raises(nx.NetworkXError):

        @np_random_state(2)
        def decorated(rs):
            pass

        # Never reached: the decorator raises at decoration time above.
        decorated(1)
+
+
def test_py_random_state_invalid_arg_index():
    """py_random_state must reject an index beyond the parameter list."""
    with pytest.raises(nx.NetworkXError):

        @py_random_state(2)
        def decorated(rs):
            pass

        # Never reached: the decorator raises at decoration time above.
        decorated(1)
+
+
class TestArgmap:
    """Tests for the ``argmap`` decorator machinery."""

    class ArgmapError(RuntimeError):
        """Raised by sentinel mapping functions that must never be invoked."""

    def test_trivial_function(self):
        """A mapping over no arguments never invokes the mapping function."""

        def do_not_call(x):
            # Fixed: the original raised the bare name ``ArgmapError``, which
            # is only defined as ``TestArgmap.ArgmapError``; if this sentinel
            # were ever called it would raise NameError instead of the
            # intended error. Qualify the name.
            raise TestArgmap.ArgmapError("do not call this function")

        @argmap(do_not_call)
        def trivial_argmap():
            return 1

        assert trivial_argmap() == 1

    def test_trivial_iterator(self):
        """A mapping over no arguments leaves generator functions lazy."""

        def do_not_call(x):
            # Fixed: qualify ArgmapError (see test_trivial_function).
            raise TestArgmap.ArgmapError("do not call this function")

        @argmap(do_not_call)
        def trivial_argmap():
            yield from (1, 2, 3)

        assert tuple(trivial_argmap()) == (1, 2, 3)

    def test_contextmanager(self):
        """try_finally=True runs each argument's exit callback, in reverse."""
        container = []

        def contextmanager(x):
            # Returns (mapped value, exit callback) as argmap expects.
            nonlocal container
            return x, lambda: container.append(x)

        @argmap(contextmanager, 0, 1, 2, try_finally=True)
        def foo(x, y, z):
            return x, y, z

        x, y, z = foo("a", "b", "c")

        # context exits are called in reverse
        assert container == ["c", "b", "a"]

    def test_tryfinally_generator(self):
        """try_finally=True is rejected for generator functions."""
        container = []

        def singleton(x):
            return (x,)

        with pytest.raises(nx.NetworkXError):

            @argmap(singleton, 0, 1, 2, try_finally=True)
            def foo(x, y, z):
                yield from (x, y, z)

        # Without try_finally the same mapping is fine.
        @argmap(singleton, 0, 1, 2)
        def foo(x, y, z):
            return x + y + z

        q = foo("a", "b", "c")

        assert q == ("a", "b", "c")

    def test_actual_vararg(self):
        """Positional indices beyond the named params map into *args."""

        @argmap(lambda x: -x, 4)
        def foo(x, y, *args):
            return (x, y) + tuple(args)

        assert foo(1, 2, 3, 4, 5, 6) == (1, 2, 3, 4, -5, 6)

    def test_signature_destroying_intermediate_decorator(self):
        """argmap still works above a decorator that clobbers the signature."""

        def add_one_to_first_bad_decorator(f):
            """Bad because it doesn't wrap the f signature (clobbers it)"""

            def decorated(a, *args, **kwargs):
                return f(a + 1, *args, **kwargs)

            return decorated

        add_two_to_second = argmap(lambda b: b + 2, 1)

        @add_two_to_second
        @add_one_to_first_bad_decorator
        def add_one_and_two(a, b):
            return a, b

        assert add_one_and_two(5, 5) == (6, 7)

    def test_actual_kwarg(self):
        """A keyword-only argument can be mapped by name."""

        @argmap(lambda x: -x, "arg")
        def foo(*, arg):
            return arg

        assert foo(arg=3) == -3

    def test_nested_tuple(self):
        """Nested tuples of names/indices map tuples of arguments at once."""

        def xform(x, y):
            u, v = y
            return x + u + v, (x + u, x + v)

        # we're testing args and kwargs here, too
        @argmap(xform, (0, ("t", 2)))
        def foo(a, *args, **kwargs):
            return a, args, kwargs

        a, args, kwargs = foo(1, 2, 3, t=4)

        assert a == 1 + 4 + 3
        assert args == (2, 1 + 3)
        assert kwargs == {"t": 1 + 4}

    def test_flatten(self):
        """argmap._flatten flattens arbitrarily nested iterables in order."""
        assert tuple(argmap._flatten([[[[[], []], [], []], [], [], []]], set())) == ()

        rlist = ["a", ["b", "c"], [["d"], "e"], "f"]
        assert "".join(argmap._flatten(rlist, set())) == "abcdef"

    def test_indent(self):
        """argmap._indent indents generated code one space per open block."""
        code = "\n".join(
            argmap._indent(
                *[
                    "try:",
                    "try:",
                    "pass#",
                    "finally:",
                    "pass#",
                    "#",
                    "finally:",
                    "pass#",
                ]
            )
        )
        assert (
            code
            == """try:
 try:
  pass#
 finally:
  pass#
 #
finally:
 pass#"""
        )

    def test_immediate_raise(self):
        """Decorated generator functions raise eagerly, not on first next()."""

        @not_implemented_for("directed")
        def yield_nodes(G):
            yield from G

        G = nx.Graph([(1, 2)])
        D = nx.DiGraph()

        # test first call (argmap is compiled and executed)
        with pytest.raises(nx.NetworkXNotImplemented):
            node_iter = yield_nodes(D)

        # test second call (argmap is only executed)
        with pytest.raises(nx.NetworkXNotImplemented):
            node_iter = yield_nodes(D)

        # ensure that generators still make generators
        node_iter = yield_nodes(G)
        next(node_iter)
        next(node_iter)
        with pytest.raises(StopIteration):
            next(node_iter)
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_heaps.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_heaps.py
new file mode 100644
index 00000000..5ea38716
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_heaps.py
@@ -0,0 +1,131 @@
+import pytest
+
+import networkx as nx
+from networkx.utils import BinaryHeap, PairingHeap
+
+
class X:
    """Heap-key helper: hashable but never safely comparable.

    Every rich comparison raises, so the heaps under test must never
    compare keys directly -- only priorities may be compared.
    NOTE(review): ``raise self is other`` raises a ``TypeError``
    ("exceptions must derive from BaseException") because a bool is not
    an exception; the net effect is that ``==``/``!=`` also always raise
    when actually invoked -- presumably intentional, confirm upstream.
    """

    def __eq__(self, other):
        raise self is other

    def __ne__(self, other):
        raise self is not other

    def __lt__(self, other):
        raise TypeError("cannot compare")

    def __le__(self, other):
        raise TypeError("cannot compare")

    def __ge__(self, other):
        raise TypeError("cannot compare")

    def __gt__(self, other):
        raise TypeError("cannot compare")

    def __hash__(self):
        # Identity-based hash keeps X usable as a dict/heap key.
        return hash(id(self))
+
# Shared incomparable key instance used throughout the scripted sequence.
x = X()


# Scripted heap-operation sequence consumed by _test_heap_class: each tuple
# is (method_name, *args, expected_result); an expected value of
# nx.NetworkXError means the call must raise that error.
data = [  # min should not invent an element.
    ("min", nx.NetworkXError),
    # Popping an empty heap should fail.
    ("pop", nx.NetworkXError),
    # Getting nonexisting elements should return None.
    ("get", 0, None),
    ("get", x, None),
    ("get", None, None),
    # Inserting a new key should succeed.
    ("insert", x, 1, True),
    ("get", x, 1),
    ("min", (x, 1)),
    # min should not pop the top element.
    ("min", (x, 1)),
    # Inserting a new key of different type should succeed.
    ("insert", 1, -2.0, True),
    # int and float values should interop.
    ("min", (1, -2.0)),
    # pop removes minimum-valued element.
    ("insert", 3, -(10**100), True),
    ("insert", 4, 5, True),
    ("pop", (3, -(10**100))),
    ("pop", (1, -2.0)),
    # Decrease-insert should succeed.
    ("insert", 4, -50, True),
    ("insert", 4, -60, False, True),
    # Decrease-insert should not create duplicate keys.
    ("pop", (4, -60)),
    ("pop", (x, 1)),
    # Popping all elements should empty the heap.
    ("min", nx.NetworkXError),
    ("pop", nx.NetworkXError),
    # Non-value-changing insert should fail.
    ("insert", x, 0, True),
    ("insert", x, 0, False, False),
    ("min", (x, 0)),
    ("insert", x, 0, True, False),
    ("min", (x, 0)),
    # Failed insert should not create duplicate keys.
    ("pop", (x, 0)),
    ("pop", nx.NetworkXError),
    # Increase-insert should succeed when allowed.
    ("insert", None, 0, True),
    ("insert", 2, -1, True),
    ("min", (2, -1)),
    ("insert", 2, 1, True, False),
    ("min", (None, 0)),
    # Increase-insert should fail when disallowed.
    ("insert", None, 2, False, False),
    ("min", (None, 0)),
    # Failed increase-insert should not create duplicate keys.
    ("pop", (None, 0)),
    ("pop", (2, 1)),
    ("min", nx.NetworkXError),
    ("pop", nx.NetworkXError),
]
+
+
def _test_heap_class(cls, *args, **kwargs):
    """Exercise a min-heap class against the scripted `data` sequence,
    then run bulk insert/pop/update passes for coverage.

    The heap state threads through every step, so statement order is
    load-bearing throughout.
    """
    heap = cls(*args, **kwargs)
    # Basic behavioral test
    for op in data:
        if op[-1] is not nx.NetworkXError:
            assert op[-1] == getattr(heap, op[0])(*op[1:-1])
        else:
            pytest.raises(op[-1], getattr(heap, op[0]), *op[1:-1])
    # Coverage test.
    for i in range(99, -1, -1):
        assert heap.insert(i, i)
    for i in range(50):
        assert heap.pop() == (i, i)
    # Re-inserting popped keys succeeds; re-inserting live keys does not.
    for i in range(100):
        assert heap.insert(i, i) == (i < 50)
    for i in range(100):
        assert not heap.insert(i, i + 1)
    for i in range(50):
        assert heap.pop() == (i, i)
    for i in range(100):
        assert heap.insert(i, i + 1) == (i < 50)
    for i in range(49):
        assert heap.pop() == (i, i + 1)
    # Keys 49 and 50 both carry value 50; either pop order is valid.
    assert sorted([heap.pop(), heap.pop()]) == [(49, 50), (50, 50)]
    for i in range(51, 100):
        assert not heap.insert(i, i + 1, True)
    for i in range(51, 70):
        assert heap.pop() == (i, i + 1)
    for i in range(100):
        assert heap.insert(i, i)
    for i in range(100):
        assert heap.pop() == (i, i)
    pytest.raises(nx.NetworkXError, heap.pop)
+
+
def test_PairingHeap():
    """Run the shared heap test suite against PairingHeap."""
    _test_heap_class(PairingHeap)
+
+
def test_BinaryHeap():
    """Run the shared heap test suite against BinaryHeap."""
    _test_heap_class(BinaryHeap)
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_mapped_queue.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_mapped_queue.py
new file mode 100644
index 00000000..ca9b7e42
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_mapped_queue.py
@@ -0,0 +1,268 @@
+import pytest
+
+from networkx.utils.mapped_queue import MappedQueue, _HeapElement
+
+
def test_HeapElement_gtlt():
    """Elements order by priority, against elements and bare priorities."""
    low = _HeapElement(1, "b")
    high = _HeapElement(1.1, "a")
    # element-vs-element comparisons
    assert low < high
    assert high > low
    # element-vs-bare-priority comparisons
    assert low < 1.1
    assert 1 < high
+
+
def test_HeapElement_gtlt_tied_priority():
    """With equal priorities the element payload breaks the tie."""
    elt_a = _HeapElement(1, "a")
    elt_b = _HeapElement(1, "b")
    assert elt_b > elt_a
    assert elt_a < elt_b
+    assert bar < foo
+
+
def test_HeapElement_eq():
    """Equality compares the element payload only, ignoring priority,
    and also works against a bare payload value."""
    bar = _HeapElement(1.1, "a")
    foo = _HeapElement(1, "a")
    assert foo == bar
    assert bar == foo
    assert foo == "a"
+
+
def test_HeapElement_iter():
    """Iteration yields the priority followed by the (possibly unpacked)
    element payload."""
    foo = _HeapElement(1, "a")
    bar = _HeapElement(1.1, (3, 2, 1))
    assert list(foo) == [1, "a"]
    # A tuple payload is flattened into the iteration.
    assert list(bar) == [1.1, 3, 2, 1]
+
+
def test_HeapElement_getitem():
    """Index 0 is the priority; subsequent indices address the payload,
    with tuple payloads addressed item by item."""
    foo = _HeapElement(1, "a")
    bar = _HeapElement(1.1, (3, 2, 1))
    assert foo[1] == "a"
    assert foo[0] == 1
    assert bar[0] == 1.1
    assert bar[2] == 2
    assert bar[3] == 1
    # Out-of-range indices raise IndexError like a tuple would.
    pytest.raises(IndexError, bar.__getitem__, 4)
    pytest.raises(IndexError, foo.__getitem__, 2)
+    pytest.raises(IndexError, foo.__getitem__, 2)
+
+
class TestMappedQueue:
    """Tests for MappedQueue using bare comparable items as priorities.

    Several tests assert exact heap-array layouts, so the internal sift
    order of MappedQueue is part of the expected behavior here.
    """

    def setup_method(self):
        pass

    def _check_map(self, q):
        """Assert the position map mirrors the heap array exactly."""
        assert q.position == {elt: pos for pos, elt in enumerate(q.heap)}

    def _make_mapped_queue(self, h):
        """Build a MappedQueue around a raw heap list without heapifying."""
        q = MappedQueue()
        q.heap = h
        q.position = {elt: pos for pos, elt in enumerate(h)}
        return q

    def test_heapify(self):
        h = [5, 4, 3, 2, 1, 0]
        q = self._make_mapped_queue(h)
        q._heapify()
        self._check_map(q)

    def test_init(self):
        h = [5, 4, 3, 2, 1, 0]
        q = MappedQueue(h)
        self._check_map(q)

    def test_incomparable(self):
        # Mixed int/str items cannot be heap-ordered.
        h = [5, 4, "a", 2, 1, 0]
        pytest.raises(TypeError, MappedQueue, h)

    def test_len(self):
        h = [5, 4, 3, 2, 1, 0]
        q = MappedQueue(h)
        self._check_map(q)
        assert len(q) == 6

    def test_siftup_leaf(self):
        """Sifting up a lone leaf is a no-op."""
        h = [2]
        h_sifted = [2]
        q = self._make_mapped_queue(h)
        q._siftup(0)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftup_one_child(self):
        h = [2, 0]
        h_sifted = [0, 2]
        q = self._make_mapped_queue(h)
        q._siftup(0)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftup_left_child(self):
        h = [2, 0, 1]
        h_sifted = [0, 2, 1]
        q = self._make_mapped_queue(h)
        q._siftup(0)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftup_right_child(self):
        h = [2, 1, 0]
        h_sifted = [0, 1, 2]
        q = self._make_mapped_queue(h)
        q._siftup(0)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftup_multiple(self):
        """An already-valid heap is unchanged by _siftup."""
        h = [0, 1, 2, 4, 3, 5, 6]
        h_sifted = [0, 1, 2, 4, 3, 5, 6]
        q = self._make_mapped_queue(h)
        q._siftup(0)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftdown_leaf(self):
        h = [2]
        h_sifted = [2]
        q = self._make_mapped_queue(h)
        q._siftdown(0, 0)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftdown_single(self):
        h = [1, 0]
        h_sifted = [0, 1]
        q = self._make_mapped_queue(h)
        q._siftdown(0, len(h) - 1)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftdown_multiple(self):
        h = [1, 2, 3, 4, 5, 6, 7, 0]
        h_sifted = [0, 1, 3, 2, 5, 6, 7, 4]
        q = self._make_mapped_queue(h)
        q._siftdown(0, len(h) - 1)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_push(self):
        to_push = [6, 1, 4, 3, 2, 5, 0]
        h_sifted = [0, 2, 1, 6, 3, 5, 4]
        q = MappedQueue()
        for elt in to_push:
            q.push(elt)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_push_duplicate(self):
        """Pushing an element already in the queue returns False."""
        to_push = [2, 1, 0]
        h_sifted = [0, 2, 1]
        q = MappedQueue()
        for elt in to_push:
            inserted = q.push(elt)
            assert inserted
        assert q.heap == h_sifted
        self._check_map(q)
        inserted = q.push(1)
        assert not inserted

    def test_pop(self):
        """Repeated pops drain the queue in sorted order."""
        h = [3, 4, 6, 0, 1, 2, 5]
        h_sorted = sorted(h)
        q = self._make_mapped_queue(h)
        q._heapify()
        popped = [q.pop() for _ in range(len(h))]
        assert popped == h_sorted
        self._check_map(q)

    def test_remove_leaf(self):
        h = [0, 2, 1, 6, 3, 5, 4]
        h_removed = [0, 2, 1, 6, 4, 5]
        q = self._make_mapped_queue(h)
        removed = q.remove(3)
        assert q.heap == h_removed

    def test_remove_root(self):
        h = [0, 2, 1, 6, 3, 5, 4]
        h_removed = [1, 2, 4, 6, 3, 5]
        q = self._make_mapped_queue(h)
        removed = q.remove(0)
        assert q.heap == h_removed

    def test_update_leaf(self):
        h = [0, 20, 10, 60, 30, 50, 40]
        h_updated = [0, 15, 10, 60, 20, 50, 40]
        q = self._make_mapped_queue(h)
        removed = q.update(30, 15)
        assert q.heap == h_updated

    def test_update_root(self):
        h = [0, 20, 10, 60, 30, 50, 40]
        h_updated = [10, 20, 35, 60, 30, 50, 40]
        q = self._make_mapped_queue(h)
        removed = q.update(0, 35)
        assert q.heap == h_updated
+
+
class TestMappedDict(TestMappedQueue):
    """Re-runs the TestMappedQueue suite with dict-style (element -> priority)
    construction, plus dict-specific cases; only the overridden methods
    below differ from the base class."""

    def _make_mapped_queue(self, h):
        # Each element doubles as its own priority, matching the base-class
        # bare-item fixtures.
        priority_dict = {elt: elt for elt in h}
        return MappedQueue(priority_dict)

    def test_init(self):
        d = {5: 0, 4: 1, "a": 2, 2: 3, 1: 4}
        q = MappedQueue(d)
        assert q.position == d

    def test_ties(self):
        """Tied priorities still produce a consistent position map."""
        d = {5: 0, 4: 1, 3: 2, 2: 3, 1: 4}
        q = MappedQueue(d)
        assert q.position == {elt: pos for pos, elt in enumerate(q.heap)}

    def test_pop(self):
        d = {5: 0, 4: 1, 3: 2, 2: 3, 1: 4}
        q = MappedQueue(d)
        # pop returns the full (priority, element) heap entry.
        assert q.pop() == _HeapElement(0, 5)
        assert q.position == {elt: pos for pos, elt in enumerate(q.heap)}

    def test_empty_pop(self):
        q = MappedQueue()
        pytest.raises(IndexError, q.pop)

    def test_incomparable_ties(self):
        # Tied priorities force element comparison, which fails for int vs str.
        d = {5: 0, 4: 0, "a": 0, 2: 0, 1: 0}
        pytest.raises(TypeError, MappedQueue, d)

    def test_push(self):
        to_push = [6, 1, 4, 3, 2, 5, 0]
        h_sifted = [0, 2, 1, 6, 3, 5, 4]
        q = MappedQueue()
        for elt in to_push:
            q.push(elt, priority=elt)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_push_duplicate(self):
        to_push = [2, 1, 0]
        h_sifted = [0, 2, 1]
        q = MappedQueue()
        for elt in to_push:
            inserted = q.push(elt, priority=elt)
            assert inserted
        assert q.heap == h_sifted
        self._check_map(q)
        inserted = q.push(1, priority=1)
        assert not inserted

    def test_update_leaf(self):
        h = [0, 20, 10, 60, 30, 50, 40]
        h_updated = [0, 15, 10, 60, 20, 50, 40]
        q = self._make_mapped_queue(h)
        removed = q.update(30, 15, priority=15)
        assert q.heap == h_updated

    def test_update_root(self):
        h = [0, 20, 10, 60, 30, 50, 40]
        h_updated = [10, 20, 35, 60, 30, 50, 40]
        q = self._make_mapped_queue(h)
        removed = q.update(0, 35, priority=35)
        assert q.heap == h_updated
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_misc.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_misc.py
new file mode 100644
index 00000000..eff36b2a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_misc.py
@@ -0,0 +1,268 @@
+import random
+from copy import copy
+
+import pytest
+
+import networkx as nx
+from networkx.utils import (
+    PythonRandomInterface,
+    PythonRandomViaNumpyBits,
+    arbitrary_element,
+    create_py_random_state,
+    create_random_state,
+    dict_to_numpy_array,
+    discrete_sequence,
+    flatten,
+    groups,
+    make_list_of_ints,
+    pairwise,
+    powerlaw_sequence,
+)
+from networkx.utils.misc import _dict_to_numpy_array1, _dict_to_numpy_array2
+
# Fixtures for test_flatten: each nests the values 1..20 (as ints or strings)
# inside a different container mix, so a full flatten yields 20 leaves.

# Deeply nested tuples.
nested_depth = (
    1,
    2,
    (3, 4, ((5, 6, (7,), (8, (9, 10), 11), (12, 13, (14, 15)), 16), 17), 18, 19),
    20,
)

# A set of (hashable) nested tuples.
nested_set = {
    (1, 2, 3, 4),
    (5, 6, 7, 8, 9),
    (10, 11, (12, 13, 14), (15, 16, 17, 18)),
    19,
    20,
}

# A mix of lists, tuples, sets, and dicts (dict values count as leaves).
nested_mixed = [
    1,
    (2, 3, {4, (5, 6), 7}, [8, 9]),
    {10: "foo", 11: "bar", (12, 13): "baz"},
    {(14, 15): "qwe", 16: "asd"},
    (17, (18, "19"), 20),
]
+
+
@pytest.mark.parametrize("result", [None, [], ["existing"], ["existing1", "existing2"]])
@pytest.mark.parametrize("nested", [nested_depth, nested_mixed, nested_set])
def test_flatten(nested, result):
    """flatten() yields 20 leaves for each fixture, appending to any
    pre-seeded `result` container, and returns a tuple subclass."""
    if result is None:
        val = flatten(nested, result)
        assert len(val) == 20
    else:
        _result = copy(result)  # because pytest passes parameters as is
        nexisting = len(_result)
        val = flatten(nested, _result)
        assert len(val) == len(_result) == 20 + nexisting

    assert issubclass(type(val), tuple)
+
+
def test_make_list_of_ints():
    """make_list_of_ints converts integral floats in place (returning the
    same list) and rejects non-integral or non-numeric entries."""
    mylist = [1, 2, 3.0, 42, -2]
    assert make_list_of_ints(mylist) is mylist
    assert make_list_of_ints(mylist) == mylist
    # 3.0 must have been converted to a true int in place.
    assert type(make_list_of_ints(mylist)[2]) is int
    pytest.raises(nx.NetworkXError, make_list_of_ints, [1, 2, 3, "kermit"])
    pytest.raises(nx.NetworkXError, make_list_of_ints, [1, 2, 3.1])
+
+
def test_random_number_distribution():
    """Sequence generators run without error; values are not checked."""
    # smoke test only
    z = powerlaw_sequence(20, exponent=2.5)
    z = discrete_sequence(20, distribution=[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3])
+
+
class TestNumpyArray:
    """Tests for numpy-backed helpers; skipped when numpy is unavailable."""

    @classmethod
    def setup_class(cls):
        global np
        np = pytest.importorskip("numpy")

    def test_numpy_to_list_of_ints(self):
        """make_list_of_ints accepts integral numpy arrays and rejects
        arrays with fractional values."""
        a = np.array([1, 2, 3], dtype=np.int64)
        b = np.array([1.0, 2, 3])
        c = np.array([1.1, 2, 3])
        assert type(make_list_of_ints(a)) == list
        assert make_list_of_ints(b) == list(b)
        B = make_list_of_ints(b)
        assert type(B[0]) == int
        pytest.raises(nx.NetworkXError, make_list_of_ints, c)

    def test__dict_to_numpy_array1(self):
        """1-D conversion honors an explicit key->index mapping."""
        d = {"a": 1, "b": 2}
        a = _dict_to_numpy_array1(d, mapping={"a": 0, "b": 1})
        np.testing.assert_allclose(a, np.array([1, 2]))
        a = _dict_to_numpy_array1(d, mapping={"b": 0, "a": 1})
        np.testing.assert_allclose(a, np.array([2, 1]))

        # Without a mapping the element order is unspecified; check the sum.
        a = _dict_to_numpy_array1(d)
        np.testing.assert_allclose(a.sum(), 3)

    def test__dict_to_numpy_array2(self):
        """2-D conversion honors an explicit key->index mapping."""
        d = {"a": {"a": 1, "b": 2}, "b": {"a": 10, "b": 20}}

        mapping = {"a": 1, "b": 0}
        a = _dict_to_numpy_array2(d, mapping=mapping)
        np.testing.assert_allclose(a, np.array([[20, 10], [2, 1]]))

        # Without a mapping the layout is unspecified; check the sum.
        a = _dict_to_numpy_array2(d)
        np.testing.assert_allclose(a.sum(), 33)

    def test_dict_to_numpy_array_a(self):
        """The public dispatcher handles dict-of-dict input as 2-D."""
        d = {"a": {"a": 1, "b": 2}, "b": {"a": 10, "b": 20}}

        mapping = {"a": 0, "b": 1}
        a = dict_to_numpy_array(d, mapping=mapping)
        np.testing.assert_allclose(a, np.array([[1, 2], [10, 20]]))

        mapping = {"a": 1, "b": 0}
        a = dict_to_numpy_array(d, mapping=mapping)
        np.testing.assert_allclose(a, np.array([[20, 10], [2, 1]]))

        a = _dict_to_numpy_array2(d)
        np.testing.assert_allclose(a.sum(), 33)

    def test_dict_to_numpy_array_b(self):
        """The public dispatcher handles a flat dict as 1-D."""
        d = {"a": 1, "b": 2}

        mapping = {"a": 0, "b": 1}
        a = dict_to_numpy_array(d, mapping=mapping)
        np.testing.assert_allclose(a, np.array([1, 2]))

        a = _dict_to_numpy_array1(d)
        np.testing.assert_allclose(a.sum(), 3)
+
+
def test_pairwise():
    """pairwise yields adjacent pairs; cyclic=True appends (last, first);
    empty input yields nothing either way."""
    nodes = range(4)
    node_pairs = [(0, 1), (1, 2), (2, 3)]
    node_pairs_cycle = node_pairs + [(3, 0)]
    assert list(pairwise(nodes)) == node_pairs
    # Works on one-shot iterators as well as sequences.
    assert list(pairwise(iter(nodes))) == node_pairs
    assert list(pairwise(nodes, cyclic=True)) == node_pairs_cycle
    empty_iter = iter(())
    assert list(pairwise(empty_iter)) == []
    empty_iter = iter(())
    assert list(pairwise(empty_iter, cyclic=True)) == []
+
+
def test_groups():
    """groups() inverts a many-to-one mapping into value -> set-of-keys."""
    mapping = {"a": 0, "b": 0, "c": 1, "d": 1, "e": 2}
    assert groups(mapping) == {0: {"a", "b"}, 1: {"c", "d"}, 2: {"e"}}
    # The empty mapping inverts to the empty dict.
    assert groups({}) == {}
+
+
def test_create_random_state():
    """create_random_state accepts ints, None, np.random, RandomState, and
    Generator inputs; rejects anything else; and is deterministic per seed."""
    np = pytest.importorskip("numpy")
    rs = np.random.RandomState

    assert isinstance(create_random_state(1), rs)
    assert isinstance(create_random_state(None), rs)
    assert isinstance(create_random_state(np.random), rs)
    assert isinstance(create_random_state(rs(1)), rs)
    # Support for numpy.random.Generator
    rng = np.random.default_rng()
    assert isinstance(create_random_state(rng), np.random.Generator)
    pytest.raises(ValueError, create_random_state, "a")

    # Same int seed must reproduce the same stream.
    assert np.all(rs(1).rand(10) == create_random_state(1).rand(10))
+
+
def test_create_py_random_state():
    """create_py_random_state wraps numpy sources in the appropriate
    Python-random-style adapter and rejects invalid inputs."""
    pyrs = random.Random

    assert isinstance(create_py_random_state(1), pyrs)
    assert isinstance(create_py_random_state(None), pyrs)
    assert isinstance(create_py_random_state(pyrs(1)), pyrs)
    pytest.raises(ValueError, create_py_random_state, "a")

    np = pytest.importorskip("numpy")

    rs = np.random.RandomState
    rng = np.random.default_rng(1000)
    rng_explicit = np.random.Generator(np.random.SFC64())
    # Legacy RandomState uses the old adapter; Generators use the new one.
    old_nprs = PythonRandomInterface
    nprs = PythonRandomViaNumpyBits
    assert isinstance(create_py_random_state(np.random), nprs)
    assert isinstance(create_py_random_state(rs(1)), old_nprs)
    assert isinstance(create_py_random_state(rng), nprs)
    assert isinstance(create_py_random_state(rng_explicit), nprs)
    # test default rng input
    assert isinstance(PythonRandomInterface(), old_nprs)
    assert isinstance(PythonRandomViaNumpyBits(), nprs)

    # VeryLargeIntegers Smoke test (they raise error for np.random)
    int64max = 9223372036854775807  # from np.iinfo(np.int64).max
    for r in (rng, rs(1)):
        prs = create_py_random_state(r)
        prs.randrange(3, int64max + 5)
        prs.randint(3, int64max + 5)
+
+
def test_PythonRandomInterface_RandomState():
    """The adapter's stdlib-style methods map onto the equivalent
    RandomState calls, draw for draw -- order of calls matters here."""
    np = pytest.importorskip("numpy")

    seed = 42
    rs = np.random.RandomState
    rng = PythonRandomInterface(rs(seed))
    rs42 = rs(seed)

    # make sure these functions are same as expected outcome
    assert rng.randrange(3, 5) == rs42.randint(3, 5)
    assert rng.choice([1, 2, 3]) == rs42.choice([1, 2, 3])
    assert rng.gauss(0, 1) == rs42.normal(0, 1)
    assert rng.expovariate(1.5) == rs42.exponential(1 / 1.5)
    # Both shuffles return None; this checks neither raises.
    assert np.all(rng.shuffle([1, 2, 3]) == rs42.shuffle([1, 2, 3]))
    assert np.all(
        rng.sample([1, 2, 3], 2) == rs42.choice([1, 2, 3], (2,), replace=False)
    )
    # stdlib randint is inclusive of the upper bound, hence 5 vs 6.
    assert np.all(
        [rng.randint(3, 5) for _ in range(100)]
        == [rs42.randint(3, 6) for _ in range(100)]
    )
    assert rng.random() == rs42.random_sample()
+
+
def test_PythonRandomInterface_Generator():
    """Same adapter checks as the RandomState test, but backed by a
    numpy Generator -- order of calls matters here too."""
    np = pytest.importorskip("numpy")

    seed = 42
    rng = np.random.default_rng(seed)
    pri = PythonRandomInterface(np.random.default_rng(seed))

    # make sure these functions are same as expected outcome
    assert pri.randrange(3, 5) == rng.integers(3, 5)
    assert pri.choice([1, 2, 3]) == rng.choice([1, 2, 3])
    assert pri.gauss(0, 1) == rng.normal(0, 1)
    assert pri.expovariate(1.5) == rng.exponential(1 / 1.5)
    # Both shuffles return None; this checks neither raises.
    assert np.all(pri.shuffle([1, 2, 3]) == rng.shuffle([1, 2, 3]))
    assert np.all(
        pri.sample([1, 2, 3], 2) == rng.choice([1, 2, 3], (2,), replace=False)
    )
    # stdlib randint is inclusive of the upper bound, hence 5 vs 6.
    assert np.all(
        [pri.randint(3, 5) for _ in range(100)]
        == [rng.integers(3, 6) for _ in range(100)]
    )
    assert pri.random() == rng.random()
+
+
@pytest.mark.parametrize(
    ("iterable_type", "expected"), ((list, 1), (tuple, 1), (str, "["), (set, 1))
)
def test_arbitrary_element(iterable_type, expected):
    """arbitrary_element returns the first iterated item of a container
    (for str([1, 2, 3]) that first character is '[')."""
    iterable = iterable_type([1, 2, 3])
    assert arbitrary_element(iterable) == expected
+
+
@pytest.mark.parametrize(
    "iterator",
    ((i for i in range(3)), iter([1, 2, 3])),  # generator
)
def test_arbitrary_element_raises(iterator):
    """Value error is raised when input is an iterator."""
    with pytest.raises(ValueError, match="from an iterator"):
        arbitrary_element(iterator)
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_random_sequence.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_random_sequence.py
new file mode 100644
index 00000000..1d1b9579
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_random_sequence.py
@@ -0,0 +1,38 @@
+import pytest
+
+from networkx.utils import (
+    powerlaw_sequence,
+    random_weighted_sample,
+    weighted_choice,
+    zipf_rv,
+)
+
+
def test_degree_sequences():
    """powerlaw_sequence runs with and without a seed and returns n values."""
    seq = powerlaw_sequence(10, seed=1)
    seq = powerlaw_sequence(10)
    assert len(seq) == 10
+
+
def test_zipf_rv():
    """Smoke-test zipf_rv call signatures and its parameter validation."""
    r = zipf_rv(2.3, xmin=2, seed=1)
    r = zipf_rv(2.3, 2, 1)
    r = zipf_rv(2.3)
    # Fixed: `assert type(r), int` parsed as `assert <truthy>, <message>` and
    # could never fail; assert the intended integer-type check instead.
    assert isinstance(r, int)
    # alpha <= 1 is invalid.
    pytest.raises(ValueError, zipf_rv, 0.5)
    # xmin < 1 is invalid.
    pytest.raises(ValueError, zipf_rv, 2, xmin=0)
+
+
def test_random_weighted_sample():
    """Sampling all keys returns every key; oversampling raises ValueError."""
    mapping = {"a": 10, "b": 20}
    s = random_weighted_sample(mapping, 2, seed=1)
    s = random_weighted_sample(mapping, 2)
    assert sorted(s) == sorted(mapping.keys())
    # Requesting more items than exist must fail.
    pytest.raises(ValueError, random_weighted_sample, mapping, 3)
+
+
def test_random_weighted_choice():
    """A zero-weight entry is never chosen, so 'a' is the only outcome."""
    mapping = {"a": 10, "b": 0}
    c = weighted_choice(mapping, seed=1)
    c = weighted_choice(mapping)
    assert c == "a"
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_rcm.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_rcm.py
new file mode 100644
index 00000000..88702b36
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_rcm.py
@@ -0,0 +1,63 @@
+import networkx as nx
+from networkx.utils import reverse_cuthill_mckee_ordering
+
+
def test_reverse_cuthill_mckee():
    """RCM ordering of the Boost reference graph matches one of the two
    valid orderings (ties between equal-degree nodes allow either)."""
    # example graph from
    # http://www.boost.org/doc/libs/1_37_0/libs/graph/example/cuthill_mckee_ordering.cpp
    G = nx.Graph(
        [
            (0, 3),
            (0, 5),
            (1, 2),
            (1, 4),
            (1, 6),
            (1, 9),
            (2, 3),
            (2, 4),
            (3, 5),
            (3, 8),
            (4, 6),
            (5, 6),
            (5, 7),
            (6, 7),
        ]
    )
    rcm = list(reverse_cuthill_mckee_ordering(G))
    assert rcm in [[0, 8, 5, 7, 3, 6, 2, 4, 1, 9], [0, 8, 5, 7, 3, 6, 4, 2, 1, 9]]
+
+
def test_rcm_alternate_heuristic():
    """RCM with a custom start-node heuristic yields one of the expected
    orderings; ties permit several valid results."""
    # Example graph with self-loops; the start node is chosen by the
    # smallest-degree heuristic below rather than the default.
    G = nx.Graph(
        [
            (0, 0),
            (0, 4),
            (1, 1),
            (1, 2),
            (1, 5),
            (1, 7),
            (2, 2),
            (2, 4),
            (3, 3),
            (3, 6),
            (4, 4),
            (5, 5),
            (5, 7),
            (6, 6),
            (7, 7),
        ]
    )

    answers = [
        [6, 3, 5, 7, 1, 2, 4, 0],
        [6, 3, 7, 5, 1, 2, 4, 0],
        [7, 5, 1, 2, 4, 0, 6, 3],
    ]

    def smallest_degree(G):
        # Lowest degree wins; ties broken by smallest node label.
        deg, node = min((d, n) for n, d in G.degree())
        return node

    rcm = list(reverse_cuthill_mckee_ordering(G, heuristic=smallest_degree))
    assert rcm in answers
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_unionfind.py b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_unionfind.py
new file mode 100644
index 00000000..2d30580f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/tests/test_unionfind.py
@@ -0,0 +1,55 @@
+import networkx as nx
+
+
def test_unionfind():
    # Regression test for 2cddd5958689bdecdcd89b91ac9aaf6ce0e4f6b8.
    # NetworkX 2.x's UnionFind tolerated mixed key types, but in
    # Python 3 comparing e.g. str and int raises:
    #   TypeError: unorderable types: str() > int()
    # The union below must simply complete without raising.
    nx.utils.UnionFind().union(0, "a")
+
+
def test_subtree_union():
    # See https://github.com/networkx/networkx/pull/3224
    # (35db1b551ee65780794a357794f521d8768d5049).
    # Test that subtree unions are handled correctly by to_sets().
    uf = nx.utils.UnionFind()
    for a, b in [(1, 2), (3, 4), (4, 5), (1, 5)]:
        uf.union(a, b)
    assert list(uf.to_sets()) == [{1, 2, 3, 4, 5}]
+
+
def test_unionfind_weights():
    # The root's weight must equal the size of the merged set, even when
    # many elements are unioned in a single call.
    uf = nx.utils.UnionFind()
    for triple in ((1, 4, 7), (2, 5, 8), (3, 6, 9)):
        uf.union(*triple)
    uf.union(*range(1, 10))
    assert uf.weights[uf[1]] == 9
+
+
def test_unbalanced_merge_weights():
    # Merging two sets must keep the larger set's root as the new root
    # (union by weight) and sum the two weights.
    uf = nx.utils.UnionFind()
    uf.union(1, 2, 3)
    uf.union(4, 5, 6, 7, 8, 9)
    assert uf.weights[uf[1]] == 3
    assert uf.weights[uf[4]] == 6
    heavier_root = uf[4]
    uf.union(1, 4)
    assert uf[1] == heavier_root
    assert uf.weights[heavier_root] == 9
+
+
def test_empty_union():
    # union() with no arguments must leave the structure untouched.
    uf = nx.utils.UnionFind((0, 1))
    uf.union()
    assert uf[0] == 0
    assert uf[1] == 1
diff --git a/.venv/lib/python3.12/site-packages/networkx/utils/union_find.py b/.venv/lib/python3.12/site-packages/networkx/utils/union_find.py
new file mode 100644
index 00000000..2a07129f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/utils/union_find.py
@@ -0,0 +1,106 @@
+"""
+Union-find data structure.
+"""
+
+from networkx.utils import groups
+
+
class UnionFind:
    """Union-find (disjoint-set) data structure.

    Each UnionFind instance X maintains a family of disjoint sets of
    hashable objects, supporting the following two methods:

    - X[item] returns a name for the set containing the given item.
      Each set is named by an arbitrarily-chosen one of its members; as
      long as the set remains unchanged it will keep the same name. If
      the item is not yet part of a set in X, a new singleton set is
      created for it.

    - X.union(item1, item2, ...) merges the sets containing each item
      into a single larger set.  If any item is not yet part of a set
      in X, it is added to X as one of the members of the merged set.

      Union-find data structure. Based on Josiah Carlson's code,
      https://code.activestate.com/recipes/215912/
      with significant additional changes by D. Eppstein.
      http://www.ics.uci.edu/~eppstein/PADS/UnionFind.py

    """

    def __init__(self, elements=None):
        """Create a new empty union-find structure.

        If *elements* is an iterable, this structure will be initialized
        with the discrete partition on the given set of elements.

        """
        # parents maps each known element to its tree parent; weights
        # maps each set root to the number of elements in that set.
        self.parents = {}
        self.weights = {}
        for element in () if elements is None else elements:
            self.parents[element] = element
            self.weights[element] = 1

    def __getitem__(self, obj):
        """Find and return the name of the set containing the object."""
        parents = self.parents

        # A previously unseen object becomes its own singleton set.
        if obj not in parents:
            parents[obj] = obj
            self.weights[obj] = 1
            return obj

        # Walk up to the root, recording every node visited on the way.
        path = [obj]
        root = parents[obj]
        while root != path[-1]:
            path.append(root)
            root = parents[root]

        # Path compression: point each visited node directly at the root.
        for node in path:
            parents[node] = root
        return root

    def __iter__(self):
        """Iterate through all items ever found or unioned by this structure."""
        return iter(self.parents)

    def to_sets(self):
        """Iterates over the sets stored in this structure.

        For example::

            >>> partition = UnionFind("xyz")
            >>> sorted(map(sorted, partition.to_sets()))
            [['x'], ['y'], ['z']]
            >>> partition.union("x", "y")
            >>> sorted(map(sorted, partition.to_sets()))
            [['x', 'y'], ['z']]

        """
        # Fully compress every path so each element's stored parent is
        # its set's root before grouping by parent.
        for node in self.parents:
            self[node]  # evaluated for the compression side effect only

        yield from groups(self.parents).values()

    def union(self, *objects):
        """Find the sets containing the objects and merge them all."""
        # Order the distinct roots heaviest-first; the heaviest becomes
        # the root of the merged set (union by weight).
        by_weight = sorted(
            {self[x] for x in objects}, key=lambda r: self.weights[r], reverse=True
        )
        if not by_weight:
            # Nothing to merge: union() with no arguments is a no-op.
            return

        root, *rest = by_weight
        for other in rest:
            self.weights[root] += self.weights[other]
            self.parents[other] = root