Diffstat (limited to '.venv/lib/python3.12/site-packages/networkx/algorithms/centrality')
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__init__.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness.py | 436
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness_subset.py | 275
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/closeness.py | 282
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py | 342
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py | 227
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_closeness.py | 96
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/degree_alg.py | 150
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/dispersion.py | 107
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/eigenvector.py | 357
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/flow_matrix.py | 130
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/group.py | 787
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/harmonic.py | 89
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/katz.py | 331
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/laplacian.py | 150
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/load.py | 200
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/percolation.py | 128
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/reaching.py | 209
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/second_order.py | 141
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/subgraph_alg.py | 340
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__init__.py | 0
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py | 780
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py | 340
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py | 307
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py | 197
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py | 147
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py | 43
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py | 144
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py | 73
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py | 187
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_group.py | 277
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py | 122
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py | 345
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py | 221
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py | 344
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py | 87
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_reaching.py | 140
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py | 82
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py | 110
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_trophic.py | 302
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_voterank.py | 64
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/trophic.py | 163
-rw-r--r--  .venv/lib/python3.12/site-packages/networkx/algorithms/centrality/voterank_alg.py | 95
43 files changed, 9367 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__init__.py
new file mode 100644
index 00000000..c91a904a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/__init__.py
@@ -0,0 +1,20 @@
+from .betweenness import *
+from .betweenness_subset import *
+from .closeness import *
+from .current_flow_betweenness import *
+from .current_flow_betweenness_subset import *
+from .current_flow_closeness import *
+from .degree_alg import *
+from .dispersion import *
+from .eigenvector import *
+from .group import *
+from .harmonic import *
+from .katz import *
+from .load import *
+from .percolation import *
+from .reaching import *
+from .second_order import *
+from .subgraph_alg import *
+from .trophic import *
+from .voterank_alg import *
+from .laplacian import *
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness.py
new file mode 100644
index 00000000..42e09771
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness.py
@@ -0,0 +1,436 @@
+"""Betweenness centrality measures."""
+
+from collections import deque
+from heapq import heappop, heappush
+from itertools import count
+
+import networkx as nx
+from networkx.algorithms.shortest_paths.weighted import _weight_function
+from networkx.utils import py_random_state
+from networkx.utils.decorators import not_implemented_for
+
+__all__ = ["betweenness_centrality", "edge_betweenness_centrality"]
+
+
+@py_random_state(5)
+@nx._dispatchable(edge_attrs="weight")
+def betweenness_centrality(
+    G, k=None, normalized=True, weight=None, endpoints=False, seed=None
+):
+    r"""Compute the shortest-path betweenness centrality for nodes.
+
+    Betweenness centrality of a node $v$ is the sum of the
+    fraction of all-pairs shortest paths that pass through $v$
+
+    .. math::
+
+       c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}
+
+    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
+    shortest $(s, t)$-paths, and $\sigma(s, t|v)$ is the number of
+    those paths passing through some node $v$ other than $s, t$.
+    If $s = t$, $\sigma(s, t) = 1$, and if $v \in \{s, t\}$,
+    $\sigma(s, t|v) = 0$ [2]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph.
+
+    k : int, optional (default=None)
+      If k is not None, use k sampled nodes to estimate betweenness.
+      The value of k must satisfy k <= n, where n is the number of nodes
+      in the graph. Higher values give a better approximation.
+
+    normalized : bool, optional
+      If True the betweenness values are normalized by `2/((n-1)(n-2))`
+      for undirected graphs, and `1/((n-1)(n-2))` for directed graphs
+      where `n` is the number of nodes in G.
+
+    weight : None or string, optional (default=None)
+      If None, all edge weights are considered equal.
+      Otherwise holds the name of the edge attribute used as weight.
+      Weights are used to calculate weighted shortest paths, so they are
+      interpreted as distances.
+
+    endpoints : bool, optional
+      If True include the endpoints in the shortest path counts.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness<randomness>`.
+        Note that this is only used if k is not None.
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with betweenness centrality as the value.
+
+    See Also
+    --------
+    edge_betweenness_centrality
+    load_centrality
+
+    Notes
+    -----
+    The algorithm is from Ulrik Brandes [1]_.
+    See [4]_ for the original first published version and [2]_ for details on
+    algorithms for variations and related metrics.
+
+    For approximate betweenness calculations set k=#samples to use
+    k nodes ("pivots") to estimate the betweenness values. For an estimate
+    of the number of pivots needed see [3]_.
+
+    For weighted graphs the edge weights must be greater than zero.
+    Zero edge weights can produce an infinite number of equal length
+    paths between pairs of nodes.
+
+    The total number of paths between source and target is counted
+    differently for directed and undirected graphs. Directed paths
+    are easy to count. Undirected paths are tricky: should a path
+    from "u" to "v" count as 1 undirected path or as 2 directed paths?
+
+    For betweenness_centrality we report the number of undirected
+    paths when G is undirected.
+
+    For betweenness_centrality_subset the reporting is different.
+    If the source and target subsets are the same, then we want
+    to count undirected paths. But if the source and target subsets
+    differ -- for example, if sources is {0} and targets is {1},
+    then we are only counting the paths in one direction. They are
+    undirected paths but we are counting them in a directed way.
+    To count them as undirected paths, each should count as half a path.
+
+    This algorithm is not guaranteed to be correct if edge weights
+    are floating point numbers. As a workaround you can use integer
+    numbers by multiplying the relevant edge attributes by a convenient
+    constant factor (e.g. 100) and converting to integers.
+
+    References
+    ----------
+    .. [1] Ulrik Brandes:
+       A Faster Algorithm for Betweenness Centrality.
+       Journal of Mathematical Sociology 25(2):163-177, 2001.
+       https://doi.org/10.1080/0022250X.2001.9990249
+    .. [2] Ulrik Brandes:
+       On Variants of Shortest-Path Betweenness
+       Centrality and their Generic Computation.
+       Social Networks 30(2):136-145, 2008.
+       https://doi.org/10.1016/j.socnet.2007.11.001
+    .. [3] Ulrik Brandes and Christian Pich:
+       Centrality Estimation in Large Networks.
+       International Journal of Bifurcation and Chaos 17(7):2303-2318, 2007.
+       https://dx.doi.org/10.1142/S0218127407018403
+    .. [4] Linton C. Freeman:
+       A set of measures of centrality based on betweenness.
+       Sociometry 40: 35–41, 1977
+       https://doi.org/10.2307/3033543
+    """
+    betweenness = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
+    if k is None:
+        nodes = G
+    else:
+        nodes = seed.sample(list(G.nodes()), k)
+    for s in nodes:
+        # single source shortest paths
+        if weight is None:  # use BFS
+            S, P, sigma, _ = _single_source_shortest_path_basic(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight)
+        # accumulation
+        if endpoints:
+            betweenness, _ = _accumulate_endpoints(betweenness, S, P, sigma, s)
+        else:
+            betweenness, _ = _accumulate_basic(betweenness, S, P, sigma, s)
+    # rescaling
+    betweenness = _rescale(
+        betweenness,
+        len(G),
+        normalized=normalized,
+        directed=G.is_directed(),
+        k=k,
+        endpoints=endpoints,
+    )
+    return betweenness
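A minimal usage sketch for the function above, contrasting the exact computation with the k-pivot approximation (graph, pivot count, and seed here are arbitrary illustrative choices):

import networkx as nx

G = nx.krackhardt_kite_graph()  # small 10-node benchmark graph
exact = nx.betweenness_centrality(G, normalized=True)
# k-pivot approximation: sample 5 source nodes; a seed makes it reproducible
approx = nx.betweenness_centrality(G, k=5, seed=42)
max_err = max(abs(exact[v] - approx[v]) for v in G)  # approximation error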
+
+
+@py_random_state(4)
+@nx._dispatchable(edge_attrs="weight")
+def edge_betweenness_centrality(G, k=None, normalized=True, weight=None, seed=None):
+    r"""Compute betweenness centrality for edges.
+
+    Betweenness centrality of an edge $e$ is the sum of the
+    fraction of all-pairs shortest paths that pass through $e$
+
+    .. math::
+
+       c_B(e) =\sum_{s,t \in V} \frac{\sigma(s, t|e)}{\sigma(s, t)}
+
+    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
+    shortest $(s, t)$-paths, and $\sigma(s, t|e)$ is the number of
+    those paths passing through edge $e$ [2]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph.
+
+    k : int, optional (default=None)
+      If k is not None, use k sampled nodes to estimate betweenness.
+      The value of k must satisfy k <= n, where n is the number of nodes
+      in the graph. Higher values give a better approximation.
+
+    normalized : bool, optional
+      If True the betweenness values are normalized by $2/(n(n-1))$
+      for undirected graphs, and $1/(n(n-1))$ for directed graphs
+      where $n$ is the number of nodes in G.
+
+    weight : None or string, optional (default=None)
+      If None, all edge weights are considered equal.
+      Otherwise holds the name of the edge attribute used as weight.
+      Weights are used to calculate weighted shortest paths, so they are
+      interpreted as distances.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness<randomness>`.
+        Note that this is only used if k is not None.
+
+    Returns
+    -------
+    edges : dictionary
+       Dictionary of edges with betweenness centrality as the value.
+
+    See Also
+    --------
+    betweenness_centrality
+    edge_load
+
+    Notes
+    -----
+    The algorithm is from Ulrik Brandes [1]_.
+
+    For weighted graphs the edge weights must be greater than zero.
+    Zero edge weights can produce an infinite number of equal length
+    paths between pairs of nodes.
+
+    References
+    ----------
+    .. [1] Ulrik Brandes: A Faster Algorithm for Betweenness Centrality.
+       Journal of Mathematical Sociology 25(2):163-177, 2001.
+       https://doi.org/10.1080/0022250X.2001.9990249
+    .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
+       Centrality and their Generic Computation.
+       Social Networks 30(2):136-145, 2008.
+       https://doi.org/10.1016/j.socnet.2007.11.001
+    """
+    betweenness = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
+    # b[e]=0 for e in G.edges()
+    betweenness.update(dict.fromkeys(G.edges(), 0.0))
+    if k is None:
+        nodes = G
+    else:
+        nodes = seed.sample(list(G.nodes()), k)
+    for s in nodes:
+        # single source shortest paths
+        if weight is None:  # use BFS
+            S, P, sigma, _ = _single_source_shortest_path_basic(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight)
+        # accumulation
+        betweenness = _accumulate_edges(betweenness, S, P, sigma, s)
+    # rescaling
+    for n in G:  # remove nodes to only return edges
+        del betweenness[n]
+    betweenness = _rescale_e(
+        betweenness, len(G), normalized=normalized, directed=G.is_directed()
+    )
+    if G.is_multigraph():
+        betweenness = _add_edge_keys(G, betweenness, weight=weight)
+    return betweenness
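A matching sketch for the edge variant, on a path graph where the middle edge carries the most shortest paths:

import networkx as nx

G = nx.path_graph(4)  # 0-1-2-3
eb = nx.edge_betweenness_centrality(G, normalized=True)
# eb is keyed by edge tuples; the middle edge (1, 2) lies on four of the
# six node pairs' shortest paths, so it gets the largest value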
+
+
+# helpers for betweenness centrality
+
+
+def _single_source_shortest_path_basic(G, s):
+    S = []
+    P = {}
+    for v in G:
+        P[v] = []
+    sigma = dict.fromkeys(G, 0.0)  # sigma[v]=0 for v in G
+    D = {}
+    sigma[s] = 1.0
+    D[s] = 0
+    Q = deque([s])
+    while Q:  # use BFS to find shortest paths
+        v = Q.popleft()
+        S.append(v)
+        Dv = D[v]
+        sigmav = sigma[v]
+        for w in G[v]:
+            if w not in D:
+                Q.append(w)
+                D[w] = Dv + 1
+            if D[w] == Dv + 1:  # this is a shortest path, count paths
+                sigma[w] += sigmav
+                P[w].append(v)  # predecessors
+    return S, P, sigma, D
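To make S, P, and sigma concrete, here is an illustrative call to this private helper (internal API, shown only for explanation):

import networkx as nx
from networkx.algorithms.centrality.betweenness import (
    _single_source_shortest_path_basic,
)

G = nx.cycle_graph(4)  # 0-1-2-3-0
S, P, sigma, D = _single_source_shortest_path_basic(G, 0)
# Node 2 is reached by two equal-length paths (via 1 and via 3), so
# sigma[2] == 2.0, P[2] == [1, 3], and D[2] == 2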
+
+
+def _single_source_dijkstra_path_basic(G, s, weight):
+    weight = _weight_function(G, weight)
+    # modified from Eppstein
+    S = []
+    P = {}
+    for v in G:
+        P[v] = []
+    sigma = dict.fromkeys(G, 0.0)  # sigma[v]=0 for v in G
+    D = {}
+    sigma[s] = 1.0
+    push = heappush
+    pop = heappop
+    seen = {s: 0}
+    c = count()
+    Q = []  # use Q as heap with (distance,node id) tuples
+    push(Q, (0, next(c), s, s))
+    while Q:
+        (dist, _, pred, v) = pop(Q)
+        if v in D:
+            continue  # already searched this node.
+        sigma[v] += sigma[pred]  # count paths
+        S.append(v)
+        D[v] = dist
+        for w, edgedata in G[v].items():
+            vw_dist = dist + weight(v, w, edgedata)
+            if w not in D and (w not in seen or vw_dist < seen[w]):
+                seen[w] = vw_dist
+                push(Q, (vw_dist, next(c), v, w))
+                sigma[w] = 0.0
+                P[w] = [v]
+            elif vw_dist == seen[w]:  # handle equal paths
+                sigma[w] += sigma[v]
+                P[w].append(v)
+    return S, P, sigma, D
+
+
+def _accumulate_basic(betweenness, S, P, sigma, s):
+    delta = dict.fromkeys(S, 0)
+    while S:
+        w = S.pop()
+        coeff = (1 + delta[w]) / sigma[w]
+        for v in P[w]:
+            delta[v] += sigma[v] * coeff
+        if w != s:
+            betweenness[w] += delta[w]
+    return betweenness, delta
+
+
+def _accumulate_endpoints(betweenness, S, P, sigma, s):
+    betweenness[s] += len(S) - 1
+    delta = dict.fromkeys(S, 0)
+    while S:
+        w = S.pop()
+        coeff = (1 + delta[w]) / sigma[w]
+        for v in P[w]:
+            delta[v] += sigma[v] * coeff
+        if w != s:
+            betweenness[w] += delta[w] + 1
+    return betweenness, delta
+
+
+def _accumulate_edges(betweenness, S, P, sigma, s):
+    delta = dict.fromkeys(S, 0)
+    while S:
+        w = S.pop()
+        coeff = (1 + delta[w]) / sigma[w]
+        for v in P[w]:
+            c = sigma[v] * coeff
+            if (v, w) not in betweenness:
+                betweenness[(w, v)] += c
+            else:
+                betweenness[(v, w)] += c
+            delta[v] += c
+        if w != s:
+            betweenness[w] += delta[w]
+    return betweenness
+
+
+def _rescale(betweenness, n, normalized, directed=False, k=None, endpoints=False):
+    if normalized:
+        if endpoints:
+            if n < 2:
+                scale = None  # no normalization
+            else:
+                # Scale factor should include endpoint nodes
+                scale = 1 / (n * (n - 1))
+        elif n <= 2:
+            scale = None  # no normalization b=0 for all nodes
+        else:
+            scale = 1 / ((n - 1) * (n - 2))
+    else:  # rescale by 2 for undirected graphs
+        if not directed:
+            scale = 0.5
+        else:
+            scale = None
+    if scale is not None:
+        if k is not None:
+            scale = scale * n / k
+        for v in betweenness:
+            betweenness[v] *= scale
+    return betweenness
+
+
+def _rescale_e(betweenness, n, normalized, directed=False, k=None):
+    if normalized:
+        if n <= 1:
+            scale = None  # no normalization b=0 for all nodes
+        else:
+            scale = 1 / (n * (n - 1))
+    else:  # rescale by 2 for undirected graphs
+        if not directed:
+            scale = 0.5
+        else:
+            scale = None
+    if scale is not None:
+        if k is not None:
+            scale = scale * n / k
+        for v in betweenness:
+            betweenness[v] *= scale
+    return betweenness
+
+
+@not_implemented_for("graph")
+def _add_edge_keys(G, betweenness, weight=None):
+    r"""Adds the corrected betweenness centrality (BC) values for multigraphs.
+
+    Parameters
+    ----------
+    G : NetworkX graph.
+
+    betweenness : dictionary
+        Dictionary mapping adjacent node tuples to betweenness centrality values.
+
+    weight : string or function
+        See `_weight_function` for details. Defaults to `None`.
+
+    Returns
+    -------
+    edges : dictionary
+        The parameter `betweenness` including edges with keys and their
+        betweenness centrality values.
+
+    The BC value is divided among edges of equal weight.
+    """
+    _weight = _weight_function(G, weight)
+
+    edge_bc = dict.fromkeys(G.edges, 0.0)
+    for u, v in betweenness:
+        d = G[u][v]
+        wt = _weight(u, v, d)
+        keys = [k for k in d if _weight(u, v, {k: d[k]}) == wt]
+        bc = betweenness[(u, v)] / len(keys)
+        for k in keys:
+            edge_bc[(u, v, k)] = bc
+
+    return edge_bc
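On a multigraph the per-pair credit is split across parallel edges of equal (minimal) weight, as a small sketch shows:

import networkx as nx

G = nx.MultiGraph()
k0 = G.add_edge(0, 1)  # key 0
k1 = G.add_edge(0, 1)  # parallel edge, key 1, same implicit weight
eb = nx.edge_betweenness_centrality(G, normalized=False)
# keys now include the multigraph edge key and the value is shared evenly:
# eb[(0, 1, k0)] == eb[(0, 1, k1)]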
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness_subset.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness_subset.py
new file mode 100644
index 00000000..b9e99365
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/betweenness_subset.py
@@ -0,0 +1,275 @@
+"""Betweenness centrality measures for subsets of nodes."""
+
+import networkx as nx
+from networkx.algorithms.centrality.betweenness import (
+    _add_edge_keys,
+)
+from networkx.algorithms.centrality.betweenness import (
+    _single_source_dijkstra_path_basic as dijkstra,
+)
+from networkx.algorithms.centrality.betweenness import (
+    _single_source_shortest_path_basic as shortest_path,
+)
+
+__all__ = [
+    "betweenness_centrality_subset",
+    "edge_betweenness_centrality_subset",
+]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def betweenness_centrality_subset(G, sources, targets, normalized=False, weight=None):
+    r"""Compute betweenness centrality for a subset of nodes.
+
+    .. math::
+
+       c_B(v) =\sum_{s\in S, t \in T} \frac{\sigma(s, t|v)}{\sigma(s, t)}
+
+    where $S$ is the set of sources, $T$ is the set of targets,
+    $\sigma(s, t)$ is the number of shortest $(s, t)$-paths,
+    and $\sigma(s, t|v)$ is the number of those paths
+    passing through some node $v$ other than $s, t$.
+    If $s = t$, $\sigma(s, t) = 1$,
+    and if $v \in \{s, t\}$, $\sigma(s, t|v) = 0$ [2]_.
+
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph.
+
+    sources: list of nodes
+      Nodes to use as sources for shortest paths in betweenness
+
+    targets: list of nodes
+      Nodes to use as targets for shortest paths in betweenness
+
+    normalized : bool, optional
+      If True the betweenness values are normalized by $2/((n-1)(n-2))$
+      for undirected graphs, and $1/((n-1)(n-2))$ for directed graphs
+      where $n$ is the number of nodes in G.
+
+    weight : None or string, optional (default=None)
+      If None, all edge weights are considered equal.
+      Otherwise holds the name of the edge attribute used as weight.
+      Weights are used to calculate weighted shortest paths, so they are
+      interpreted as distances.
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with betweenness centrality as the value.
+
+    See Also
+    --------
+    edge_betweenness_centrality
+    load_centrality
+
+    Notes
+    -----
+    The basic algorithm is from [1]_.
+
+    For weighted graphs the edge weights must be greater than zero.
+    Zero edge weights can produce an infinite number of equal length
+    paths between pairs of nodes.
+
+    The normalization might seem a little strange but it is
+    designed to make betweenness_centrality(G) be the same as
+    betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()).
+
+    The total number of paths between source and target is counted
+    differently for directed and undirected graphs. Directed paths
+    are easy to count. Undirected paths are tricky: should a path
+    from "u" to "v" count as 1 undirected path or as 2 directed paths?
+
+    For betweenness_centrality we report the number of undirected
+    paths when G is undirected.
+
+    For betweenness_centrality_subset the reporting is different.
+    If the source and target subsets are the same, then we want
+    to count undirected paths. But if the source and target subsets
+    differ -- for example, if sources is {0} and targets is {1},
+    then we are only counting the paths in one direction. They are
+    undirected paths but we are counting them in a directed way.
+    To count them as undirected paths, each should count as half a path.
+
+    References
+    ----------
+    .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality.
+       Journal of Mathematical Sociology 25(2):163-177, 2001.
+       https://doi.org/10.1080/0022250X.2001.9990249
+    .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
+       Centrality and their Generic Computation.
+       Social Networks 30(2):136-145, 2008.
+       https://doi.org/10.1016/j.socnet.2007.11.001
+    """
+    b = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
+    for s in sources:
+        # single source shortest paths
+        if weight is None:  # use BFS
+            S, P, sigma, _ = shortest_path(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma, _ = dijkstra(G, s, weight)
+        b = _accumulate_subset(b, S, P, sigma, s, targets)
+    b = _rescale(b, len(G), normalized=normalized, directed=G.is_directed())
+    return b
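A sketch of the one-directional counting described in the Notes: on a path, each interior node lies on the single 0-to-4 shortest path, and with the default normalized=False the undirected count is halved:

import networkx as nx

G = nx.path_graph(5)  # 0-1-2-3-4
b = nx.betweenness_centrality_subset(G, sources=[0], targets=[4])
# b[1] == b[2] == b[3] == 0.5: one path, counted as half an undirected path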
+
+
+@nx._dispatchable(edge_attrs="weight")
+def edge_betweenness_centrality_subset(
+    G, sources, targets, normalized=False, weight=None
+):
+    r"""Compute betweenness centrality for edges for a subset of nodes.
+
+    .. math::
+
+       c_B(v) =\sum_{s\in S,t \in T} \frac{\sigma(s, t|e)}{\sigma(s, t)}
+
+    where $S$ is the set of sources, $T$ is the set of targets,
+    $\sigma(s, t)$ is the number of shortest $(s, t)$-paths,
+    and $\sigma(s, t|e)$ is the number of those paths
+    passing through edge $e$ [2]_.
+
+    Parameters
+    ----------
+    G : graph
+      A networkx graph.
+
+    sources: list of nodes
+      Nodes to use as sources for shortest paths in betweenness
+
+    targets: list of nodes
+      Nodes to use as targets for shortest paths in betweenness
+
+    normalized : bool, optional
+      If True the betweenness values are normalized by `2/(n(n-1))`
+      for undirected graphs, and `1/(n(n-1))` for directed graphs
+      where `n` is the number of nodes in G.
+
+    weight : None or string, optional (default=None)
+      If None, all edge weights are considered equal.
+      Otherwise holds the name of the edge attribute used as weight.
+      Weights are used to calculate weighted shortest paths, so they are
+      interpreted as distances.
+
+    Returns
+    -------
+    edges : dictionary
+       Dictionary of edges with Betweenness centrality as the value.
+
+    See Also
+    --------
+    betweenness_centrality
+    edge_load
+
+    Notes
+    -----
+    The basic algorithm is from [1]_.
+
+    For weighted graphs the edge weights must be greater than zero.
+    Zero edge weights can produce an infinite number of equal length
+    paths between pairs of nodes.
+
+    The normalization might seem a little strange but it is the same
+    as in edge_betweenness_centrality() and is designed to make
+    edge_betweenness_centrality(G) be the same as
+    edge_betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()).
+
+    References
+    ----------
+    .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality.
+       Journal of Mathematical Sociology 25(2):163-177, 2001.
+       https://doi.org/10.1080/0022250X.2001.9990249
+    .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
+       Centrality and their Generic Computation.
+       Social Networks 30(2):136-145, 2008.
+       https://doi.org/10.1016/j.socnet.2007.11.001
+    """
+    b = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
+    b.update(dict.fromkeys(G.edges(), 0.0))  # b[e] for e in G.edges()
+    for s in sources:
+        # single source shortest paths
+        if weight is None:  # use BFS
+            S, P, sigma, _ = shortest_path(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma, _ = dijkstra(G, s, weight)
+        b = _accumulate_edges_subset(b, S, P, sigma, s, targets)
+    for n in G:  # remove nodes to only return edges
+        del b[n]
+    b = _rescale_e(b, len(G), normalized=normalized, directed=G.is_directed())
+    if G.is_multigraph():
+        b = _add_edge_keys(G, b, weight=weight)
+    return b
+
+
+def _accumulate_subset(betweenness, S, P, sigma, s, targets):
+    delta = dict.fromkeys(S, 0.0)
+    target_set = set(targets) - {s}
+    while S:
+        w = S.pop()
+        if w in target_set:
+            coeff = (delta[w] + 1.0) / sigma[w]
+        else:
+            coeff = delta[w] / sigma[w]
+        for v in P[w]:
+            delta[v] += sigma[v] * coeff
+        if w != s:
+            betweenness[w] += delta[w]
+    return betweenness
+
+
+def _accumulate_edges_subset(betweenness, S, P, sigma, s, targets):
+    """edge_betweenness_centrality_subset helper."""
+    delta = dict.fromkeys(S, 0)
+    target_set = set(targets)
+    while S:
+        w = S.pop()
+        for v in P[w]:
+            if w in target_set:
+                c = (sigma[v] / sigma[w]) * (1.0 + delta[w])
+            else:
+                c = delta[w] / len(P[w])
+            if (v, w) not in betweenness:
+                betweenness[(w, v)] += c
+            else:
+                betweenness[(v, w)] += c
+            delta[v] += c
+        if w != s:
+            betweenness[w] += delta[w]
+    return betweenness
+
+
+def _rescale(betweenness, n, normalized, directed=False):
+    """betweenness_centrality_subset helper."""
+    if normalized:
+        if n <= 2:
+            scale = None  # no normalization b=0 for all nodes
+        else:
+            scale = 1.0 / ((n - 1) * (n - 2))
+    else:  # rescale by 2 for undirected graphs
+        if not directed:
+            scale = 0.5
+        else:
+            scale = None
+    if scale is not None:
+        for v in betweenness:
+            betweenness[v] *= scale
+    return betweenness
+
+
+def _rescale_e(betweenness, n, normalized, directed=False):
+    """edge_betweenness_centrality_subset helper."""
+    if normalized:
+        if n <= 1:
+            scale = None  # no normalization b=0 for all nodes
+        else:
+            scale = 1.0 / (n * (n - 1))
+    else:  # rescale by 2 for undirected graphs
+        if not directed:
+            scale = 0.5
+        else:
+            scale = None
+    if scale is not None:
+        for v in betweenness:
+            betweenness[v] *= scale
+    return betweenness
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/closeness.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/closeness.py
new file mode 100644
index 00000000..1cc2f959
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/closeness.py
@@ -0,0 +1,282 @@
+"""
+Closeness centrality measures.
+"""
+
+import functools
+
+import networkx as nx
+from networkx.exception import NetworkXError
+from networkx.utils.decorators import not_implemented_for
+
+__all__ = ["closeness_centrality", "incremental_closeness_centrality"]
+
+
+@nx._dispatchable(edge_attrs="distance")
+def closeness_centrality(G, u=None, distance=None, wf_improved=True):
+    r"""Compute closeness centrality for nodes.
+
+    Closeness centrality [1]_ of a node `u` is the reciprocal of the
+    average shortest path distance to `u` over all `n-1` reachable nodes.
+
+    .. math::
+
+        C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
+
+    where `d(v, u)` is the shortest-path distance between `v` and `u`,
+    and `n-1` is the number of nodes reachable from `u`. Notice that the
+    closeness distance function computes the incoming distance to `u`
+    for directed graphs. To use outward distance, act on `G.reverse()`.
+
+    Notice that higher values of closeness indicate higher centrality.
+
+    Wasserman and Faust propose an improved formula for graphs with
+    more than one connected component. The result is "a ratio of the
+    fraction of actors in the group who are reachable, to the average
+    distance" from the reachable actors [2]_. You might think this
+    scale factor is inverted but it is not. As is, nodes from small
+    components receive a smaller closeness value. Letting `N` denote
+    the number of nodes in the graph,
+
+    .. math::
+
+        C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    u : node, optional
+      Return only the value for node u
+
+    distance : edge attribute key, optional (default=None)
+      Use the specified edge attribute as the edge distance in shortest
+      path calculations.  If `None` (the default) all edges have a distance of 1.
+      Absent edge attributes are assigned a distance of 1. Note that no check
+      is performed to ensure that edges have the provided attribute.
+
+    wf_improved : bool, optional (default=True)
+      If True, scale by the fraction of nodes reachable. This gives the
+      Wasserman and Faust improved formula. For single component graphs
+      it is the same as the original formula.
+
+    Returns
+    -------
+    nodes : dictionary
+      Dictionary of nodes with closeness centrality as the value.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
+    >>> nx.closeness_centrality(G)
+    {0: 1.0, 1: 1.0, 2: 0.75, 3: 0.75}
+
+    See Also
+    --------
+    betweenness_centrality, load_centrality, eigenvector_centrality,
+    degree_centrality, incremental_closeness_centrality
+
+    Notes
+    -----
+    The closeness centrality is normalized to `(n-1)/(|G|-1)` where
+    `n` is the number of nodes in the connected part of the graph
+    containing the node.  If the graph is not completely connected,
+    this algorithm computes the closeness centrality for each
+    connected part separately, scaled by that part's size.
+
+    If the 'distance' keyword is set to an edge attribute key then the
+    shortest-path length will be computed using Dijkstra's algorithm with
+    that edge attribute as the edge weight.
+
+    The closeness centrality uses *inward* distance to a node, not outward.
+    If you want to use outward distances, apply the function to `G.reverse()`.
+
+    In NetworkX 2.2 and earlier a bug caused Dijkstra's algorithm to use the
+    outward distance rather than the inward distance. If you use a 'distance'
+    keyword and a DiGraph, your results will change between v2.2 and v2.3.
+
+    References
+    ----------
+    .. [1] Linton C. Freeman: Centrality in networks: I.
+       Conceptual clarification. Social Networks 1:215-239, 1979.
+       https://doi.org/10.1016/0378-8733(78)90021-7
+    .. [2] pg. 201 of Wasserman, S. and Faust, K.,
+       Social Network Analysis: Methods and Applications, 1994,
+       Cambridge University Press.
+    """
+    if G.is_directed():
+        G = G.reverse()  # create a reversed graph view
+
+    if distance is not None:
+        # use Dijkstra's algorithm with specified attribute as edge weight
+        path_length = functools.partial(
+            nx.single_source_dijkstra_path_length, weight=distance
+        )
+    else:
+        path_length = nx.single_source_shortest_path_length
+
+    if u is None:
+        nodes = G.nodes
+    else:
+        nodes = [u]
+    closeness_dict = {}
+    for n in nodes:
+        sp = path_length(G, n)
+        totsp = sum(sp.values())
+        len_G = len(G)
+        _closeness_centrality = 0.0
+        if totsp > 0.0 and len_G > 1:
+            _closeness_centrality = (len(sp) - 1.0) / totsp
+            # normalize to number of nodes-1 in connected part
+            if wf_improved:
+                s = (len(sp) - 1.0) / (len_G - 1)
+                _closeness_centrality *= s
+        closeness_dict[n] = _closeness_centrality
+    if u is not None:
+        return closeness_dict[u]
+    return closeness_dict
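A short sketch contrasting hop counts with a distance attribute (the attribute name 'dist' is an arbitrary choice for illustration):

import networkx as nx

G = nx.Graph()
G.add_edge("a", "b", dist=2.0)
G.add_edge("b", "c", dist=1.0)
hops = nx.closeness_centrality(G)  # every edge counts as distance 1
weighted = nx.closeness_centrality(G, distance="dist")  # Dijkstra on 'dist'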
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(mutates_input=True)
+def incremental_closeness_centrality(
+    G, edge, prev_cc=None, insertion=True, wf_improved=True
+):
+    r"""Incremental closeness centrality for nodes.
+
+    Compute closeness centrality for nodes using level-based work filtering
+    as described in Incremental Algorithms for Closeness Centrality by Sariyuce et al.
+
+    Level-based work filtering detects unnecessary updates to the closeness
+    centrality and filters them out.
+
+    ---
+    From "Incremental Algorithms for Closeness Centrality":
+
+    Theorem 1: Let :math:`G = (V, E)` be a graph and u and v be two vertices in V
+    such that there is no edge (u, v) in E. Let :math:`G' = (V, E \cup uv)`
+    Then :math:`cc[s] = cc'[s]` if and only if :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`.
+
+    Where :math:`dG(u, v)` denotes the length of the shortest path between
+    two vertices u, v in a graph G, cc[s] is the closeness centrality for a
+    vertex s in V, and cc'[s] is the closeness centrality for a
+    vertex s in V, with the (u, v) edge added.
+    ---
+
+    We use Theorem 1 to filter out updates when adding or removing an edge.
+    When adding an edge (u, v), we compute the shortest path lengths from all
+    other nodes to u and to v before the node is added. When removing an edge,
+    we compute the shortest path lengths after the edge is removed. Then we
+    apply Theorem 1 to use previously computed closeness centrality for nodes
+    where :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. This works only for
+    undirected, unweighted graphs; the distance argument is not supported.
+
+    Closeness centrality [1]_ of a node `u` is the reciprocal of the
+    sum of the shortest path distances from `u` to all `n-1` other nodes.
+    Since the sum of distances depends on the number of nodes in the
+    graph, closeness is normalized by the sum of minimum possible
+    distances `n-1`.
+
+    .. math::
+
+        C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
+
+    where `d(v, u)` is the shortest-path distance between `v` and `u`,
+    and `n` is the number of nodes in the graph.
+
+    Notice that higher values of closeness indicate higher centrality.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    edge : tuple
+      The modified edge (u, v) in the graph.
+
+    prev_cc : dictionary
+      The previous closeness centrality for all nodes in the graph.
+
+    insertion : bool, optional
+      If True (default) the edge was inserted, otherwise it was deleted from the graph.
+
+    wf_improved : bool, optional (default=True)
+      If True, scale by the fraction of nodes reachable. This gives the
+      Wasserman and Faust improved formula. For single component graphs
+      it is the same as the original formula.
+
+    Returns
+    -------
+    nodes : dictionary
+      Dictionary of nodes with closeness centrality as the value.
+
+    See Also
+    --------
+    betweenness_centrality, load_centrality, eigenvector_centrality,
+    degree_centrality, closeness_centrality
+
+    Notes
+    -----
+    The closeness centrality is normalized to `(n-1)/(|G|-1)` where
+    `n` is the number of nodes in the connected part of graph
+    containing the node.  If the graph is not completely connected,
+    this algorithm computes the closeness centrality for each
+    connected part separately.
+
+    References
+    ----------
+    .. [1] Freeman, L.C., 1979. Centrality in networks: I.
+       Conceptual clarification.  Social Networks 1, 215--239.
+       https://doi.org/10.1016/0378-8733(78)90021-7
+    .. [2] Sariyuce, A.E. ; Kaya, K. ; Saule, E. ; Catalyiirek, U.V. Incremental
+       Algorithms for Closeness Centrality. 2013 IEEE International Conference on Big Data
+       http://sariyuce.com/papers/bigdata13.pdf
+    """
+    if prev_cc is not None and set(prev_cc.keys()) != set(G.nodes()):
+        raise NetworkXError("prev_cc and G do not have the same nodes")
+
+    # Unpack edge
+    (u, v) = edge
+    path_length = nx.single_source_shortest_path_length
+
+    if insertion:
+        # For edge insertion, we want shortest paths before the edge is inserted
+        du = path_length(G, u)
+        dv = path_length(G, v)
+
+        G.add_edge(u, v)
+    else:
+        G.remove_edge(u, v)
+
+        # For edge removal, we want shortest paths after the edge is removed
+        du = path_length(G, u)
+        dv = path_length(G, v)
+
+    if prev_cc is None:
+        return nx.closeness_centrality(G)
+
+    nodes = G.nodes()
+    closeness_dict = {}
+    for n in nodes:
+        if n in du and n in dv and abs(du[n] - dv[n]) <= 1:
+            closeness_dict[n] = prev_cc[n]
+        else:
+            sp = path_length(G, n)
+            totsp = sum(sp.values())
+            len_G = len(G)
+            _closeness_centrality = 0.0
+            if totsp > 0.0 and len_G > 1:
+                _closeness_centrality = (len(sp) - 1.0) / totsp
+                # normalize to number of nodes-1 in connected part
+                if wf_improved:
+                    s = (len(sp) - 1.0) / (len_G - 1)
+                    _closeness_centrality *= s
+            closeness_dict[n] = _closeness_centrality
+
+    # Leave the graph as we found it
+    if insertion:
+        G.remove_edge(u, v)
+    else:
+        G.add_edge(u, v)
+
+    return closeness_dict
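A sketch of the incremental update; prev_cc must come from the graph as it is before the change (the graph and edge are arbitrary illustrations):

import networkx as nx

G = nx.path_graph(5)
prev = nx.closeness_centrality(G)
# Score the graph with edge (0, 4) inserted. Nodes whose distances to 0
# and 4 differ by at most 1 reuse their previous value (Theorem 1 filter).
cc = nx.incremental_closeness_centrality(G, edge=(0, 4), prev_cc=prev, insertion=True)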
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py
new file mode 100644
index 00000000..bfde279a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py
@@ -0,0 +1,342 @@
+"""Current-flow betweenness centrality measures."""
+
+import networkx as nx
+from networkx.algorithms.centrality.flow_matrix import (
+    CGInverseLaplacian,
+    FullInverseLaplacian,
+    SuperLUInverseLaplacian,
+    flow_matrix_row,
+)
+from networkx.utils import (
+    not_implemented_for,
+    py_random_state,
+    reverse_cuthill_mckee_ordering,
+)
+
+__all__ = [
+    "current_flow_betweenness_centrality",
+    "approximate_current_flow_betweenness_centrality",
+    "edge_current_flow_betweenness_centrality",
+]
+
+
+@not_implemented_for("directed")
+@py_random_state(7)
+@nx._dispatchable(edge_attrs="weight")
+def approximate_current_flow_betweenness_centrality(
+    G,
+    normalized=True,
+    weight=None,
+    dtype=float,
+    solver="full",
+    epsilon=0.5,
+    kmax=10000,
+    seed=None,
+):
+    r"""Compute the approximate current-flow betweenness centrality for nodes.
+
+    Approximates the current-flow betweenness centrality within absolute
+    error of epsilon with high probability [1]_.
+
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    normalized : bool, optional (default=True)
+      If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
+      n is the number of nodes in G.
+
+    weight : string or None, optional (default=None)
+      Key for edge data used as the edge weight.
+      If None, then use 1 as each edge weight.
+      The weight reflects the capacity or the strength of the
+      edge.
+
+    dtype : data type (float)
+      Default data type for internal matrices.
+      Set to np.float32 for lower memory consumption.
+
+    solver : string (default='full')
+       Type of linear solver to use for computing the flow matrix.
+       Options are "full" (uses most memory), "lu" (recommended), and
+       "cg" (uses least memory).
+
+    epsilon: float
+        Absolute error tolerance.
+
+    kmax: int
+       Maximum number of sample node pairs to use for approximation.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness<randomness>`.
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with betweenness centrality as the value.
+
+    See Also
+    --------
+    current_flow_betweenness_centrality
+
+    Notes
+    -----
+    The running time is $O((1/\epsilon^2)m{\sqrt k} \log n)$
+    and the space required is $O(m)$ for $n$ nodes and $m$ edges.
+
+    If the edges have a 'weight' attribute they will be used as
+    weights in this algorithm.  Unspecified weights are set to 1.
+
+    References
+    ----------
+    .. [1] Ulrik Brandes and Daniel Fleischer:
+       Centrality Measures Based on Current Flow.
+       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+       https://doi.org/10.1007/978-3-540-31856-9_44
+    """
+    import numpy as np
+
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    solvername = {
+        "full": FullInverseLaplacian,
+        "lu": SuperLUInverseLaplacian,
+        "cg": CGInverseLaplacian,
+    }
+    n = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
+    L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc")
+    L = L.astype(dtype)
+    C = solvername[solver](L, dtype=dtype)  # initialize solver
+    betweenness = dict.fromkeys(H, 0.0)
+    nb = (n - 1.0) * (n - 2.0)  # normalization factor
+    cstar = n * (n - 1) / nb
+    l = 1  # parameter in approximation, adjustable
+    k = l * int(np.ceil((cstar / epsilon) ** 2 * np.log(n)))
+    if k > kmax:
+        msg = f"Number random pairs k>kmax ({k}>{kmax}) "
+        raise nx.NetworkXError(msg, "Increase kmax or epsilon")
+    cstar2k = cstar / (2 * k)
+    for _ in range(k):
+        s, t = pair = seed.sample(range(n), 2)
+        b = np.zeros(n, dtype=dtype)
+        b[s] = 1
+        b[t] = -1
+        p = C.solve(b)
+        for v in H:
+            if v in pair:
+                continue
+            for nbr in H[v]:
+                w = H[v][nbr].get(weight, 1.0)
+                betweenness[v] += float(w * np.abs(p[v] - p[nbr]) * cstar2k)
+    if normalized:
+        factor = 1.0
+    else:
+        factor = nb / 2.0
+    # remap to original node names and "unnormalize" if required
+    return {ordering[k]: v * factor for k, v in betweenness.items()}
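A usage sketch (SciPy is required for the Laplacian machinery; graph, epsilon, and seed are arbitrary choices):

import networkx as nx

G = nx.grid_2d_graph(4, 4)  # must be connected
approx = nx.approximate_current_flow_betweenness_centrality(
    G, epsilon=0.5, solver="full", seed=7
)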
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def current_flow_betweenness_centrality(
+    G, normalized=True, weight=None, dtype=float, solver="full"
+):
+    r"""Compute current-flow betweenness centrality for nodes.
+
+    Current-flow betweenness centrality uses an electrical current
+    model for information spreading in contrast to betweenness
+    centrality which uses shortest paths.
+
+    Current-flow betweenness centrality is also known as
+    random-walk betweenness centrality [2]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    normalized : bool, optional (default=True)
+      If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
+      n is the number of nodes in G.
+
+    weight : string or None, optional (default=None)
+      Key for edge data used as the edge weight.
+      If None, then use 1 as each edge weight.
+      The weight reflects the capacity or the strength of the
+      edge.
+
+    dtype : data type (float)
+      Default data type for internal matrices.
+      Set to np.float32 for lower memory consumption.
+
+    solver : string (default='full')
+       Type of linear solver to use for computing the flow matrix.
+       Options are "full" (uses most memory), "lu" (recommended), and
+       "cg" (uses least memory).
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with betweenness centrality as the value.
+
+    See Also
+    --------
+    approximate_current_flow_betweenness_centrality
+    betweenness_centrality
+    edge_betweenness_centrality
+    edge_current_flow_betweenness_centrality
+
+    Notes
+    -----
+    Current-flow betweenness can be computed in  $O(I(n-1)+mn \log n)$
+    time [1]_, where $I(n-1)$ is the time needed to compute the
+    inverse Laplacian.  For a full matrix this is $O(n^3)$ but using
+    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
+    Laplacian matrix condition number.
+
+    The space required is $O(nw)$ where $w$ is the width of the sparse
+    Laplacian matrix.  Worse case is $w=n$ for $O(n^2)$.
+
+    If the edges have a 'weight' attribute they will be used as
+    weights in this algorithm.  Unspecified weights are set to 1.
+
+    References
+    ----------
+    .. [1] Centrality Measures Based on Current Flow.
+       Ulrik Brandes and Daniel Fleischer,
+       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+       https://doi.org/10.1007/978-3-540-31856-9_44
+
+    .. [2] A measure of betweenness centrality based on random walks,
+       M. E. J. Newman, Social Networks 27, 39-54 (2005).
+    """
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    N = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    H = nx.relabel_nodes(G, dict(zip(ordering, range(N))))
+    betweenness = dict.fromkeys(H, 0.0)  # b[n]=0 for n in H
+    for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
+        pos = dict(zip(row.argsort()[::-1], range(N)))
+        for i in range(N):
+            betweenness[s] += (i - pos[i]) * row.item(i)
+            betweenness[t] += (N - i - 1 - pos[i]) * row.item(i)
+    if normalized:
+        nb = (N - 1.0) * (N - 2.0)  # normalization factor
+    else:
+        nb = 2.0
+    return {ordering[n]: (b - n) * 2.0 / nb for n, b in betweenness.items()}
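A sketch comparing the two node measures (SciPy required for the solvers; the graph choice is arbitrary):

import networkx as nx

G = nx.wheel_graph(6)  # hub plus a 5-cycle rim
cf = nx.current_flow_betweenness_centrality(G, solver="lu")
sp = nx.betweenness_centrality(G)
# cf spreads credit over all paths carrying current, not only shortest
# ones, so the two rankings generally differ on the rim nodes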
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def edge_current_flow_betweenness_centrality(
+    G, normalized=True, weight=None, dtype=float, solver="full"
+):
+    r"""Compute current-flow betweenness centrality for edges.
+
+    Current-flow betweenness centrality uses an electrical current
+    model for information spreading in contrast to betweenness
+    centrality which uses shortest paths.
+
+    Current-flow betweenness centrality is also known as
+    random-walk betweenness centrality [2]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    normalized : bool, optional (default=True)
+      If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
+      n is the number of nodes in G.
+
+    weight : string or None, optional (default=None)
+      Key for edge data used as the edge weight.
+      If None, then use 1 as each edge weight.
+      The weight reflects the capacity or the strength of the
+      edge.
+
+    dtype : data type (default=float)
+      Default data type for internal matrices.
+      Set to np.float32 for lower memory consumption.
+
+    solver : string (default='full')
+       Type of linear solver to use for computing the flow matrix.
+       Options are "full" (uses most memory), "lu" (recommended), and
+       "cg" (uses least memory).
+
+    Returns
+    -------
+    edges : dictionary
+       Dictionary of edge tuples with betweenness centrality as the value.
+
+    Raises
+    ------
+    NetworkXError
+        The algorithm does not support DiGraphs.
+        If the input graph is an instance of DiGraph class, NetworkXError
+        is raised.
+
+    See Also
+    --------
+    betweenness_centrality
+    edge_betweenness_centrality
+    current_flow_betweenness_centrality
+
+    Notes
+    -----
+    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
+    time [1]_, where $I(n-1)$ is the time needed to compute the
+    inverse Laplacian.  For a full matrix this is $O(n^3)$ but using
+    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
+    Laplacian matrix condition number.
+
+    The space required is $O(nw)$ where $w$ is the width of the sparse
+    Laplacian matrix.  Worst case is $w=n$ for $O(n^2)$.
+
+    If the edges have a 'weight' attribute they will be used as
+    weights in this algorithm.  Unspecified weights are set to 1.
+
+    References
+    ----------
+    .. [1] Centrality Measures Based on Current Flow.
+       Ulrik Brandes and Daniel Fleischer,
+       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+       https://doi.org/10.1007/978-3-540-31856-9_44
+
+    .. [2] A measure of betweenness centrality based on random walks,
+       M. E. J. Newman, Social Networks 27, 39-54 (2005).
+    """
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    N = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    H = nx.relabel_nodes(G, dict(zip(ordering, range(N))))
+    edges = (tuple(sorted((u, v))) for u, v in H.edges())
+    betweenness = dict.fromkeys(edges, 0.0)
+    if normalized:
+        nb = (N - 1.0) * (N - 2.0)  # normalization factor
+    else:
+        nb = 2.0
+    for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
+        pos = dict(zip(row.argsort()[::-1], range(1, N + 1)))
+        for i in range(N):
+            betweenness[e] += (i + 1 - pos[i]) * row.item(i)
+            betweenness[e] += (N - i - pos[i]) * row.item(i)
+        betweenness[e] /= nb
+    return {(ordering[s], ordering[t]): b for (s, t), b in betweenness.items()}
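And the edge variant, as a minimal sketch:

import networkx as nx

G = nx.cycle_graph(4)
ecf = nx.edge_current_flow_betweenness_centrality(G, normalized=True)
# keyed by edge tuples; by symmetry all four edges score the same here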
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py
new file mode 100644
index 00000000..911718c8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py
@@ -0,0 +1,227 @@
+"""Current-flow betweenness centrality measures for subsets of nodes."""
+
+import networkx as nx
+from networkx.algorithms.centrality.flow_matrix import flow_matrix_row
+from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
+
+__all__ = [
+    "current_flow_betweenness_centrality_subset",
+    "edge_current_flow_betweenness_centrality_subset",
+]
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def current_flow_betweenness_centrality_subset(
+    G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu"
+):
+    r"""Compute current-flow betweenness centrality for subsets of nodes.
+
+    Current-flow betweenness centrality uses an electrical current
+    model for information spreading in contrast to betweenness
+    centrality which uses shortest paths.
+
+    Current-flow betweenness centrality is also known as
+    random-walk betweenness centrality [2]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    sources: list of nodes
+      Nodes to use as sources for current
+
+    targets: list of nodes
+      Nodes to use as sinks for current
+
+    normalized : bool, optional (default=True)
+      If True the betweenness values are normalized by dividing by
+      (n-1)(n-2), where n is the number of nodes in G.
+
+    weight : string or None, optional (default=None)
+      Key for edge data used as the edge weight.
+      If None, then use 1 as each edge weight.
+      The weight reflects the capacity or the strength of the
+      edge.
+
+    dtype: data type (float)
+      Default data type for internal matrices.
+      Set to np.float32 for lower memory consumption.
+
+    solver: string (default='lu')
+       Type of linear solver to use for computing the flow matrix.
+       Options are "full" (uses most memory), "lu" (recommended), and
+       "cg" (uses least memory).
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with betweenness centrality as the value.
+
+    See Also
+    --------
+    approximate_current_flow_betweenness_centrality
+    betweenness_centrality
+    edge_betweenness_centrality
+    edge_current_flow_betweenness_centrality
+
+    Notes
+    -----
+    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
+    time [1]_, where $I(n-1)$ is the time needed to compute the
+    inverse Laplacian.  For a full matrix this is $O(n^3)$ but using
+    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
+    Laplacian matrix condition number.
+
+    The space required is $O(nw)$ where $w$ is the width of the sparse
+    Laplacian matrix.  Worst case is $w=n$ for $O(n^2)$.
+
+    If the edges have a 'weight' attribute they will be used as
+    weights in this algorithm.  Unspecified weights are set to 1.
+
+    References
+    ----------
+    .. [1] Centrality Measures Based on Current Flow.
+       Ulrik Brandes and Daniel Fleischer,
+       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+       https://doi.org/10.1007/978-3-540-31856-9_44
+
+    .. [2] A measure of betweenness centrality based on random walks,
+       M. E. J. Newman, Social Networks 27, 39-54 (2005).
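+
+    Examples
+    --------
+    A minimal usage sketch (values not shown here, since they depend on the
+    `normalized` and `solver` settings)::
+
+        G = nx.cycle_graph(4)
+        cfb = nx.current_flow_betweenness_centrality_subset(
+            G, sources=[0], targets=[2]
+        )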
+    """
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    N = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    mapping = dict(zip(ordering, range(N)))
+    H = nx.relabel_nodes(G, mapping)
+    betweenness = dict.fromkeys(H, 0.0)  # b[n]=0 for n in H
+    for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
+        for ss in sources:
+            i = mapping[ss]
+            for tt in targets:
+                j = mapping[tt]
+                betweenness[s] += 0.5 * abs(row.item(i) - row.item(j))
+                betweenness[t] += 0.5 * abs(row.item(i) - row.item(j))
+    if normalized:
+        nb = (N - 1.0) * (N - 2.0)  # normalization factor
+    else:
+        nb = 2.0
+    for node in H:
+        betweenness[node] = betweenness[node] / nb + 1.0 / (2 - N)
+    return {ordering[node]: value for node, value in betweenness.items()}
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def edge_current_flow_betweenness_centrality_subset(
+    G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu"
+):
+    r"""Compute current-flow betweenness centrality for edges using subsets
+    of nodes.
+
+    Current-flow betweenness centrality uses an electrical current
+    model for information spreading in contrast to betweenness
+    centrality which uses shortest paths.
+
+    Current-flow betweenness centrality is also known as
+    random-walk betweenness centrality [2]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    sources: list of nodes
+      Nodes to use as sources for current
+
+    targets: list of nodes
+      Nodes to use as sinks for current
+
+    normalized : bool, optional (default=True)
+      If True the betweenness values are normalized by b = b / ((n - 1)(n - 2))
+      where n is the number of nodes in G.
+
+    weight : string or None, optional (default=None)
+      Key for edge data used as the edge weight.
+      If None, then use 1 as each edge weight.
+      The weight reflects the capacity or the strength of the
+      edge.
+
+    dtype: data type (float)
+      Default data type for internal matrices.
+      Set to np.float32 for lower memory consumption.
+
+    solver: string (default='lu')
+       Type of linear solver to use for computing the flow matrix.
+       Options are "full" (uses most memory), "lu" (recommended), and
+       "cg" (uses least memory).
+
+    Returns
+    -------
+    nodes : dict
+       Dictionary of edge tuples with betweenness centrality as the value.
+
+    See Also
+    --------
+    betweenness_centrality
+    edge_betweenness_centrality
+    current_flow_betweenness_centrality
+
+    Notes
+    -----
+    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
+    time [1]_, where $I(n-1)$ is the time needed to compute the
+    inverse Laplacian.  For a full matrix this is $O(n^3)$ but using
+    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
+    Laplacian matrix condition number.
+
+    The space required is $O(nw)$ where $w$ is the width of the sparse
+    Laplacian matrix.  Worst case is $w=n$ for $O(n^2)$.
+
+    If the edges have a 'weight' attribute they will be used as
+    weights in this algorithm.  Unspecified weights are set to 1.
+
+    References
+    ----------
+    .. [1] Centrality Measures Based on Current Flow.
+       Ulrik Brandes and Daniel Fleischer,
+       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+       https://doi.org/10.1007/978-3-540-31856-9_44
+
+    .. [2] A measure of betweenness centrality based on random walks,
+       M. E. J. Newman, Social Networks 27, 39-54 (2005).
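+
+    Examples
+    --------
+    A minimal usage sketch (values not shown; the result is keyed by edge
+    tuples)::
+
+        G = nx.cycle_graph(4)
+        ecfb = nx.edge_current_flow_betweenness_centrality_subset(
+            G, sources=[0], targets=[2]
+        )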
+    """
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    N = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    mapping = dict(zip(ordering, range(N)))
+    H = nx.relabel_nodes(G, mapping)
+    edges = (tuple(sorted((u, v))) for u, v in H.edges())
+    betweenness = dict.fromkeys(edges, 0.0)
+    if normalized:
+        nb = (N - 1.0) * (N - 2.0)  # normalization factor
+    else:
+        nb = 2.0
+    for row, e in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
+        for ss in sources:
+            i = mapping[ss]
+            for tt in targets:
+                j = mapping[tt]
+                betweenness[e] += 0.5 * abs(row.item(i) - row.item(j))
+        betweenness[e] /= nb
+    return {(ordering[s], ordering[t]): value for (s, t), value in betweenness.items()}
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_closeness.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_closeness.py
new file mode 100644
index 00000000..67f86397
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/current_flow_closeness.py
@@ -0,0 +1,96 @@
+"""Current-flow closeness centrality measures."""
+
+import networkx as nx
+from networkx.algorithms.centrality.flow_matrix import (
+    CGInverseLaplacian,
+    FullInverseLaplacian,
+    SuperLUInverseLaplacian,
+)
+from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
+
+__all__ = ["current_flow_closeness_centrality", "information_centrality"]
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def current_flow_closeness_centrality(G, weight=None, dtype=float, solver="lu"):
+    """Compute current-flow closeness centrality for nodes.
+
+    Current-flow closeness centrality is a variant of closeness
+    centrality based on effective resistance between nodes in
+    a network. This metric is also known as information centrality.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph.
+
+    weight : None or string, optional (default=None)
+      If None, all edge weights are considered equal.
+      Otherwise holds the name of the edge attribute used as weight.
+      The weight reflects the capacity or the strength of the
+      edge.
+
+    dtype: data type (default=float)
+      Default data type for internal matrices.
+      Set to np.float32 for lower memory consumption.
+
+    solver: string (default='lu')
+       Type of linear solver to use for computing the flow matrix.
+       Options are "full" (uses most memory), "lu" (recommended), and
+       "cg" (uses least memory).
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with current flow closeness centrality as the value.
+
+    See Also
+    --------
+    closeness_centrality
+
+    Notes
+    -----
+    The algorithm is from Brandes [1]_.
+
+    See also [2]_ for the original definition of information centrality.
+
+    References
+    ----------
+    .. [1] Ulrik Brandes and Daniel Fleischer,
+       Centrality Measures Based on Current Flow.
+       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+       https://doi.org/10.1007/978-3-540-31856-9_44
+
+    .. [2] Karen Stephenson and Marvin Zelen:
+       Rethinking centrality: Methods and examples.
+       Social Networks 11(1):1-37, 1989.
+       https://doi.org/10.1016/0378-8733(89)90016-6
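+
+    Examples
+    --------
+    A minimal usage sketch (values not shown; ``information_centrality`` is
+    an alias of this function)::
+
+        G = nx.path_graph(4)
+        cfc = nx.current_flow_closeness_centrality(G)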
+    """
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    solvername = {
+        "full": FullInverseLaplacian,
+        "lu": SuperLUInverseLaplacian,
+        "cg": CGInverseLaplacian,
+    }
+    N = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    H = nx.relabel_nodes(G, dict(zip(ordering, range(N))))
+    betweenness = dict.fromkeys(H, 0.0)  # b[n]=0 for n in H
+    L = nx.laplacian_matrix(H, nodelist=range(N), weight=weight).asformat("csc")
+    L = L.astype(dtype)
+    C2 = solvername[solver](L, width=1, dtype=dtype)  # initialize solver
+    for v in H:
+        col = C2.get_row(v)
+        for w in H:
+            betweenness[v] += col.item(v) - 2 * col.item(w)
+            betweenness[w] += col.item(v)
+    return {ordering[node]: 1 / value for node, value in betweenness.items()}
+
+
+information_centrality = current_flow_closeness_centrality
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/degree_alg.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/degree_alg.py
new file mode 100644
index 00000000..b3c1e321
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/degree_alg.py
@@ -0,0 +1,150 @@
+"""Degree centrality measures."""
+
+import networkx as nx
+from networkx.utils.decorators import not_implemented_for
+
+__all__ = ["degree_centrality", "in_degree_centrality", "out_degree_centrality"]
+
+
+@nx._dispatchable
+def degree_centrality(G):
+    """Compute the degree centrality for nodes.
+
+    The degree centrality for a node v is the fraction of nodes it
+    is connected to.
+
+    Parameters
+    ----------
+    G : graph
+      A networkx graph
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with degree centrality as the value.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
+    >>> nx.degree_centrality(G)
+    {0: 1.0, 1: 1.0, 2: 0.6666666666666666, 3: 0.6666666666666666}
+
+    See Also
+    --------
+    betweenness_centrality, load_centrality, eigenvector_centrality
+
+    Notes
+    -----
+    The degree centrality values are normalized by dividing by the maximum
+    possible degree in a simple graph n-1 where n is the number of nodes in G.
+
+    For multigraphs or graphs with self loops the maximum degree might
+    be higher than n-1 and values of degree centrality greater than 1
+    are possible.
+    """
+    if len(G) <= 1:
+        return {n: 1 for n in G}
+
+    s = 1.0 / (len(G) - 1.0)
+    centrality = {n: d * s for n, d in G.degree()}
+    return centrality
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable
+def in_degree_centrality(G):
+    """Compute the in-degree centrality for nodes.
+
+    The in-degree centrality for a node v is the fraction of nodes its
+    incoming edges are connected to.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    Returns
+    -------
+    nodes : dictionary
+        Dictionary of nodes with in-degree centrality as values.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If G is undirected.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
+    >>> nx.in_degree_centrality(G)
+    {0: 0.0, 1: 0.3333333333333333, 2: 0.6666666666666666, 3: 0.6666666666666666}
+
+    See Also
+    --------
+    degree_centrality, out_degree_centrality
+
+    Notes
+    -----
+    The degree centrality values are normalized by dividing by the maximum
+    possible degree in a simple graph n-1 where n is the number of nodes in G.
+
+    For multigraphs or graphs with self loops the maximum degree might
+    be higher than n-1 and values of degree centrality greater than 1
+    are possible.
+    """
+    if len(G) <= 1:
+        return {n: 1 for n in G}
+
+    s = 1.0 / (len(G) - 1.0)
+    centrality = {n: d * s for n, d in G.in_degree()}
+    return centrality
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable
+def out_degree_centrality(G):
+    """Compute the out-degree centrality for nodes.
+
+    The out-degree centrality for a node v is the fraction of nodes its
+    outgoing edges are connected to.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    Returns
+    -------
+    nodes : dictionary
+        Dictionary of nodes with out-degree centrality as values.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If G is undirected.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
+    >>> nx.out_degree_centrality(G)
+    {0: 1.0, 1: 0.6666666666666666, 2: 0.0, 3: 0.0}
+
+    See Also
+    --------
+    degree_centrality, in_degree_centrality
+
+    Notes
+    -----
+    The degree centrality values are normalized by dividing by the maximum
+    possible degree in a simple graph n-1 where n is the number of nodes in G.
+
+    For multigraphs or graphs with self loops the maximum degree might
+    be higher than n-1 and values of degree centrality greater than 1
+    are possible.
+    """
+    if len(G) <= 1:
+        return {n: 1 for n in G}
+
+    s = 1.0 / (len(G) - 1.0)
+    centrality = {n: d * s for n, d in G.out_degree()}
+    return centrality
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/dispersion.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/dispersion.py
new file mode 100644
index 00000000..a3fa6858
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/dispersion.py
@@ -0,0 +1,107 @@
+from itertools import combinations
+
+import networkx as nx
+
+__all__ = ["dispersion"]
+
+
+@nx._dispatchable
+def dispersion(G, u=None, v=None, normalized=True, alpha=1.0, b=0.0, c=0.0):
+    r"""Calculate dispersion between `u` and `v` in `G`.
+
+    A link between two actors (`u` and `v`) has a high dispersion when their
+    mutual ties (`s` and `t`) are not well connected with each other.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph.
+    u : node, optional
+        The source for the dispersion score (e.g. ego node of the network).
+    v : node, optional
+        The target of the dispersion score if specified.
+    normalized : bool
+        If True (default) normalize by the embeddedness of the nodes (u and v).
+    alpha, b, c : float
+        Parameters for the normalization procedure. When `normalized` is True,
+        the dispersion value is normalized by::
+
+            result = ((dispersion + b) ** alpha) / (embeddedness + c)
+
+        as long as the denominator is nonzero.
+
+    Returns
+    -------
+    nodes : dictionary
+        If u (v) is specified, returns a dictionary of nodes with dispersion
+        score for all "target" ("source") nodes. If neither u nor v is
+        specified, returns a dictionary of dictionaries for all nodes 'u' in the
+        graph with a dispersion score for each node 'v'.
+
+    Notes
+    -----
+    This implementation follows Lars Backstrom and Jon Kleinberg [1]_. Typical
+    usage would be to run dispersion on the ego network $G_u$ if $u$ were
+    specified.  Running :func:`dispersion` with neither $u$ nor $v$ specified
+    can take some time to complete.
+
+    References
+    ----------
+    .. [1] Romantic Partnerships and the Dispersion of Social Ties:
+        A Network Analysis of Relationship Status on Facebook.
+        Lars Backstrom, Jon Kleinberg.
+        https://arxiv.org/pdf/1310.6753v1.pdf
+
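+    Examples
+    --------
+    In a 4-cycle, nodes 0 and 2 have exactly two mutual neighbors, 1 and 3,
+    which are neither adjacent nor linked through another shared neighbor,
+    so a dispersion of 1 is normalized by an embeddedness of 2:
+
+    >>> G = nx.cycle_graph(4)
+    >>> nx.dispersion(G, 0, 2)
+    0.5
+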
+    """
+
+    def _dispersion(G_u, u, v):
+        """dispersion for all nodes 'v' in a ego network G_u of node 'u'"""
+        u_nbrs = set(G_u[u])
+        ST = {n for n in G_u[v] if n in u_nbrs}
+        set_uv = {u, v}
+        # all possible ties of connections that u and b share
+        possib = combinations(ST, 2)
+        total = 0
+        for s, t in possib:
+            # neighbors of s that are in G_u, not including u and v
+            nbrs_s = u_nbrs.intersection(G_u[s]) - set_uv
+            # s and t are not directly connected
+            if t not in nbrs_s:
+                # s and t do not share a connection
+                if nbrs_s.isdisjoint(G_u[t]):
+                    # tick for disp(u, v)
+                    total += 1
+        # neighbors that u and v share
+        embeddedness = len(ST)
+
+        dispersion_val = total
+        if normalized:
+            dispersion_val = (total + b) ** alpha
+            if embeddedness + c != 0:
+                dispersion_val /= embeddedness + c
+
+        return dispersion_val
+
+    if u is None:
+        # v and u are not specified
+        if v is None:
+            results = {n: {} for n in G}
+            for u in G:
+                for v in G[u]:
+                    results[u][v] = _dispersion(G, u, v)
+        # u is not specified, but v is
+        else:
+            results = {}
+            for u in G[v]:
+                results[u] = _dispersion(G, v, u)
+    else:
+        # u is specified with no target v
+        if v is None:
+            results = {}
+            for v in G[u]:
+                results[v] = _dispersion(G, u, v)
+        # both u and v are specified
+        else:
+            results = _dispersion(G, u, v)
+
+    return results
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/eigenvector.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/eigenvector.py
new file mode 100644
index 00000000..b8cf63e8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/eigenvector.py
@@ -0,0 +1,357 @@
+"""Functions for computing eigenvector centrality."""
+
+import math
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["eigenvector_centrality", "eigenvector_centrality_numpy"]
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None, weight=None):
+    r"""Compute the eigenvector centrality for the graph G.
+
+    Eigenvector centrality computes the centrality for a node by adding
+    the centrality of its predecessors. The centrality for node $i$ is the
+    $i$-th element of a left eigenvector associated with the eigenvalue $\lambda$
+    of maximum modulus that is positive. Such an eigenvector $x$ is
+    defined up to a multiplicative constant by the equation
+
+    .. math::
+
+         \lambda x^T = x^T A,
+
+    where $A$ is the adjacency matrix of the graph G. By definition of
+    row-column product, the equation above is equivalent to
+
+    .. math::
+
+        \lambda x_i = \sum_{j\to i}x_j.
+
+    That is, by adding the eigenvector centralities of the predecessors of
+    $i$, one obtains the eigenvector centrality of $i$ multiplied by
+    $\lambda$. In the case of undirected graphs, $x$ also solves the familiar
+    right-eigenvector equation $Ax = \lambda x$.
+
+    By virtue of the Perron–Frobenius theorem [1]_, if G is strongly
+    connected there is a unique eigenvector $x$, and all its entries
+    are strictly positive.
+
+    If G is not strongly connected there might be several left
+    eigenvectors associated with $\lambda$, and some of their elements
+    might be zero.
+
+    Parameters
+    ----------
+    G : graph
+      A networkx graph.
+
+    max_iter : integer, optional (default=100)
+      Maximum number of power iterations.
+
+    tol : float, optional (default=1.0e-6)
+      Error tolerance (in Euclidean norm) used to check convergence in
+      power iteration.
+
+    nstart : dictionary, optional (default=None)
+      Starting value of power iteration for each node. Must have a nonzero
+      projection on the desired eigenvector for the power method to converge.
+      If None, this implementation uses an all-ones vector, which is a safe
+      choice.
+
+    weight : None or string, optional (default=None)
+      If None, all edge weights are considered equal. Otherwise holds the
+      name of the edge attribute used as weight. In this measure the
+      weight is interpreted as the connection strength.
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with eigenvector centrality as the value. The
+       associated vector has unit Euclidean norm and the values are
+       nonnegative.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> centrality = nx.eigenvector_centrality(G)
+    >>> sorted((v, f"{c:0.2f}") for v, c in centrality.items())
+    [(0, '0.37'), (1, '0.60'), (2, '0.60'), (3, '0.37')]
+
+    Raises
+    ------
+    NetworkXPointlessConcept
+        If the graph G is the null graph.
+
+    NetworkXError
+        If each value in `nstart` is zero.
+
+    PowerIterationFailedConvergence
+        If the algorithm fails to converge to the specified tolerance
+        within the specified number of iterations of the power iteration
+        method.
+
+    See Also
+    --------
+    eigenvector_centrality_numpy
+    :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
+    :func:`~networkx.algorithms.link_analysis.hits_alg.hits`
+
+    Notes
+    -----
+    Eigenvector centrality was introduced by Landau [2]_ for chess
+    tournaments. It was later rediscovered by Wei [3]_ and then
+    popularized by Kendall [4]_ in the context of sport ranking. Berge
+    introduced a general definition for graphs based on social connections
+    [5]_. Bonacich [6]_ reintroduced eigenvector centrality and made
+    it popular in link analysis.
+
+    This function computes the left dominant eigenvector, which corresponds
+    to adding the centrality of predecessors: this is the usual approach.
+    To add the centrality of successors first reverse the graph with
+    ``G.reverse()``.
+
+    The implementation uses power iteration [7]_ to compute a dominant
+    eigenvector starting from the provided vector `nstart`. Convergence is
+    guaranteed as long as `nstart` has a nonzero projection on a dominant
+    eigenvector, which certainly happens using the default value.
+
+    The method stops when the change in the computed vector between two
+    iterations is smaller than an error tolerance of ``G.number_of_nodes()
+    * tol`` or after ``max_iter`` iterations, but in the second case it
+    raises an exception.
+
+    This implementation uses $(A + I)$ rather than the adjacency matrix
+    $A$ because the change preserves eigenvectors while shifting the
+    spectrum, thus guaranteeing convergence even for networks whose
+    eigenvalue of maximum modulus is negative.
+
+    References
+    ----------
+    .. [1] Abraham Berman and Robert J. Plemmons.
+       "Nonnegative Matrices in the Mathematical Sciences."
+       Classics in Applied Mathematics. SIAM, 1994.
+
+    .. [2] Edmund Landau.
+       "Zur relativen Wertbemessung der Turnierresultate."
+       Deutsches Wochenschach, 11:366–369, 1895.
+
+    .. [3] Teh-Hsing Wei.
+       "The Algebraic Foundations of Ranking Theory."
+       PhD thesis, University of Cambridge, 1952.
+
+    .. [4] Maurice G. Kendall.
+       "Further contributions to the theory of paired comparisons."
+       Biometrics, 11(1):43–62, 1955.
+       https://www.jstor.org/stable/3001479
+
+    .. [5] Claude Berge
+       "Théorie des graphes et ses applications."
+       Dunod, Paris, France, 1958.
+
+    .. [6] Phillip Bonacich.
+       "Technique for analyzing overlapping memberships."
+       Sociological Methodology, 4:176–185, 1972.
+       https://www.jstor.org/stable/270732
+
+    .. [7] Power iteration: https://en.wikipedia.org/wiki/Power_iteration
+
+    """
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept(
+            "cannot compute centrality for the null graph"
+        )
+    # If no initial vector is provided, start with the all-ones vector.
+    if nstart is None:
+        nstart = {v: 1 for v in G}
+    if all(v == 0 for v in nstart.values()):
+        raise nx.NetworkXError("initial vector cannot have all zero values")
+    # Normalize the initial vector so that each entry is in [0, 1]. This is
+    # guaranteed to never have a divide-by-zero error by the previous line.
+    nstart_sum = sum(nstart.values())
+    x = {k: v / nstart_sum for k, v in nstart.items()}
+    nnodes = G.number_of_nodes()
+    # make up to max_iter iterations
+    for _ in range(max_iter):
+        xlast = x
+        x = xlast.copy()  # Start with xlast times I to iterate with (A+I)
+        # do the multiplication y^T = x^T A (left eigenvector)
+        for n in x:
+            for nbr in G[n]:
+                w = G[n][nbr].get(weight, 1) if weight else 1
+                x[nbr] += xlast[n] * w
+        # Normalize the vector. The normalization denominator `norm`
+        # should never be zero by the Perron--Frobenius
+        # theorem. However, in case it is due to numerical error, we
+        # assume the norm to be one instead.
+        norm = math.hypot(*x.values()) or 1
+        x = {k: v / norm for k, v in x.items()}
+        # Check for convergence (in the L_1 norm).
+        if sum(abs(x[n] - xlast[n]) for n in x) < nnodes * tol:
+            return x
+    raise nx.PowerIterationFailedConvergence(max_iter)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0):
+    r"""Compute the eigenvector centrality for the graph `G`.
+
+    Eigenvector centrality computes the centrality for a node by adding
+    the centrality of its predecessors. The centrality for node $i$ is the
+    $i$-th element of a left eigenvector associated with the eigenvalue $\lambda$
+    of maximum modulus that is positive. Such an eigenvector $x$ is
+    defined up to a multiplicative constant by the equation
+
+    .. math::
+
+         \lambda x^T = x^T A,
+
+    where $A$ is the adjacency matrix of the graph `G`. By definition of
+    row-column product, the equation above is equivalent to
+
+    .. math::
+
+        \lambda x_i = \sum_{j\to i}x_j.
+
+    That is, by adding the eigenvector centralities of the predecessors of
+    $i$, one obtains the eigenvector centrality of $i$ multiplied by
+    $\lambda$. In the case of undirected graphs, $x$ also solves the familiar
+    right-eigenvector equation $Ax = \lambda x$.
+
+    By virtue of the Perron--Frobenius theorem [1]_, if `G` is (strongly)
+    connected, there is a unique eigenvector $x$, and all its entries
+    are strictly positive.
+
+    However, if `G` is not (strongly) connected, there might be several left
+    eigenvectors associated with $\lambda$, and some of their elements
+    might be zero.
+    Depending on the method used to choose eigenvectors, round-off error can affect
+    which of the infinitely many eigenvectors is reported.
+    This can lead to inconsistent results for the same graph, and the
+    underlying implementation is not robust against this.
+    For this reason, only (strongly) connected graphs are accepted.
+
+    Parameters
+    ----------
+    G : graph
+        A connected NetworkX graph.
+
+    weight : None or string, optional (default=None)
+        If ``None``, all edge weights are considered equal. Otherwise holds the
+        name of the edge attribute used as weight. In this measure the
+        weight is interpreted as the connection strength.
+
+    max_iter : integer, optional (default=50)
+        Maximum number of Arnoldi update iterations allowed.
+
+    tol : float, optional (default=0)
+        Relative accuracy for eigenvalues (stopping criterion).
+        The default value of 0 implies machine precision.
+
+    Returns
+    -------
+    nodes : dict of nodes
+        Dictionary of nodes with eigenvector centrality as the value. The
+        associated vector has unit Euclidean norm and the values are
+        nonnegative.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> centrality = nx.eigenvector_centrality_numpy(G)
+    >>> print([f"{node} {centrality[node]:0.2f}" for node in centrality])
+    ['0 0.37', '1 0.60', '2 0.60', '3 0.37']
+
+    Raises
+    ------
+    NetworkXPointlessConcept
+        If the graph `G` is the null graph.
+
+    ArpackNoConvergence
+        When the requested convergence is not obtained. The currently
+        converged eigenvalues and eigenvectors can be found as
+        eigenvalues and eigenvectors attributes of the exception object.
+
+    AmbiguousSolution
+        If `G` is not connected.
+
+    See Also
+    --------
+    :func:`scipy.sparse.linalg.eigs`
+    eigenvector_centrality
+    :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
+    :func:`~networkx.algorithms.link_analysis.hits_alg.hits`
+
+    Notes
+    -----
+    Eigenvector centrality was introduced by Landau [2]_ for chess
+    tournaments. It was later rediscovered by Wei [3]_ and then
+    popularized by Kendall [4]_ in the context of sport ranking. Berge
+    introduced a general definition for graphs based on social connections
+    [5]_. Bonacich [6]_ reintroduced eigenvector centrality and made
+    it popular in link analysis.
+
+    This function computes the left dominant eigenvector, which corresponds
+    to adding the centrality of predecessors: this is the usual approach.
+    To add the centrality of successors first reverse the graph with
+    ``G.reverse()``.
+
+    This implementation uses the
+    :func:`SciPy sparse eigenvalue solver<scipy.sparse.linalg.eigs>` (ARPACK)
+    to find the largest eigenvalue/eigenvector pair using Arnoldi iterations
+    [7]_.
+
+    References
+    ----------
+    .. [1] Abraham Berman and Robert J. Plemmons.
+       "Nonnegative Matrices in the Mathematical Sciences".
+       Classics in Applied Mathematics. SIAM, 1994.
+
+    .. [2] Edmund Landau.
+       "Zur relativen Wertbemessung der Turnierresultate".
+       Deutsches Wochenschach, 11:366--369, 1895.
+
+    .. [3] Teh-Hsing Wei.
+       "The Algebraic Foundations of Ranking Theory".
+       PhD thesis, University of Cambridge, 1952.
+
+    .. [4] Maurice G. Kendall.
+       "Further contributions to the theory of paired comparisons".
+       Biometrics, 11(1):43--62, 1955.
+       https://www.jstor.org/stable/3001479
+
+    .. [5] Claude Berge.
+       "Théorie des graphes et ses applications".
+       Dunod, Paris, France, 1958.
+
+    .. [6] Phillip Bonacich.
+       "Technique for analyzing overlapping memberships".
+       Sociological Methodology, 4:176--185, 1972.
+       https://www.jstor.org/stable/270732
+
+    .. [7] Arnoldi, W. E. (1951).
+       "The principle of minimized iterations in the solution of the matrix eigenvalue problem".
+       Quarterly of Applied Mathematics. 9 (1): 17--29.
+       https://doi.org/10.1090/qam/42792
+    """
+    import numpy as np
+    import scipy as sp
+
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept(
+            "cannot compute centrality for the null graph"
+        )
+    connected = nx.is_strongly_connected(G) if G.is_directed() else nx.is_connected(G)
+    if not connected:  # See gh-6888.
+        raise nx.AmbiguousSolution(
+            "`eigenvector_centrality_numpy` does not give consistent results for disconnected graphs"
+        )
+    M = nx.to_scipy_sparse_array(G, nodelist=list(G), weight=weight, dtype=float)
+    _, eigenvector = sp.sparse.linalg.eigs(
+        M.T, k=1, which="LR", maxiter=max_iter, tol=tol
+    )
+    largest = eigenvector.flatten().real
+    norm = np.sign(largest.sum()) * sp.linalg.norm(largest)
+    return dict(zip(G, (largest / norm).tolist()))
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/flow_matrix.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/flow_matrix.py
new file mode 100644
index 00000000..e72b5e97
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/flow_matrix.py
@@ -0,0 +1,130 @@
+# Helpers for current-flow betweenness and current-flow closeness
+# Lazy computations for inverse Laplacian and flow-matrix rows.
+import networkx as nx
+
+
+@nx._dispatchable(edge_attrs="weight")
+def flow_matrix_row(G, weight=None, dtype=float, solver="lu"):
+    # Generate a row of the current-flow matrix
+    import numpy as np
+
+    solvername = {
+        "full": FullInverseLaplacian,
+        "lu": SuperLUInverseLaplacian,
+        "cg": CGInverseLaplacian,
+    }
+    n = G.number_of_nodes()
+    L = nx.laplacian_matrix(G, nodelist=range(n), weight=weight).asformat("csc")
+    L = L.astype(dtype)
+    C = solvername[solver](L, dtype=dtype)  # initialize solver
+    w = C.w  # w is the Laplacian matrix width
+    # row-by-row flow matrix
+    for u, v in sorted(sorted((u, v)) for u, v in G.edges()):
+        B = np.zeros(w, dtype=dtype)
+        c = G[u][v].get(weight, 1.0)
+        B[u % w] = c
+        B[v % w] = -c
+        # get only the rows needed in the inverse laplacian
+        # and multiply to get the flow matrix row
+        row = B @ C.get_rows(u, v)
+        yield row, (u, v)
+
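+# A minimal usage sketch (hypothetical names: ``H`` is a connected graph
+# with integer nodes 0..n-1, as the callers in this package arrange via
+# RCM relabeling, and ``s``, ``t`` are any two node indices); up to sign,
+# row.item(s) - row.item(t) is the current through edge (u, v) when a
+# unit current enters the network at s and leaves at t:
+#
+#     for row, (u, v) in flow_matrix_row(H, weight=None, solver="lu"):
+#         current_st = row.item(s) - row.item(t)
+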
+
+# Class to compute the inverse Laplacian only for specified rows.
+# Allows computation of the current-flow matrix without storing the
+# entire inverse Laplacian matrix.
+class InverseLaplacian:
+    def __init__(self, L, width=None, dtype=None):
+        global np
+        import numpy as np
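+        # ``global`` rebinds ``np`` at module scope, so the solver
+        # subclasses defined below can use ``np`` in their methods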
+
+        (n, n) = L.shape
+        self.dtype = dtype
+        self.n = n
+        if width is None:
+            self.w = self.width(L)
+        else:
+            self.w = width
+        self.C = np.zeros((self.w, n), dtype=dtype)
+        self.L1 = L[1:, 1:]
+        self.init_solver(L)
+
+    def init_solver(self, L):
+        pass
+
+    def solve(self, r):
+        raise nx.NetworkXError("Implement solver")
+
+    def solve_inverse(self, r):
+        raise nx.NetworkXError("Implement solver")
+
+    def get_rows(self, r1, r2):
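+        # fill rows r1..r2 of the inverse Laplacian into the width-w
+        # circular buffer C (column 0, the grounded node, stays zero)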
+        for r in range(r1, r2 + 1):
+            self.C[r % self.w, 1:] = self.solve_inverse(r)
+        return self.C
+
+    def get_row(self, r):
+        self.C[r % self.w, 1:] = self.solve_inverse(r)
+        return self.C[r % self.w]
+
+    def width(self, L):
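+        # bandwidth of L: the largest per-row span from the first to the
+        # last nonzero entry, plus one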
+        m = 0
+        for i, row in enumerate(L):
+            w = 0
+            y = np.nonzero(row)[-1]
+            if len(y) > 0:
+                v = y - i
+                w = v.max() - v.min() + 1
+                m = max(w, m)
+        return m
+
+
+class FullInverseLaplacian(InverseLaplacian):
+    def init_solver(self, L):
+        self.IL = np.zeros(L.shape, dtype=self.dtype)
+        self.IL[1:, 1:] = np.linalg.inv(self.L1.todense())
+
+    def solve(self, rhs):
+        s = np.zeros(rhs.shape, dtype=self.dtype)
+        s = self.IL @ rhs
+        return s
+
+    def solve_inverse(self, r):
+        return self.IL[r, 1:]
+
+
+class SuperLUInverseLaplacian(InverseLaplacian):
+    def init_solver(self, L):
+        import scipy as sp
+
+        self.lusolve = sp.sparse.linalg.factorized(self.L1.tocsc())
+
+    def solve_inverse(self, r):
+        rhs = np.zeros(self.n, dtype=self.dtype)
+        rhs[r] = 1
+        return self.lusolve(rhs[1:])
+
+    def solve(self, rhs):
+        s = np.zeros(rhs.shape, dtype=self.dtype)
+        s[1:] = self.lusolve(rhs[1:])
+        return s
+
+
+class CGInverseLaplacian(InverseLaplacian):
+    def init_solver(self, L):
+        global sp
+        import scipy as sp
+
+        ilu = sp.sparse.linalg.spilu(self.L1.tocsc())
+        n = self.n - 1
+        self.M = sp.sparse.linalg.LinearOperator(shape=(n, n), matvec=ilu.solve)
+
+    def solve(self, rhs):
+        s = np.zeros(rhs.shape, dtype=self.dtype)
+        s[1:] = sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0]
+        return s
+
+    def solve_inverse(self, r):
+        rhs = np.zeros(self.n, self.dtype)
+        rhs[r] = 1
+        return sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0]
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/group.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/group.py
new file mode 100644
index 00000000..7c48742a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/group.py
@@ -0,0 +1,787 @@
+"""Group centrality measures."""
+
+from copy import deepcopy
+
+import networkx as nx
+from networkx.algorithms.centrality.betweenness import (
+    _accumulate_endpoints,
+    _single_source_dijkstra_path_basic,
+    _single_source_shortest_path_basic,
+)
+from networkx.utils.decorators import not_implemented_for
+
+__all__ = [
+    "group_betweenness_centrality",
+    "group_closeness_centrality",
+    "group_degree_centrality",
+    "group_in_degree_centrality",
+    "group_out_degree_centrality",
+    "prominent_group",
+]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def group_betweenness_centrality(G, C, normalized=True, weight=None, endpoints=False):
+    r"""Compute the group betweenness centrality for a group of nodes.
+
+    Group betweenness centrality of a group of nodes $C$ is the sum of the
+    fraction of all-pairs shortest paths that pass through any vertex in $C$
+
+    .. math::
+
+       c_B(C) =\sum_{s,t \in V} \frac{\sigma(s, t|C)}{\sigma(s, t)}
+
+    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
+    shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of
+    those paths passing through some node in group $C$. Note that
+    $(s, t)$ are not members of the group ($V-C$ is the set of nodes
+    in $V$ that are not in $C$).
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph.
+
+    C : list or set or list of lists or list of sets
+      A group or a list of groups containing nodes which belong to G, for which group betweenness
+      centrality is to be calculated.
+
+    normalized : bool, optional (default=True)
+      If True, group betweenness is normalized by `1/((|V|-|C|)(|V|-|C|-1))`
+      where `|V|` is the number of nodes in G and `|C|` is the number of nodes in C.
+
+    weight : None or string, optional (default=None)
+      If None, all edge weights are considered equal.
+      Otherwise holds the name of the edge attribute used as weight.
+      The weight of an edge is treated as the length or distance between the two sides.
+
+    endpoints : bool, optional (default=False)
+      If True include the endpoints in the shortest path counts.
+
+    Raises
+    ------
+    NodeNotFound
+       If node(s) in C are not present in G.
+
+    Returns
+    -------
+    betweenness : list of floats or float
+       If C is a single group then return a float. If C is a list with
+       several groups then return a list of group betweenness centralities.
+
+    See Also
+    --------
+    betweenness_centrality
+
+    Notes
+    -----
+    Group betweenness centrality is described in [1]_ and its importance discussed in [3]_.
+    The initial implementation of the algorithm is mentioned in [2]_. This function uses
+    an improved algorithm presented in [4]_.
+
+    The number of nodes in the group must be a maximum of n - 2 where `n`
+    is the total number of nodes in the graph.
+
+    For weighted graphs the edge weights must be greater than zero.
+    Zero edge weights can produce an infinite number of equal length
+    paths between pairs of nodes.
+
+    The total number of paths between source and target is counted
+    differently for directed and undirected graphs. Directed paths
+    between "u" and "v" are counted as two possible paths (one each
+    direction) while undirected paths between "u" and "v" are counted
+    as one path. Said another way, the sum in the expression above is
+    over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs.
+
+
+    References
+    ----------
+    .. [1] M G Everett and S P Borgatti:
+       The Centrality of Groups and Classes.
+       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
+       http://www.analytictech.com/borgatti/group_centrality.htm
+    .. [2] Ulrik Brandes:
+       On Variants of Shortest-Path Betweenness
+       Centrality and their Generic Computation.
+       Social Networks 30(2):136-145, 2008.
+       http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.72.9610&rep=rep1&type=pdf
+    .. [3] Sourav Medya et. al.:
+       Group Centrality Maximization via Network Design.
+       SIAM International Conference on Data Mining, SDM 2018, 126–134.
+       https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf
+    .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev.
+       "Fast algorithm for successive computation of group betweenness centrality."
+       https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709
+
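+    Examples
+    --------
+    A minimal usage sketch (values not shown; passing a list of groups
+    returns a list of group betweenness values instead of a single float)::
+
+        G = nx.path_graph(6)
+        gbc = nx.group_betweenness_centrality(G, [2, 3])
+        gbc_per_group = nx.group_betweenness_centrality(G, [[1, 2], [3, 4]])
+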
+    """
+    GBC = []  # initialize betweenness
+    list_of_groups = True
+    # check whether C contains one group or many groups
+    if any(el in G for el in C):
+        C = [C]
+        list_of_groups = False
+    set_v = {node for group in C for node in group}
+    if set_v - G.nodes:  # element(s) of C not in G
+        raise nx.NodeNotFound(f"The node(s) {set_v - G.nodes} are in C but not in G.")
+
+    # pre-processing
+    PB, sigma, D = _group_preprocessing(G, set_v, weight)
+
+    # the algorithm for each group
+    for group in C:
+        group = set(group)  # set of nodes in group
+        # initialize the matrices of the sigma and the PB
+        GBC_group = 0
+        sigma_m = deepcopy(sigma)
+        PB_m = deepcopy(PB)
+        sigma_m_v = deepcopy(sigma_m)
+        PB_m_v = deepcopy(PB_m)
+        for v in group:
+            GBC_group += PB_m[v][v]
+            for x in group:
+                for y in group:
+                    dxvy = 0
+                    dxyv = 0
+                    dvxy = 0
+                    if not (
+                        sigma_m[x][y] == 0 or sigma_m[x][v] == 0 or sigma_m[v][y] == 0
+                    ):
+                        if D[x][v] == D[x][y] + D[y][v]:
+                            dxyv = sigma_m[x][y] * sigma_m[y][v] / sigma_m[x][v]
+                        if D[x][y] == D[x][v] + D[v][y]:
+                            dxvy = sigma_m[x][v] * sigma_m[v][y] / sigma_m[x][y]
+                        if D[v][y] == D[v][x] + D[x][y]:
+                            dvxy = sigma_m[v][x] * sigma_m[x][y] / sigma_m[v][y]
+                    sigma_m_v[x][y] = sigma_m[x][y] * (1 - dxvy)
+                    PB_m_v[x][y] = PB_m[x][y] - PB_m[x][y] * dxvy
+                    if y != v:
+                        PB_m_v[x][y] -= PB_m[x][v] * dxyv
+                    if x != v:
+                        PB_m_v[x][y] -= PB_m[v][y] * dvxy
+            sigma_m, sigma_m_v = sigma_m_v, sigma_m
+            PB_m, PB_m_v = PB_m_v, PB_m
+
+        # endpoints
+        v, c = len(G), len(group)
+        if not endpoints:
+            scale = 0
+            # if the graph is connected then subtract the endpoints from
+            # the count for all the nodes in the graph. else count how many
+            # nodes are connected to the group's nodes and subtract that.
+            if nx.is_directed(G):
+                if nx.is_strongly_connected(G):
+                    scale = c * (2 * v - c - 1)
+            elif nx.is_connected(G):
+                scale = c * (2 * v - c - 1)
+            if scale == 0:
+                for group_node1 in group:
+                    for node in D[group_node1]:
+                        if node != group_node1:
+                            if node in group:
+                                scale += 1
+                            else:
+                                scale += 2
+            GBC_group -= scale
+
+        # normalized
+        if normalized:
+            scale = 1 / ((v - c) * (v - c - 1))
+            GBC_group *= scale
+
+        # If undirected then count only the undirected edges
+        elif not G.is_directed():
+            GBC_group /= 2
+
+        GBC.append(GBC_group)
+    if list_of_groups:
+        return GBC
+    return GBC[0]
+
+
+def _group_preprocessing(G, set_v, weight):
+    sigma = {}
+    delta = {}
+    D = {}
+    betweenness = dict.fromkeys(G, 0)
+    for s in G:
+        if weight is None:  # use BFS
+            S, P, sigma[s], D[s] = _single_source_shortest_path_basic(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma[s], D[s] = _single_source_dijkstra_path_basic(G, s, weight)
+        betweenness, delta[s] = _accumulate_endpoints(betweenness, S, P, sigma[s], s)
+        for i in delta[s]:  # add the paths from s to i and rescale sigma
+            if s != i:
+                delta[s][i] += 1
+            if weight is not None:
+                sigma[s][i] = sigma[s][i] / 2
+    # building the path betweenness matrix only for nodes that appear in the group
+    PB = dict.fromkeys(G)
+    for group_node1 in set_v:
+        PB[group_node1] = dict.fromkeys(G, 0.0)
+        for group_node2 in set_v:
+            if group_node2 not in D[group_node1]:
+                continue
+            for node in G:
+                # only count node if it is connected to both group nodes
+                if group_node2 in D[node] and group_node1 in D[node]:
+                    if (
+                        D[node][group_node2]
+                        == D[node][group_node1] + D[group_node1][group_node2]
+                    ):
+                        PB[group_node1][group_node2] += (
+                            delta[node][group_node2]
+                            * sigma[node][group_node1]
+                            * sigma[group_node1][group_node2]
+                            / sigma[node][group_node2]
+                        )
+    return PB, sigma, D
+
+
+@nx._dispatchable(edge_attrs="weight")
+def prominent_group(
+    G, k, weight=None, C=None, endpoints=False, normalized=True, greedy=False
+):
+    r"""Find the prominent group of size $k$ in graph $G$. The prominence of the
+    group is evaluated by the group betweenness centrality.
+
+    Group betweenness centrality of a group of nodes $C$ is the sum of the
+    fraction of all-pairs shortest paths that pass through any vertex in $C$
+
+    .. math::
+
+       c_B(C) =\sum_{s,t \in V} \frac{\sigma(s, t|C)}{\sigma(s, t)}
+
+    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
+    shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of
+    those paths passing through some node in group $C$. Note that
+    $(s, t)$ are not members of the group ($V-C$ is the set of nodes
+    in $V$ that are not in $C$).
+
+    Parameters
+    ----------
+    G : graph
+       A NetworkX graph.
+
+    k : int
+       The number of nodes in the group.
+
+    normalized : bool, optional (default=True)
+       If True, group betweenness is normalized by ``1/((|V|-|C|)(|V|-|C|-1))``
+       where ``|V|`` is the number of nodes in G and ``|C|`` is the number of
+       nodes in C.
+
+    weight : None or string, optional (default=None)
+       If None, all edge weights are considered equal.
+       Otherwise holds the name of the edge attribute used as weight.
+       The weight of an edge is treated as the length or distance between the two sides.
+
+    endpoints : bool, optional (default=False)
+       If True include the endpoints in the shortest path counts.
+
+    C : list or set, optional (default=None)
+       list of nodes which won't be candidates of the prominent group.
+
+    greedy : bool, optional (default=False)
+       If True, use a naive greedy algorithm to find a possibly non-optimal
+       prominent group. For scale-free networks the results are negligibly
+       below the optimal results.
+
+    Raises
+    ------
+    NodeNotFound
+       If node(s) in C are not present in G.
+
+    Returns
+    -------
+    max_GBC : float
+       The group betweenness centrality of the prominent group.
+
+    max_group : list
+        The list of nodes in the prominent group.
+
+    See Also
+    --------
+    betweenness_centrality, group_betweenness_centrality
+
+    Notes
+    -----
+    Group betweenness centrality is described in [1]_ and its importance discussed in [3]_.
+    The algorithm is described in [2]_ and is based on techniques mentioned in [4]_.
+
+    The number of nodes in the group must be a maximum of ``n - 2`` where ``n``
+    is the total number of nodes in the graph.
+
+    For weighted graphs the edge weights must be greater than zero.
+    Zero edge weights can produce an infinite number of equal length
+    paths between pairs of nodes.
+
+    The total number of paths between source and target is counted
+    differently for directed and undirected graphs. Directed paths
+    between "u" and "v" are counted as two possible paths (one each
+    direction) while undirected paths between "u" and "v" are counted
+    as one path. Said another way, the sum in the expression above is
+    over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs.
+
+    References
+    ----------
+    .. [1] M G Everett and S P Borgatti:
+       The Centrality of Groups and Classes.
+       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
+       http://www.analytictech.com/borgatti/group_centrality.htm
+    .. [2] Rami Puzis, Yuval Elovici, and Shlomi Dolev:
+       "Finding the Most Prominent Group in Complex Networks"
+       AI communications 20(4): 287-296, 2007.
+       https://www.researchgate.net/profile/Rami_Puzis2/publication/220308855
+    .. [3] Sourav Medya et. al.:
+       Group Centrality Maximization via Network Design.
+       SIAM International Conference on Data Mining, SDM 2018, 126–134.
+       https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf
+    .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev.
+       "Fast algorithm for successive computation of group betweenness centrality."
+       https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709
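+
+    Examples
+    --------
+    A minimal usage sketch (values not shown; the function returns the group
+    betweenness of the selected group together with the list of its nodes)::
+
+        G = nx.path_graph(8)
+        max_GBC, max_group = nx.prominent_group(G, k=2)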
+    """
+    import numpy as np
+    import pandas as pd
+
+    if C is not None:
+        C = set(C)
+        if C - G.nodes:  # element(s) of C not in G
+            raise nx.NodeNotFound(f"The node(s) {C - G.nodes} are in C but not in G.")
+        nodes = list(G.nodes - C)
+    else:
+        nodes = list(G.nodes)
+    DF_tree = nx.Graph()
+    DF_tree.__networkx_cache__ = None  # Disable caching
+    PB, sigma, D = _group_preprocessing(G, nodes, weight)
+    betweenness = pd.DataFrame.from_dict(PB)
+    if C is not None:
+        for node in C:
+            # remove from the betweenness all the nodes not part of the group
+            betweenness.drop(index=node, inplace=True)
+            betweenness.drop(columns=node, inplace=True)
+    CL = [node for _, node in sorted(zip(np.diag(betweenness), nodes), reverse=True)]
+    max_GBC = 0
+    max_group = []
+    DF_tree.add_node(
+        1,
+        CL=CL,
+        betweenness=betweenness,
+        GBC=0,
+        GM=[],
+        sigma=sigma,
+        cont=dict(zip(nodes, np.diag(betweenness))),
+    )
+
+    # the algorithm
+    DF_tree.nodes[1]["heu"] = 0
+    for i in range(k):
+        DF_tree.nodes[1]["heu"] += DF_tree.nodes[1]["cont"][DF_tree.nodes[1]["CL"][i]]
+    max_GBC, DF_tree, max_group = _dfbnb(
+        G, k, DF_tree, max_GBC, 1, D, max_group, nodes, greedy
+    )
+
+    v = len(G)
+    if not endpoints:
+        scale = 0
+        # if the graph is connected then subtract the endpoints from
+        # the count for all the nodes in the graph. else count how many
+        # nodes are connected to the group's nodes and subtract that.
+        if nx.is_directed(G):
+            if nx.is_strongly_connected(G):
+                scale = k * (2 * v - k - 1)
+        elif nx.is_connected(G):
+            scale = k * (2 * v - k - 1)
+        if scale == 0:
+            for group_node1 in max_group:
+                for node in D[group_node1]:
+                    if node != group_node1:
+                        if node in max_group:
+                            scale += 1
+                        else:
+                            scale += 2
+        max_GBC -= scale
+
+    # normalized
+    if normalized:
+        scale = 1 / ((v - k) * (v - k - 1))
+        max_GBC *= scale
+
+    # If undirected then count only the undirected edges
+    elif not G.is_directed():
+        max_GBC /= 2
+    max_GBC = float(f"{max_GBC:.2f}")
+    return max_GBC, max_group
+
+
+def _dfbnb(G, k, DF_tree, max_GBC, root, D, max_group, nodes, greedy):
+    # stopping condition - if we found a group of size k with a higher GBC,
+    # record it as the new best
+    if len(DF_tree.nodes[root]["GM"]) == k and DF_tree.nodes[root]["GBC"] > max_GBC:
+        return DF_tree.nodes[root]["GBC"], DF_tree, DF_tree.nodes[root]["GM"]
+    # stopping condition - prune if the group already has k members, if the
+    # candidate list has at most k - |GM| nodes left, or if the GBC plus the
+    # heuristic bound does not exceed the maximal GBC found so far
+    if (
+        len(DF_tree.nodes[root]["GM"]) == k
+        or len(DF_tree.nodes[root]["CL"]) <= k - len(DF_tree.nodes[root]["GM"])
+        or DF_tree.nodes[root]["GBC"] + DF_tree.nodes[root]["heu"] <= max_GBC
+    ):
+        return max_GBC, DF_tree, max_group
+
+    # finding the heuristic of both children
+    node_p, node_m, DF_tree = _heuristic(k, root, DF_tree, D, nodes, greedy)
+
+    # find the child with the larger heuristic + GBC and expand that node
+    # first; if greedy, only expand the plus node
+    if greedy:
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
+        )
+
+    elif (
+        DF_tree.nodes[node_p]["GBC"] + DF_tree.nodes[node_p]["heu"]
+        > DF_tree.nodes[node_m]["GBC"] + DF_tree.nodes[node_m]["heu"]
+    ):
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
+        )
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy
+        )
+    else:
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy
+        )
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
+        )
+    return max_GBC, DF_tree, max_group
+
+
+def _heuristic(k, root, DF_tree, D, nodes, greedy):
+    import numpy as np
+
+    # This helper function adds two nodes to DF_tree - a left child and a
+    # right child - and computes their heuristic, CL, GBC, and GM
+    node_p = DF_tree.number_of_nodes() + 1
+    node_m = DF_tree.number_of_nodes() + 2
+    added_node = DF_tree.nodes[root]["CL"][0]
+
+    # adding the plus node
+    DF_tree.add_nodes_from([(node_p, deepcopy(DF_tree.nodes[root]))])
+    DF_tree.nodes[node_p]["GM"].append(added_node)
+    DF_tree.nodes[node_p]["GBC"] += DF_tree.nodes[node_p]["cont"][added_node]
+    root_node = DF_tree.nodes[root]
+    for x in nodes:
+        for y in nodes:
+            dxvy = 0
+            dxyv = 0
+            dvxy = 0
+            if not (
+                root_node["sigma"][x][y] == 0
+                or root_node["sigma"][x][added_node] == 0
+                or root_node["sigma"][added_node][y] == 0
+            ):
+                if D[x][added_node] == D[x][y] + D[y][added_node]:
+                    dxyv = (
+                        root_node["sigma"][x][y]
+                        * root_node["sigma"][y][added_node]
+                        / root_node["sigma"][x][added_node]
+                    )
+                if D[x][y] == D[x][added_node] + D[added_node][y]:
+                    dxvy = (
+                        root_node["sigma"][x][added_node]
+                        * root_node["sigma"][added_node][y]
+                        / root_node["sigma"][x][y]
+                    )
+                if D[added_node][y] == D[added_node][x] + D[x][y]:
+                    dvxy = (
+                        root_node["sigma"][added_node][x]
+                        * root_node["sigma"][x][y]
+                        / root_node["sigma"][added_node][y]
+                    )
+            DF_tree.nodes[node_p]["sigma"][x][y] = root_node["sigma"][x][y] * (1 - dxvy)
+            DF_tree.nodes[node_p]["betweenness"].loc[y, x] = (
+                root_node["betweenness"][x][y] - root_node["betweenness"][x][y] * dxvy
+            )
+            if y != added_node:
+                DF_tree.nodes[node_p]["betweenness"].loc[y, x] -= (
+                    root_node["betweenness"][x][added_node] * dxyv
+                )
+            if x != added_node:
+                DF_tree.nodes[node_p]["betweenness"].loc[y, x] -= (
+                    root_node["betweenness"][added_node][y] * dvxy
+                )
+
+    DF_tree.nodes[node_p]["CL"] = [
+        node
+        for _, node in sorted(
+            zip(np.diag(DF_tree.nodes[node_p]["betweenness"]), nodes), reverse=True
+        )
+        if node not in DF_tree.nodes[node_p]["GM"]
+    ]
+    DF_tree.nodes[node_p]["cont"] = dict(
+        zip(nodes, np.diag(DF_tree.nodes[node_p]["betweenness"]))
+    )
+    DF_tree.nodes[node_p]["heu"] = 0
+    for i in range(k - len(DF_tree.nodes[node_p]["GM"])):
+        DF_tree.nodes[node_p]["heu"] += DF_tree.nodes[node_p]["cont"][
+            DF_tree.nodes[node_p]["CL"][i]
+        ]
+
+    # adding the minus node - the first node in the CL is not inserted into GM
+    # The minus node is added only if the algorithm is not greedy
+    if not greedy:
+        DF_tree.add_nodes_from([(node_m, deepcopy(DF_tree.nodes[root]))])
+        DF_tree.nodes[node_m]["CL"].pop(0)
+        DF_tree.nodes[node_m]["cont"].pop(added_node)
+        DF_tree.nodes[node_m]["heu"] = 0
+        for i in range(k - len(DF_tree.nodes[node_m]["GM"])):
+            DF_tree.nodes[node_m]["heu"] += DF_tree.nodes[node_m]["cont"][
+                DF_tree.nodes[node_m]["CL"][i]
+            ]
+    else:
+        node_m = None
+
+    return node_p, node_m, DF_tree
+
+
+@nx._dispatchable(edge_attrs="weight")
+def group_closeness_centrality(G, S, weight=None):
+    r"""Compute the group closeness centrality for a group of nodes.
+
+    Group closeness centrality of a group of nodes $S$ is a measure
+    of how close the group is to the other nodes in the graph.
+
+    .. math::
+
+       c_{close}(S) = \frac{|V-S|}{\sum_{v \in V-S} d_{S, v}}
+
+       d_{S, v} = \min_{u \in S} (d_{u, v})
+
+    where $V$ is the set of nodes, $d_{S, v}$ is the distance of
+    the group $S$ from $v$ defined as above. ($V-S$ is the set of nodes
+    in $V$ that are not in $S$).
+
+    Parameters
+    ----------
+    G : graph
+       A NetworkX graph.
+
+    S : list or set
+       S is a group of nodes which belong to G, for which group closeness
+       centrality is to be calculated.
+
+    weight : None or string, optional (default=None)
+       If None, all edge weights are considered equal.
+       Otherwise holds the name of the edge attribute used as weight.
+       The weight of an edge is treated as the length or distance between the two sides.
+
+    Raises
+    ------
+    NodeNotFound
+       If node(s) in S are not present in G.
+
+    Returns
+    -------
+    closeness : float
+       Group closeness centrality of the group S.
+
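+    Examples
+    --------
+    A minimal sketch on a path graph: the non-group nodes ``0`` and ``1``
+    lie at distances 2 and 1 from the group ``{2, 3}``, so the group
+    closeness is $2 / (2 + 1)$:
+
+    >>> G = nx.path_graph(4)
+    >>> round(nx.group_closeness_centrality(G, [2, 3]), 3)
+    0.667
+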
+    See Also
+    --------
+    closeness_centrality
+
+    Notes
+    -----
+    The measure was introduced in [1]_.
+    The formula implemented here is described in [2]_.
+
+    Higher values of closeness indicate greater centrality.
+
+    It is assumed that 1 / 0 is 0 (required in the case of directed graphs,
+    or when a shortest path length is 0).
+
+    The number of nodes in the group must be at most n - 1, where `n`
+    is the total number of nodes in the graph.
+
+    For directed graphs, the incoming distance is utilized here. To use the
+    outward distance, act on `G.reverse()`.
+
+    For weighted graphs the edge weights must be greater than zero.
+    Zero edge weights can produce an infinite number of equal length
+    paths between pairs of nodes.
+
+    References
+    ----------
+    .. [1] M G Everett and S P Borgatti:
+       The Centrality of Groups and Classes.
+       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
+       http://www.analytictech.com/borgatti/group_centrality.htm
+    .. [2] J. Zhao et al.:
+       Measuring and Maximizing Group Closeness Centrality over
+       Disk Resident Graphs.
+       WWW Conference Proceedings, 2014. 689-694.
+       https://doi.org/10.1145/2567948.2579356
+    """
+    if G.is_directed():
+        G = G.reverse()  # reverse view
+    closeness = 0  # initialize to 0
+    V = set(G)  # set of nodes in G
+    S = set(S)  # set of nodes in group S
+    V_S = V - S  # set of nodes in V but not S
+    shortest_path_lengths = nx.multi_source_dijkstra_path_length(G, S, weight=weight)
+    # accumulation
+    for v in V_S:
+        try:
+            closeness += shortest_path_lengths[v]
+        except KeyError:  # no path exists
+            closeness += 0
+    try:
+        closeness = len(V_S) / closeness
+    except ZeroDivisionError:  # 1 / 0 assumed as 0
+        closeness = 0
+    return closeness
+
+
+@nx._dispatchable
+def group_degree_centrality(G, S):
+    """Compute the group degree centrality for a group of nodes.
+
+    Group degree centrality of a group of nodes $S$ is the fraction
+    of non-group members connected to group members.
+
+    Parameters
+    ----------
+    G : graph
+       A NetworkX graph.
+
+    S : list or set
+       S is a group of nodes which belong to G, for which group degree
+       centrality is to be calculated.
+
+    Raises
+    ------
+    NetworkXError
+       If node(s) in S are not in G.
+
+    Returns
+    -------
+    centrality : float
+       Group degree centrality of the group S.
+
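+    Examples
+    --------
+    A minimal sketch on a star graph, where the centre alone is adjacent
+    to every other node:
+
+    >>> G = nx.star_graph(4)
+    >>> nx.group_degree_centrality(G, [0])
+    1.0
+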
+    See Also
+    --------
+    degree_centrality
+    group_in_degree_centrality
+    group_out_degree_centrality
+
+    Notes
+    -----
+    The measure was introduced in [1]_.
+
+    The number of nodes in the group must be at most n - 1, where `n`
+    is the total number of nodes in the graph.
+
+    References
+    ----------
+    .. [1] M G Everett and S P Borgatti:
+       The Centrality of Groups and Classes.
+       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
+       http://www.analytictech.com/borgatti/group_centrality.htm
+    """
+    centrality = len(set().union(*[set(G.neighbors(i)) for i in S]) - set(S))
+    centrality /= len(G.nodes()) - len(S)
+    return centrality
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable
+def group_in_degree_centrality(G, S):
+    """Compute the group in-degree centrality for a group of nodes.
+
+    Group in-degree centrality of a group of nodes $S$ is the fraction
+    of non-group members connected to group members by incoming edges.
+
+    Parameters
+    ----------
+    G : graph
+       A NetworkX graph.
+
+    S : list or set
+       S is a group of nodes which belong to G, for which group in-degree
+       centrality is to be calculated.
+
+    Returns
+    -------
+    centrality : float
+       Group in-degree centrality of the group S.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+       If G is undirected.
+
+    NodeNotFound
+       If node(s) in S are not in G.
+
+    See Also
+    --------
+    degree_centrality
+    group_degree_centrality
+    group_out_degree_centrality
+
+    Notes
+    -----
+    The number of nodes in the group must be at most n - 1, where `n`
+    is the total number of nodes in the graph.
+
+    In a DiGraph, `G.neighbors(i)` gives the nodes reached by an outgoing
+    edge from `i`, so for group in-degree centrality the reverse graph is used.
+    """
+    return group_degree_centrality(G.reverse(), S)
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable
+def group_out_degree_centrality(G, S):
+    """Compute the group out-degree centrality for a group of nodes.
+
+    Group out-degree centrality of a group of nodes $S$ is the fraction
+    of non-group members connected to group members by outgoing edges.
+
+    Parameters
+    ----------
+    G : graph
+       A NetworkX graph.
+
+    S : list or set
+       S is a group of nodes which belong to G, for which group out-degree
+       centrality is to be calculated.
+
+    Returns
+    -------
+    centrality : float
+       Group out-degree centrality of the group S.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+       If G is undirected.
+
+    NodeNotFound
+       If node(s) in S are not in G.
+
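+    Examples
+    --------
+    A minimal sketch contrasting the out- and in-degree variants on a small
+    DiGraph, where node ``0`` has two outgoing edges and one incoming edge:
+
+    >>> G = nx.DiGraph([(0, 1), (0, 2), (3, 0)])
+    >>> round(nx.group_out_degree_centrality(G, [0]), 3)
+    0.667
+    >>> round(nx.group_in_degree_centrality(G, [0]), 3)
+    0.333
+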
+    See Also
+    --------
+    degree_centrality
+    group_degree_centrality
+    group_in_degree_centrality
+
+    Notes
+    -----
+    The number of nodes in the group must be at most n - 1, where `n`
+    is the total number of nodes in the graph.
+
+    In a DiGraph, `G.neighbors(i)` gives the nodes reached by an outgoing
+    edge from `i`, so for group out-degree centrality the graph itself is used.
+    """
+    return group_degree_centrality(G, S)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/harmonic.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/harmonic.py
new file mode 100644
index 00000000..236e1491
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/harmonic.py
@@ -0,0 +1,89 @@
+"""Functions for computing the harmonic centrality of a graph."""
+
+from functools import partial
+
+import networkx as nx
+
+__all__ = ["harmonic_centrality"]
+
+
+@nx._dispatchable(edge_attrs="distance")
+def harmonic_centrality(G, nbunch=None, distance=None, sources=None):
+    r"""Compute harmonic centrality for nodes.
+
+    Harmonic centrality [1]_ of a node `u` is the sum of the reciprocals
+    of the shortest path distances from all other nodes to `u`
+
+    .. math::
+
+        C(u) = \sum_{v \neq u} \frac{1}{d(v, u)}
+
+    where `d(v, u)` is the shortest-path distance between `v` and `u`.
+
+    If `sources` is given as an argument, the returned harmonic centrality
+    values are calculated as the sum of the reciprocals of the shortest
+    path distances from the nodes specified in `sources` to `u` instead
+    of from all nodes to `u`.
+
+    Notice that higher values indicate higher centrality.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    nbunch : container (default: all nodes in G)
+      Container of nodes for which harmonic centrality values are calculated.
+
+    sources : container (default: all nodes in G)
+      Container of nodes `v` over which reciprocal distances are computed.
+      Nodes not in `G` are silently ignored.
+
+    distance : edge attribute key, optional (default=None)
+      Use the specified edge attribute as the edge distance in shortest
+      path calculations.  If `None`, then each edge will have distance equal to 1.
+
+    Returns
+    -------
+    nodes : dictionary
+      Dictionary of nodes with harmonic centrality as the value.
+
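+    Examples
+    --------
+    A minimal sketch on a path graph; the middle node is at distance 1
+    from both ends, while each end node is at distances 1 and 2:
+
+    >>> G = nx.path_graph(3)
+    >>> sorted(nx.harmonic_centrality(G).items())
+    [(0, 1.5), (1, 2.0), (2, 1.5)]
+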
+    See Also
+    --------
+    betweenness_centrality, load_centrality, eigenvector_centrality,
+    degree_centrality, closeness_centrality
+
+    Notes
+    -----
+    If the 'distance' keyword is set to an edge attribute key then the
+    shortest-path length will be computed using Dijkstra's algorithm with
+    that edge attribute as the edge weight.
+
+    References
+    ----------
+    .. [1] Boldi, Paolo, and Sebastiano Vigna. "Axioms for centrality."
+           Internet Mathematics 10.3-4 (2014): 222-262.
+    """
+
+    nbunch = set(G.nbunch_iter(nbunch) if nbunch is not None else G.nodes)
+    sources = set(G.nbunch_iter(sources) if sources is not None else G.nodes)
+
+    centrality = {u: 0 for u in nbunch}
+
+    transposed = False
+    if len(nbunch) < len(sources):
+        transposed = True
+        nbunch, sources = sources, nbunch
+        if nx.is_directed(G):
+            G = nx.reverse(G, copy=False)
+
+    spl = partial(nx.shortest_path_length, G, weight=distance)
+    for v in sources:
+        dist = spl(v)
+        for u in nbunch.intersection(dist):
+            d = dist[u]
+            if d == 0:  # handle u == v and edges with 0 weight
+                continue
+            centrality[v if transposed else u] += 1 / d
+
+    return centrality
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/katz.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/katz.py
new file mode 100644
index 00000000..4bd087bc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/katz.py
@@ -0,0 +1,331 @@
+"""Katz centrality."""
+
+import math
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["katz_centrality", "katz_centrality_numpy"]
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def katz_centrality(
+    G,
+    alpha=0.1,
+    beta=1.0,
+    max_iter=1000,
+    tol=1.0e-6,
+    nstart=None,
+    normalized=True,
+    weight=None,
+):
+    r"""Compute the Katz centrality for the nodes of the graph G.
+
+    Katz centrality computes the centrality for a node based on the centrality
+    of its neighbors. It is a generalization of the eigenvector centrality. The
+    Katz centrality for node $i$ is
+
+    .. math::
+
+        x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
+
+    where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$.
+
+    The parameter $\beta$ controls the initial centrality and
+
+    .. math::
+
+        \alpha < \frac{1}{\lambda_{\max}}.
+
+    Katz centrality computes the relative influence of a node within a
+    network by measuring the number of the immediate neighbors (first
+    degree nodes) and also all other nodes in the network that connect
+    to the node under consideration through these immediate neighbors.
+
+    Extra weight can be provided to immediate neighbors through the
+    parameter $\beta$.  Connections made with distant neighbors
+    are, however, penalized by an attenuation factor $\alpha$ which
+    should be strictly less than the inverse largest eigenvalue of the
+    adjacency matrix in order for the Katz centrality to be computed
+    correctly. More information is provided in [1]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph.
+
+    alpha : float, optional (default=0.1)
+      Attenuation factor
+
+    beta : scalar or dictionary, optional (default=1.0)
+      Weight attributed to the immediate neighborhood. If not a scalar, the
+      dictionary must have a value for every node.
+
+    max_iter : integer, optional (default=1000)
+      Maximum number of iterations in power method.
+
+    tol : float, optional (default=1.0e-6)
+      Error tolerance used to check convergence in power method iteration.
+
+    nstart : dictionary, optional
+      Starting value of Katz iteration for each node.
+
+    normalized : bool, optional (default=True)
+      If True normalize the resulting values.
+
+    weight : None or string, optional (default=None)
+      If None, all edge weights are considered equal.
+      Otherwise holds the name of the edge attribute used as weight.
+      In this measure the weight is interpreted as the connection strength.
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with Katz centrality as the value.
+
+    Raises
+    ------
+    NetworkXError
+       If the parameter `beta` is not a scalar but lacks a value for at least
+       one node
+
+    PowerIterationFailedConvergence
+        If the algorithm fails to converge to the specified tolerance
+        within the specified number of iterations of the power iteration
+        method.
+
+    Examples
+    --------
+    >>> import math
+    >>> G = nx.path_graph(4)
+    >>> phi = (1 + math.sqrt(5)) / 2.0  # largest eigenvalue of adj matrix
+    >>> centrality = nx.katz_centrality(G, 1 / phi - 0.01)
+    >>> for n, c in sorted(centrality.items()):
+    ...     print(f"{n} {c:.2f}")
+    0 0.37
+    1 0.60
+    2 0.60
+    3 0.37
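+
+    A ``beta`` dictionary (here just the scalar default written out for
+    every node) gives the same result:
+
+    >>> beta = dict.fromkeys(G, 1.0)
+    >>> centrality == nx.katz_centrality(G, 1 / phi - 0.01, beta=beta)
+    True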
+
+    See Also
+    --------
+    katz_centrality_numpy
+    eigenvector_centrality
+    eigenvector_centrality_numpy
+    :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
+    :func:`~networkx.algorithms.link_analysis.hits_alg.hits`
+
+    Notes
+    -----
+    Katz centrality was introduced by [2]_.
+
+    This algorithm uses the power method to find the eigenvector
+    corresponding to the largest eigenvalue of the adjacency matrix of ``G``.
+    The parameter ``alpha`` should be strictly less than the inverse of the
+    largest eigenvalue of the adjacency matrix for the algorithm to converge.
+    You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$ the largest
+    eigenvalue of the adjacency matrix.
+    The iteration will stop after ``max_iter`` iterations or an error tolerance of
+    ``number_of_nodes(G) * tol`` has been reached.
+
+    For strongly connected graphs, as $\alpha \to 1/\lambda_{\max}$, and $\beta > 0$,
+    Katz centrality approaches the results for eigenvector centrality.
+
+    For directed graphs this finds "left" eigenvectors which correspond
+    to the in-edges in the graph. For out-edge Katz centrality,
+    first reverse the graph with ``G.reverse()``.
+
+    References
+    ----------
+    .. [1] Mark E. J. Newman:
+       Networks: An Introduction.
+       Oxford University Press, USA, 2010, p. 720.
+    .. [2] Leo Katz:
+       A New Status Index Derived from Sociometric Index.
+       Psychometrika 18(1):39–43, 1953
+       https://link.springer.com/content/pdf/10.1007/BF02289026.pdf
+    """
+    if len(G) == 0:
+        return {}
+
+    nnodes = G.number_of_nodes()
+
+    if nstart is None:
+        # choose starting vector with entries of 0
+        x = {n: 0 for n in G}
+    else:
+        x = nstart
+
+    try:
+        b = dict.fromkeys(G, float(beta))
+    except (TypeError, ValueError, AttributeError) as err:
+        b = beta
+        if set(beta) != set(G):
+            raise nx.NetworkXError(
+                "beta dictionary must have a value for every node"
+            ) from err
+
+    # make up to max_iter iterations
+    for _ in range(max_iter):
+        xlast = x
+        x = dict.fromkeys(xlast, 0)
+        # do the multiplication y^T = Alpha * x^T A + Beta
+        for n in x:
+            for nbr in G[n]:
+                x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)
+        for n in x:
+            x[n] = alpha * x[n] + b[n]
+
+        # check convergence
+        error = sum(abs(x[n] - xlast[n]) for n in x)
+        if error < nnodes * tol:
+            if normalized:
+                # normalize vector
+                try:
+                    s = 1.0 / math.hypot(*x.values())
+                except ZeroDivisionError:
+                    s = 1.0
+            else:
+                s = 1
+            for n in x:
+                x[n] *= s
+            return x
+    raise nx.PowerIterationFailedConvergence(max_iter)
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None):
+    r"""Compute the Katz centrality for the graph G.
+
+    Katz centrality computes the centrality for a node based on the centrality
+    of its neighbors. It is a generalization of the eigenvector centrality. The
+    Katz centrality for node $i$ is
+
+    .. math::
+
+        x_i = \alpha \sum_{j} A_{ij} x_j + \beta,
+
+    where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$.
+
+    The parameter $\beta$ controls the initial centrality and
+
+    .. math::
+
+        \alpha < \frac{1}{\lambda_{\max}}.
+
+    Katz centrality computes the relative influence of a node within a
+    network by measuring the number of the immediate neighbors (first
+    degree nodes) and also all other nodes in the network that connect
+    to the node under consideration through these immediate neighbors.
+
+    Extra weight can be provided to immediate neighbors through the
+    parameter $\beta$.  Connections made with distant neighbors
+    are, however, penalized by an attenuation factor $\alpha$ which
+    should be strictly less than the inverse largest eigenvalue of the
+    adjacency matrix in order for the Katz centrality to be computed
+    correctly. More information is provided in [1]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    alpha : float
+      Attenuation factor
+
+    beta : scalar or dictionary, optional (default=1.0)
+      Weight attributed to the immediate neighborhood. If not a scalar, the
+      dictionary must have a value for every node.
+
+    normalized : bool
+      If True normalize the resulting values.
+
+    weight : None or string, optional
+      If None, all edge weights are considered equal.
+      Otherwise holds the name of the edge attribute used as weight.
+      In this measure the weight is interpreted as the connection strength.
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with Katz centrality as the value.
+
+    Raises
+    ------
+    NetworkXError
+       If the parameter `beta` is not a scalar but lacks a value for at least
+       one node
+
+    Examples
+    --------
+    >>> import math
+    >>> G = nx.path_graph(4)
+    >>> phi = (1 + math.sqrt(5)) / 2.0  # largest eigenvalue of adj matrix
+    >>> centrality = nx.katz_centrality_numpy(G, 1 / phi)
+    >>> for n, c in sorted(centrality.items()):
+    ...     print(f"{n} {c:.2f}")
+    0 0.37
+    1 0.60
+    2 0.60
+    3 0.37
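+
+    With ``normalized=False`` the result solves the linear system
+    $x = \alpha A^T x + \beta$ directly; a minimal consistency check:
+
+    >>> import numpy as np
+    >>> c = nx.katz_centrality_numpy(G, 0.1, normalized=False)
+    >>> x = np.array([c[n] for n in G])
+    >>> A = nx.to_numpy_array(G)
+    >>> bool(np.allclose(x, 0.1 * A.T @ x + 1.0))
+    True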
+
+    See Also
+    --------
+    katz_centrality
+    eigenvector_centrality_numpy
+    eigenvector_centrality
+    :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank`
+    :func:`~networkx.algorithms.link_analysis.hits_alg.hits`
+
+    Notes
+    -----
+    Katz centrality was introduced by [2]_.
+
+    This algorithm uses a direct linear solver to solve the above equation.
+    The parameter ``alpha`` should be strictly less than the inverse of the
+    largest eigenvalue of the adjacency matrix for there to be a solution.
+    You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$ the largest
+    eigenvalue of the adjacency matrix.
+
+    For strongly connected graphs, as $\alpha \to 1/\lambda_{\max}$, and $\beta > 0$,
+    Katz centrality approaches the results for eigenvector centrality.
+
+    For directed graphs this finds "left" eigenvectors which correspond
+    to the in-edges in the graph. For out-edge Katz centrality,
+    first reverse the graph with ``G.reverse()``.
+
+    References
+    ----------
+    .. [1] Mark E. J. Newman:
+       Networks: An Introduction.
+       Oxford University Press, USA, 2010, p. 173.
+    .. [2] Leo Katz:
+       A New Status Index Derived from Sociometric Index.
+       Psychometrika 18(1):39–43, 1953
+       https://link.springer.com/content/pdf/10.1007/BF02289026.pdf
+    """
+    import numpy as np
+
+    if len(G) == 0:
+        return {}
+    try:
+        nodelist = beta.keys()
+        if set(nodelist) != set(G):
+            raise nx.NetworkXError("beta dictionary must have a value for every node")
+        b = np.array(list(beta.values()), dtype=float)
+    except AttributeError:
+        nodelist = list(G)
+        try:
+            b = np.ones((len(nodelist), 1)) * beta
+        except (TypeError, ValueError, AttributeError) as err:
+            raise nx.NetworkXError("beta must be a number") from err
+
+    A = nx.adjacency_matrix(G, nodelist=nodelist, weight=weight).todense().T
+    n = A.shape[0]
+    centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b).squeeze()
+
+    # Normalize: rely on truediv to cast to float, then tolist to make Python numbers
+    norm = np.sign(sum(centrality)) * np.linalg.norm(centrality) if normalized else 1
+    return dict(zip(nodelist, (centrality / norm).tolist()))
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/laplacian.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/laplacian.py
new file mode 100644
index 00000000..efb6e8f6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/laplacian.py
@@ -0,0 +1,150 @@
+"""
+Laplacian centrality measures.
+"""
+
+import networkx as nx
+
+__all__ = ["laplacian_centrality"]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def laplacian_centrality(
+    G, normalized=True, nodelist=None, weight="weight", walk_type=None, alpha=0.95
+):
+    r"""Compute the Laplacian centrality for nodes in the graph `G`.
+
+    The Laplacian Centrality of a node ``i`` is measured by the drop in the
+    Laplacian Energy after deleting node ``i`` from the graph. The Laplacian Energy
+    is the sum of the squared eigenvalues of a graph's Laplacian matrix.
+
+    .. math::
+
+        C_L(u_i,G) = \frac{(\Delta E)_i}{E_L (G)} = \frac{E_L (G)-E_L (G_i)}{E_L (G)}
+
+        E_L (G) = \sum_{i=0}^n \lambda_i^2
+
+    Where $E_L (G)$ is the Laplacian energy of graph `G`,
+    $E_L (G_i)$ is the Laplacian energy of graph `G` after deleting node ``i``
+    and $\lambda_i$ are the eigenvalues of `G`'s Laplacian matrix.
+    This formula shows the normalized value. Without normalization,
+    the numerator on the right side is returned.
+
+    Parameters
+    ----------
+    G : graph
+        A networkx graph
+
+    normalized : bool (default = True)
+        If True the centrality score of each node is the drop in Laplacian
+        energy when that node is removed, expressed as a fraction of the
+        full Laplacian energy of `G`.
+        If False the centrality score for each node is the raw drop in
+        Laplacian energy when that node is removed.
+
+    nodelist : list, optional (default = None)
+        The rows and columns are ordered according to the nodes in nodelist.
+        If nodelist is None, then the ordering is produced by G.nodes().
+
+    weight: string or None, optional (default=`weight`)
+        Optional parameter `weight` to compute the Laplacian matrix.
+        The edge data key used to compute each value in the matrix.
+        If None, then each edge has weight 1.
+
+    walk_type : string or None, optional (default=None)
+        Optional parameter `walk_type` used when calling
+        :func:`directed_laplacian_matrix <networkx.directed_laplacian_matrix>`.
+        One of ``"random"``, ``"lazy"``, or ``"pagerank"``. If ``walk_type=None``
+        (the default), then a value is selected according to the properties of `G`:
+        - ``walk_type="random"`` if `G` is strongly connected and aperiodic
+        - ``walk_type="lazy"`` if `G` is strongly connected but not aperiodic
+        - ``walk_type="pagerank"`` for all other cases.
+
+    alpha : real (default = 0.95)
+        Optional parameter `alpha` used when calling
+        :func:`directed_laplacian_matrix <networkx.directed_laplacian_matrix>`.
+        (1 - alpha) is the teleportation probability used with pagerank.
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with Laplacian centrality as the value.
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> edges = [(0, 1, 4), (0, 2, 2), (2, 1, 1), (1, 3, 2), (1, 4, 2), (4, 5, 1)]
+    >>> G.add_weighted_edges_from(edges)
+    >>> sorted((v, f"{c:0.2f}") for v, c in laplacian_centrality(G).items())
+    [(0, '0.70'), (1, '0.90'), (2, '0.28'), (3, '0.22'), (4, '0.26'), (5, '0.04')]
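+
+    A sketch of the underlying bookkeeping: the normalized scores are the
+    unnormalized energy drops divided by the full Laplacian energy, which
+    for an undirected graph equals $tr(L^2)$:
+
+    >>> import numpy as np
+    >>> L = nx.laplacian_matrix(G).toarray()
+    >>> raw = laplacian_centrality(G, normalized=False)
+    >>> norm = laplacian_centrality(G)
+    >>> bool(np.allclose([raw[n] / np.trace(L @ L) for n in G], [norm[n] for n in G]))
+    True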
+
+    Notes
+    -----
+    The algorithm is implemented based on [1]_ with an extension to directed graphs
+    using the ``directed_laplacian_matrix`` function.
+
+    Raises
+    ------
+    NetworkXPointlessConcept
+        If the graph `G` is the null graph.
+    ZeroDivisionError
+        If the graph `G` has no edges (is empty) and normalization is requested.
+
+    References
+    ----------
+    .. [1] Qi, X., Fuller, E., Wu, Q., Wu, Y., and Zhang, C.-Q. (2012).
+       Laplacian centrality: A new centrality measure for weighted networks.
+       Information Sciences, 194:240-253.
+       https://math.wvu.edu/~cqzhang/Publication-files/my-paper/INS-2012-Laplacian-W.pdf
+
+    See Also
+    --------
+    :func:`~networkx.linalg.laplacianmatrix.directed_laplacian_matrix`
+    :func:`~networkx.linalg.laplacianmatrix.laplacian_matrix`
+    """
+    import numpy as np
+    import scipy as sp
+
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("null graph has no centrality defined")
+    if G.size(weight=weight) == 0:
+        if normalized:
+            raise ZeroDivisionError("graph with no edges has zero full energy")
+        return {n: 0 for n in G}
+
+    if nodelist is not None:
+        nodeset = set(G.nbunch_iter(nodelist))
+        if len(nodeset) != len(nodelist):
+            raise nx.NetworkXError("nodelist has duplicate nodes or nodes not in G")
+        nodes = nodelist + [n for n in G if n not in nodeset]
+    else:
+        nodelist = nodes = list(G)
+
+    if G.is_directed():
+        lap_matrix = nx.directed_laplacian_matrix(G, nodes, weight, walk_type, alpha)
+    else:
+        lap_matrix = nx.laplacian_matrix(G, nodes, weight).toarray()
+
+    full_energy = np.power(sp.linalg.eigh(lap_matrix, eigvals_only=True), 2).sum()
+
+    # calculate laplacian centrality
+    laplace_centralities_dict = {}
+    for i, node in enumerate(nodelist):
+        # remove row and col i from lap_matrix
+        all_but_i = list(np.arange(lap_matrix.shape[0]))
+        all_but_i.remove(i)
+        A_2 = lap_matrix[all_but_i, :][:, all_but_i]
+
+        # Adjust diagonal for removed row
+        new_diag = lap_matrix.diagonal() - abs(lap_matrix[:, i])
+        np.fill_diagonal(A_2, new_diag[all_but_i])
+
+        if len(all_but_i) > 0:  # catches degenerate case of single node
+            new_energy = np.power(sp.linalg.eigh(A_2, eigvals_only=True), 2).sum()
+        else:
+            new_energy = 0.0
+
+        lapl_cent = full_energy - new_energy
+        if normalized:
+            lapl_cent = lapl_cent / full_energy
+
+        laplace_centralities_dict[node] = float(lapl_cent)
+
+    return laplace_centralities_dict
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/load.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/load.py
new file mode 100644
index 00000000..fc46edd6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/load.py
@@ -0,0 +1,200 @@
+"""Load centrality."""
+
+from operator import itemgetter
+
+import networkx as nx
+
+__all__ = ["load_centrality", "edge_load_centrality"]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def newman_betweenness_centrality(G, v=None, cutoff=None, normalized=True, weight=None):
+    """Compute load centrality for nodes.
+
+    The load centrality of a node is the fraction of all shortest
+    paths that pass through that node.
+
+    Parameters
+    ----------
+    G : graph
+      A networkx graph.
+
+    normalized : bool, optional (default=True)
+      If True the betweenness values are normalized by
+      ``b = b / ((n - 1) * (n - 2))`` where ``n`` is the number of nodes in G.
+
+    weight : None or string, optional (default=None)
+      If None, edge weights are ignored.
+      Otherwise holds the name of the edge attribute used as weight.
+      The weight of an edge is treated as the length or distance between the two sides.
+
+    cutoff : int, optional (default=None)
+      If specified, only consider paths of length <= cutoff.
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with centrality as the value.
+
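+    Examples
+    --------
+    A minimal sketch on a path graph, where only the middle node lies on
+    shortest paths between other nodes:
+
+    >>> G = nx.path_graph(3)
+    >>> nx.load_centrality(G)
+    {0: 0.0, 1: 1.0, 2: 0.0}
+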
+    See Also
+    --------
+    betweenness_centrality
+
+    Notes
+    -----
+    Load centrality is slightly different from betweenness centrality. It
+    was originally introduced by [2]_. For this load algorithm see [1]_.
+
+    References
+    ----------
+    .. [1] Mark E. J. Newman:
+       Scientific collaboration networks. II.
+       Shortest paths, weighted networks, and centrality.
+       Physical Review E 64, 016132, 2001.
+       http://journals.aps.org/pre/abstract/10.1103/PhysRevE.64.016132
+    .. [2] Kwang-Il Goh, Byungnam Kahng and Doochul Kim
+       Universal behavior of Load Distribution in Scale-Free Networks.
+       Physical Review Letters 87(27):1–4, 2001.
+       https://doi.org/10.1103/PhysRevLett.87.278701
+    """
+    if v is not None:  # only one node
+        betweenness = 0.0
+        for source in G:
+            ubetween = _node_betweenness(G, source, cutoff, False, weight)
+            betweenness += ubetween[v] if v in ubetween else 0
+        if normalized:
+            order = G.order()
+            if order <= 2:
+                return betweenness  # no normalization b=0 for all nodes
+            betweenness *= 1.0 / ((order - 1) * (order - 2))
+    else:
+        betweenness = {}.fromkeys(G, 0.0)
+        for source in betweenness:
+            ubetween = _node_betweenness(G, source, cutoff, False, weight)
+            for vk in ubetween:
+                betweenness[vk] += ubetween[vk]
+        if normalized:
+            order = G.order()
+            if order <= 2:
+                return betweenness  # no normalization b=0 for all nodes
+            scale = 1.0 / ((order - 1) * (order - 2))
+            for v in betweenness:
+                betweenness[v] *= scale
+    return betweenness  # all nodes
+
+
+def _node_betweenness(G, source, cutoff=False, normalized=True, weight=None):
+    """Node betweenness_centrality helper:
+
+    See betweenness_centrality for what you probably want.
+    This actually computes "load" and not betweenness.
+    See https://networkx.lanl.gov/ticket/103
+
+    This calculates the load of each node for paths from a single source.
+    (The fraction of the number of shortest paths from the source that go
+    through each node.)
+
+    To get the load for a node you need to do all-pairs shortest paths.
+
+    If weight is not None then use Dijkstra for finding shortest paths.
+    """
+    # get the predecessor and path length data
+    if weight is None:
+        (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True)
+    else:
+        (pred, length) = nx.dijkstra_predecessor_and_distance(G, source, cutoff, weight)
+
+    # order the nodes by path length
+    onodes = [(l, vert) for (vert, l) in length.items()]
+    onodes.sort()
+    onodes[:] = [vert for (l, vert) in onodes if l > 0]
+
+    # initialize betweenness
+    between = {}.fromkeys(length, 1.0)
+
+    while onodes:
+        v = onodes.pop()
+        if v in pred:
+            num_paths = len(pred[v])  # Discount betweenness if more than
+            for x in pred[v]:  # one shortest path.
+                if x == source:  # stop if hit source because all remaining v
+                    break  # also have pred[v]==[source]
+                between[x] += between[v] / num_paths
+    #  remove source
+    for v in between:
+        between[v] -= 1
+    # rescale to be between 0 and 1
+    if normalized:
+        l = len(between)
+        if l > 2:
+            # scale by 1/the number of possible paths
+            scale = 1 / ((l - 1) * (l - 2))
+            for v in between:
+                between[v] *= scale
+    return between
+
+
+load_centrality = newman_betweenness_centrality
+
+
+@nx._dispatchable
+def edge_load_centrality(G, cutoff=False):
+    """Compute edge load.
+
+    WARNING: This concept of edge load has not been analysed
+    or discussed outside of NetworkX that we know of.
+    It is based loosely on load_centrality in the sense that
+    it counts the number of shortest paths which cross each edge.
+    This function is for demonstration and testing purposes.
+
+    Parameters
+    ----------
+    G : graph
+        A networkx graph
+
+    cutoff : int, optional (default=False)
+        If specified, only consider paths of length <= cutoff.
+
+    Returns
+    -------
+    A dict keyed by edge 2-tuple to the number of shortest paths
+    which use that edge. Where more than one path is shortest
+    the count is divided equally among paths.
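+
+    Examples
+    --------
+    A minimal sketch on a path graph; both orientations of every edge are
+    reported, and the middle edge carries the most shortest paths:
+
+    >>> sorted(nx.edge_load_centrality(nx.path_graph(4)).items())
+    [((0, 1), 6.0), ((1, 0), 6.0), ((1, 2), 8.0), ((2, 1), 8.0), ((2, 3), 6.0), ((3, 2), 6.0)]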
+    """
+    betweenness = {}
+    for u, v in G.edges():
+        betweenness[(u, v)] = 0.0
+        betweenness[(v, u)] = 0.0
+
+    for source in G:
+        ubetween = _edge_betweenness(G, source, cutoff=cutoff)
+        for e, ubetweenv in ubetween.items():
+            betweenness[e] += ubetweenv  # cumulative total
+    return betweenness
+
+
+def _edge_betweenness(G, source, nodes=None, cutoff=False):
+    """Edge betweenness helper."""
+    # get the predecessor data
+    (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True)
+    # order the nodes by path length
+    onodes = [n for n, d in sorted(length.items(), key=itemgetter(1))]
+    # initialize betweenness, doesn't account for any edge weights
+    between = {}
+    for u, v in G.edges(nodes):
+        between[(u, v)] = 1.0
+        between[(v, u)] = 1.0
+
+    while onodes:  # work through all paths
+        v = onodes.pop()
+        if v in pred:
+            # Discount betweenness if more than one shortest path.
+            num_paths = len(pred[v])
+            for w in pred[v]:
+                if w in pred:
+                    # Discount betweenness, mult path
+                    num_paths = len(pred[w])
+                    for x in pred[w]:
+                        between[(w, x)] += between[(v, w)] / num_paths
+                        between[(x, w)] += between[(w, v)] / num_paths
+    return between
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/percolation.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/percolation.py
new file mode 100644
index 00000000..0d4c8713
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/percolation.py
@@ -0,0 +1,128 @@
+"""Percolation centrality measures."""
+
+import networkx as nx
+from networkx.algorithms.centrality.betweenness import (
+    _single_source_dijkstra_path_basic as dijkstra,
+)
+from networkx.algorithms.centrality.betweenness import (
+    _single_source_shortest_path_basic as shortest_path,
+)
+
+__all__ = ["percolation_centrality"]
+
+
+@nx._dispatchable(node_attrs="attribute", edge_attrs="weight")
+def percolation_centrality(G, attribute="percolation", states=None, weight=None):
+    r"""Compute the percolation centrality for nodes.
+
+    Percolation centrality of a node $v$, at a given time, is defined
+    as the proportion of ‘percolated paths’ that go through that node.
+
+    This measure quantifies the relative impact of nodes based on their
+    topological connectivity, as well as their percolation states.
+
+    Percolation states of nodes are used to depict network percolation
+    scenarios (such as during infection transmission in a social network
+    of individuals, spreading of computer viruses on computer networks, or
+    transmission of disease over a network of towns) over time. In this
+    measure usually the percolation state is expressed as a decimal
+    between 0.0 and 1.0.
+
+    When all nodes are in the same percolated state this measure is
+    equivalent to betweenness centrality.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph.
+
+    attribute : None or string, optional (default='percolation')
+      Name of the node attribute to use for percolation state, used
+      if `states` is None. If a node does not have the attribute, its
+      state is set to the default value of 1. If no node has the
+      attribute, all states are 1 and the centrality measure is
+      equivalent to betweenness centrality.
+
+    states : None or dict, optional (default=None)
+      Specify percolation states for the nodes, nodes as keys states
+      as values.
+
+    weight : None or string, optional (default=None)
+      If None, all edge weights are considered equal.
+      Otherwise holds the name of the edge attribute used as weight.
+      The weight of an edge is treated as the length or distance between the two sides.
+
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with percolation centrality as the value.
+
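+    Examples
+    --------
+    A sketch of the degenerate case noted above: with identical
+    percolation states the measure reduces to betweenness centrality:
+
+    >>> import numpy as np
+    >>> G = nx.path_graph(5)
+    >>> nx.set_node_attributes(G, 0.5, "percolation")
+    >>> p = nx.percolation_centrality(G)
+    >>> b = nx.betweenness_centrality(G)
+    >>> bool(np.allclose([p[n] for n in G], [b[n] for n in G]))
+    True
+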
+    See Also
+    --------
+    betweenness_centrality
+
+    Notes
+    -----
+    The algorithm is from Mahendra Piraveenan, Mikhail Prokopenko, and
+    Liaquat Hossain [1]_. Pair dependencies are calculated and accumulated
+    using the method of [2]_.
+
+    For weighted graphs the edge weights must be greater than zero.
+    Zero edge weights can produce an infinite number of equal length
+    paths between pairs of nodes.
+
+    References
+    ----------
+    .. [1] Mahendra Piraveenan, Mikhail Prokopenko, Liaquat Hossain
+       Percolation Centrality: Quantifying Graph-Theoretic Impact of Nodes
+       during Percolation in Networks
+       http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0053095
+    .. [2] Ulrik Brandes:
+       A Faster Algorithm for Betweenness Centrality.
+       Journal of Mathematical Sociology 25(2):163-177, 2001.
+       https://doi.org/10.1080/0022250X.2001.9990249
+    """
+    percolation = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
+
+    nodes = G
+
+    if states is None:
+        states = nx.get_node_attributes(nodes, attribute, default=1)
+
+    # sum of all percolation states
+    p_sigma_x_t = 0.0
+    for v in states.values():
+        p_sigma_x_t += v
+
+    for s in nodes:
+        # single source shortest paths
+        if weight is None:  # use BFS
+            S, P, sigma, _ = shortest_path(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma, _ = dijkstra(G, s, weight)
+        # accumulation
+        percolation = _accumulate_percolation(
+            percolation, S, P, sigma, s, states, p_sigma_x_t
+        )
+
+    n = len(G)
+
+    for v in percolation:
+        percolation[v] *= 1 / (n - 2)
+
+    return percolation
+
+
+def _accumulate_percolation(percolation, S, P, sigma, s, states, p_sigma_x_t):
+    delta = dict.fromkeys(S, 0)
+    while S:
+        w = S.pop()
+        coeff = (1 + delta[w]) / sigma[w]
+        for v in P[w]:
+            delta[v] += sigma[v] * coeff
+        if w != s:
+            # percolation weight
+            pw_s_w = states[s] / (p_sigma_x_t - states[w])
+            percolation[w] += delta[w] * pw_s_w
+    return percolation
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/reaching.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/reaching.py
new file mode 100644
index 00000000..378e8a05
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/reaching.py
@@ -0,0 +1,209 @@
+"""Functions for computing reaching centrality of a node or a graph."""
+
+import networkx as nx
+from networkx.utils import pairwise
+
+__all__ = ["global_reaching_centrality", "local_reaching_centrality"]
+
+
+def _average_weight(G, path, weight=None):
+    """Returns the average weight of an edge in a weighted path.
+
+    Parameters
+    ----------
+    G : graph
+      A networkx graph.
+
+    path: list
+      A list of vertices that define the path.
+
+    weight : None or string, optional (default=None)
+      If None, edge weights are ignored.  Then the average weight of an edge
+      is assumed to be the multiplicative inverse of the length of the path.
+      Otherwise holds the name of the edge attribute used as weight.
+    """
+    path_length = len(path) - 1
+    if path_length <= 0:
+        return 0
+    if weight is None:
+        return 1 / path_length
+    total_weight = sum(G.edges[i, j][weight] for i, j in pairwise(path))
+    return total_weight / path_length
+
+
+@nx._dispatchable(edge_attrs="weight")
+def global_reaching_centrality(G, weight=None, normalized=True):
+    """Returns the global reaching centrality of a directed graph.
+
+    The *global reaching centrality* of a weighted directed graph is the
+    average over all nodes of the difference between the local reaching
+    centrality of the node and the greatest local reaching centrality of
+    any node in the graph [1]_. For more information on the local
+    reaching centrality, see :func:`local_reaching_centrality`.
+    Informally, the local reaching centrality is the proportion of the
+    graph that is reachable from the neighbors of the node.
+
+    Parameters
+    ----------
+    G : DiGraph
+        A networkx DiGraph.
+
+    weight : None or string, optional (default=None)
+        Attribute to use for edge weights. If ``None``, each edge weight
+        is assumed to be one. A higher weight implies a stronger
+        connection between nodes and a *shorter* path length.
+
+    normalized : bool, optional (default=True)
+        Whether to normalize the edge weights by the total sum of edge
+        weights.
+
+    Returns
+    -------
+    h : float
+        The global reaching centrality of the graph.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_edge(1, 2)
+    >>> G.add_edge(1, 3)
+    >>> nx.global_reaching_centrality(G)
+    1.0
+    >>> G.add_edge(3, 2)
+    >>> nx.global_reaching_centrality(G)
+    0.75
+
+    See also
+    --------
+    local_reaching_centrality
+
+    References
+    ----------
+    .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek.
+           "Hierarchy Measure for Complex Networks."
+           *PLoS ONE* 7.3 (2012): e33799.
+           https://doi.org/10.1371/journal.pone.0033799
+    """
+    if nx.is_negatively_weighted(G, weight=weight):
+        raise nx.NetworkXError("edge weights must be positive")
+    total_weight = G.size(weight=weight)
+    if total_weight <= 0:
+        raise nx.NetworkXError("Size of G must be positive")
+    # If provided, weights must be interpreted as connection strength
+    # (so higher weights are more likely to be chosen). However, the
+    # shortest path algorithms in NetworkX assume the provided "weight"
+    # is actually a distance (so edges with higher weight are less
+    # likely to be chosen). Therefore we need to invert the weights when
+    # computing shortest paths.
+    #
+    # If weight is None, we leave it as-is so that the shortest path
+    # algorithm can use a faster, unweighted algorithm.
+    if weight is not None:
+
+        def as_distance(u, v, d):
+            return total_weight / d.get(weight, 1)
+
+        shortest_paths = nx.shortest_path(G, weight=as_distance)
+    else:
+        shortest_paths = nx.shortest_path(G)
+
+    centrality = local_reaching_centrality
+    # TODO This can be trivially parallelized.
+    lrc = [
+        centrality(G, node, paths=paths, weight=weight, normalized=normalized)
+        for node, paths in shortest_paths.items()
+    ]
+
+    max_lrc = max(lrc)
+    return sum(max_lrc - c for c in lrc) / (len(G) - 1)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def local_reaching_centrality(G, v, paths=None, weight=None, normalized=True):
+    """Returns the local reaching centrality of a node in a directed
+    graph.
+
+    The *local reaching centrality* of a node in a directed graph is the
+    proportion of other nodes reachable from that node [1]_.
+
+    Parameters
+    ----------
+    G : DiGraph
+        A NetworkX DiGraph.
+
+    v : node
+        A node in the directed graph `G`.
+
+    paths : dictionary (default=None)
+        If this is not `None` it must be a dictionary representation
+        of single-source shortest paths, as computed by, for example,
+        :func:`networkx.shortest_path` with source node `v`. Use this
+        keyword argument if you intend to invoke this function many
+        times but don't want the paths to be recomputed each time.
+
+    weight : None or string, optional (default=None)
+        Attribute to use for edge weights.  If `None`, each edge weight
+        is assumed to be one. A higher weight implies a stronger
+        connection between nodes and a *shorter* path length.
+
+    normalized : bool, optional (default=True)
+        Whether to normalize the edge weights by the total sum of edge
+        weights.
+
+    Returns
+    -------
+    h : float
+        The local reaching centrality of the node ``v`` in the graph
+        ``G``.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_edges_from([(1, 2), (1, 3)])
+    >>> nx.local_reaching_centrality(G, 3)
+    0.0
+    >>> G.add_edge(3, 2)
+    >>> nx.local_reaching_centrality(G, 3)
+    0.5
+
+    See also
+    --------
+    global_reaching_centrality
+
+    References
+    ----------
+    .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek.
+           "Hierarchy Measure for Complex Networks."
+           *PLoS ONE* 7.3 (2012): e33799.
+           https://doi.org/10.1371/journal.pone.0033799
+    """
+    # Corner case: graph with single node containing a self-loop
+    if (total_weight := G.size(weight=weight)) > 0 and len(G) == 1:
+        raise nx.NetworkXError(
+            "local_reaching_centrality of a single node with self-loop not well-defined"
+        )
+    if paths is None:
+        if nx.is_negatively_weighted(G, weight=weight):
+            raise nx.NetworkXError("edge weights must be positive")
+        if total_weight <= 0:
+            raise nx.NetworkXError("Size of G must be positive")
+        if weight is not None:
+            # Interpret weights as lengths.
+            def as_distance(u, v, d):
+                return total_weight / d.get(weight, 1)
+
+            paths = nx.shortest_path(G, source=v, weight=as_distance)
+        else:
+            paths = nx.shortest_path(G, source=v)
+    # If the graph is unweighted and directed, simply return the proportion
+    # of nodes reachable from the source node ``v``.
+    if weight is None and G.is_directed():
+        return (len(paths) - 1) / (len(G) - 1)
+    if normalized and weight is not None:
+        norm = G.size(weight=weight) / G.size()
+    else:
+        norm = 1
+    # TODO This can be trivially parallelized.
+    avgw = (_average_weight(G, path, weight=weight) for path in paths.values())
+    sum_avg_weight = sum(avgw) / norm
+    return sum_avg_weight / (len(G) - 1)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/second_order.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/second_order.py
new file mode 100644
index 00000000..35583cd6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/second_order.py
@@ -0,0 +1,141 @@
+"""Copyright (c) 2015 – Thomson Licensing, SAS
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted (subject to the limitations in the
+disclaimer below) provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+* Neither the name of Thomson Licensing, or Technicolor, nor the names
+of its contributors may be used to endorse or promote products derived
+from this software without specific prior written permission.
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
+GRANTED BY THIS LICENSE.  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+# Authors: Erwan Le Merrer (erwan.lemerrer@technicolor.com)
+
+__all__ = ["second_order_centrality"]
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def second_order_centrality(G, weight="weight"):
+    """Compute the second order centrality for nodes of G.
+
+    The second order centrality of a given node is the standard deviation of
+    the return times to that node of a perpetual random walk on G.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX connected and undirected graph.
+
+    weight : string or None, optional (default="weight")
+        The name of an edge attribute that holds the numerical value
+        used as a weight. If None then each edge has weight 1.
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary keyed by node with second order centrality as the value.
+
+    Examples
+    --------
+    >>> G = nx.star_graph(10)
+    >>> soc = nx.second_order_centrality(G)
+    >>> print(sorted(soc.items(), key=lambda x: x[1])[0][0])  # pick first id
+    0
+
+    Raises
+    ------
+    NetworkXException
+        If the graph G is empty, not connected, or has negative weights.
+
+    See Also
+    --------
+    betweenness_centrality
+
+    Notes
+    -----
+    Lower values of second order centrality indicate higher centrality.
+
+    The algorithm is from Kermarrec, Le Merrer, Sericola and Trédan [1]_.
+
+    This code implements the analytical version of the algorithm, i.e.,
+    there is no simulation of a random walk process involved. The random walk
+    is here unbiased (corresponding to eq 6 of the paper [1]_), thus the
+    centrality values are the standard deviations for random walk return times
+    on the transformed input graph G (equal in-degree at each node, obtained
+    by adding self-loops).
+
+    The complexity of this implementation, made to run locally on a single
+    machine, is O(n^3), with n the number of nodes of G, which makes it
+    viable only for small graphs.
+
+    References
+    ----------
+    .. [1] Anne-Marie Kermarrec, Erwan Le Merrer, Bruno Sericola, Gilles Trédan
+       "Second order centrality: Distributed assessment of nodes criticity in
+       complex networks", Elsevier Computer Communications 34(5):619-628, 2011.
+    """
+    import numpy as np
+
+    n = len(G)
+
+    if n == 0:
+        raise nx.NetworkXException("Empty graph.")
+    if not nx.is_connected(G):
+        raise nx.NetworkXException("Non connected graph.")
+    if any(d.get(weight, 0) < 0 for u, v, d in G.edges(data=True)):
+        raise nx.NetworkXException("Graph has negative edge weights.")
+
+    # balancing G for Metropolis-Hastings random walks
+    G = nx.DiGraph(G)
+    in_deg = dict(G.in_degree(weight=weight))
+    d_max = max(in_deg.values())
+    for i, deg in in_deg.items():
+        if deg < d_max:
+            G.add_edge(i, i, weight=d_max - deg)
+
+    P = nx.to_numpy_array(G)
+    P /= P.sum(axis=1)[:, np.newaxis]  # to transition probability matrix
+
+    def _Qj(P, j):
+        P = P.copy()
+        P[:, j] = 0
+        return P
+
+    M = np.empty([n, n])
+
+    for i in range(n):
+        M[:, i] = np.linalg.solve(
+            np.identity(n) - _Qj(P, i), np.ones([n, 1])[:, 0]
+        )  # eq 3
+
+    return dict(
+        zip(
+            G.nodes,
+            (float(np.sqrt(2 * np.sum(M[:, i]) - n * (n + 1))) for i in range(n)),
+        )
+    )  # eq 6
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/subgraph_alg.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/subgraph_alg.py
new file mode 100644
index 00000000..0a49e6f4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/subgraph_alg.py
@@ -0,0 +1,340 @@
+"""
+Subgraph centrality and communicability betweenness.
+"""
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = [
+    "subgraph_centrality_exp",
+    "subgraph_centrality",
+    "communicability_betweenness_centrality",
+    "estrada_index",
+]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def subgraph_centrality_exp(G):
+    r"""Returns the subgraph centrality for each node of G.
+
+    Subgraph centrality of a node `n` is the sum of weighted closed
+    walks of all lengths starting and ending at node `n`. The weights
+    decrease with path length. Each closed walk is associated with a
+    connected subgraph ([1]_).
+
+    Parameters
+    ----------
+    G : graph
+
+    Returns
+    -------
+    nodes : dictionary
+        Dictionary of nodes with subgraph centrality as the value.
+
+    Raises
+    ------
+    NetworkXError
+        If the graph is not undirected and simple.
+
+    See Also
+    --------
+    subgraph_centrality:
+        Alternative algorithm of the subgraph centrality for each node of G.
+
+    Notes
+    -----
+    This version of the algorithm exponentiates the adjacency matrix.
+
+    The subgraph centrality of a node `u` in G can be found using
+    the matrix exponential of the adjacency matrix of G [1]_,
+
+    .. math::
+
+        SC(u)=(e^A)_{uu} .
+
+    References
+    ----------
+    .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez,
+       "Subgraph centrality in complex networks",
+       Physical Review E 71, 056103 (2005).
+       https://arxiv.org/abs/cond-mat/0504730
+
+    Examples
+    --------
+    (Example from [1]_)
+    >>> G = nx.Graph(
+    ...     [
+    ...         (1, 2),
+    ...         (1, 5),
+    ...         (1, 8),
+    ...         (2, 3),
+    ...         (2, 8),
+    ...         (3, 4),
+    ...         (3, 6),
+    ...         (4, 5),
+    ...         (4, 7),
+    ...         (5, 6),
+    ...         (6, 7),
+    ...         (7, 8),
+    ...     ]
+    ... )
+    >>> sc = nx.subgraph_centrality_exp(G)
+    >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)])
+    ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']
+    """
+    # alternative implementation that calculates the matrix exponential
+    import scipy as sp
+
+    nodelist = list(G)  # ordering of nodes in matrix
+    A = nx.to_numpy_array(G, nodelist)
+    # convert to 0-1 matrix
+    A[A != 0.0] = 1
+    expA = sp.linalg.expm(A)
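+    # SC(u) = (e^A)_{uu}, so the diagonal of expm(A) is exactly what we need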
+    # convert diagonal to dictionary keyed by node
+    sc = dict(zip(nodelist, map(float, expA.diagonal())))
+    return sc
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def subgraph_centrality(G):
+    r"""Returns subgraph centrality for each node in G.
+
+    Subgraph centrality of a node `n` is the sum of weighted closed
+    walks of all lengths starting and ending at node `n`. The weights
+    decrease with path length. Each closed walk is associated with a
+    connected subgraph ([1]_).
+
+    Parameters
+    ----------
+    G: graph
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with subgraph centrality as the value.
+
+    Raises
+    ------
+    NetworkXError
+       If the graph is not undirected and simple.
+
+    See Also
+    --------
+    subgraph_centrality_exp:
+        Alternative algorithm of the subgraph centrality for each node of G.
+
+    Notes
+    -----
+    This version of the algorithm computes eigenvalues and eigenvectors
+    of the adjacency matrix.
+
+    Subgraph centrality of a node `u` in G can be found using
+    a spectral decomposition of the adjacency matrix [1]_,
+
+    .. math::
+
+       SC(u)=\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\lambda_{j}},
+
+    where `v_{j}^{u}` is the `u`-th component of the eigenvector `v_{j}` of
+    the adjacency matrix `A` of G corresponding to the eigenvalue
+    `\lambda_{j}`.
+
+    Examples
+    --------
+    (Example from [1]_)
+    >>> G = nx.Graph(
+    ...     [
+    ...         (1, 2),
+    ...         (1, 5),
+    ...         (1, 8),
+    ...         (2, 3),
+    ...         (2, 8),
+    ...         (3, 4),
+    ...         (3, 6),
+    ...         (4, 5),
+    ...         (4, 7),
+    ...         (5, 6),
+    ...         (6, 7),
+    ...         (7, 8),
+    ...     ]
+    ... )
+    >>> sc = nx.subgraph_centrality(G)
+    >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)])
+    ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']
+
+    References
+    ----------
+    .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez,
+       "Subgraph centrality in complex networks",
+       Physical Review E 71, 056103 (2005).
+       https://arxiv.org/abs/cond-mat/0504730
+
+    """
+    import numpy as np
+
+    nodelist = list(G)  # ordering of nodes in matrix
+    A = nx.to_numpy_array(G, nodelist)
+    # convert to 0-1 matrix
+    A[np.nonzero(A)] = 1
+    w, v = np.linalg.eigh(A)
+    vsquare = np.array(v) ** 2
+    expw = np.exp(w)
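+    # row u of vsquare holds (v_j^u)^2 over all j; the product with
+    # e^{lambda_j} evaluates the spectral formula for SC(u) from the Notes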
+    xg = vsquare @ expw
+    # convert vector to a dictionary keyed by node
+    sc = dict(zip(nodelist, map(float, xg)))
+    return sc
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def communicability_betweenness_centrality(G):
+    r"""Returns subgraph communicability for all pairs of nodes in G.
+
+    Communicability betweenness measure makes use of the number of walks
+    connecting every pair of nodes as the basis of a betweenness centrality
+    measure.
+
+    Parameters
+    ----------
+    G: graph
+
+    Returns
+    -------
+    nodes : dictionary
+        Dictionary of nodes with communicability betweenness as the value.
+
+    Raises
+    ------
+    NetworkXError
+        If the graph is not undirected and simple.
+
+    Notes
+    -----
+    Let `G=(V,E)` be a simple undirected graph with `n` nodes and `m` edges,
+    and `A` denote the adjacency matrix of `G`.
+
+    Let `G(r)=(V,E(r))` be the graph resulting from
+    removing all edges connected to node `r` but not the node itself.
+
+    The adjacency matrix for `G(r)` is `A+E(r)`,  where `E(r)` has nonzeros
+    only in row and column `r`.
+
+    The subgraph betweenness of a node `r` is [1]_
+
+    .. math::
+
+         \omega_{r} = \frac{1}{C}\sum_{p}\sum_{q}\frac{G_{prq}}{G_{pq}},
+         p\neq q, q\neq r,
+
+    where
+    `G_{prq}=(e^{A})_{pq} - (e^{A+E(r)})_{pq}` is the number of walks
+    involving node r,
+    `G_{pq}=(e^{A})_{pq}` is the number of walks starting
+    at node `p` and ending at node `q`,
+    and `C=(n-1)^{2}-(n-1)` is a normalization factor equal to the
+    number of terms in the sum.
+
+    The resulting `\omega_{r}` takes values between zero and one.
+    The lower bound cannot be attained for a connected
+    graph, and the upper bound is attained in the star graph.
+
+    References
+    ----------
+    .. [1] Ernesto Estrada, Desmond J. Higham, Naomichi Hatano,
+       "Communicability Betweenness in Complex Networks"
+       Physica A 388 (2009) 764-774.
+       https://arxiv.org/abs/0905.4102
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
+    >>> cbc = nx.communicability_betweenness_centrality(G)
+    >>> print([f"{node} {cbc[node]:0.2f}" for node in sorted(cbc)])
+    ['0 0.03', '1 0.45', '2 0.51', '3 0.45', '4 0.40', '5 0.19', '6 0.03']
+    """
+    import numpy as np
+    import scipy as sp
+
+    nodelist = list(G)  # ordering of nodes in matrix
+    n = len(nodelist)
+    A = nx.to_numpy_array(G, nodelist)
+    # convert to 0-1 matrix
+    A[np.nonzero(A)] = 1
+    expA = sp.linalg.expm(A)
+    mapping = dict(zip(nodelist, range(n)))
+    cbc = {}
+    for v in G:
+        # remove row and col of node v
+        i = mapping[v]
+        row = A[i, :].copy()
+        col = A[:, i].copy()
+        A[i, :] = 0
+        A[:, i] = 0
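+        # with row/col i zeroed, expm(A) counts only walks avoiding node v,
+        # so elementwise (expA - expm(A)) / expA is G_{prq} / G_{pq} from
+        # the Notes above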
+        B = (expA - sp.linalg.expm(A)) / expA
+        # sum with row/col of node v and diag set to zero
+        B[i, :] = 0
+        B[:, i] = 0
+        B -= np.diag(np.diag(B))
+        cbc[v] = float(B.sum())
+        # put row and col back
+        A[i, :] = row
+        A[:, i] = col
+    # rescale when more than two nodes
+    order = len(cbc)
+    if order > 2:
+        scale = 1.0 / ((order - 1.0) ** 2 - (order - 1.0))
+        cbc = {node: value * scale for node, value in cbc.items()}
+    return cbc
+
+
+@nx._dispatchable
+def estrada_index(G):
+    r"""Returns the Estrada index of a the graph G.
+
+    The Estrada Index is a topological index of folding or 3D "compactness" ([1]_).
+
+    Parameters
+    ----------
+    G: graph
+
+    Returns
+    -------
+    estrada index: float
+
+    Raises
+    ------
+    NetworkXError
+        If the graph is not undirected and simple.
+
+    Notes
+    -----
+    Let `G=(V,E)` be a simple undirected graph with `n` nodes and let
+    `\lambda_{1}\leq\lambda_{2}\leq\cdots\leq\lambda_{n}`
+    be a non-decreasing ordering of the eigenvalues of its adjacency
+    matrix `A`. The Estrada index is ([1]_, [2]_)
+
+    .. math::
+        EE(G)=\sum_{j=1}^n e^{\lambda _j}.
+
+    References
+    ----------
+    .. [1] E. Estrada, "Characterization of 3D molecular structure",
+       Chem. Phys. Lett. 319, 713 (2000).
+       https://doi.org/10.1016/S0009-2614(00)00158-5
+    .. [2] José Antonio de la Peña, Ivan Gutman, Juan Rada,
+       "Estimating the Estrada index",
+       Linear Algebra and its Applications, 427, 1 (2007).
+       https://doi.org/10.1016/j.laa.2007.06.020
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
+    >>> ei = nx.estrada_index(G)
+    >>> print(f"{ei:0.5}")
+    20.55
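+
+    The same value also follows directly from the spectrum; a quick
+    sanity-check sketch using plain NumPy (not part of the API):
+
+    >>> import numpy as np
+    >>> lam = np.linalg.eigvalsh(nx.to_numpy_array(G))
+    >>> print(f"{np.exp(lam).sum():0.5}")
+    20.55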
+    """
+    return sum(subgraph_centrality(G).values())
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py
new file mode 100644
index 00000000..4c059cf9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py
@@ -0,0 +1,780 @@
+import pytest
+
+import networkx as nx
+
+
+def weighted_G():
+    G = nx.Graph()
+    G.add_edge(0, 1, weight=3)
+    G.add_edge(0, 2, weight=2)
+    G.add_edge(0, 3, weight=6)
+    G.add_edge(0, 4, weight=4)
+    G.add_edge(1, 3, weight=5)
+    G.add_edge(1, 5, weight=5)
+    G.add_edge(2, 4, weight=1)
+    G.add_edge(3, 4, weight=2)
+    G.add_edge(3, 5, weight=1)
+    G.add_edge(4, 5, weight=4)
+    return G
+
+
+class TestBetweennessCentrality:
+    def test_K5(self):
+        """Betweenness centrality: K5"""
+        G = nx.complete_graph(5)
+        b = nx.betweenness_centrality(G, weight=None, normalized=False)
+        b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_K5_endpoints(self):
+        """Betweenness centrality: K5 endpoints"""
+        G = nx.complete_graph(5)
+        b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True)
+        b_answer = {0: 4.0, 1: 4.0, 2: 4.0, 3: 4.0, 4: 4.0}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        # normalized = True case
+        b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True)
+        b_answer = {0: 0.4, 1: 0.4, 2: 0.4, 3: 0.4, 4: 0.4}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P3_normalized(self):
+        """Betweenness centrality: P3 normalized"""
+        G = nx.path_graph(3)
+        b = nx.betweenness_centrality(G, weight=None, normalized=True)
+        b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P3(self):
+        """Betweenness centrality: P3"""
+        G = nx.path_graph(3)
+        b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
+        b = nx.betweenness_centrality(G, weight=None, normalized=False)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_sample_from_P3(self):
+        """Betweenness centrality: P3 sample"""
+        G = nx.path_graph(3)
+        b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
+        b = nx.betweenness_centrality(G, k=3, weight=None, normalized=False, seed=1)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        b = nx.betweenness_centrality(G, k=2, weight=None, normalized=False, seed=1)
+        # python versions give different results with same seed
+        b_approx1 = {0: 0.0, 1: 1.5, 2: 0.0}
+        b_approx2 = {0: 0.0, 1: 0.75, 2: 0.0}
+        for n in sorted(G):
+            assert b[n] in (b_approx1[n], b_approx2[n])
+
+    def test_P3_endpoints(self):
+        """Betweenness centrality: P3 endpoints"""
+        G = nx.path_graph(3)
+        b_answer = {0: 2.0, 1: 3.0, 2: 2.0}
+        b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        # normalized = True case
+        b_answer = {0: 2 / 3, 1: 1.0, 2: 2 / 3}
+        b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_krackhardt_kite_graph(self):
+        """Betweenness centrality: Krackhardt kite graph"""
+        G = nx.krackhardt_kite_graph()
+        b_answer = {
+            0: 1.667,
+            1: 1.667,
+            2: 0.000,
+            3: 7.333,
+            4: 0.000,
+            5: 16.667,
+            6: 16.667,
+            7: 28.000,
+            8: 16.000,
+            9: 0.000,
+        }
+        for b in b_answer:
+            b_answer[b] /= 2
+        b = nx.betweenness_centrality(G, weight=None, normalized=False)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_krackhardt_kite_graph_normalized(self):
+        """Betweenness centrality: Krackhardt kite graph normalized"""
+        G = nx.krackhardt_kite_graph()
+        b_answer = {
+            0: 0.023,
+            1: 0.023,
+            2: 0.000,
+            3: 0.102,
+            4: 0.000,
+            5: 0.231,
+            6: 0.231,
+            7: 0.389,
+            8: 0.222,
+            9: 0.000,
+        }
+        b = nx.betweenness_centrality(G, weight=None, normalized=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_florentine_families_graph(self):
+        """Betweenness centrality: Florentine families graph"""
+        G = nx.florentine_families_graph()
+        b_answer = {
+            "Acciaiuoli": 0.000,
+            "Albizzi": 0.212,
+            "Barbadori": 0.093,
+            "Bischeri": 0.104,
+            "Castellani": 0.055,
+            "Ginori": 0.000,
+            "Guadagni": 0.255,
+            "Lamberteschi": 0.000,
+            "Medici": 0.522,
+            "Pazzi": 0.000,
+            "Peruzzi": 0.022,
+            "Ridolfi": 0.114,
+            "Salviati": 0.143,
+            "Strozzi": 0.103,
+            "Tornabuoni": 0.092,
+        }
+
+        b = nx.betweenness_centrality(G, weight=None, normalized=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_les_miserables_graph(self):
+        """Betweenness centrality: Les Miserables graph"""
+        G = nx.les_miserables_graph()
+        b_answer = {
+            "Napoleon": 0.000,
+            "Myriel": 0.177,
+            "MlleBaptistine": 0.000,
+            "MmeMagloire": 0.000,
+            "CountessDeLo": 0.000,
+            "Geborand": 0.000,
+            "Champtercier": 0.000,
+            "Cravatte": 0.000,
+            "Count": 0.000,
+            "OldMan": 0.000,
+            "Valjean": 0.570,
+            "Labarre": 0.000,
+            "Marguerite": 0.000,
+            "MmeDeR": 0.000,
+            "Isabeau": 0.000,
+            "Gervais": 0.000,
+            "Listolier": 0.000,
+            "Tholomyes": 0.041,
+            "Fameuil": 0.000,
+            "Blacheville": 0.000,
+            "Favourite": 0.000,
+            "Dahlia": 0.000,
+            "Zephine": 0.000,
+            "Fantine": 0.130,
+            "MmeThenardier": 0.029,
+            "Thenardier": 0.075,
+            "Cosette": 0.024,
+            "Javert": 0.054,
+            "Fauchelevent": 0.026,
+            "Bamatabois": 0.008,
+            "Perpetue": 0.000,
+            "Simplice": 0.009,
+            "Scaufflaire": 0.000,
+            "Woman1": 0.000,
+            "Judge": 0.000,
+            "Champmathieu": 0.000,
+            "Brevet": 0.000,
+            "Chenildieu": 0.000,
+            "Cochepaille": 0.000,
+            "Pontmercy": 0.007,
+            "Boulatruelle": 0.000,
+            "Eponine": 0.011,
+            "Anzelma": 0.000,
+            "Woman2": 0.000,
+            "MotherInnocent": 0.000,
+            "Gribier": 0.000,
+            "MmeBurgon": 0.026,
+            "Jondrette": 0.000,
+            "Gavroche": 0.165,
+            "Gillenormand": 0.020,
+            "Magnon": 0.000,
+            "MlleGillenormand": 0.048,
+            "MmePontmercy": 0.000,
+            "MlleVaubois": 0.000,
+            "LtGillenormand": 0.000,
+            "Marius": 0.132,
+            "BaronessT": 0.000,
+            "Mabeuf": 0.028,
+            "Enjolras": 0.043,
+            "Combeferre": 0.001,
+            "Prouvaire": 0.000,
+            "Feuilly": 0.001,
+            "Courfeyrac": 0.005,
+            "Bahorel": 0.002,
+            "Bossuet": 0.031,
+            "Joly": 0.002,
+            "Grantaire": 0.000,
+            "MotherPlutarch": 0.000,
+            "Gueulemer": 0.005,
+            "Babet": 0.005,
+            "Claquesous": 0.005,
+            "Montparnasse": 0.004,
+            "Toussaint": 0.000,
+            "Child1": 0.000,
+            "Child2": 0.000,
+            "Brujon": 0.000,
+            "MmeHucheloup": 0.000,
+        }
+
+        b = nx.betweenness_centrality(G, weight=None, normalized=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_ladder_graph(self):
+        """Betweenness centrality: Ladder graph"""
+        G = nx.Graph()  # ladder_graph(3)
+        G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
+        b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667}
+        for b in b_answer:
+            b_answer[b] /= 2
+        b = nx.betweenness_centrality(G, weight=None, normalized=False)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_disconnected_path(self):
+        """Betweenness centrality: disconnected path"""
+        G = nx.Graph()
+        nx.add_path(G, [0, 1, 2])
+        nx.add_path(G, [3, 4, 5, 6])
+        b_answer = {0: 0, 1: 1, 2: 0, 3: 0, 4: 2, 5: 2, 6: 0}
+        b = nx.betweenness_centrality(G, weight=None, normalized=False)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_disconnected_path_endpoints(self):
+        """Betweenness centrality: disconnected path endpoints"""
+        G = nx.Graph()
+        nx.add_path(G, [0, 1, 2])
+        nx.add_path(G, [3, 4, 5, 6])
+        b_answer = {0: 2, 1: 3, 2: 2, 3: 3, 4: 5, 5: 5, 6: 3}
+        b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        # normalized = True case
+        b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n] / 21, abs=1e-7)
+
+    def test_directed_path(self):
+        """Betweenness centrality: directed path"""
+        G = nx.DiGraph()
+        nx.add_path(G, [0, 1, 2])
+        b = nx.betweenness_centrality(G, weight=None, normalized=False)
+        b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_directed_path_normalized(self):
+        """Betweenness centrality: directed path normalized"""
+        G = nx.DiGraph()
+        nx.add_path(G, [0, 1, 2])
+        b = nx.betweenness_centrality(G, weight=None, normalized=True)
+        b_answer = {0: 0.0, 1: 0.5, 2: 0.0}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+
+class TestWeightedBetweennessCentrality:
+    def test_K5(self):
+        """Weighted betweenness centrality: K5"""
+        G = nx.complete_graph(5)
+        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
+        b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P3_normalized(self):
+        """Weighted betweenness centrality: P3 normalized"""
+        G = nx.path_graph(3)
+        b = nx.betweenness_centrality(G, weight="weight", normalized=True)
+        b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P3(self):
+        """Weighted betweenness centrality: P3"""
+        G = nx.path_graph(3)
+        b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
+        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_krackhardt_kite_graph(self):
+        """Weighted betweenness centrality: Krackhardt kite graph"""
+        G = nx.krackhardt_kite_graph()
+        b_answer = {
+            0: 1.667,
+            1: 1.667,
+            2: 0.000,
+            3: 7.333,
+            4: 0.000,
+            5: 16.667,
+            6: 16.667,
+            7: 28.000,
+            8: 16.000,
+            9: 0.000,
+        }
+        for b in b_answer:
+            b_answer[b] /= 2
+
+        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
+
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_krackhardt_kite_graph_normalized(self):
+        """Weighted betweenness centrality:
+        Krackhardt kite graph normalized
+        """
+        G = nx.krackhardt_kite_graph()
+        b_answer = {
+            0: 0.023,
+            1: 0.023,
+            2: 0.000,
+            3: 0.102,
+            4: 0.000,
+            5: 0.231,
+            6: 0.231,
+            7: 0.389,
+            8: 0.222,
+            9: 0.000,
+        }
+        b = nx.betweenness_centrality(G, weight="weight", normalized=True)
+
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_florentine_families_graph(self):
+        """Weighted betweenness centrality:
+        Florentine families graph"""
+        G = nx.florentine_families_graph()
+        b_answer = {
+            "Acciaiuoli": 0.000,
+            "Albizzi": 0.212,
+            "Barbadori": 0.093,
+            "Bischeri": 0.104,
+            "Castellani": 0.055,
+            "Ginori": 0.000,
+            "Guadagni": 0.255,
+            "Lamberteschi": 0.000,
+            "Medici": 0.522,
+            "Pazzi": 0.000,
+            "Peruzzi": 0.022,
+            "Ridolfi": 0.114,
+            "Salviati": 0.143,
+            "Strozzi": 0.103,
+            "Tornabuoni": 0.092,
+        }
+
+        b = nx.betweenness_centrality(G, weight="weight", normalized=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_les_miserables_graph(self):
+        """Weighted betweenness centrality: Les Miserables graph"""
+        G = nx.les_miserables_graph()
+        b_answer = {
+            "Napoleon": 0.000,
+            "Myriel": 0.177,
+            "MlleBaptistine": 0.000,
+            "MmeMagloire": 0.000,
+            "CountessDeLo": 0.000,
+            "Geborand": 0.000,
+            "Champtercier": 0.000,
+            "Cravatte": 0.000,
+            "Count": 0.000,
+            "OldMan": 0.000,
+            "Valjean": 0.454,
+            "Labarre": 0.000,
+            "Marguerite": 0.009,
+            "MmeDeR": 0.000,
+            "Isabeau": 0.000,
+            "Gervais": 0.000,
+            "Listolier": 0.000,
+            "Tholomyes": 0.066,
+            "Fameuil": 0.000,
+            "Blacheville": 0.000,
+            "Favourite": 0.000,
+            "Dahlia": 0.000,
+            "Zephine": 0.000,
+            "Fantine": 0.114,
+            "MmeThenardier": 0.046,
+            "Thenardier": 0.129,
+            "Cosette": 0.075,
+            "Javert": 0.193,
+            "Fauchelevent": 0.026,
+            "Bamatabois": 0.080,
+            "Perpetue": 0.000,
+            "Simplice": 0.001,
+            "Scaufflaire": 0.000,
+            "Woman1": 0.000,
+            "Judge": 0.000,
+            "Champmathieu": 0.000,
+            "Brevet": 0.000,
+            "Chenildieu": 0.000,
+            "Cochepaille": 0.000,
+            "Pontmercy": 0.023,
+            "Boulatruelle": 0.000,
+            "Eponine": 0.023,
+            "Anzelma": 0.000,
+            "Woman2": 0.000,
+            "MotherInnocent": 0.000,
+            "Gribier": 0.000,
+            "MmeBurgon": 0.026,
+            "Jondrette": 0.000,
+            "Gavroche": 0.285,
+            "Gillenormand": 0.024,
+            "Magnon": 0.005,
+            "MlleGillenormand": 0.036,
+            "MmePontmercy": 0.005,
+            "MlleVaubois": 0.000,
+            "LtGillenormand": 0.015,
+            "Marius": 0.072,
+            "BaronessT": 0.004,
+            "Mabeuf": 0.089,
+            "Enjolras": 0.003,
+            "Combeferre": 0.000,
+            "Prouvaire": 0.000,
+            "Feuilly": 0.004,
+            "Courfeyrac": 0.001,
+            "Bahorel": 0.007,
+            "Bossuet": 0.028,
+            "Joly": 0.000,
+            "Grantaire": 0.036,
+            "MotherPlutarch": 0.000,
+            "Gueulemer": 0.025,
+            "Babet": 0.015,
+            "Claquesous": 0.042,
+            "Montparnasse": 0.050,
+            "Toussaint": 0.011,
+            "Child1": 0.000,
+            "Child2": 0.000,
+            "Brujon": 0.002,
+            "MmeHucheloup": 0.034,
+        }
+
+        b = nx.betweenness_centrality(G, weight="weight", normalized=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_ladder_graph(self):
+        """Weighted betweenness centrality: Ladder graph"""
+        G = nx.Graph()  # ladder_graph(3)
+        G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
+        b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667}
+        for b in b_answer:
+            b_answer[b] /= 2
+        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_G(self):
+        """Weighted betweenness centrality: G"""
+        G = weighted_G()
+        b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0}
+        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_G2(self):
+        """Weighted betweenness centrality: G2"""
+        G = nx.DiGraph()
+        G.add_weighted_edges_from(
+            [
+                ("s", "u", 10),
+                ("s", "x", 5),
+                ("u", "v", 1),
+                ("u", "x", 2),
+                ("v", "y", 1),
+                ("x", "u", 3),
+                ("x", "v", 5),
+                ("x", "y", 2),
+                ("y", "s", 7),
+                ("y", "v", 6),
+            ]
+        )
+
+        b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0}
+
+        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_G3(self):
+        """Weighted betweenness centrality: G3"""
+        G = nx.MultiGraph(weighted_G())
+        es = list(G.edges(data=True))[::2]  # duplicate every other edge
+        G.add_edges_from(es)
+        b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0}
+        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_G4(self):
+        """Weighted betweenness centrality: G4"""
+        G = nx.MultiDiGraph()
+        G.add_weighted_edges_from(
+            [
+                ("s", "u", 10),
+                ("s", "x", 5),
+                ("s", "x", 6),
+                ("u", "v", 1),
+                ("u", "x", 2),
+                ("v", "y", 1),
+                ("v", "y", 1),
+                ("x", "u", 3),
+                ("x", "v", 5),
+                ("x", "y", 2),
+                ("x", "y", 3),
+                ("y", "s", 7),
+                ("y", "v", 6),
+                ("y", "v", 6),
+            ]
+        )
+
+        b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0}
+
+        b = nx.betweenness_centrality(G, weight="weight", normalized=False)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+
+class TestEdgeBetweennessCentrality:
+    def test_K5(self):
+        """Edge betweenness centrality: K5"""
+        G = nx.complete_graph(5)
+        b = nx.edge_betweenness_centrality(G, weight=None, normalized=False)
+        b_answer = dict.fromkeys(G.edges(), 1)
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_normalized_K5(self):
+        """Edge betweenness centrality: K5"""
+        G = nx.complete_graph(5)
+        b = nx.edge_betweenness_centrality(G, weight=None, normalized=True)
+        b_answer = dict.fromkeys(G.edges(), 1 / 10)
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_C4(self):
+        """Edge betweenness centrality: C4"""
+        G = nx.cycle_graph(4)
+        b = nx.edge_betweenness_centrality(G, weight=None, normalized=True)
+        b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2}
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7)
+
+    def test_P4(self):
+        """Edge betweenness centrality: P4"""
+        G = nx.path_graph(4)
+        b = nx.edge_betweenness_centrality(G, weight=None, normalized=False)
+        b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3}
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_normalized_P4(self):
+        """Edge betweenness centrality: P4"""
+        G = nx.path_graph(4)
+        b = nx.edge_betweenness_centrality(G, weight=None, normalized=True)
+        b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3}
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7)
+
+    def test_balanced_tree(self):
+        """Edge betweenness centrality: balanced tree"""
+        G = nx.balanced_tree(r=2, h=2)
+        b = nx.edge_betweenness_centrality(G, weight=None, normalized=False)
+        b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6}
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+
+class TestWeightedEdgeBetweennessCentrality:
+    def test_K5(self):
+        """Edge betweenness centrality: K5"""
+        G = nx.complete_graph(5)
+        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
+        b_answer = dict.fromkeys(G.edges(), 1)
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_C4(self):
+        """Edge betweenness centrality: C4"""
+        G = nx.cycle_graph(4)
+        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
+        b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2}
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P4(self):
+        """Edge betweenness centrality: P4"""
+        G = nx.path_graph(4)
+        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
+        b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3}
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_balanced_tree(self):
+        """Edge betweenness centrality: balanced tree"""
+        G = nx.balanced_tree(r=2, h=2)
+        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
+        b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6}
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_weighted_graph(self):
+        """Edge betweenness centrality: weighted"""
+        eList = [
+            (0, 1, 5),
+            (0, 2, 4),
+            (0, 3, 3),
+            (0, 4, 2),
+            (1, 2, 4),
+            (1, 3, 1),
+            (1, 4, 3),
+            (2, 4, 5),
+            (3, 4, 4),
+        ]
+        G = nx.Graph()
+        G.add_weighted_edges_from(eList)
+        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
+        b_answer = {
+            (0, 1): 0.0,
+            (0, 2): 1.0,
+            (0, 3): 2.0,
+            (0, 4): 1.0,
+            (1, 2): 2.0,
+            (1, 3): 3.5,
+            (1, 4): 1.5,
+            (2, 4): 1.0,
+            (3, 4): 0.5,
+        }
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_normalized_weighted_graph(self):
+        """Edge betweenness centrality: normalized weighted"""
+        eList = [
+            (0, 1, 5),
+            (0, 2, 4),
+            (0, 3, 3),
+            (0, 4, 2),
+            (1, 2, 4),
+            (1, 3, 1),
+            (1, 4, 3),
+            (2, 4, 5),
+            (3, 4, 4),
+        ]
+        G = nx.Graph()
+        G.add_weighted_edges_from(eList)
+        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True)
+        b_answer = {
+            (0, 1): 0.0,
+            (0, 2): 1.0,
+            (0, 3): 2.0,
+            (0, 4): 1.0,
+            (1, 2): 2.0,
+            (1, 3): 3.5,
+            (1, 4): 1.5,
+            (2, 4): 1.0,
+            (3, 4): 0.5,
+        }
+        norm = len(G) * (len(G) - 1) / 2
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7)
+
+    def test_weighted_multigraph(self):
+        """Edge betweenness centrality: weighted multigraph"""
+        eList = [
+            (0, 1, 5),
+            (0, 1, 4),
+            (0, 2, 4),
+            (0, 3, 3),
+            (0, 3, 3),
+            (0, 4, 2),
+            (1, 2, 4),
+            (1, 3, 1),
+            (1, 3, 2),
+            (1, 4, 3),
+            (1, 4, 4),
+            (2, 4, 5),
+            (3, 4, 4),
+            (3, 4, 4),
+        ]
+        G = nx.MultiGraph()
+        G.add_weighted_edges_from(eList)
+        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
+        b_answer = {
+            (0, 1, 0): 0.0,
+            (0, 1, 1): 0.5,
+            (0, 2, 0): 1.0,
+            (0, 3, 0): 0.75,
+            (0, 3, 1): 0.75,
+            (0, 4, 0): 1.0,
+            (1, 2, 0): 2.0,
+            (1, 3, 0): 3.0,
+            (1, 3, 1): 0.0,
+            (1, 4, 0): 1.5,
+            (1, 4, 1): 0.0,
+            (2, 4, 0): 1.0,
+            (3, 4, 0): 0.25,
+            (3, 4, 1): 0.25,
+        }
+        for n in sorted(G.edges(keys=True)):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_normalized_weighted_multigraph(self):
+        """Edge betweenness centrality: normalized weighted multigraph"""
+        eList = [
+            (0, 1, 5),
+            (0, 1, 4),
+            (0, 2, 4),
+            (0, 3, 3),
+            (0, 3, 3),
+            (0, 4, 2),
+            (1, 2, 4),
+            (1, 3, 1),
+            (1, 3, 2),
+            (1, 4, 3),
+            (1, 4, 4),
+            (2, 4, 5),
+            (3, 4, 4),
+            (3, 4, 4),
+        ]
+        G = nx.MultiGraph()
+        G.add_weighted_edges_from(eList)
+        b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True)
+        b_answer = {
+            (0, 1, 0): 0.0,
+            (0, 1, 1): 0.5,
+            (0, 2, 0): 1.0,
+            (0, 3, 0): 0.75,
+            (0, 3, 1): 0.75,
+            (0, 4, 0): 1.0,
+            (1, 2, 0): 2.0,
+            (1, 3, 0): 3.0,
+            (1, 3, 1): 0.0,
+            (1, 4, 0): 1.5,
+            (1, 4, 1): 0.0,
+            (2, 4, 0): 1.0,
+            (3, 4, 0): 0.25,
+            (3, 4, 1): 0.25,
+        }
+        norm = len(G) * (len(G) - 1) / 2
+        for n in sorted(G.edges(keys=True)):
+            assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py
new file mode 100644
index 00000000..a35a401a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py
@@ -0,0 +1,340 @@
+import pytest
+
+import networkx as nx
+
+
+class TestSubsetBetweennessCentrality:
+    def test_K5(self):
+        """Betweenness Centrality Subset: K5"""
+        G = nx.complete_graph(5)
+        b = nx.betweenness_centrality_subset(
+            G, sources=[0], targets=[1, 3], weight=None
+        )
+        b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P5_directed(self):
+        """Betweenness Centrality Subset: P5 directed"""
+        G = nx.DiGraph()
+        nx.add_path(G, range(5))
+        b_answer = {0: 0, 1: 1, 2: 1, 3: 0, 4: 0}
+        b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P5(self):
+        """Betweenness Centrality Subset: P5"""
+        G = nx.Graph()
+        nx.add_path(G, range(5))
+        b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0, 4: 0}
+        b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P5_multiple_target(self):
+        """Betweenness Centrality Subset: P5 multiple target"""
+        G = nx.Graph()
+        nx.add_path(G, range(5))
+        b_answer = {0: 0, 1: 1, 2: 1, 3: 0.5, 4: 0}
+        b = nx.betweenness_centrality_subset(
+            G, sources=[0], targets=[3, 4], weight=None
+        )
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_box(self):
+        """Betweenness Centrality Subset: box"""
+        G = nx.Graph()
+        G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)])
+        b_answer = {0: 0, 1: 0.25, 2: 0.25, 3: 0}
+        b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_box_and_path(self):
+        """Betweenness Centrality Subset: box and path"""
+        G = nx.Graph()
+        G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 5)])
+        b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0.5, 4: 0, 5: 0}
+        b = nx.betweenness_centrality_subset(
+            G, sources=[0], targets=[3, 4], weight=None
+        )
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_box_and_path2(self):
+        """Betweenness Centrality Subset: box and path multiple target"""
+        G = nx.Graph()
+        G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)])
+        b_answer = {0: 0, 1: 1.0, 2: 0.5, 20: 0.5, 3: 0.5, 4: 0}
+        b = nx.betweenness_centrality_subset(
+            G, sources=[0], targets=[3, 4], weight=None
+        )
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_diamond_multi_path(self):
+        """Betweenness Centrality Subset: Diamond Multi Path"""
+        G = nx.Graph()
+        G.add_edges_from(
+            [
+                (1, 2),
+                (1, 3),
+                (1, 4),
+                (1, 5),
+                (1, 10),
+                (10, 11),
+                (11, 12),
+                (12, 9),
+                (2, 6),
+                (3, 6),
+                (4, 6),
+                (5, 7),
+                (7, 8),
+                (6, 8),
+                (8, 9),
+            ]
+        )
+        b = nx.betweenness_centrality_subset(G, sources=[1], targets=[9], weight=None)
+
+        expected_b = {
+            1: 0,
+            2: 1.0 / 10,
+            3: 1.0 / 10,
+            4: 1.0 / 10,
+            5: 1.0 / 10,
+            6: 3.0 / 10,
+            7: 1.0 / 10,
+            8: 4.0 / 10,
+            9: 0,
+            10: 1.0 / 10,
+            11: 1.0 / 10,
+            12: 1.0 / 10,
+        }
+
+        for n in sorted(G):
+            assert b[n] == pytest.approx(expected_b[n], abs=1e-7)
+
+    def test_normalized_p2(self):
+        """
+        Betweenness Centrality Subset: Normalized P2
+        If n <= 2, no normalization is applied; betweenness centrality
+        should be 0 for all nodes.
+        """
+        G = nx.Graph()
+        nx.add_path(G, range(2))
+        b_answer = {0: 0, 1: 0.0}
+        b = nx.betweenness_centrality_subset(
+            G, sources=[0], targets=[1], normalized=True, weight=None
+        )
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_normalized_P5_directed(self):
+        """Betweenness Centrality Subset: Normalized Directed P5"""
+        G = nx.DiGraph()
+        nx.add_path(G, range(5))
+        b_answer = {0: 0, 1: 1.0 / 12.0, 2: 1.0 / 12.0, 3: 0, 4: 0}
+        b = nx.betweenness_centrality_subset(
+            G, sources=[0], targets=[3], normalized=True, weight=None
+        )
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_weighted_graph(self):
+        """Betweenness Centrality Subset: Weighted Graph"""
+        G = nx.DiGraph()
+        G.add_edge(0, 1, weight=3)
+        G.add_edge(0, 2, weight=2)
+        G.add_edge(0, 3, weight=6)
+        G.add_edge(0, 4, weight=4)
+        G.add_edge(1, 3, weight=5)
+        G.add_edge(1, 5, weight=5)
+        G.add_edge(2, 4, weight=1)
+        G.add_edge(3, 4, weight=2)
+        G.add_edge(3, 5, weight=1)
+        G.add_edge(4, 5, weight=4)
+        b_answer = {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.0}
+        b = nx.betweenness_centrality_subset(
+            G, sources=[0], targets=[5], normalized=False, weight="weight"
+        )
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+
+class TestEdgeSubsetBetweennessCentrality:
+    def test_K5(self):
+        """Edge betweenness subset centrality: K5"""
+        G = nx.complete_graph(5)
+        b = nx.edge_betweenness_centrality_subset(
+            G, sources=[0], targets=[1, 3], weight=None
+        )
+        b_answer = dict.fromkeys(G.edges(), 0)
+        b_answer[(0, 3)] = b_answer[(0, 1)] = 0.5
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P5_directed(self):
+        """Edge betweenness subset centrality: P5 directed"""
+        G = nx.DiGraph()
+        nx.add_path(G, range(5))
+        b_answer = dict.fromkeys(G.edges(), 0)
+        b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 1
+        b = nx.edge_betweenness_centrality_subset(
+            G, sources=[0], targets=[3], weight=None
+        )
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P5(self):
+        """Edge betweenness subset centrality: P5"""
+        G = nx.Graph()
+        nx.add_path(G, range(5))
+        b_answer = dict.fromkeys(G.edges(), 0)
+        b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 0.5
+        b = nx.edge_betweenness_centrality_subset(
+            G, sources=[0], targets=[3], weight=None
+        )
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P5_multiple_target(self):
+        """Edge betweenness subset centrality: P5 multiple target"""
+        G = nx.Graph()
+        nx.add_path(G, range(5))
+        b_answer = dict.fromkeys(G.edges(), 0)
+        b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 1
+        b_answer[(3, 4)] = 0.5
+        b = nx.edge_betweenness_centrality_subset(
+            G, sources=[0], targets=[3, 4], weight=None
+        )
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_box(self):
+        """Edge betweenness subset centrality: box"""
+        G = nx.Graph()
+        G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)])
+        b_answer = dict.fromkeys(G.edges(), 0)
+        b_answer[(0, 1)] = b_answer[(0, 2)] = 0.25
+        b_answer[(1, 3)] = b_answer[(2, 3)] = 0.25
+        b = nx.edge_betweenness_centrality_subset(
+            G, sources=[0], targets=[3], weight=None
+        )
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_box_and_path(self):
+        """Edge betweenness subset centrality: box and path"""
+        G = nx.Graph()
+        G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 5)])
+        b_answer = dict.fromkeys(G.edges(), 0)
+        b_answer[(0, 1)] = b_answer[(0, 2)] = 0.5
+        b_answer[(1, 3)] = b_answer[(2, 3)] = 0.5
+        b_answer[(3, 4)] = 0.5
+        b = nx.edge_betweenness_centrality_subset(
+            G, sources=[0], targets=[3, 4], weight=None
+        )
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_box_and_path2(self):
+        """Edge betweenness subset centrality: box and path multiple target"""
+        G = nx.Graph()
+        G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)])
+        b_answer = dict.fromkeys(G.edges(), 0)
+        b_answer[(0, 1)] = 1.0
+        b_answer[(1, 20)] = b_answer[(3, 20)] = 0.5
+        b_answer[(1, 2)] = b_answer[(2, 3)] = 0.5
+        b_answer[(3, 4)] = 0.5
+        b = nx.edge_betweenness_centrality_subset(
+            G, sources=[0], targets=[3, 4], weight=None
+        )
+        for n in sorted(G.edges()):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_diamond_multi_path(self):
+        """Edge betweenness subset centrality: Diamond Multi Path"""
+        G = nx.Graph()
+        G.add_edges_from(
+            [
+                (1, 2),
+                (1, 3),
+                (1, 4),
+                (1, 5),
+                (1, 10),
+                (10, 11),
+                (11, 12),
+                (12, 9),
+                (2, 6),
+                (3, 6),
+                (4, 6),
+                (5, 7),
+                (7, 8),
+                (6, 8),
+                (8, 9),
+            ]
+        )
+        b_answer = dict.fromkeys(G.edges(), 0)
+        b_answer[(8, 9)] = 0.4
+        b_answer[(6, 8)] = b_answer[(7, 8)] = 0.2
+        b_answer[(2, 6)] = b_answer[(3, 6)] = b_answer[(4, 6)] = 0.2 / 3.0
+        b_answer[(1, 2)] = b_answer[(1, 3)] = b_answer[(1, 4)] = 0.2 / 3.0
+        b_answer[(5, 7)] = 0.2
+        b_answer[(1, 5)] = 0.2
+        b_answer[(9, 12)] = 0.1
+        b_answer[(11, 12)] = b_answer[(10, 11)] = b_answer[(1, 10)] = 0.1
+        b = nx.edge_betweenness_centrality_subset(
+            G, sources=[1], targets=[9], weight=None
+        )
+        for n in G.edges():
+            sort_n = tuple(sorted(n))
+            assert b[n] == pytest.approx(b_answer[sort_n], abs=1e-7)
+
+    def test_normalized_p1(self):
+        """
+        Edge betweenness subset centrality: P1
+        If n <= 1, no normalization is applied; b = 0 for all edges.
+        """
+        G = nx.Graph()
+        nx.add_path(G, range(1))
+        b_answer = dict.fromkeys(G.edges(), 0)
+        b = nx.edge_betweenness_centrality_subset(
+            G, sources=[0], targets=[0], normalized=True, weight=None
+        )
+        for n in G.edges():
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_normalized_P5_directed(self):
+        """Edge betweenness subset centrality: Normalized Directed P5"""
+        G = nx.DiGraph()
+        nx.add_path(G, range(5))
+        b_answer = dict.fromkeys(G.edges(), 0)
+        b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 0.05
+        b = nx.edge_betweenness_centrality_subset(
+            G, sources=[0], targets=[3], normalized=True, weight=None
+        )
+        for n in G.edges():
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_weighted_graph(self):
+        """Edge betweenness subset centrality: Weighted Graph"""
+        G = nx.DiGraph()
+        G.add_edge(0, 1, weight=3)
+        G.add_edge(0, 2, weight=2)
+        G.add_edge(0, 3, weight=6)
+        G.add_edge(0, 4, weight=4)
+        G.add_edge(1, 3, weight=5)
+        G.add_edge(1, 5, weight=5)
+        G.add_edge(2, 4, weight=1)
+        G.add_edge(3, 4, weight=2)
+        G.add_edge(3, 5, weight=1)
+        G.add_edge(4, 5, weight=4)
+        b_answer = dict.fromkeys(G.edges(), 0)
+        b_answer[(0, 2)] = b_answer[(2, 4)] = b_answer[(4, 5)] = 0.5
+        b_answer[(0, 3)] = b_answer[(3, 5)] = 0.5
+        b = nx.edge_betweenness_centrality_subset(
+            G, sources=[0], targets=[5], normalized=False, weight="weight"
+        )
+        for n in G.edges():
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py
new file mode 100644
index 00000000..7bdb7e7c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py
@@ -0,0 +1,307 @@
+"""
+Tests for closeness centrality.
+"""
+
+import pytest
+
+import networkx as nx
+
+
+class TestClosenessCentrality:
+    @classmethod
+    def setup_class(cls):
+        cls.K = nx.krackhardt_kite_graph()
+        cls.P3 = nx.path_graph(3)
+        cls.P4 = nx.path_graph(4)
+        cls.K5 = nx.complete_graph(5)
+
+        cls.C4 = nx.cycle_graph(4)
+        cls.T = nx.balanced_tree(r=2, h=2)
+        cls.Gb = nx.Graph()
+        cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
+
+        F = nx.florentine_families_graph()
+        cls.F = F
+
+        cls.LM = nx.les_miserables_graph()
+
+        # Create random undirected, unweighted graph for testing incremental version
+        cls.undirected_G = nx.fast_gnp_random_graph(n=100, p=0.6, seed=123)
+        cls.undirected_G_cc = nx.closeness_centrality(cls.undirected_G)
+
+    def test_wf_improved(self):
+        G = nx.union(self.P4, nx.path_graph([4, 5, 6]))
+        c = nx.closeness_centrality(G)
+        cwf = nx.closeness_centrality(G, wf_improved=False)
+        res = {0: 0.25, 1: 0.375, 2: 0.375, 3: 0.25, 4: 0.222, 5: 0.333, 6: 0.222}
+        wf_res = {0: 0.5, 1: 0.75, 2: 0.75, 3: 0.5, 4: 0.667, 5: 1.0, 6: 0.667}
+        for n in G:
+            assert c[n] == pytest.approx(res[n], abs=1e-3)
+            assert cwf[n] == pytest.approx(wf_res[n], abs=1e-3)
+
+    def test_digraph(self):
+        G = nx.path_graph(3, create_using=nx.DiGraph())
+        c = nx.closeness_centrality(G)
+        cr = nx.closeness_centrality(G.reverse())
+        d = {0: 0.0, 1: 0.500, 2: 0.667}
+        dr = {0: 0.667, 1: 0.500, 2: 0.0}
+        for n in sorted(self.P3):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+            assert cr[n] == pytest.approx(dr[n], abs=1e-3)
+
+    def test_k5_closeness(self):
+        c = nx.closeness_centrality(self.K5)
+        d = {0: 1.000, 1: 1.000, 2: 1.000, 3: 1.000, 4: 1.000}
+        for n in sorted(self.K5):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_p3_closeness(self):
+        c = nx.closeness_centrality(self.P3)
+        d = {0: 0.667, 1: 1.000, 2: 0.667}
+        for n in sorted(self.P3):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_krackhardt_closeness(self):
+        c = nx.closeness_centrality(self.K)
+        d = {
+            0: 0.529,
+            1: 0.529,
+            2: 0.500,
+            3: 0.600,
+            4: 0.500,
+            5: 0.643,
+            6: 0.643,
+            7: 0.600,
+            8: 0.429,
+            9: 0.310,
+        }
+        for n in sorted(self.K):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_florentine_families_closeness(self):
+        c = nx.closeness_centrality(self.F)
+        d = {
+            "Acciaiuoli": 0.368,
+            "Albizzi": 0.483,
+            "Barbadori": 0.4375,
+            "Bischeri": 0.400,
+            "Castellani": 0.389,
+            "Ginori": 0.333,
+            "Guadagni": 0.467,
+            "Lamberteschi": 0.326,
+            "Medici": 0.560,
+            "Pazzi": 0.286,
+            "Peruzzi": 0.368,
+            "Ridolfi": 0.500,
+            "Salviati": 0.389,
+            "Strozzi": 0.4375,
+            "Tornabuoni": 0.483,
+        }
+        for n in sorted(self.F):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_les_miserables_closeness(self):
+        c = nx.closeness_centrality(self.LM)
+        d = {
+            "Napoleon": 0.302,
+            "Myriel": 0.429,
+            "MlleBaptistine": 0.413,
+            "MmeMagloire": 0.413,
+            "CountessDeLo": 0.302,
+            "Geborand": 0.302,
+            "Champtercier": 0.302,
+            "Cravatte": 0.302,
+            "Count": 0.302,
+            "OldMan": 0.302,
+            "Valjean": 0.644,
+            "Labarre": 0.394,
+            "Marguerite": 0.413,
+            "MmeDeR": 0.394,
+            "Isabeau": 0.394,
+            "Gervais": 0.394,
+            "Listolier": 0.341,
+            "Tholomyes": 0.392,
+            "Fameuil": 0.341,
+            "Blacheville": 0.341,
+            "Favourite": 0.341,
+            "Dahlia": 0.341,
+            "Zephine": 0.341,
+            "Fantine": 0.461,
+            "MmeThenardier": 0.461,
+            "Thenardier": 0.517,
+            "Cosette": 0.478,
+            "Javert": 0.517,
+            "Fauchelevent": 0.402,
+            "Bamatabois": 0.427,
+            "Perpetue": 0.318,
+            "Simplice": 0.418,
+            "Scaufflaire": 0.394,
+            "Woman1": 0.396,
+            "Judge": 0.404,
+            "Champmathieu": 0.404,
+            "Brevet": 0.404,
+            "Chenildieu": 0.404,
+            "Cochepaille": 0.404,
+            "Pontmercy": 0.373,
+            "Boulatruelle": 0.342,
+            "Eponine": 0.396,
+            "Anzelma": 0.352,
+            "Woman2": 0.402,
+            "MotherInnocent": 0.398,
+            "Gribier": 0.288,
+            "MmeBurgon": 0.344,
+            "Jondrette": 0.257,
+            "Gavroche": 0.514,
+            "Gillenormand": 0.442,
+            "Magnon": 0.335,
+            "MlleGillenormand": 0.442,
+            "MmePontmercy": 0.315,
+            "MlleVaubois": 0.308,
+            "LtGillenormand": 0.365,
+            "Marius": 0.531,
+            "BaronessT": 0.352,
+            "Mabeuf": 0.396,
+            "Enjolras": 0.481,
+            "Combeferre": 0.392,
+            "Prouvaire": 0.357,
+            "Feuilly": 0.392,
+            "Courfeyrac": 0.400,
+            "Bahorel": 0.394,
+            "Bossuet": 0.475,
+            "Joly": 0.394,
+            "Grantaire": 0.358,
+            "MotherPlutarch": 0.285,
+            "Gueulemer": 0.463,
+            "Babet": 0.463,
+            "Claquesous": 0.452,
+            "Montparnasse": 0.458,
+            "Toussaint": 0.402,
+            "Child1": 0.342,
+            "Child2": 0.342,
+            "Brujon": 0.380,
+            "MmeHucheloup": 0.353,
+        }
+        for n in sorted(self.LM):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_weighted_closeness(self):
+        edges = [
+            ("s", "u", 10),
+            ("s", "x", 5),
+            ("u", "v", 1),
+            ("u", "x", 2),
+            ("v", "y", 1),
+            ("x", "u", 3),
+            ("x", "v", 5),
+            ("x", "y", 2),
+            ("y", "s", 7),
+            ("y", "v", 6),
+        ]
+        XG = nx.Graph()
+        XG.add_weighted_edges_from(edges)
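+        # distance="weight" makes closeness use Dijkstra shortest-path lengths over that edge attribute.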
+        c = nx.closeness_centrality(XG, distance="weight")
+        d = {"y": 0.200, "x": 0.286, "s": 0.138, "u": 0.235, "v": 0.200}
+        for n in sorted(XG):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    #
+    # Tests for incremental closeness centrality.
+    #
+    @staticmethod
+    def pick_add_edge(g):
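+        # Pick an arbitrary non-adjacent node pair (u, v) to use as a new edge.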
+        u = nx.utils.arbitrary_element(g)
+        possible_nodes = set(g.nodes())
+        neighbors = list(g.neighbors(u)) + [u]
+        possible_nodes.difference_update(neighbors)
+        v = nx.utils.arbitrary_element(possible_nodes)
+        return (u, v)
+
+    @staticmethod
+    def pick_remove_edge(g):
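+        # Pick an arbitrary existing edge (u, v) incident to some node u.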
+        u = nx.utils.arbitrary_element(g)
+        possible_nodes = list(g.neighbors(u))
+        v = nx.utils.arbitrary_element(possible_nodes)
+        return (u, v)
+
+    def test_directed_raises(self):
+        with pytest.raises(nx.NetworkXNotImplemented):
+            dir_G = nx.gn_graph(n=5)
+            prev_cc = None
+            edge = self.pick_add_edge(dir_G)
+            insert = True
+            nx.incremental_closeness_centrality(dir_G, edge, prev_cc, insert)
+
+    def test_wrong_size_prev_cc_raises(self):
+        with pytest.raises(nx.NetworkXError):
+            G = self.undirected_G.copy()
+            edge = self.pick_add_edge(G)
+            insert = True
+            prev_cc = self.undirected_G_cc.copy()
+            prev_cc.pop(0)
+            nx.incremental_closeness_centrality(G, edge, prev_cc, insert)
+
+    def test_wrong_nodes_prev_cc_raises(self):
+        with pytest.raises(nx.NetworkXError):
+            G = self.undirected_G.copy()
+            edge = self.pick_add_edge(G)
+            insert = True
+            prev_cc = self.undirected_G_cc.copy()
+            num_nodes = len(prev_cc)
+            prev_cc.pop(0)
+            prev_cc[num_nodes] = 0.5
+            nx.incremental_closeness_centrality(G, edge, prev_cc, insert)
+
+    def test_zero_centrality(self):
+        G = nx.path_graph(3)
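+        # Removing either edge of the 3-node path isolates an endpoint, which must then have closeness 0.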
+        prev_cc = nx.closeness_centrality(G)
+        edge = self.pick_remove_edge(G)
+        test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insertion=False)
+        G.remove_edges_from([edge])
+        real_cc = nx.closeness_centrality(G)
+        shared_items = set(test_cc.items()) & set(real_cc.items())
+        assert len(shared_items) == len(real_cc)
+        assert 0 in test_cc.values()
+
+    def test_incremental(self):
+        # Check that incremental and regular give same output
+        G = self.undirected_G.copy()
+        prev_cc = None
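+        # Alternate edge removals and insertions, feeding each incremental result back in as prev_cc.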
+        for i in range(5):
+            if i % 2 == 0:
+                # Remove an edge
+                insert = False
+                edge = self.pick_remove_edge(G)
+            else:
+                # Add an edge
+                insert = True
+                edge = self.pick_add_edge(G)
+
+            # start = timeit.default_timer()
+            test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insert)
+            # inc_elapsed = (timeit.default_timer() - start)
+            # print(f"incremental time: {inc_elapsed}")
+
+            if insert:
+                G.add_edges_from([edge])
+            else:
+                G.remove_edges_from([edge])
+
+            # start = timeit.default_timer()
+            real_cc = nx.closeness_centrality(G)
+            # reg_elapsed = (timeit.default_timer() - start)
+            # print(f"regular time: {reg_elapsed}")
+            # Example output:
+            # incremental time: 0.208
+            # regular time: 0.276
+            # incremental time: 0.00683
+            # regular time: 0.260
+            # incremental time: 0.0224
+            # regular time: 0.278
+            # incremental time: 0.00804
+            # regular time: 0.208
+            # incremental time: 0.00947
+            # regular time: 0.188
+
+            assert set(test_cc.items()) == set(real_cc.items())
+
+            prev_cc = test_cc
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py
new file mode 100644
index 00000000..4e3d4385
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py
@@ -0,0 +1,197 @@
+import pytest
+
+import networkx as nx
+from networkx import approximate_current_flow_betweenness_centrality as approximate_cfbc
+from networkx import edge_current_flow_betweenness_centrality as edge_current_flow
+
+np = pytest.importorskip("numpy")
+pytest.importorskip("scipy")
+
+
+class TestFlowBetweennessCentrality:
+    def test_K4_normalized(self):
+        """Betweenness centrality: K4"""
+        G = nx.complete_graph(4)
+        b = nx.current_flow_betweenness_centrality(G, normalized=True)
+        b_answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        G.add_edge(0, 1, weight=0.5, other=0.3)
+        b = nx.current_flow_betweenness_centrality(G, normalized=True, weight=None)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        wb_answer = {0: 0.2222222, 1: 0.2222222, 2: 0.30555555, 3: 0.30555555}
+        b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="weight")
+        for n in sorted(G):
+            assert b[n] == pytest.approx(wb_answer[n], abs=1e-7)
+        wb_answer = {0: 0.2051282, 1: 0.2051282, 2: 0.33974358, 3: 0.33974358}
+        b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="other")
+        for n in sorted(G):
+            assert b[n] == pytest.approx(wb_answer[n], abs=1e-7)
+
+    def test_K4(self):
+        """Betweenness centrality: K4"""
+        G = nx.complete_graph(4)
+        for solver in ["full", "lu", "cg"]:
+            b = nx.current_flow_betweenness_centrality(
+                G, normalized=False, solver=solver
+            )
+            b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
+            for n in sorted(G):
+                assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P4_normalized(self):
+        """Betweenness centrality: P4 normalized"""
+        G = nx.path_graph(4)
+        b = nx.current_flow_betweenness_centrality(G, normalized=True)
+        b_answer = {0: 0, 1: 2.0 / 3, 2: 2.0 / 3, 3: 0}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P4(self):
+        """Betweenness centrality: P4"""
+        G = nx.path_graph(4)
+        b = nx.current_flow_betweenness_centrality(G, normalized=False)
+        b_answer = {0: 0, 1: 2, 2: 2, 3: 0}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_star(self):
+        """Betweenness centrality: star"""
+        G = nx.Graph()
+        nx.add_star(G, ["a", "b", "c", "d"])
+        b = nx.current_flow_betweenness_centrality(G, normalized=True)
+        b_answer = {"a": 1.0, "b": 0.0, "c": 0.0, "d": 0.0}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_solvers2(self):
+        """Betweenness centrality: alternate solvers"""
+        G = nx.complete_graph(4)
+        for solver in ["full", "lu", "cg"]:
+            b = nx.current_flow_betweenness_centrality(
+                G, normalized=False, solver=solver
+            )
+            b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
+            for n in sorted(G):
+                assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+
+class TestApproximateFlowBetweennessCentrality:
+    def test_K4_normalized(self):
+        "Approximate current-flow betweenness centrality: K4 normalized"
+        G = nx.complete_graph(4)
+        b = nx.current_flow_betweenness_centrality(G, normalized=True)
+        epsilon = 0.1
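+        # Run the approximation at half the comparison tolerance.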
+        ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon)
+        for n in sorted(G):
+            np.testing.assert_allclose(b[n], ba[n], atol=epsilon)
+
+    def test_K4(self):
+        "Approximate current-flow betweenness centrality: K4"
+        G = nx.complete_graph(4)
+        b = nx.current_flow_betweenness_centrality(G, normalized=False)
+        epsilon = 0.1
+        ba = approximate_cfbc(G, normalized=False, epsilon=0.5 * epsilon)
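+        # Unnormalized scores grow with the number of node pairs, so the comparison tolerance is scaled by len(G) ** 2.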
+        for n in sorted(G):
+            np.testing.assert_allclose(b[n], ba[n], atol=epsilon * len(G) ** 2)
+
+    def test_star(self):
+        "Approximate current-flow betweenness centrality: star"
+        G = nx.Graph()
+        nx.add_star(G, ["a", "b", "c", "d"])
+        b = nx.current_flow_betweenness_centrality(G, normalized=True)
+        epsilon = 0.1
+        ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon)
+        for n in sorted(G):
+            np.testing.assert_allclose(b[n], ba[n], atol=epsilon)
+
+    def test_grid(self):
+        "Approximate current-flow betweenness centrality: 2d grid"
+        G = nx.grid_2d_graph(4, 4)
+        b = nx.current_flow_betweenness_centrality(G, normalized=True)
+        epsilon = 0.1
+        ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon)
+        for n in sorted(G):
+            np.testing.assert_allclose(b[n], ba[n], atol=epsilon)
+
+    def test_seed(self):
+        G = nx.complete_graph(4)
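+        # A fixed seed makes the randomized sampling reproducible across runs.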
+        b = approximate_cfbc(G, normalized=False, epsilon=0.05, seed=1)
+        b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
+        for n in sorted(G):
+            np.testing.assert_allclose(b[n], b_answer[n], atol=0.1)
+
+    def test_solvers(self):
+        "Approximate current-flow betweenness centrality: solvers"
+        G = nx.complete_graph(4)
+        epsilon = 0.1
+        for solver in ["full", "lu", "cg"]:
+            b = approximate_cfbc(
+                G, normalized=False, solver=solver, epsilon=0.5 * epsilon
+            )
+            b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
+            for n in sorted(G):
+                np.testing.assert_allclose(b[n], b_answer[n], atol=epsilon)
+
+    def test_lower_kmax(self):
+        G = nx.complete_graph(4)
+        with pytest.raises(nx.NetworkXError, match="Increase kmax or epsilon"):
+            nx.approximate_current_flow_betweenness_centrality(G, kmax=4)
+
+
+class TestWeightedFlowBetweennessCentrality:
+    pass
+
+
+class TestEdgeFlowBetweennessCentrality:
+    def test_K4_normalized(self):
+        """Edge flow betweenness centrality: K4 normalized"""
+        G = nx.complete_graph(4)
+        b = edge_current_flow(G, normalized=True)
+        b_answer = dict.fromkeys(G.edges(), 0.25)
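+        # An undirected edge may be keyed as (s, t) or (t, s), so look up both orientations.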
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+
+    def test_K4(self):
+        """Edge flow betweenness centrality: K4"""
+        G = nx.complete_graph(4)
+        b = edge_current_flow(G, normalized=False)
+        b_answer = dict.fromkeys(G.edges(), 0.75)
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+
+    def test_C4(self):
+        """Edge flow betweenness centrality: C4"""
+        G = nx.cycle_graph(4)
+        b = edge_current_flow(G, normalized=False)
+        b_answer = {(0, 1): 1.25, (0, 3): 1.25, (1, 2): 1.25, (2, 3): 1.25}
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+
+    def test_P4(self):
+        """Edge betweenness centrality: P4"""
+        G = nx.path_graph(4)
+        b = edge_current_flow(G, normalized=False)
+        b_answer = {(0, 1): 1.5, (1, 2): 2.0, (2, 3): 1.5}
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+
+
+@pytest.mark.parametrize(
+    "centrality_func",
+    (
+        nx.current_flow_betweenness_centrality,
+        nx.edge_current_flow_betweenness_centrality,
+        nx.approximate_current_flow_betweenness_centrality,
+    ),
+)
+def test_unconnected_graphs_betweenness_centrality(centrality_func):
+    G = nx.Graph([(1, 2), (3, 4)])
+    G.add_node(5)
+    with pytest.raises(nx.NetworkXError, match="Graph not connected"):
+        centrality_func(G)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py
new file mode 100644
index 00000000..7b1611b0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py
@@ -0,0 +1,147 @@
+import pytest
+
+pytest.importorskip("numpy")
+pytest.importorskip("scipy")
+
+import networkx as nx
+from networkx import edge_current_flow_betweenness_centrality as edge_current_flow
+from networkx import (
+    edge_current_flow_betweenness_centrality_subset as edge_current_flow_subset,
+)
+
+
+class TestFlowBetweennessCentrality:
+    def test_K4_normalized(self):
+        """Betweenness centrality: K4"""
+        G = nx.complete_graph(4)
+        b = nx.current_flow_betweenness_centrality_subset(
+            G, list(G), list(G), normalized=True
+        )
+        b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_K4(self):
+        """Betweenness centrality: K4"""
+        G = nx.complete_graph(4)
+        b = nx.current_flow_betweenness_centrality_subset(
+            G, list(G), list(G), normalized=True
+        )
+        b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        # test weighted network
+        G.add_edge(0, 1, weight=0.5, other=0.3)
+        b = nx.current_flow_betweenness_centrality_subset(
+            G, list(G), list(G), normalized=True, weight=None
+        )
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        b = nx.current_flow_betweenness_centrality_subset(
+            G, list(G), list(G), normalized=True
+        )
+        b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        b = nx.current_flow_betweenness_centrality_subset(
+            G, list(G), list(G), normalized=True, weight="other"
+        )
+        b_answer = nx.current_flow_betweenness_centrality(
+            G, normalized=True, weight="other"
+        )
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P4_normalized(self):
+        """Betweenness centrality: P4 normalized"""
+        G = nx.path_graph(4)
+        b = nx.current_flow_betweenness_centrality_subset(
+            G, list(G), list(G), normalized=True
+        )
+        b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P4(self):
+        """Betweenness centrality: P4"""
+        G = nx.path_graph(4)
+        b = nx.current_flow_betweenness_centrality_subset(
+            G, list(G), list(G), normalized=True
+        )
+        b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_star(self):
+        """Betweenness centrality: star"""
+        G = nx.Graph()
+        nx.add_star(G, ["a", "b", "c", "d"])
+        b = nx.current_flow_betweenness_centrality_subset(
+            G, list(G), list(G), normalized=True
+        )
+        b_answer = nx.current_flow_betweenness_centrality(G, normalized=True)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+
+# class TestWeightedFlowBetweennessCentrality():
+#     pass
+
+
+class TestEdgeFlowBetweennessCentrality:
+    def test_K4_normalized(self):
+        """Betweenness centrality: K4"""
+        G = nx.complete_graph(4)
+        b = edge_current_flow_subset(G, list(G), list(G), normalized=True)
+        b_answer = edge_current_flow(G, normalized=True)
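+        # An undirected edge may be keyed as (s, t) or (t, s), so look up both orientations.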
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+
+    def test_K4(self):
+        """Betweenness centrality: K4"""
+        G = nx.complete_graph(4)
+        b = edge_current_flow_subset(G, list(G), list(G), normalized=False)
+        b_answer = edge_current_flow(G, normalized=False)
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+        # test weighted network
+        G.add_edge(0, 1, weight=0.5, other=0.3)
+        b = edge_current_flow_subset(G, list(G), list(G), normalized=False, weight=None)
+        # weight is None => same as unweighted network
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+
+        b = edge_current_flow_subset(G, list(G), list(G), normalized=False)
+        b_answer = edge_current_flow(G, normalized=False)
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+
+        b = edge_current_flow_subset(
+            G, list(G), list(G), normalized=False, weight="other"
+        )
+        b_answer = edge_current_flow(G, normalized=False, weight="other")
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+
+    def test_C4(self):
+        """Edge betweenness centrality: C4"""
+        G = nx.cycle_graph(4)
+        b = edge_current_flow_subset(G, list(G), list(G), normalized=True)
+        b_answer = edge_current_flow(G, normalized=True)
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
+
+    def test_P4(self):
+        """Edge betweenness centrality: P4"""
+        G = nx.path_graph(4)
+        b = edge_current_flow_subset(G, list(G), list(G), normalized=True)
+        b_answer = edge_current_flow(G, normalized=True)
+        for (s, t), v1 in b_answer.items():
+            v2 = b.get((s, t), b.get((t, s)))
+            assert v1 == pytest.approx(v2, abs=1e-7)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py
new file mode 100644
index 00000000..2528d622
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py
@@ -0,0 +1,43 @@
+import pytest
+
+pytest.importorskip("numpy")
+pytest.importorskip("scipy")
+
+import networkx as nx
+
+
+class TestFlowClosenessCentrality:
+    def test_K4(self):
+        """Closeness centrality: K4"""
+        G = nx.complete_graph(4)
+        b = nx.current_flow_closeness_centrality(G)
+        b_answer = {0: 2.0 / 3, 1: 2.0 / 3, 2: 2.0 / 3, 3: 2.0 / 3}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P4(self):
+        """Closeness centrality: P4"""
+        G = nx.path_graph(4)
+        b = nx.current_flow_closeness_centrality(G)
+        b_answer = {0: 1.0 / 6, 1: 1.0 / 4, 2: 1.0 / 4, 3: 1.0 / 6}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_star(self):
+        """Closeness centrality: star"""
+        G = nx.Graph()
+        nx.add_star(G, ["a", "b", "c", "d"])
+        b = nx.current_flow_closeness_centrality(G)
+        b_answer = {"a": 1.0 / 3, "b": 0.6 / 3, "c": 0.6 / 3, "d": 0.6 / 3}
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_current_flow_closeness_centrality_not_connected(self):
+        G = nx.Graph()
+        G.add_nodes_from([1, 2, 3])
+        with pytest.raises(nx.NetworkXError):
+            nx.current_flow_closeness_centrality(G)
+
+
+class TestWeightedFlowClosenessCentrality:
+    pass
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py
new file mode 100644
index 00000000..e39aa3b1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py
@@ -0,0 +1,144 @@
+"""
+Unit tests for degree centrality.
+"""
+
+import pytest
+
+import networkx as nx
+
+
+class TestDegreeCentrality:
+    def setup_method(self):
+        self.K = nx.krackhardt_kite_graph()
+        self.P3 = nx.path_graph(3)
+        self.K5 = nx.complete_graph(5)
+
+        F = nx.Graph()  # Florentine families
+        F.add_edge("Acciaiuoli", "Medici")
+        F.add_edge("Castellani", "Peruzzi")
+        F.add_edge("Castellani", "Strozzi")
+        F.add_edge("Castellani", "Barbadori")
+        F.add_edge("Medici", "Barbadori")
+        F.add_edge("Medici", "Ridolfi")
+        F.add_edge("Medici", "Tornabuoni")
+        F.add_edge("Medici", "Albizzi")
+        F.add_edge("Medici", "Salviati")
+        F.add_edge("Salviati", "Pazzi")
+        F.add_edge("Peruzzi", "Strozzi")
+        F.add_edge("Peruzzi", "Bischeri")
+        F.add_edge("Strozzi", "Ridolfi")
+        F.add_edge("Strozzi", "Bischeri")
+        F.add_edge("Ridolfi", "Tornabuoni")
+        F.add_edge("Tornabuoni", "Guadagni")
+        F.add_edge("Albizzi", "Ginori")
+        F.add_edge("Albizzi", "Guadagni")
+        F.add_edge("Bischeri", "Guadagni")
+        F.add_edge("Guadagni", "Lamberteschi")
+        self.F = F
+
+        G = nx.DiGraph()
+        G.add_edge(0, 5)
+        G.add_edge(1, 5)
+        G.add_edge(2, 5)
+        G.add_edge(3, 5)
+        G.add_edge(4, 5)
+        G.add_edge(5, 6)
+        G.add_edge(5, 7)
+        G.add_edge(5, 8)
+        self.G = G
+
+    def test_degree_centrality_1(self):
+        d = nx.degree_centrality(self.K5)
+        exact = dict(zip(range(5), [1] * 5))
+        for n, dc in d.items():
+            assert exact[n] == pytest.approx(dc, abs=1e-7)
+
+    def test_degree_centrality_2(self):
+        d = nx.degree_centrality(self.P3)
+        exact = {0: 0.5, 1: 1, 2: 0.5}
+        for n, dc in d.items():
+            assert exact[n] == pytest.approx(dc, abs=1e-7)
+
+    def test_degree_centrality_3(self):
+        d = nx.degree_centrality(self.K)
+        exact = {
+            0: 0.444,
+            1: 0.444,
+            2: 0.333,
+            3: 0.667,
+            4: 0.333,
+            5: 0.556,
+            6: 0.556,
+            7: 0.333,
+            8: 0.222,
+            9: 0.111,
+        }
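+        # Expected values are given to 3 decimals, so round the computed centrality before comparing.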
+        for n, dc in d.items():
+            assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7)
+
+    def test_degree_centrality_4(self):
+        d = nx.degree_centrality(self.F)
+        names = sorted(self.F.nodes())
+        dcs = [
+            0.071,
+            0.214,
+            0.143,
+            0.214,
+            0.214,
+            0.071,
+            0.286,
+            0.071,
+            0.429,
+            0.071,
+            0.214,
+            0.214,
+            0.143,
+            0.286,
+            0.214,
+        ]
+        exact = dict(zip(names, dcs))
+        for n, dc in d.items():
+            assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7)
+
+    def test_indegree_centrality(self):
+        d = nx.in_degree_centrality(self.G)
+        exact = {
+            0: 0.0,
+            1: 0.0,
+            2: 0.0,
+            3: 0.0,
+            4: 0.0,
+            5: 0.625,
+            6: 0.125,
+            7: 0.125,
+            8: 0.125,
+        }
+        for n, dc in d.items():
+            assert exact[n] == pytest.approx(dc, abs=1e-7)
+
+    def test_outdegree_centrality(self):
+        d = nx.out_degree_centrality(self.G)
+        exact = {
+            0: 0.125,
+            1: 0.125,
+            2: 0.125,
+            3: 0.125,
+            4: 0.125,
+            5: 0.375,
+            6: 0.0,
+            7: 0.0,
+            8: 0.0,
+        }
+        for n, dc in d.items():
+            assert exact[n] == pytest.approx(dc, abs=1e-7)
+
+    def test_small_graph_centrality(self):
+        G = nx.empty_graph(create_using=nx.DiGraph)
+        assert {} == nx.degree_centrality(G)
+        assert {} == nx.out_degree_centrality(G)
+        assert {} == nx.in_degree_centrality(G)
+
+        G = nx.empty_graph(1, create_using=nx.DiGraph)
+        assert {0: 1} == nx.degree_centrality(G)
+        assert {0: 1} == nx.out_degree_centrality(G)
+        assert {0: 1} == nx.in_degree_centrality(G)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py
new file mode 100644
index 00000000..05de1c43
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py
@@ -0,0 +1,73 @@
+import networkx as nx
+
+
+def small_ego_G():
+    """The sample network from https://arxiv.org/pdf/1310.6753v1.pdf"""
+    edges = [
+        ("a", "b"),
+        ("a", "c"),
+        ("b", "c"),
+        ("b", "d"),
+        ("b", "e"),
+        ("b", "f"),
+        ("c", "d"),
+        ("c", "f"),
+        ("c", "h"),
+        ("d", "f"),
+        ("e", "f"),
+        ("f", "h"),
+        ("h", "j"),
+        ("h", "k"),
+        ("i", "j"),
+        ("i", "k"),
+        ("j", "k"),
+        ("u", "a"),
+        ("u", "b"),
+        ("u", "c"),
+        ("u", "d"),
+        ("u", "e"),
+        ("u", "f"),
+        ("u", "g"),
+        ("u", "h"),
+        ("u", "i"),
+        ("u", "j"),
+        ("u", "k"),
+    ]
+    G = nx.Graph()
+    G.add_edges_from(edges)
+
+    return G
+
+
+class TestDispersion:
+    def test_article(self):
+        """our algorithm matches article's"""
+        G = small_ego_G()
+        disp_uh = nx.dispersion(G, "u", "h", normalized=False)
+        disp_ub = nx.dispersion(G, "u", "b", normalized=False)
+        assert disp_uh == 4
+        assert disp_ub == 1
+
+    def test_results_length(self):
+        """there is a result for every node"""
+        G = small_ego_G()
+        disp = nx.dispersion(G)
+        disp_Gu = nx.dispersion(G, "u")
+        disp_uv = nx.dispersion(G, "u", "h")
+        assert len(disp) == len(G)
+        assert len(disp_Gu) == len(G) - 1
+        assert isinstance(disp_uv, float)
+
+    def test_dispersion_v_only(self):
+        G = small_ego_G()
+        disp_G_h = nx.dispersion(G, v="h", normalized=False)
+        disp_G_h_normalized = nx.dispersion(G, v="h", normalized=True)
+        assert disp_G_h == {"c": 0, "f": 0, "j": 0, "k": 0, "u": 4}
+        assert disp_G_h_normalized == {"c": 0.0, "f": 0.0, "j": 0.0, "k": 0.0, "u": 1.0}
+
+    def test_impossible_things(self):
+        G = nx.karate_club_graph()
+        disp = nx.dispersion(G)
+        for u in disp:
+            for v in disp[u]:
+                assert disp[u][v] >= 0
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py
new file mode 100644
index 00000000..cfc9ee79
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py
@@ -0,0 +1,187 @@
+import math
+
+import pytest
+
+np = pytest.importorskip("numpy")
+pytest.importorskip("scipy")
+
+
+import networkx as nx
+
+
+class TestEigenvectorCentrality:
+    def test_K5(self):
+        """Eigenvector centrality: K5"""
+        G = nx.complete_graph(5)
+        b = nx.eigenvector_centrality(G)
+        v = math.sqrt(1 / 5.0)
+        b_answer = dict.fromkeys(G, v)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        nstart = {n: 1 for n in G}
+        b = nx.eigenvector_centrality(G, nstart=nstart)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+        b = nx.eigenvector_centrality_numpy(G)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_P3(self):
+        """Eigenvector centrality: P3"""
+        G = nx.path_graph(3)
+        b_answer = {0: 0.5, 1: 0.7071, 2: 0.5}
+        b = nx.eigenvector_centrality_numpy(G)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+        b = nx.eigenvector_centrality(G)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+    def test_P3_unweighted(self):
+        """Eigenvector centrality: P3"""
+        G = nx.path_graph(3)
+        b_answer = {0: 0.5, 1: 0.7071, 2: 0.5}
+        b = nx.eigenvector_centrality_numpy(G, weight=None)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+    def test_maxiter(self):
+        with pytest.raises(nx.PowerIterationFailedConvergence):
+            G = nx.path_graph(3)
+            nx.eigenvector_centrality(G, max_iter=0)
+
+
+class TestEigenvectorCentralityDirected:
+    @classmethod
+    def setup_class(cls):
+        G = nx.DiGraph()
+
+        edges = [
+            (1, 2),
+            (1, 3),
+            (2, 4),
+            (3, 2),
+            (3, 5),
+            (4, 2),
+            (4, 5),
+            (4, 6),
+            (5, 6),
+            (5, 7),
+            (5, 8),
+            (6, 8),
+            (7, 1),
+            (7, 5),
+            (7, 8),
+            (8, 6),
+            (8, 7),
+        ]
+
+        G.add_edges_from(edges, weight=2.0)
+        cls.G = G.reverse()
+        cls.G.evc = [
+            0.25368793,
+            0.19576478,
+            0.32817092,
+            0.40430835,
+            0.48199885,
+            0.15724483,
+            0.51346196,
+            0.32475403,
+        ]
+
+        H = nx.DiGraph()
+
+        edges = [
+            (1, 2),
+            (1, 3),
+            (2, 4),
+            (3, 2),
+            (3, 5),
+            (4, 2),
+            (4, 5),
+            (4, 6),
+            (5, 6),
+            (5, 7),
+            (5, 8),
+            (6, 8),
+            (7, 1),
+            (7, 5),
+            (7, 8),
+            (8, 6),
+            (8, 7),
+        ]
+
+        H.add_edges_from(edges)
+        cls.H = H.reverse()
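+        # Uniform edge weights only rescale the adjacency matrix, which leaves its eigenvectors unchanged, so H shares G's expected values.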
+        cls.H.evc = [
+            0.25368793,
+            0.19576478,
+            0.32817092,
+            0.40430835,
+            0.48199885,
+            0.15724483,
+            0.51346196,
+            0.32475403,
+        ]
+
+    def test_eigenvector_centrality_weighted(self):
+        G = self.G
+        p = nx.eigenvector_centrality(G)
+        for a, b in zip(list(p.values()), self.G.evc):
+            assert a == pytest.approx(b, abs=1e-4)
+
+    def test_eigenvector_centrality_weighted_numpy(self):
+        G = self.G
+        p = nx.eigenvector_centrality_numpy(G)
+        for a, b in zip(list(p.values()), self.G.evc):
+            assert a == pytest.approx(b, abs=1e-7)
+
+    def test_eigenvector_centrality_unweighted(self):
+        G = self.H
+        p = nx.eigenvector_centrality(G)
+        for a, b in zip(list(p.values()), self.H.evc):
+            assert a == pytest.approx(b, abs=1e-4)
+
+    def test_eigenvector_centrality_unweighted_numpy(self):
+        G = self.H
+        p = nx.eigenvector_centrality_numpy(G)
+        for a, b in zip(list(p.values()), self.H.evc):
+            assert a == pytest.approx(b, abs=1e-7)
+
+
+class TestEigenvectorCentralityExceptions:
+    def test_multigraph(self):
+        with pytest.raises(nx.NetworkXException):
+            nx.eigenvector_centrality(nx.MultiGraph())
+
+    def test_multigraph_numpy(self):
+        with pytest.raises(nx.NetworkXException):
+            nx.eigenvector_centrality_numpy(nx.MultiGraph())
+
+    def test_null(self):
+        with pytest.raises(nx.NetworkXException):
+            nx.eigenvector_centrality(nx.Graph())
+
+    def test_null_numpy(self):
+        with pytest.raises(nx.NetworkXException):
+            nx.eigenvector_centrality_numpy(nx.Graph())
+
+    @pytest.mark.parametrize(
+        "G",
+        [
+            nx.empty_graph(3),
+            nx.DiGraph([(0, 1), (1, 2)]),
+        ],
+    )
+    def test_disconnected_numpy(self, G):
+        msg = "does not give consistent results for disconnected"
+        with pytest.raises(nx.AmbiguousSolution, match=msg):
+            nx.eigenvector_centrality_numpy(G)
+
+    def test_zero_nstart(self):
+        G = nx.Graph([(1, 2), (1, 3), (2, 3)])
+        with pytest.raises(
+            nx.NetworkXException, match="initial vector cannot have all zero values"
+        ):
+            nx.eigenvector_centrality(G, nstart={v: 0 for v in G})
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_group.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_group.py
new file mode 100644
index 00000000..82343f28
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_group.py
@@ -0,0 +1,277 @@
+"""
+Tests for Group Centrality Measures
+"""
+
+import pytest
+
+import networkx as nx
+
+
+class TestGroupBetweennessCentrality:
+    def test_group_betweenness_single_node(self):
+        """
+        Group betweenness centrality for single node group
+        """
+        G = nx.path_graph(5)
+        C = [1]
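+        # Node 1 lies on the shortest paths of pairs (0, 2), (0, 3) and (0, 4), giving 3.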
+        b = nx.group_betweenness_centrality(
+            G, C, weight=None, normalized=False, endpoints=False
+        )
+        b_answer = 3.0
+        assert b == b_answer
+
+    def test_group_betweenness_with_endpoints(self):
+        """
+        Group betweenness centrality for single node group
+        """
+        G = nx.path_graph(5)
+        C = [1]
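+        # endpoints=True also counts the four pairs with node 1 as an endpoint: 3 + 4 = 7.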
+        b = nx.group_betweenness_centrality(
+            G, C, weight=None, normalized=False, endpoints=True
+        )
+        b_answer = 7.0
+        assert b == b_answer
+
+    def test_group_betweenness_normalized(self):
+        """
+        Group betweenness centrality for group with more than
+        1 node and normalized
+        """
+        G = nx.path_graph(5)
+        C = [1, 3]
+        b = nx.group_betweenness_centrality(
+            G, C, weight=None, normalized=True, endpoints=False
+        )
+        b_answer = 1.0
+        assert b == b_answer
+
+    def test_two_group_betweenness_value_zero(self):
+        """
+        Group betweenness centrality for two groups, one of which has value 0
+        """
+        G = nx.cycle_graph(7)
+        C = [[0, 1, 6], [0, 1, 5]]
+        b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False)
+        b_answer = [0.0, 3.0]
+        assert b == b_answer
+
+    def test_group_betweenness_value_zero(self):
+        """
+        Group betweenness centrality value of 0
+        """
+        G = nx.cycle_graph(6)
+        C = [0, 1, 5]
+        b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False)
+        b_answer = 0.0
+        assert b == b_answer
+
+    def test_group_betweenness_disconnected_graph(self):
+        """
+        Group betweenness centrality in a disconnected graph
+        """
+        G = nx.path_graph(5)
+        G.remove_edge(0, 1)
+        C = [1]
+        b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False)
+        b_answer = 0.0
+        assert b == b_answer
+
+    def test_group_betweenness_node_not_in_graph(self):
+        """
+        Node(s) in C not in graph, raises NodeNotFound exception
+        """
+        with pytest.raises(nx.NodeNotFound):
+            nx.group_betweenness_centrality(nx.path_graph(5), [4, 7, 8])
+
+    def test_group_betweenness_directed_weighted(self):
+        """
+        Group betweenness centrality in a directed and weighted graph
+        """
+        G = nx.DiGraph()
+        G.add_edge(1, 0, weight=1)
+        G.add_edge(0, 2, weight=2)
+        G.add_edge(1, 2, weight=3)
+        G.add_edge(3, 1, weight=4)
+        G.add_edge(2, 3, weight=1)
+        G.add_edge(4, 3, weight=6)
+        G.add_edge(2, 4, weight=7)
+        C = [1, 2]
+        b = nx.group_betweenness_centrality(G, C, weight="weight", normalized=False)
+        b_answer = 5.0
+        assert b == b_answer
+
+
+class TestProminentGroup:
+    np = pytest.importorskip("numpy")
+    pd = pytest.importorskip("pandas")
+
+    def test_prominent_group_single_node(self):
+        """
+        Prominent group for single node
+        """
+        G = nx.path_graph(5)
+        k = 1
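+        # Node 2 is the midpoint of the path and lies on the shortest paths of four node pairs.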
+        b, g = nx.prominent_group(G, k, normalized=False, endpoints=False)
+        b_answer, g_answer = 4.0, [2]
+        assert b == b_answer and g == g_answer
+
+    def test_prominent_group_with_c(self):
+        """
+        Prominent group with some nodes excluded via C
+        """
+        G = nx.path_graph(5)
+        k = 1
+        b, g = nx.prominent_group(G, k, normalized=False, C=[2])
+        b_answer, g_answer = 3.0, [1]
+        assert b == b_answer and g == g_answer
+
+    def test_prominent_group_normalized_endpoints(self):
+        """
+        Prominent group with normalized result, with endpoints
+        """
+        G = nx.cycle_graph(7)
+        k = 2
+        b, g = nx.prominent_group(G, k, normalized=True, endpoints=True)
+        b_answer, g_answer = 1.7, [2, 5]
+        assert b == b_answer and g == g_answer
+
+    def test_prominent_group_disconnected_graph(self):
+        """
+        Prominent group of disconnected graph
+        """
+        G = nx.path_graph(6)
+        G.remove_edge(0, 1)
+        k = 1
+        b, g = nx.prominent_group(G, k, weight=None, normalized=False)
+        b_answer, g_answer = 4.0, [3]
+        assert b == b_answer and g == g_answer
+
+    def test_prominent_group_node_not_in_graph(self):
+        """
+        Node(s) in C not in graph, raises NodeNotFound exception
+        """
+        with pytest.raises(nx.NodeNotFound):
+            nx.prominent_group(nx.path_graph(5), 1, C=[10])
+
+    def test_prominent_group_directed_weighted(self):
+        """
+        Prominent group in a directed and weighted graph
+        """
+        G = nx.DiGraph()
+        G.add_edge(1, 0, weight=1)
+        G.add_edge(0, 2, weight=2)
+        G.add_edge(1, 2, weight=3)
+        G.add_edge(3, 1, weight=4)
+        G.add_edge(2, 3, weight=1)
+        G.add_edge(4, 3, weight=6)
+        G.add_edge(2, 4, weight=7)
+        k = 2
+        b, g = nx.prominent_group(G, k, weight="weight", normalized=False)
+        b_answer, g_answer = 5.0, [1, 2]
+        assert b == b_answer and g == g_answer
+
+    def test_prominent_group_greedy_algorithm(self):
+        """
+        Prominent group computed with the greedy algorithm
+        """
+        G = nx.cycle_graph(7)
+        k = 2
+        b, g = nx.prominent_group(G, k, normalized=True, endpoints=True, greedy=True)
+        b_answer, g_answer = 1.7, [6, 3]
+        assert b == b_answer and g == g_answer
+
+
+class TestGroupClosenessCentrality:
+    def test_group_closeness_single_node(self):
+        """
+        Group closeness centrality for a single node group
+        """
+        G = nx.path_graph(5)
+        c = nx.group_closeness_centrality(G, [1])
+        c_answer = nx.closeness_centrality(G, 1)
+        assert c == c_answer
+
+    def test_group_closeness_disconnected(self):
+        """
+        Group closeness centrality for a disconnected graph
+        """
+        G = nx.Graph()
+        G.add_nodes_from([1, 2, 3, 4])
+        c = nx.group_closeness_centrality(G, [1, 2])
+        c_answer = 0
+        assert c == c_answer
+
+    def test_group_closeness_multiple_node(self):
+        """
+        Group closeness centrality for a group with more than
+        1 node
+        """
+        G = nx.path_graph(4)
+        c = nx.group_closeness_centrality(G, [1, 2])
+        c_answer = 1
+        assert c == c_answer
+
+    def test_group_closeness_node_not_in_graph(self):
+        """
+        Node(s) in S not in graph, raises NodeNotFound exception
+        """
+        with pytest.raises(nx.NodeNotFound):
+            nx.group_closeness_centrality(nx.path_graph(5), [6, 7, 8])
+
+
+class TestGroupDegreeCentrality:
+    def test_group_degree_centrality_single_node(self):
+        """
+        Group degree centrality for a single node group
+        """
+        G = nx.path_graph(4)
+        d = nx.group_degree_centrality(G, [1])
+        d_answer = nx.degree_centrality(G)[1]
+        assert d == d_answer
+
+    def test_group_degree_centrality_multiple_node(self):
+        """
+        Group degree centrality for group with more than
+        1 node
+        """
+        G = nx.Graph()
+        G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8])
+        G.add_edges_from(
+            [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)]
+        )
+        d = nx.group_degree_centrality(G, [1, 2])
+        d_answer = 1
+        assert d == d_answer
+
+    def test_group_in_degree_centrality(self):
+        """
+        Group in-degree centrality in a DiGraph
+        """
+        G = nx.DiGraph()
+        G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8])
+        G.add_edges_from(
+            [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)]
+        )
+        d = nx.group_in_degree_centrality(G, [1, 2])
+        d_answer = 0
+        assert d == d_answer
+
+    def test_group_out_degree_centrality(self):
+        """
+        Group out-degree centrality in a DiGraph
+        """
+        G = nx.DiGraph()
+        G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8])
+        G.add_edges_from(
+            [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)]
+        )
+        d = nx.group_out_degree_centrality(G, [1, 2])
+        d_answer = 1
+        assert d == d_answer
+
+    def test_group_degree_centrality_node_not_in_graph(self):
+        """
+        Node(s) in S not in graph, raises NetworkXError
+        """
+        with pytest.raises(nx.NetworkXError):
+            nx.group_degree_centrality(nx.path_graph(5), [6, 7, 8])
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py
new file mode 100644
index 00000000..4b3dc4ac
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py
@@ -0,0 +1,122 @@
+"""
+Tests for harmonic centrality.
+"""
+
+import pytest
+
+import networkx as nx
+from networkx.algorithms.centrality import harmonic_centrality
+
+
+class TestHarmonicCentrality:
+    @classmethod
+    def setup_class(cls):
+        cls.P3 = nx.path_graph(3)
+        cls.P4 = nx.path_graph(4)
+        cls.K5 = nx.complete_graph(5)
+
+        cls.C4 = nx.cycle_graph(4)
+        cls.C4_directed = nx.cycle_graph(4, create_using=nx.DiGraph)
+
+        cls.C5 = nx.cycle_graph(5)
+
+        cls.T = nx.balanced_tree(r=2, h=2)
+
+        cls.Gb = nx.DiGraph()
+        cls.Gb.add_edges_from([(0, 1), (0, 2), (0, 4), (2, 1), (2, 3), (4, 3)])
+
+    def test_p3_harmonic(self):
+        c = harmonic_centrality(self.P3)
+        d = {0: 1.5, 1: 2, 2: 1.5}
+        for n in sorted(self.P3):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_p4_harmonic(self):
+        c = harmonic_centrality(self.P4)
+        d = {0: 1.8333333, 1: 2.5, 2: 2.5, 3: 1.8333333}
+        for n in sorted(self.P4):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_clique_complete(self):
+        c = harmonic_centrality(self.K5)
+        d = {0: 4, 1: 4, 2: 4, 3: 4, 4: 4}
+        for n in sorted(self.K5):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_cycle_C4(self):
+        c = harmonic_centrality(self.C4)
+        d = {0: 2.5, 1: 2.5, 2: 2.5, 3: 2.5}
+        for n in sorted(self.C4):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_cycle_C5(self):
+        c = harmonic_centrality(self.C5)
+        d = {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}
+        for n in sorted(self.C5):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_bal_tree(self):
+        c = harmonic_centrality(self.T)
+        d = {0: 4.0, 1: 4.1666, 2: 4.1666, 3: 2.8333, 4: 2.8333, 5: 2.8333, 6: 2.8333}
+        for n in sorted(self.T):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_exampleGraph(self):
+        c = harmonic_centrality(self.Gb)
+        d = {0: 0, 1: 2, 2: 1, 3: 2.5, 4: 1}
+        for n in sorted(self.Gb):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_weighted_harmonic(self):
+        XG = nx.DiGraph()
+        XG.add_weighted_edges_from(
+            [
+                ("a", "b", 10),
+                ("d", "c", 5),
+                ("a", "c", 1),
+                ("e", "f", 2),
+                ("f", "c", 1),
+                ("a", "f", 3),
+            ]
+        )
+        c = harmonic_centrality(XG, distance="weight")
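+        # e.g. for "c": 1/d(a,c) + 1/d(d,c) + 1/d(e,c) + 1/d(f,c) = 1/1 + 1/5 + 1/3 + 1/1 = 2.533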
+        d = {"a": 0, "b": 0.1, "c": 2.533, "d": 0, "e": 0, "f": 0.83333}
+        for n in sorted(XG):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_empty(self):
+        G = nx.DiGraph()
+        c = harmonic_centrality(G, distance="weight")
+        d = {}
+        assert c == d
+
+    def test_singleton(self):
+        G = nx.DiGraph()
+        G.add_node(0)
+        c = harmonic_centrality(G, distance="weight")
+        d = {0: 0}
+        assert c == d
+
+    def test_cycle_c4_directed(self):
+        c = harmonic_centrality(self.C4_directed, nbunch=[0, 1], sources=[1, 2])
+        d = {0: 0.833, 1: 0.333}
+        for n in [0, 1]:
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_cycle_c4_directed_subset(self):
+        c = harmonic_centrality(self.C4_directed, nbunch=[0, 1])
+        d = 1.833
+        for n in [0, 1]:
+            assert c[n] == pytest.approx(d, abs=1e-3)
+
+    def test_p3_harmonic_subset(self):
+        c = harmonic_centrality(self.P3, sources=[0, 1])
+        d = {0: 1, 1: 1, 2: 1.5}
+        for n in self.P3:
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_p4_harmonic_subset(self):
+        c = harmonic_centrality(self.P4, nbunch=[2, 3], sources=[0, 1])
+        d = {2: 1.5, 3: 0.8333333}
+        for n in [2, 3]:
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py
new file mode 100644
index 00000000..0927f00b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py
@@ -0,0 +1,345 @@
+import math
+
+import pytest
+
+import networkx as nx
+
+
+class TestKatzCentrality:
+    def test_K5(self):
+        """Katz centrality: K5"""
+        G = nx.complete_graph(5)
+        alpha = 0.1
+        b = nx.katz_centrality(G, alpha)
+        v = math.sqrt(1 / 5.0)
+        b_answer = dict.fromkeys(G, v)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        nstart = {n: 1 for n in G}
+        b = nx.katz_centrality(G, alpha, nstart=nstart)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+
+    def test_P3(self):
+        """Katz centrality: P3"""
+        alpha = 0.1
+        G = nx.path_graph(3)
+        b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
+        b = nx.katz_centrality(G, alpha)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+    def test_maxiter(self):
+        with pytest.raises(nx.PowerIterationFailedConvergence):
+            nx.katz_centrality(nx.path_graph(3), 0.1, max_iter=0)
+
+    def test_beta_as_scalar(self):
+        alpha = 0.1
+        beta = 0.1
+        b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
+        G = nx.path_graph(3)
+        b = nx.katz_centrality(G, alpha, beta)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+    def test_beta_as_dict(self):
+        alpha = 0.1
+        beta = {0: 1.0, 1: 1.0, 2: 1.0}
+        b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
+        G = nx.path_graph(3)
+        b = nx.katz_centrality(G, alpha, beta)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+    def test_multiple_alpha(self):
+        alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
+        for alpha in alpha_list:
+            b_answer = {
+                0.1: {
+                    0: 0.5598852584152165,
+                    1: 0.6107839182711449,
+                    2: 0.5598852584152162,
+                },
+                0.2: {
+                    0: 0.5454545454545454,
+                    1: 0.6363636363636365,
+                    2: 0.5454545454545454,
+                },
+                0.3: {
+                    0: 0.5333964609104419,
+                    1: 0.6564879518897746,
+                    2: 0.5333964609104419,
+                },
+                0.4: {
+                    0: 0.5232045649263551,
+                    1: 0.6726915834767423,
+                    2: 0.5232045649263551,
+                },
+                0.5: {
+                    0: 0.5144957746691622,
+                    1: 0.6859943117075809,
+                    2: 0.5144957746691622,
+                },
+                0.6: {
+                    0: 0.5069794004195823,
+                    1: 0.6970966755769258,
+                    2: 0.5069794004195823,
+                },
+            }
+            G = nx.path_graph(3)
+            b = nx.katz_centrality(G, alpha)
+            for n in sorted(G):
+                assert b[n] == pytest.approx(b_answer[alpha][n], abs=1e-4)
+
+    def test_multigraph(self):
+        with pytest.raises(nx.NetworkXException):
+            nx.katz_centrality(nx.MultiGraph(), 0.1)
+
+    def test_empty(self):
+        e = nx.katz_centrality(nx.Graph(), 0.1)
+        assert e == {}
+
+    def test_bad_beta(self):
+        with pytest.raises(nx.NetworkXException):
+            G = nx.Graph([(0, 1)])
+            beta = {0: 77}
+            nx.katz_centrality(G, 0.1, beta=beta)
+
+    def test_bad_beta_number(self):
+        with pytest.raises(nx.NetworkXException):
+            G = nx.Graph([(0, 1)])
+            nx.katz_centrality(G, 0.1, beta="foo")
+
+
+class TestKatzCentralityNumpy:
+    @classmethod
+    def setup_class(cls):
+        global np
+        np = pytest.importorskip("numpy")
+        pytest.importorskip("scipy")
+
+    def test_K5(self):
+        """Katz centrality: K5"""
+        G = nx.complete_graph(5)
+        alpha = 0.1
+        b = nx.katz_centrality_numpy(G, alpha)
+        v = math.sqrt(1 / 5.0)
+        b_answer = dict.fromkeys(G, v)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        b = nx.eigenvector_centrality_numpy(G)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_P3(self):
+        """Katz centrality: P3"""
+        alpha = 0.1
+        G = nx.path_graph(3)
+        b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
+        b = nx.katz_centrality_numpy(G, alpha)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+    def test_beta_as_scalar(self):
+        alpha = 0.1
+        beta = 0.1
+        b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
+        G = nx.path_graph(3)
+        b = nx.katz_centrality_numpy(G, alpha, beta)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+    def test_beta_as_dict(self):
+        alpha = 0.1
+        beta = {0: 1.0, 1: 1.0, 2: 1.0}
+        b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
+        G = nx.path_graph(3)
+        b = nx.katz_centrality_numpy(G, alpha, beta)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+    def test_multiple_alpha(self):
+        alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
+        for alpha in alpha_list:
+            b_answer = {
+                0.1: {
+                    0: 0.5598852584152165,
+                    1: 0.6107839182711449,
+                    2: 0.5598852584152162,
+                },
+                0.2: {
+                    0: 0.5454545454545454,
+                    1: 0.6363636363636365,
+                    2: 0.5454545454545454,
+                },
+                0.3: {
+                    0: 0.5333964609104419,
+                    1: 0.6564879518897746,
+                    2: 0.5333964609104419,
+                },
+                0.4: {
+                    0: 0.5232045649263551,
+                    1: 0.6726915834767423,
+                    2: 0.5232045649263551,
+                },
+                0.5: {
+                    0: 0.5144957746691622,
+                    1: 0.6859943117075809,
+                    2: 0.5144957746691622,
+                },
+                0.6: {
+                    0: 0.5069794004195823,
+                    1: 0.6970966755769258,
+                    2: 0.5069794004195823,
+                },
+            }
+            G = nx.path_graph(3)
+            b = nx.katz_centrality_numpy(G, alpha)
+            for n in sorted(G):
+                assert b[n] == pytest.approx(b_answer[alpha][n], abs=1e-4)
+
+    def test_multigraph(self):
+        with pytest.raises(nx.NetworkXException):
+            nx.katz_centrality_numpy(nx.MultiGraph(), 0.1)
+
+    def test_empty(self):
+        e = nx.katz_centrality(nx.Graph(), 0.1)
+        assert e == {}
+
+    def test_bad_beta(self):
+        with pytest.raises(nx.NetworkXException):
+            G = nx.Graph([(0, 1)])
+            beta = {0: 77}
+            nx.katz_centrality_numpy(G, 0.1, beta=beta)
+
+    def test_bad_beta_number(self):
+        with pytest.raises(nx.NetworkXException):
+            G = nx.Graph([(0, 1)])
+            nx.katz_centrality_numpy(G, 0.1, beta="foo")
+
+    def test_K5_unweighted(self):
+        """Katz centrality: K5"""
+        G = nx.complete_graph(5)
+        alpha = 0.1
+        b = nx.katz_centrality(G, alpha, weight=None)
+        v = math.sqrt(1 / 5.0)
+        b_answer = dict.fromkeys(G, v)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        b = nx.eigenvector_centrality_numpy(G, weight=None)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_P3_unweighted(self):
+        """Katz centrality: P3"""
+        alpha = 0.1
+        G = nx.path_graph(3)
+        b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
+        b = nx.katz_centrality_numpy(G, alpha, weight=None)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+
+class TestKatzCentralityDirected:
+    @classmethod
+    def setup_class(cls):
+        G = nx.DiGraph()
+        edges = [
+            (1, 2),
+            (1, 3),
+            (2, 4),
+            (3, 2),
+            (3, 5),
+            (4, 2),
+            (4, 5),
+            (4, 6),
+            (5, 6),
+            (5, 7),
+            (5, 8),
+            (6, 8),
+            (7, 1),
+            (7, 5),
+            (7, 8),
+            (8, 6),
+            (8, 7),
+        ]
+        G.add_edges_from(edges, weight=2.0)
+        cls.G = G.reverse()
+        cls.G.alpha = 0.1
+        cls.G.evc = [
+            0.3289589783189635,
+            0.2832077296243516,
+            0.3425906003685471,
+            0.3970420865198392,
+            0.41074871061646284,
+            0.272257430756461,
+            0.4201989685435462,
+            0.34229059218038554,
+        ]
+
+        H = nx.DiGraph(edges)
+        # Note: cls.H is another reversed copy of the weighted graph G,
+        # not of the unweighted H built above, so both fixtures carry the
+        # same weight=2.0 edges and share the same expected values.
+        cls.H = G.reverse()
+        cls.H.alpha = 0.1
+        cls.H.evc = [
+            0.3289589783189635,
+            0.2832077296243516,
+            0.3425906003685471,
+            0.3970420865198392,
+            0.41074871061646284,
+            0.272257430756461,
+            0.4201989685435462,
+            0.34229059218038554,
+        ]
+
+    def test_katz_centrality_weighted(self):
+        G = self.G
+        alpha = self.G.alpha
+        p = nx.katz_centrality(G, alpha, weight="weight")
+        for a, b in zip(list(p.values()), self.G.evc):
+            assert a == pytest.approx(b, abs=1e-7)
+
+    def test_katz_centrality_unweighted(self):
+        H = self.H
+        alpha = self.H.alpha
+        p = nx.katz_centrality(H, alpha, weight="weight")
+        for a, b in zip(list(p.values()), self.H.evc):
+            assert a == pytest.approx(b, abs=1e-7)
+
+
+class TestKatzCentralityDirectedNumpy(TestKatzCentralityDirected):
+    @classmethod
+    def setup_class(cls):
+        global np
+        np = pytest.importorskip("numpy")
+        pytest.importorskip("scipy")
+        super().setup_class()
+
+    def test_katz_centrality_weighted(self):
+        G = self.G
+        alpha = self.G.alpha
+        p = nx.katz_centrality_numpy(G, alpha, weight="weight")
+        for a, b in zip(list(p.values()), self.G.evc):
+            assert a == pytest.approx(b, abs=1e-7)
+
+    def test_katz_centrality_unweighted(self):
+        H = self.H
+        alpha = self.H.alpha
+        p = nx.katz_centrality_numpy(H, alpha, weight="weight")
+        for a, b in zip(list(p.values()), self.H.evc):
+            assert a == pytest.approx(b, abs=1e-7)
+
+
+class TestKatzEigenvectorVKatz:
+    @classmethod
+    def setup_class(cls):
+        global np
+        np = pytest.importorskip("numpy")
+        pytest.importorskip("scipy")
+
+    def test_eigenvector_v_katz_random(self):
+        # Katz centrality with alpha = 1 / lambda_max coincides with
+        # eigenvector centrality.
+        G = nx.gnp_random_graph(10, 0.5, seed=1234)
+        lambda_max = max(np.linalg.eigvals(nx.adjacency_matrix(G).todense()))
+        e = nx.eigenvector_centrality_numpy(G)
+        k = nx.katz_centrality_numpy(G, 1.0 / lambda_max)
+        for n in G:
+            assert e[n] == pytest.approx(k[n], abs=1e-7)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py
new file mode 100644
index 00000000..21aa28b0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py
@@ -0,0 +1,221 @@
+import pytest
+
+import networkx as nx
+
+np = pytest.importorskip("numpy")
+sp = pytest.importorskip("scipy")
+
+
+def test_laplacian_centrality_null_graph():
+    G = nx.Graph()
+    with pytest.raises(nx.NetworkXPointlessConcept):
+        nx.laplacian_centrality(G, normalized=False)
+
+
+def test_laplacian_centrality_single_node():
+    """See gh-6571"""
+    G = nx.empty_graph(1)
+    assert nx.laplacian_centrality(G, normalized=False) == {0: 0}
+    with pytest.raises(ZeroDivisionError):
+        nx.laplacian_centrality(G, normalized=True)
+
+
+def test_laplacian_centrality_unconnected_nodes():
+    """laplacian_centrality on a unconnected node graph should return 0
+
+    For graphs without edges, the Laplacian energy is 0 and is unchanged with
+    node removal, so::
+
+        LC(v) = LE(G) - LE(G - v) = 0 - 0 = 0
+    """
+    G = nx.empty_graph(3)
+    assert nx.laplacian_centrality(G, normalized=False) == {0: 0, 1: 0, 2: 0}
+
+
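+def test_laplacian_centrality_energy_definition():
+    # A minimal definitional cross-check, not part of the original suite:
+    # for an unweighted graph, unnormalized Laplacian centrality should
+    # equal LE(G) - LE(G - v), where the Laplacian energy LE is the sum
+    # of squared Laplacian eigenvalues.
+    G = nx.path_graph(3)
+
+    def le(H):
+        return sum(np.linalg.eigvalsh(nx.laplacian_matrix(H).toarray()) ** 2)
+
+    lc = nx.laplacian_centrality(G, normalized=False)
+    for v in G:
+        H = G.copy()
+        H.remove_node(v)
+        assert lc[v] == pytest.approx(le(G) - le(H))
+
+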
+def test_laplacian_centrality_empty_graph():
+    G = nx.empty_graph(3)
+    with pytest.raises(ZeroDivisionError):
+        nx.laplacian_centrality(G, normalized=True)
+
+
+def test_laplacian_centrality_E():
+    E = nx.Graph()
+    E.add_weighted_edges_from(
+        [(0, 1, 4), (4, 5, 1), (0, 2, 2), (2, 1, 1), (1, 3, 2), (1, 4, 2)]
+    )
+    d = nx.laplacian_centrality(E)
+    exact = {
+        0: 0.700000,
+        1: 0.900000,
+        2: 0.280000,
+        3: 0.220000,
+        4: 0.260000,
+        5: 0.040000,
+    }
+
+    for n, dc in d.items():
+        assert exact[n] == pytest.approx(dc, abs=1e-7)
+
+    # Check not normalized
+    full_energy = 200
+    dnn = nx.laplacian_centrality(E, normalized=False)
+    for n, dc in dnn.items():
+        assert exact[n] * full_energy == pytest.approx(dc, abs=1e-7)
+
+    # Check unweighted not-normalized version
+    duw_nn = nx.laplacian_centrality(E, normalized=False, weight=None)
+    exact_uw_nn = {
+        0: 18,
+        1: 34,
+        2: 18,
+        3: 10,
+        4: 16,
+        5: 6,
+    }
+    for n, dc in duw_nn.items():
+        assert exact_uw_nn[n] == pytest.approx(dc, abs=1e-7)
+
+    # Check unweighted version
+    duw = nx.laplacian_centrality(E, weight=None)
+    full_energy = 42
+    for n, dc in duw.items():
+        assert exact_uw_nn[n] / full_energy == pytest.approx(dc, abs=1e-7)
+
+
+def test_laplacian_centrality_KC():
+    KC = nx.karate_club_graph()
+    d = nx.laplacian_centrality(KC)
+    exact = {
+        0: 0.2543593,
+        1: 0.1724524,
+        2: 0.2166053,
+        3: 0.0964646,
+        4: 0.0350344,
+        5: 0.0571109,
+        6: 0.0540713,
+        7: 0.0788674,
+        8: 0.1222204,
+        9: 0.0217565,
+        10: 0.0308751,
+        11: 0.0215965,
+        12: 0.0174372,
+        13: 0.118861,
+        14: 0.0366341,
+        15: 0.0548712,
+        16: 0.0172772,
+        17: 0.0191969,
+        18: 0.0225564,
+        19: 0.0331147,
+        20: 0.0279955,
+        21: 0.0246361,
+        22: 0.0382339,
+        23: 0.1294193,
+        24: 0.0227164,
+        25: 0.0644697,
+        26: 0.0281555,
+        27: 0.075188,
+        28: 0.0364742,
+        29: 0.0707087,
+        30: 0.0708687,
+        31: 0.131019,
+        32: 0.2370821,
+        33: 0.3066709,
+    }
+    for n, dc in d.items():
+        assert exact[n] == pytest.approx(dc, abs=1e-7)
+
+    # Check not normalized
+    full_energy = 12502
+    dnn = nx.laplacian_centrality(KC, normalized=False)
+    for n, dc in dnn.items():
+        assert exact[n] * full_energy == pytest.approx(dc, abs=1e-3)
+
+
+def test_laplacian_centrality_K():
+    K = nx.krackhardt_kite_graph()
+    d = nx.laplacian_centrality(K)
+    exact = {
+        0: 0.3010753,
+        1: 0.3010753,
+        2: 0.2258065,
+        3: 0.483871,
+        4: 0.2258065,
+        5: 0.3870968,
+        6: 0.3870968,
+        7: 0.1935484,
+        8: 0.0752688,
+        9: 0.0322581,
+    }
+    for n, dc in d.items():
+        assert exact[n] == pytest.approx(dc, abs=1e-7)
+
+    # Check not normalized
+    full_energy = 186
+    dnn = nx.laplacian_centrality(K, normalized=False)
+    for n, dc in dnn.items():
+        assert exact[n] * full_energy == pytest.approx(dc, abs=1e-3)
+
+
+def test_laplacian_centrality_P3():
+    P3 = nx.path_graph(3)
+    d = nx.laplacian_centrality(P3)
+    exact = {0: 0.6, 1: 1.0, 2: 0.6}
+    for n, dc in d.items():
+        assert exact[n] == pytest.approx(dc, abs=1e-7)
+
+
+def test_laplacian_centrality_K5():
+    K5 = nx.complete_graph(5)
+    d = nx.laplacian_centrality(K5)
+    exact = {0: 0.52, 1: 0.52, 2: 0.52, 3: 0.52, 4: 0.52}
+    for n, dc in d.items():
+        assert exact[n] == pytest.approx(dc, abs=1e-7)
+
+
+def test_laplacian_centrality_FF():
+    FF = nx.florentine_families_graph()
+    d = nx.laplacian_centrality(FF)
+    exact = {
+        "Acciaiuoli": 0.0804598,
+        "Medici": 0.4022989,
+        "Castellani": 0.1724138,
+        "Peruzzi": 0.183908,
+        "Strozzi": 0.2528736,
+        "Barbadori": 0.137931,
+        "Ridolfi": 0.2183908,
+        "Tornabuoni": 0.2183908,
+        "Albizzi": 0.1954023,
+        "Salviati": 0.1149425,
+        "Pazzi": 0.0344828,
+        "Bischeri": 0.1954023,
+        "Guadagni": 0.2298851,
+        "Ginori": 0.045977,
+        "Lamberteschi": 0.0574713,
+    }
+    for n, dc in d.items():
+        assert exact[n] == pytest.approx(dc, abs=1e-7)
+
+
+def test_laplacian_centrality_DG():
+    DG = nx.DiGraph([(0, 5), (1, 5), (2, 5), (3, 5), (4, 5), (5, 6), (5, 7), (5, 8)])
+    d = nx.laplacian_centrality(DG)
+    exact = {
+        0: 0.2123352,
+        5: 0.515391,
+        1: 0.2123352,
+        2: 0.2123352,
+        3: 0.2123352,
+        4: 0.2123352,
+        6: 0.2952031,
+        7: 0.2952031,
+        8: 0.2952031,
+    }
+    for n, dc in d.items():
+        assert exact[n] == pytest.approx(dc, abs=1e-7)
+
+    # Check not normalized
+    full_energy = 9.50704
+    dnn = nx.laplacian_centrality(DG, normalized=False)
+    for n, dc in dnn.items():
+        assert exact[n] * full_energy == pytest.approx(dc, abs=1e-4)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py
new file mode 100644
index 00000000..bf096039
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py
@@ -0,0 +1,344 @@
+import pytest
+
+import networkx as nx
+
+
+class TestLoadCentrality:
+    @classmethod
+    def setup_class(cls):
+        G = nx.Graph()
+        G.add_edge(0, 1, weight=3)
+        G.add_edge(0, 2, weight=2)
+        G.add_edge(0, 3, weight=6)
+        G.add_edge(0, 4, weight=4)
+        G.add_edge(1, 3, weight=5)
+        G.add_edge(1, 5, weight=5)
+        G.add_edge(2, 4, weight=1)
+        G.add_edge(3, 4, weight=2)
+        G.add_edge(3, 5, weight=1)
+        G.add_edge(4, 5, weight=4)
+        cls.G = G
+        cls.exact_weighted = {0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0}
+        cls.K = nx.krackhardt_kite_graph()
+        cls.P3 = nx.path_graph(3)
+        cls.P4 = nx.path_graph(4)
+        cls.K5 = nx.complete_graph(5)
+        cls.P2 = nx.path_graph(2)
+
+        cls.C4 = nx.cycle_graph(4)
+        cls.T = nx.balanced_tree(r=2, h=2)
+        cls.Gb = nx.Graph()
+        cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
+        cls.F = nx.florentine_families_graph()
+        cls.LM = nx.les_miserables_graph()
+        cls.D = nx.cycle_graph(3, create_using=nx.DiGraph())
+        cls.D.add_edges_from([(3, 0), (4, 3)])
+
+    def test_not_strongly_connected(self):
+        b = nx.load_centrality(self.D)
+        result = {0: 5.0 / 12, 1: 1.0 / 4, 2: 1.0 / 12, 3: 1.0 / 4, 4: 0.000}
+        for n in sorted(self.D):
+            assert result[n] == pytest.approx(b[n], abs=1e-3)
+            assert result[n] == pytest.approx(nx.load_centrality(self.D, n), abs=1e-3)
+
+    def test_P2_normalized_load(self):
+        G = self.P2
+        c = nx.load_centrality(G, normalized=True)
+        d = {0: 0.000, 1: 0.000}
+        for n in sorted(G):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_weighted_load(self):
+        b = nx.load_centrality(self.G, weight="weight", normalized=False)
+        for n in sorted(self.G):
+            assert b[n] == self.exact_weighted[n]
+
+    def test_k5_load(self):
+        G = self.K5
+        c = nx.load_centrality(G)
+        d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000}
+        for n in sorted(G):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_p3_load(self):
+        G = self.P3
+        c = nx.load_centrality(G)
+        d = {0: 0.000, 1: 1.000, 2: 0.000}
+        for n in sorted(G):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+        c = nx.load_centrality(G, v=1)
+        assert c == pytest.approx(1.0, abs=1e-7)
+        c = nx.load_centrality(G, v=1, normalized=True)
+        assert c == pytest.approx(1.0, abs=1e-7)
+
+    def test_p2_load(self):
+        G = nx.path_graph(2)
+        c = nx.load_centrality(G)
+        d = {0: 0.000, 1: 0.000}
+        for n in sorted(G):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_krackhardt_load(self):
+        G = self.K
+        c = nx.load_centrality(G)
+        d = {
+            0: 0.023,
+            1: 0.023,
+            2: 0.000,
+            3: 0.102,
+            4: 0.000,
+            5: 0.231,
+            6: 0.231,
+            7: 0.389,
+            8: 0.222,
+            9: 0.000,
+        }
+        for n in sorted(G):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_florentine_families_load(self):
+        G = self.F
+        c = nx.load_centrality(G)
+        d = {
+            "Acciaiuoli": 0.000,
+            "Albizzi": 0.211,
+            "Barbadori": 0.093,
+            "Bischeri": 0.104,
+            "Castellani": 0.055,
+            "Ginori": 0.000,
+            "Guadagni": 0.251,
+            "Lamberteschi": 0.000,
+            "Medici": 0.522,
+            "Pazzi": 0.000,
+            "Peruzzi": 0.022,
+            "Ridolfi": 0.117,
+            "Salviati": 0.143,
+            "Strozzi": 0.106,
+            "Tornabuoni": 0.090,
+        }
+        for n in sorted(G):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_les_miserables_load(self):
+        G = self.LM
+        c = nx.load_centrality(G)
+        d = {
+            "Napoleon": 0.000,
+            "Myriel": 0.177,
+            "MlleBaptistine": 0.000,
+            "MmeMagloire": 0.000,
+            "CountessDeLo": 0.000,
+            "Geborand": 0.000,
+            "Champtercier": 0.000,
+            "Cravatte": 0.000,
+            "Count": 0.000,
+            "OldMan": 0.000,
+            "Valjean": 0.567,
+            "Labarre": 0.000,
+            "Marguerite": 0.000,
+            "MmeDeR": 0.000,
+            "Isabeau": 0.000,
+            "Gervais": 0.000,
+            "Listolier": 0.000,
+            "Tholomyes": 0.043,
+            "Fameuil": 0.000,
+            "Blacheville": 0.000,
+            "Favourite": 0.000,
+            "Dahlia": 0.000,
+            "Zephine": 0.000,
+            "Fantine": 0.128,
+            "MmeThenardier": 0.029,
+            "Thenardier": 0.075,
+            "Cosette": 0.024,
+            "Javert": 0.054,
+            "Fauchelevent": 0.026,
+            "Bamatabois": 0.008,
+            "Perpetue": 0.000,
+            "Simplice": 0.009,
+            "Scaufflaire": 0.000,
+            "Woman1": 0.000,
+            "Judge": 0.000,
+            "Champmathieu": 0.000,
+            "Brevet": 0.000,
+            "Chenildieu": 0.000,
+            "Cochepaille": 0.000,
+            "Pontmercy": 0.007,
+            "Boulatruelle": 0.000,
+            "Eponine": 0.012,
+            "Anzelma": 0.000,
+            "Woman2": 0.000,
+            "MotherInnocent": 0.000,
+            "Gribier": 0.000,
+            "MmeBurgon": 0.026,
+            "Jondrette": 0.000,
+            "Gavroche": 0.164,
+            "Gillenormand": 0.021,
+            "Magnon": 0.000,
+            "MlleGillenormand": 0.047,
+            "MmePontmercy": 0.000,
+            "MlleVaubois": 0.000,
+            "LtGillenormand": 0.000,
+            "Marius": 0.133,
+            "BaronessT": 0.000,
+            "Mabeuf": 0.028,
+            "Enjolras": 0.041,
+            "Combeferre": 0.001,
+            "Prouvaire": 0.000,
+            "Feuilly": 0.001,
+            "Courfeyrac": 0.006,
+            "Bahorel": 0.002,
+            "Bossuet": 0.032,
+            "Joly": 0.002,
+            "Grantaire": 0.000,
+            "MotherPlutarch": 0.000,
+            "Gueulemer": 0.005,
+            "Babet": 0.005,
+            "Claquesous": 0.005,
+            "Montparnasse": 0.004,
+            "Toussaint": 0.000,
+            "Child1": 0.000,
+            "Child2": 0.000,
+            "Brujon": 0.000,
+            "MmeHucheloup": 0.000,
+        }
+        for n in sorted(G):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_unnormalized_k5_load(self):
+        G = self.K5
+        c = nx.load_centrality(G, normalized=False)
+        d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000}
+        for n in sorted(G):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_unnormalized_p3_load(self):
+        G = self.P3
+        c = nx.load_centrality(G, normalized=False)
+        d = {0: 0.000, 1: 2.000, 2: 0.000}
+        for n in sorted(G):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_unnormalized_krackhardt_load(self):
+        G = self.K
+        c = nx.load_centrality(G, normalized=False)
+        d = {
+            0: 1.667,
+            1: 1.667,
+            2: 0.000,
+            3: 7.333,
+            4: 0.000,
+            5: 16.667,
+            6: 16.667,
+            7: 28.000,
+            8: 16.000,
+            9: 0.000,
+        }
+
+        for n in sorted(G):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_unnormalized_florentine_families_load(self):
+        G = self.F
+        c = nx.load_centrality(G, normalized=False)
+
+        d = {
+            "Acciaiuoli": 0.000,
+            "Albizzi": 38.333,
+            "Barbadori": 17.000,
+            "Bischeri": 19.000,
+            "Castellani": 10.000,
+            "Ginori": 0.000,
+            "Guadagni": 45.667,
+            "Lamberteschi": 0.000,
+            "Medici": 95.000,
+            "Pazzi": 0.000,
+            "Peruzzi": 4.000,
+            "Ridolfi": 21.333,
+            "Salviati": 26.000,
+            "Strozzi": 19.333,
+            "Tornabuoni": 16.333,
+        }
+        for n in sorted(G):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_load_betweenness_difference(self):
+        # Difference Between Load and Betweenness
+        # ---------------------------------------
+        # The smallest graph that shows the difference between load and
+        # betweenness is G = ladder_graph(3) (Graph B below).
+
+        # Graph A and B are from Tao Zhou, Jian-Guo Liu, Bing-Hong
+        # Wang: Comment on "Scientific collaboration
+        # networks. II. Shortest paths, weighted networks, and
+        # centrality". https://arxiv.org/pdf/physics/0511084
+
+        # Notice that unlike here, their calculation adds 1 to the
+        # betweenness of every node i for every path from i to every
+        # other node.  This is exactly what it should be, based on
+        # Eqn. (1) in their paper:
+        # B(v) = \sum_{s \neq t, s \neq v} \frac{\sigma_{st}(v)}{\sigma_{st}},
+        # so they allow v to be the target node.
+
+        # We follow Brandes 2001, who follows Freeman 1977, in making the
+        # sum for the betweenness of v exclude paths where v is either the
+        # source or the target node.  To agree with their numbers, we must
+        # additionally remove edge (4, 8) from the graph; see the
+        # commented-out graph A below (there is a mistake in the figure in
+        # their paper - personal communication).
+
+        # A = nx.Graph()
+        # A.add_edges_from([(0,1), (1,2), (1,3), (2,4),
+        #                  (3,5), (4,6), (4,7), (4,8),
+        #                  (5,8), (6,9), (7,9), (8,9)])
+        B = nx.Graph()  # ladder_graph(3)
+        B.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
+        c = nx.load_centrality(B, normalized=False)
+        d = {0: 1.750, 1: 1.750, 2: 6.500, 3: 6.500, 4: 1.750, 5: 1.750}
+        for n in sorted(B):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
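+    def test_load_betweenness_difference_ladder(self):
+        # A minimal companion check, not from the original suite: graph B
+        # above is isomorphic to nx.ladder_graph(3), the smallest graph on
+        # which unnormalized load and betweenness disagree, so the two
+        # dictionaries should differ.
+        B = nx.ladder_graph(3)
+        load = nx.load_centrality(B, normalized=False)
+        btw = nx.betweenness_centrality(B, normalized=False)
+        assert load != btw
+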
+    def test_c4_edge_load(self):
+        G = self.C4
+        c = nx.edge_load_centrality(G)
+        d = {(0, 1): 6.000, (0, 3): 6.000, (1, 2): 6.000, (2, 3): 6.000}
+        for n in G.edges():
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_p4_edge_load(self):
+        G = self.P4
+        c = nx.edge_load_centrality(G)
+        d = {(0, 1): 6.000, (1, 2): 8.000, (2, 3): 6.000}
+        for n in G.edges():
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_k5_edge_load(self):
+        G = self.K5
+        c = nx.edge_load_centrality(G)
+        d = {
+            (0, 1): 5.000,
+            (0, 2): 5.000,
+            (0, 3): 5.000,
+            (0, 4): 5.000,
+            (1, 2): 5.000,
+            (1, 3): 5.000,
+            (1, 4): 5.000,
+            (2, 3): 5.000,
+            (2, 4): 5.000,
+            (3, 4): 5.000,
+        }
+        for n in G.edges():
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_tree_edge_load(self):
+        G = self.T
+        c = nx.edge_load_centrality(G)
+        d = {
+            (0, 1): 24.000,
+            (0, 2): 24.000,
+            (1, 3): 12.000,
+            (1, 4): 12.000,
+            (2, 5): 12.000,
+            (2, 6): 12.000,
+        }
+        for n in G.edges():
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py
new file mode 100644
index 00000000..0cb8f529
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py
@@ -0,0 +1,87 @@
+import pytest
+
+import networkx as nx
+
+
+def example1a_G():
+    G = nx.Graph()
+    G.add_node(1, percolation=0.1)
+    G.add_node(2, percolation=0.2)
+    G.add_node(3, percolation=0.2)
+    G.add_node(4, percolation=0.2)
+    G.add_node(5, percolation=0.3)
+    G.add_node(6, percolation=0.2)
+    G.add_node(7, percolation=0.5)
+    G.add_node(8, percolation=0.5)
+    G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8)])
+    return G
+
+
+def example1b_G():
+    G = nx.Graph()
+    G.add_node(1, percolation=0.3)
+    G.add_node(2, percolation=0.5)
+    G.add_node(3, percolation=0.5)
+    G.add_node(4, percolation=0.2)
+    G.add_node(5, percolation=0.3)
+    G.add_node(6, percolation=0.2)
+    G.add_node(7, percolation=0.1)
+    G.add_node(8, percolation=0.1)
+    G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8)])
+    return G
+
+
+def test_percolation_example1a():
+    """percolation centrality: example 1a"""
+    G = example1a_G()
+    p = nx.percolation_centrality(G)
+    p_answer = {4: 0.625, 6: 0.667}
+    for n, k in p_answer.items():
+        assert p[n] == pytest.approx(k, abs=1e-3)
+
+
+def test_percolation_example1b():
+    """percolation centrality: example 1a"""
+    G = example1b_G()
+    p = nx.percolation_centrality(G)
+    p_answer = {4: 0.825, 6: 0.4}
+    for n, k in p_answer.items():
+        assert p[n] == pytest.approx(k, abs=1e-3)
+
+
+def test_converge_to_betweenness():
+    """percolation centrality: should converge to betweenness
+    centrality when all nodes are percolated the same"""
+    # taken from betweenness test test_florentine_families_graph
+    G = nx.florentine_families_graph()
+    b_answer = {
+        "Acciaiuoli": 0.000,
+        "Albizzi": 0.212,
+        "Barbadori": 0.093,
+        "Bischeri": 0.104,
+        "Castellani": 0.055,
+        "Ginori": 0.000,
+        "Guadagni": 0.255,
+        "Lamberteschi": 0.000,
+        "Medici": 0.522,
+        "Pazzi": 0.000,
+        "Peruzzi": 0.022,
+        "Ridolfi": 0.114,
+        "Salviati": 0.143,
+        "Strozzi": 0.103,
+        "Tornabuoni": 0.092,
+    }
+
+    # If no initial state is provided, state for
+    # every node defaults to 1
+    p_answer = nx.percolation_centrality(G)
+    assert p_answer == pytest.approx(b_answer, abs=1e-3)
+
+    p_states = {k: 0.3 for k in b_answer}
+    p_answer = nx.percolation_centrality(G, states=p_states)
+    assert p_answer == pytest.approx(b_answer, abs=1e-3)
+
+
+def test_default_percolation():
+    G = nx.erdos_renyi_graph(42, 0.42, seed=42)
+    assert nx.percolation_centrality(G) == pytest.approx(nx.betweenness_centrality(G))
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_reaching.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_reaching.py
new file mode 100644
index 00000000..35d50e70
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_reaching.py
@@ -0,0 +1,140 @@
+"""Unit tests for the :mod:`networkx.algorithms.centrality.reaching` module."""
+
+import pytest
+
+import networkx as nx
+
+
+class TestGlobalReachingCentrality:
+    """Unit tests for the global reaching centrality function."""
+
+    def test_non_positive_weights(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.DiGraph()
+            nx.global_reaching_centrality(G, weight="weight")
+
+    def test_negatively_weighted(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.Graph()
+            G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)])
+            nx.global_reaching_centrality(G, weight="weight")
+
+    def test_directed_star(self):
+        G = nx.DiGraph()
+        G.add_weighted_edges_from([(1, 2, 0.5), (1, 3, 0.5)])
+        grc = nx.global_reaching_centrality
+        assert grc(G, normalized=False, weight="weight") == 0.5
+        assert grc(G) == 1
+
+    def test_undirected_unweighted_star(self):
+        G = nx.star_graph(2)
+        grc = nx.global_reaching_centrality
+        assert grc(G, normalized=False, weight=None) == 0.25
+
+    def test_undirected_weighted_star(self):
+        G = nx.Graph()
+        G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)])
+        grc = nx.global_reaching_centrality
+        assert grc(G, normalized=False, weight="weight") == 0.375
+
+    def test_cycle_directed_unweighted(self):
+        G = nx.DiGraph()
+        G.add_edge(1, 2)
+        G.add_edge(2, 1)
+        assert nx.global_reaching_centrality(G, weight=None) == 0
+
+    def test_cycle_undirected_unweighted(self):
+        G = nx.Graph()
+        G.add_edge(1, 2)
+        assert nx.global_reaching_centrality(G, weight=None) == 0
+
+    def test_cycle_directed_weighted(self):
+        G = nx.DiGraph()
+        G.add_weighted_edges_from([(1, 2, 1), (2, 1, 1)])
+        assert nx.global_reaching_centrality(G) == 0
+
+    def test_cycle_undirected_weighted(self):
+        G = nx.Graph()
+        G.add_edge(1, 2, weight=1)
+        grc = nx.global_reaching_centrality
+        assert grc(G, normalized=False) == 0
+
+    def test_directed_weighted(self):
+        G = nx.DiGraph()
+        G.add_edge("A", "B", weight=5)
+        G.add_edge("B", "C", weight=1)
+        G.add_edge("B", "D", weight=0.25)
+        G.add_edge("D", "E", weight=1)
+
+        denom = len(G) - 1
+        # Each term below is the average edge weight along the path from
+        # the source to one reachable node, e.g. A->E via B and D:
+        # (5 + 0.25 + 1) / 3 = 2.0833...
+        A_local = sum([5, 3, 2.625, 2.0833333333333]) / denom
+        B_local = sum([1, 0.25, 0.625]) / denom
+        C_local = 0
+        D_local = sum([1]) / denom
+        E_local = 0
+
+        local_reach_ctrs = [A_local, C_local, B_local, D_local, E_local]
+        max_local = max(local_reach_ctrs)
+        expected = sum(max_local - lrc for lrc in local_reach_ctrs) / denom
+        grc = nx.global_reaching_centrality
+        actual = grc(G, normalized=False, weight="weight")
+        assert expected == pytest.approx(actual, abs=1e-7)
+
+    def test_single_node_with_cycle(self):
+        G = nx.DiGraph([(1, 1)])
+        with pytest.raises(nx.NetworkXError, match="local_reaching_centrality"):
+            nx.global_reaching_centrality(G)
+
+    def test_single_node_with_weighted_cycle(self):
+        G = nx.DiGraph()
+        G.add_weighted_edges_from([(1, 1, 2)])
+        with pytest.raises(nx.NetworkXError, match="local_reaching_centrality"):
+            nx.global_reaching_centrality(G, weight="weight")
+
+
+class TestLocalReachingCentrality:
+    """Unit tests for the local reaching centrality function."""
+
+    def test_non_positive_weights(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.DiGraph()
+            G.add_weighted_edges_from([(0, 1, 0)])
+            nx.local_reaching_centrality(G, 0, weight="weight")
+
+    def test_negatively_weighted(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.Graph()
+            G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)])
+            nx.local_reaching_centrality(G, 0, weight="weight")
+
+    def test_undirected_unweighted_star(self):
+        G = nx.star_graph(2)
+        grc = nx.local_reaching_centrality
+        assert grc(G, 1, weight=None, normalized=False) == 0.75
+
+    def test_undirected_weighted_star(self):
+        G = nx.Graph()
+        G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)])
+        centrality = nx.local_reaching_centrality(
+            G, 1, normalized=False, weight="weight"
+        )
+        assert centrality == 1.5
+
+    def test_undirected_weighted_normalized(self):
+        G = nx.Graph()
+        G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)])
+        centrality = nx.local_reaching_centrality(
+            G, 1, normalized=True, weight="weight"
+        )
+        assert centrality == 1.0
+
+    def test_single_node_with_cycle(self):
+        G = nx.DiGraph([(1, 1)])
+        with pytest.raises(nx.NetworkXError, match="local_reaching_centrality"):
+            nx.local_reaching_centrality(G, 1)
+
+    def test_single_node_with_weighted_cycle(self):
+        G = nx.DiGraph()
+        G.add_weighted_edges_from([(1, 1, 2)])
+        with pytest.raises(nx.NetworkXError, match="local_reaching_centrality"):
+            nx.local_reaching_centrality(G, 1, weight="weight")
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py
new file mode 100644
index 00000000..cc304786
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py
@@ -0,0 +1,82 @@
+"""
+Tests for second order centrality.
+"""
+
+import pytest
+
+pytest.importorskip("numpy")
+pytest.importorskip("scipy")
+
+import networkx as nx
+
+
+def test_empty():
+    with pytest.raises(nx.NetworkXException):
+        G = nx.empty_graph()
+        nx.second_order_centrality(G)
+
+
+def test_non_connected():
+    with pytest.raises(nx.NetworkXException):
+        G = nx.Graph()
+        G.add_node(0)
+        G.add_node(1)
+        nx.second_order_centrality(G)
+
+
+def test_non_negative_edge_weights():
+    with pytest.raises(nx.NetworkXException):
+        G = nx.path_graph(2)
+        G.add_edge(0, 1, weight=-1)
+        nx.second_order_centrality(G)
+
+
+def test_weight_attribute():
+    G = nx.Graph()
+    G.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 3.5)], weight="w")
+    expected = {0: 3.431, 1: 3.082, 2: 5.612}
+    b = nx.second_order_centrality(G, weight="w")
+
+    for n in sorted(G):
+        assert b[n] == pytest.approx(expected[n], abs=1e-2)
+
+
+def test_one_node_graph():
+    """Second order centrality: single node"""
+    G = nx.Graph()
+    G.add_node(0)
+    G.add_edge(0, 0)
+    assert nx.second_order_centrality(G)[0] == 0
+
+
+def test_P3():
+    """Second order centrality: line graph, as defined in paper"""
+    G = nx.path_graph(3)
+    b_answer = {0: 3.741, 1: 1.414, 2: 3.741}
+
+    b = nx.second_order_centrality(G)
+
+    for n in sorted(G):
+        assert b[n] == pytest.approx(b_answer[n], abs=1e-2)
+
+
+def test_K3():
+    """Second order centrality: complete graph, as defined in paper"""
+    G = nx.complete_graph(3)
+    b_answer = {0: 1.414, 1: 1.414, 2: 1.414}
+
+    b = nx.second_order_centrality(G)
+
+    for n in sorted(G):
+        assert b[n] == pytest.approx(b_answer[n], abs=1e-2)
+
+
+def test_ring_graph():
+    """Second order centrality: ring graph, as defined in paper"""
+    G = nx.cycle_graph(5)
+    b_answer = {0: 4.472, 1: 4.472, 2: 4.472, 3: 4.472, 4: 4.472}
+
+    b = nx.second_order_centrality(G)
+
+    for n in sorted(G):
+        assert b[n] == pytest.approx(b_answer[n], abs=1e-2)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py
new file mode 100644
index 00000000..71092751
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py
@@ -0,0 +1,110 @@
+import pytest
+
+pytest.importorskip("numpy")
+pytest.importorskip("scipy")
+
+import networkx as nx
+from networkx.algorithms.centrality.subgraph_alg import (
+    communicability_betweenness_centrality,
+    estrada_index,
+    subgraph_centrality,
+    subgraph_centrality_exp,
+)
+
+
+class TestSubgraph:
+    def test_subgraph_centrality(self):
+        answer = {0: 1.5430806348152433, 1: 1.5430806348152433}
+        result = subgraph_centrality(nx.path_graph(2))
+        for k, v in result.items():
+            assert answer[k] == pytest.approx(v, abs=1e-7)
+
+        answer1 = {
+            "1": 1.6445956054135658,
+            "Albert": 2.4368257358712189,
+            "Aric": 2.4368257358712193,
+            "Dan": 3.1306328496328168,
+            "Franck": 2.3876142275231915,
+        }
+        G1 = nx.Graph(
+            [
+                ("Franck", "Aric"),
+                ("Aric", "Dan"),
+                ("Dan", "Albert"),
+                ("Albert", "Franck"),
+                ("Dan", "1"),
+                ("Franck", "Albert"),
+            ]
+        )
+        result1 = subgraph_centrality(G1)
+        for k, v in result1.items():
+            assert answer1[k] == pytest.approx(v, abs=1e-7)
+        result1 = subgraph_centrality_exp(G1)
+        for k, v in result1.items():
+            assert answer1[k] == pytest.approx(v, abs=1e-7)
+
+    def test_subgraph_centrality_big_graph(self):
+        # Smoke test: these calls should simply not raise on large
+        # complete graphs.
+        g199 = nx.complete_graph(199)
+        g200 = nx.complete_graph(200)
+
+        comm199 = nx.subgraph_centrality(g199)
+        comm199_exp = nx.subgraph_centrality_exp(g199)
+
+        comm200 = nx.subgraph_centrality(g200)
+        comm200_exp = nx.subgraph_centrality_exp(g200)
+
+    def test_communicability_betweenness_centrality_small(self):
+        result = communicability_betweenness_centrality(nx.path_graph(2))
+        assert result == {0: 0, 1: 0}
+
+        result = communicability_betweenness_centrality(nx.path_graph(1))
+        assert result == {0: 0}
+
+        result = communicability_betweenness_centrality(nx.path_graph(0))
+        assert result == {}
+
+        answer = {0: 0.1411224421177313, 1: 1.0, 2: 0.1411224421177313}
+        result = communicability_betweenness_centrality(nx.path_graph(3))
+        for k, v in result.items():
+            assert answer[k] == pytest.approx(v, abs=1e-7)
+
+        result = communicability_betweenness_centrality(nx.complete_graph(3))
+        for k, v in result.items():
+            assert 0.49786143366223296 == pytest.approx(v, abs=1e-7)
+
+    def test_communicability_betweenness_centrality(self):
+        answer = {
+            0: 0.07017447951484615,
+            1: 0.71565598701107991,
+            2: 0.71565598701107991,
+            3: 0.07017447951484615,
+        }
+        result = communicability_betweenness_centrality(nx.path_graph(4))
+        for k, v in result.items():
+            assert answer[k] == pytest.approx(v, abs=1e-7)
+
+        answer1 = {
+            "1": 0.060039074193949521,
+            "Albert": 0.315470761661372,
+            "Aric": 0.31547076166137211,
+            "Dan": 0.68297778678316201,
+            "Franck": 0.21977926617449497,
+        }
+        G1 = nx.Graph(
+            [
+                ("Franck", "Aric"),
+                ("Aric", "Dan"),
+                ("Dan", "Albert"),
+                ("Albert", "Franck"),
+                ("Dan", "1"),
+                ("Franck", "Albert"),
+            ]
+        )
+        result1 = communicability_betweenness_centrality(G1)
+        for k, v in result1.items():
+            assert answer1[k] == pytest.approx(v, abs=1e-7)
+
+    def test_estrada_index(self):
+        answer = 1041.2470334195475
+        result = estrada_index(nx.karate_club_graph())
+        assert answer == pytest.approx(result, abs=1e-7)
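+
+    def test_estrada_index_is_sum_of_subgraph_centralities(self):
+        # A definitional cross-check, not part of the original suite: the
+        # Estrada index is the trace of expm(A), i.e. the sum of the
+        # subgraph centralities over all nodes.
+        G = nx.karate_club_graph()
+        sc = subgraph_centrality(G)
+        assert estrada_index(G) == pytest.approx(sum(sc.values()), rel=1e-6)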
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_trophic.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_trophic.py
new file mode 100644
index 00000000..e6880d52
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_trophic.py
@@ -0,0 +1,302 @@
+"""Test trophic levels, trophic differences and trophic coherence"""
+
+import pytest
+
+np = pytest.importorskip("numpy")
+pytest.importorskip("scipy")
+
+import networkx as nx
+
+
+def test_trophic_levels():
+    """Trivial example"""
+    G = nx.DiGraph()
+    G.add_edge("a", "b")
+    G.add_edge("b", "c")
+
+    d = nx.trophic_levels(G)
+    assert d == {"a": 1, "b": 2, "c": 3}
+
+
+def test_trophic_levels_levine():
+    """Example from Figure 5 in Stephen Levine (1980) J. theor. Biol. 83,
+    195-207
+    """
+    S = nx.DiGraph()
+    S.add_edge(1, 2, weight=1.0)
+    S.add_edge(1, 3, weight=0.2)
+    S.add_edge(1, 4, weight=0.8)
+    S.add_edge(2, 3, weight=0.2)
+    S.add_edge(2, 5, weight=0.3)
+    S.add_edge(4, 3, weight=0.6)
+    S.add_edge(4, 5, weight=0.7)
+    S.add_edge(5, 4, weight=0.2)
+
+    # save copy for later, test intermediate implementation details first
+    S2 = S.copy()
+
+    # drop nodes of in-degree zero
+    z = [nid for nid, d in S.in_degree if d == 0]
+    for nid in z:
+        S.remove_node(nid)
+
+    # find adjacency matrix
+    q = nx.linalg.graphmatrix.adjacency_matrix(S).T
+
+    # fmt: off
+    expected_q = np.array([
+        [0, 0, 0., 0],
+        [0.2, 0, 0.6, 0],
+        [0, 0, 0, 0.2],
+        [0.3, 0, 0.7, 0]
+    ])
+    # fmt: on
+    assert np.array_equal(q.todense(), expected_q)
+
+    # must be square, size of number of nodes
+    assert len(q.shape) == 2
+    assert q.shape[0] == q.shape[1]
+    assert q.shape[0] == len(S)
+
+    nn = q.shape[0]
+
+    i = np.eye(nn)
+    n = np.linalg.inv(i - q)
+    y = np.asarray(n) @ np.ones(nn)
+
+    expected_y = np.array([1, 2.07906977, 1.46511628, 2.3255814])
+    assert np.allclose(y, expected_y)
+
+    expected_d = {1: 1, 2: 2, 3: 3.07906977, 4: 2.46511628, 5: 3.3255814}
+
+    d = nx.trophic_levels(S2)
+
+    for nid, level in d.items():
+        expected_level = expected_d[nid]
+        assert expected_level == pytest.approx(level, abs=1e-7)
+
+
+def test_trophic_levels_simple():
+    matrix_a = np.array([[0, 0], [1, 0]])
+    G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph)
+    d = nx.trophic_levels(G)
+    assert d[0] == pytest.approx(2, abs=1e-7)
+    assert d[1] == pytest.approx(1, abs=1e-7)
+
+
+def test_trophic_levels_more_complex():
+    # fmt: off
+    matrix = np.array([
+        [0, 1, 0, 0],
+        [0, 0, 1, 0],
+        [0, 0, 0, 1],
+        [0, 0, 0, 0]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix, create_using=nx.DiGraph)
+    d = nx.trophic_levels(G)
+    expected_result = [1, 2, 3, 4]
+    for ind in range(4):
+        assert d[ind] == pytest.approx(expected_result[ind], abs=1e-7)
+
+    # fmt: off
+    matrix = np.array([
+        [0, 1, 1, 0],
+        [0, 0, 1, 1],
+        [0, 0, 0, 1],
+        [0, 0, 0, 0]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix, create_using=nx.DiGraph)
+    d = nx.trophic_levels(G)
+
+    expected_result = [1, 2, 2.5, 3.25]
+    print("Calculated result: ", d)
+    print("Expected Result: ", expected_result)
+
+    for ind in range(4):
+        assert d[ind] == pytest.approx(expected_result[ind], abs=1e-7)
+
+
+def test_trophic_levels_even_more_complex():
+    # fmt: off
+    # Another, bigger matrix
+    matrix = np.array([
+        [0, 0, 0, 0, 0],
+        [0, 1, 0, 1, 0],
+        [1, 0, 0, 0, 0],
+        [0, 1, 0, 0, 0],
+        [0, 0, 0, 1, 0]
+    ])
+    # Generated this linear system using pen and paper:
+    K = np.array([
+        [1, 0, -1, 0, 0],
+        [0, 0.5, 0, -0.5, 0],
+        [0, 0, 1, 0, 0],
+        [0, -0.5, 0, 1, -0.5],
+        [0, 0, 0, 0, 1],
+    ])
+    # fmt: on
+    result_1 = np.ravel(np.linalg.inv(K) @ np.ones(5))
+    G = nx.from_numpy_array(matrix, create_using=nx.DiGraph)
+    result_2 = nx.trophic_levels(G)
+
+    for ind in range(5):
+        assert result_1[ind] == pytest.approx(result_2[ind], abs=1e-7)
+
+
+def test_trophic_levels_singular_matrix():
+    """Should raise an error with graphs with only non-basal nodes"""
+    matrix = np.identity(4)
+    G = nx.from_numpy_array(matrix, create_using=nx.DiGraph)
+    with pytest.raises(nx.NetworkXError) as e:
+        nx.trophic_levels(G)
+    msg = (
+        "Trophic levels are only defined for graphs where every node "
+        + "has a path from a basal node (basal nodes are nodes with no "
+        + "incoming edges)."
+    )
+    assert msg in str(e.value)
+
+
+def test_trophic_levels_singular_with_basal():
+    """Should fail to compute if there are any parts of the graph which are not
+    reachable from any basal node (with in-degree zero).
+    """
+    G = nx.DiGraph()
+    # a has in-degree zero
+    G.add_edge("a", "b")
+
+    # b is one level above a, c and d
+    G.add_edge("c", "b")
+    G.add_edge("d", "b")
+
+    # c and d form a loop, neither are reachable from a
+    G.add_edge("c", "d")
+    G.add_edge("d", "c")
+
+    with pytest.raises(nx.NetworkXError) as e:
+        nx.trophic_levels(G)
+    msg = (
+        "Trophic levels are only defined for graphs where every node "
+        + "has a path from a basal node (basal nodes are nodes with no "
+        + "incoming edges)."
+    )
+    assert msg in str(e.value)
+
+    # if self-loops are allowed, smaller example:
+    G = nx.DiGraph()
+    G.add_edge("a", "b")  # a has in-degree zero
+    G.add_edge("c", "b")  # b is one level above a and c
+    G.add_edge("c", "c")  # c has a self-loop
+    with pytest.raises(nx.NetworkXError) as e:
+        nx.trophic_levels(G)
+    msg = (
+        "Trophic levels are only defined for graphs where every node "
+        + "has a path from a basal node (basal nodes are nodes with no "
+        + "incoming edges)."
+    )
+    assert msg in str(e.value)
+
+
+def test_trophic_differences():
+    matrix_a = np.array([[0, 1], [0, 0]])
+    G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph)
+    diffs = nx.trophic_differences(G)
+    assert diffs[(0, 1)] == pytest.approx(1, abs=1e-7)
+
+    # fmt: off
+    matrix_b = np.array([
+        [0, 1, 1, 0],
+        [0, 0, 1, 1],
+        [0, 0, 0, 1],
+        [0, 0, 0, 0]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph)
+    diffs = nx.trophic_differences(G)
+
+    assert diffs[(0, 1)] == pytest.approx(1, abs=1e-7)
+    assert diffs[(0, 2)] == pytest.approx(1.5, abs=1e-7)
+    assert diffs[(1, 2)] == pytest.approx(0.5, abs=1e-7)
+    assert diffs[(1, 3)] == pytest.approx(1.25, abs=1e-7)
+    assert diffs[(2, 3)] == pytest.approx(0.75, abs=1e-7)
+
+
+def test_trophic_incoherence_parameter_no_cannibalism():
+    matrix_a = np.array([[0, 1], [0, 0]])
+    G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=False)
+    assert q == pytest.approx(0, abs=1e-7)
+
+    # fmt: off
+    matrix_b = np.array([
+        [0, 1, 1, 0],
+        [0, 0, 1, 1],
+        [0, 0, 0, 1],
+        [0, 0, 0, 0]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=False)
+    assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7)
+
+    # fmt: off
+    matrix_c = np.array([
+        [0, 1, 1, 0],
+        [0, 1, 1, 1],
+        [0, 0, 0, 1],
+        [0, 0, 0, 1]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix_c, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=False)
+    # Self-loops at nodes 1 and 3 are dropped when cannibalism=False
+    assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7)
+
+    # no self-loops case
+    # fmt: off
+    matrix_d = np.array([
+        [0, 1, 1, 0],
+        [0, 0, 1, 1],
+        [0, 0, 0, 1],
+        [0, 0, 0, 0]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix_d, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=False)
+    # No self-loops to remove, so the result matches matrix_b above
+    assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7)
+
+
+def test_trophic_incoherence_parameter_cannibalism():
+    matrix_a = np.array([[0, 1], [0, 0]])
+    G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=True)
+    assert q == pytest.approx(0, abs=1e-7)
+
+    # fmt: off
+    matrix_b = np.array([
+        [0, 0, 0, 0, 0],
+        [0, 1, 0, 1, 0],
+        [1, 0, 0, 0, 0],
+        [0, 1, 0, 0, 0],
+        [0, 0, 0, 1, 0]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=True)
+    assert q == pytest.approx(2, abs=1e-7)
+
+    # fmt: off
+    matrix_c = np.array([
+        [0, 1, 1, 0],
+        [0, 0, 1, 1],
+        [0, 0, 0, 1],
+        [0, 0, 0, 0]
+    ])
+    # fmt: on
+    G = nx.from_numpy_array(matrix_c, create_using=nx.DiGraph)
+    q = nx.trophic_incoherence_parameter(G, cannibalism=True)
+    # matrix_c has no self-loops, so cannibalism=True changes nothing here
+    assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_voterank.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_voterank.py
new file mode 100644
index 00000000..a5cfb610
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/tests/test_voterank.py
@@ -0,0 +1,64 @@
+"""
+Unit tests for VoteRank.
+"""
+
+import networkx as nx
+
+
+class TestVoteRankCentrality:
+    # Example Graph present in reference paper
+    def test_voterank_centrality_1(self):
+        G = nx.Graph()
+        G.add_edges_from(
+            [
+                (7, 8),
+                (7, 5),
+                (7, 9),
+                (5, 0),
+                (0, 1),
+                (0, 2),
+                (0, 3),
+                (0, 4),
+                (1, 6),
+                (2, 6),
+                (3, 6),
+                (4, 6),
+            ]
+        )
+        assert [0, 7, 6] == nx.voterank(G)
+
+    def test_voterank_emptygraph(self):
+        G = nx.Graph()
+        assert [] == nx.voterank(G)
+
+    # Graph unit test
+    def test_voterank_centrality_2(self):
+        G = nx.florentine_families_graph()
+        d = nx.voterank(G, 4)
+        exact = ["Medici", "Strozzi", "Guadagni", "Castellani"]
+        assert exact == d
+
+    # DiGraph unit test
+    def test_voterank_centrality_3(self):
+        G = nx.gnc_graph(10, seed=7)
+        d = nx.voterank(G, 4)
+        exact = [3, 6, 8]
+        assert exact == d
+
+    # MultiGraph unit test
+    def test_voterank_centrality_4(self):
+        G = nx.MultiGraph()
+        G.add_edges_from(
+            [(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)]
+        )
+        exact = [2, 1, 5, 4]
+        assert exact == nx.voterank(G)
+
+    # MultiDiGraph unit test
+    def test_voterank_centrality_5(self):
+        G = nx.MultiDiGraph()
+        G.add_edges_from(
+            [(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)]
+        )
+        exact = [2, 0, 5, 4]
+        assert exact == nx.voterank(G)
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/trophic.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/trophic.py
new file mode 100644
index 00000000..9e461ced
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/trophic.py
@@ -0,0 +1,163 @@
+"""Trophic levels"""
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["trophic_levels", "trophic_differences", "trophic_incoherence_parameter"]
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable(edge_attrs="weight")
+def trophic_levels(G, weight="weight"):
+    r"""Compute the trophic levels of nodes.
+
+    The trophic level of a node $i$ is
+
+    .. math::
+
+        s_i = 1 + \frac{1}{k^{in}_i} \sum_{j} a_{ij} s_j
+
+    where $k^{in}_i$ is the in-degree of node $i$:
+
+    .. math::
+
+        k^{in}_i = \sum_{j} a_{ij}
+
+    and nodes with $k^{in}_i = 0$ have $s_i = 1$ by convention.
+
+    These are calculated using the method outlined in Levine [1]_.
+
+    Parameters
+    ----------
+    G : DiGraph
+        A directed networkx graph
+
+    weight : string, optional (default: "weight")
+        The name of the edge attribute holding the edge weight.
+
+    Returns
+    -------
+    nodes : dict
+        Dictionary of nodes with trophic level as the value.
+
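+    Examples
+    --------
+    A minimal food chain; the basal node has trophic level 1 by
+    convention and each consumer sits one level higher (values rounded
+    here only to keep the doctest independent of float repr):
+
+    >>> G = nx.DiGraph([("a", "b"), ("b", "c")])
+    >>> {n: round(level) for n, level in nx.trophic_levels(G).items()}
+    {'a': 1, 'b': 2, 'c': 3}
+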
+    References
+    ----------
+    .. [1] Stephen Levine (1980) J. theor. Biol. 83, 195-207
+    """
+    import numpy as np
+
+    # find adjacency matrix
+    a = nx.adjacency_matrix(G, weight=weight).T.toarray()
+
+    # drop rows/columns where in-degree is zero
+    rowsum = np.sum(a, axis=1)
+    p = a[rowsum != 0][:, rowsum != 0]
+    # normalise so sum of in-degree weights is 1 along each row
+    p = p / rowsum[rowsum != 0][:, np.newaxis]
+
+    # calculate trophic levels
+    nn = p.shape[0]
+    i = np.eye(nn)
+    try:
+        n = np.linalg.inv(i - p)
+    except np.linalg.LinAlgError as err:
+        # LinAlgError is raised when there is a non-basal node
+        msg = (
+            "Trophic levels are only defined for graphs where every "
+            + "node has a path from a basal node (basal nodes are nodes "
+            + "with no incoming edges)."
+        )
+        raise nx.NetworkXError(msg) from err
+    y = n.sum(axis=1) + 1
+
+    levels = {}
+
+    # all nodes with in-degree zero have trophic level == 1
+    zero_node_ids = (node_id for node_id, degree in G.in_degree if degree == 0)
+    for node_id in zero_node_ids:
+        levels[node_id] = 1
+
+    # all other nodes have levels as calculated
+    nonzero_node_ids = (node_id for node_id, degree in G.in_degree if degree != 0)
+    for i, node_id in enumerate(nonzero_node_ids):
+        levels[node_id] = y.item(i)
+
+    return levels
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable(edge_attrs="weight")
+def trophic_differences(G, weight="weight"):
+    r"""Compute the trophic differences of the edges of a directed graph.
+
+    The trophic difference $x_{ij}$ for each edge is defined in Johnson
+    et al. [1]_ as:
+
+    .. math::
+        x_{ij} = s_j - s_i
+
+    where $s_i$ is the trophic level of node $i$.
+
+    Parameters
+    ----------
+    G : DiGraph
+        A directed networkx graph
+
+    weight : string, optional (default: "weight")
+        The name of the edge attribute holding the edge weight.
+
+    Returns
+    -------
+    diffs : dict
+        Dictionary of edges with trophic differences as the value.
+
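+    Examples
+    --------
+    In a simple chain every edge spans exactly one trophic level
+    (rounded for doctest stability):
+
+    >>> G = nx.DiGraph([("a", "b"), ("b", "c")])
+    >>> {e: round(x) for e, x in nx.trophic_differences(G).items()}
+    {('a', 'b'): 1, ('b', 'c'): 1}
+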
+    References
+    ----------
+    .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A.
+        Munoz (2014) PNAS "Trophic coherence determines food-web stability"
+    """
+    levels = trophic_levels(G, weight=weight)
+    diffs = {}
+    for u, v in G.edges:
+        diffs[(u, v)] = levels[v] - levels[u]
+    return diffs
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable(edge_attrs="weight")
+def trophic_incoherence_parameter(G, weight="weight", cannibalism=False):
+    r"""Compute the trophic incoherence parameter of a graph.
+
+    Trophic coherence is defined as the homogeneity of the distribution of
+    trophic distances: the more similar, the more coherent. This is measured by
+    the standard deviation of the trophic differences and referred to as the
+    trophic incoherence parameter $q$ in [1]_.
+
+    Parameters
+    ----------
+    G : DiGraph
+        A directed networkx graph
+
+    weight : string, optional (default: "weight")
+        The name of the edge attribute holding the edge weight.
+
+    cannibalism : bool
+        If set to False, self-loops are not considered in the calculation.
+
+    Returns
+    -------
+    trophic_incoherence_parameter : float
+        The trophic coherence of a graph
+
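+    Examples
+    --------
+    A perfectly coherent graph, in which every edge spans exactly one
+    trophic level, has $q = 0$ (rounded to absorb floating point noise):
+
+    >>> G = nx.DiGraph([(0, 1), (1, 2)])
+    >>> round(nx.trophic_incoherence_parameter(G), 6)
+    0.0
+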
+    References
+    ----------
+    .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A.
+        Munoz (2014) PNAS "Trophic coherence determines food-web stability"
+    """
+    import numpy as np
+
+    if cannibalism:
+        diffs = trophic_differences(G, weight=weight)
+    else:
+        # If no cannibalism, remove self-edges
+        self_loops = list(nx.selfloop_edges(G))
+        if self_loops:
+            # Make a copy so we do not change G's edges in memory
+            G_2 = G.copy()
+            G_2.remove_edges_from(self_loops)
+        else:
+            # Avoid copy otherwise
+            G_2 = G
+        diffs = trophic_differences(G_2, weight=weight)
+    return float(np.std(list(diffs.values())))
diff --git a/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/voterank_alg.py b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/voterank_alg.py
new file mode 100644
index 00000000..9b510b28
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/networkx/algorithms/centrality/voterank_alg.py
@@ -0,0 +1,95 @@
+"""Algorithm to select influential nodes in a graph using VoteRank."""
+
+import networkx as nx
+
+__all__ = ["voterank"]
+
+
+@nx._dispatchable
+def voterank(G, number_of_nodes=None):
+    """Select a list of influential nodes in a graph using VoteRank algorithm
+
+    VoteRank [1]_ computes a ranking of the nodes in a graph G based on a
+    voting scheme. With VoteRank, each node votes for its in-neighbors,
+    and the node with the highest number of votes is elected iteratively.
+    The voting ability of the out-neighbors of an elected node is
+    decreased in subsequent turns.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph.
+
+    number_of_nodes : integer, optional
+        Number of ranked nodes to extract (default all nodes).
+
+    Returns
+    -------
+    voterank : list
+        Ordered list of computed seeds.
+        Only nodes with positive number of votes are returned.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 4)])
+    >>> nx.voterank(G)
+    [0, 1]
+
+    The algorithm can be used both for undirected and directed graphs.
+    However, the directed version is different in two ways:
+    (i) nodes only vote for their in-neighbors and
+    (ii) only the voting ability of the elected node and its out-neighbors is updated:
+
+    >>> G = nx.DiGraph([(0, 1), (2, 1), (2, 3), (3, 4)])
+    >>> nx.voterank(G)
+    [2, 3]
+
+    Notes
+    -----
+    Each edge is treated independently in case of multigraphs.
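+
+    For example, parallel edges let a neighbor cast its vote twice; this
+    mirrors the multigraph unit test shipped with these tests:
+
+    >>> G = nx.MultiGraph(
+    ...     [(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)]
+    ... )
+    >>> nx.voterank(G)
+    [2, 1, 5, 4]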
+
+    References
+    ----------
+    .. [1] Zhang, J.-X. et al. (2016).
+        Identifying a set of influential spreaders in complex networks.
+        Sci. Rep. 6, 27823; doi: 10.1038/srep27823.
+    """
+    influential_nodes = []
+    vote_rank = {}
+    if len(G) == 0:
+        return influential_nodes
+    if number_of_nodes is None or number_of_nodes > len(G):
+        number_of_nodes = len(G)
+    if G.is_directed():
+        # For directed graphs compute average out-degree
+        avgDegree = sum(deg for _, deg in G.out_degree()) / len(G)
+    else:
+        # For undirected graphs compute average degree
+        avgDegree = sum(deg for _, deg in G.degree()) / len(G)
+    # step 1 - initiate all nodes to (0,1) (score, voting ability)
+    for n in G.nodes():
+        vote_rank[n] = [0, 1]
+    # Repeat steps 1b to 4 until number_of_nodes seeds are elected.
+    for _ in range(number_of_nodes):
+        # step 1b - reset rank
+        for n in G.nodes():
+            vote_rank[n][0] = 0
+        # step 2 - vote
+        for n, nbr in G.edges():
+            # In directed graphs nodes only vote for their in-neighbors
+            vote_rank[n][0] += vote_rank[nbr][1]
+            if not G.is_directed():
+                vote_rank[nbr][0] += vote_rank[n][1]
+        for n in influential_nodes:
+            vote_rank[n][0] = 0
+        # step 3 - select top node
+        n = max(G.nodes, key=lambda x: vote_rank[x][0])
+        if vote_rank[n][0] == 0:
+            return influential_nodes
+        influential_nodes.append(n)
+        # weaken the selected node
+        vote_rank[n] = [0, 0]
+        # step 4 - update voterank properties
+        for _, nbr in G.edges(n):
+            vote_rank[nbr][1] -= 1 / avgDegree
+            vote_rank[nbr][1] = max(vote_rank[nbr][1], 0)
+    return influential_nodes