Diffstat (limited to '.venv/lib/python3.12/site-packages/networkx/linalg')
16 files changed, 3672 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/__init__.py b/.venv/lib/python3.12/site-packages/networkx/linalg/__init__.py new file mode 100644 index 00000000..119db185 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/__init__.py @@ -0,0 +1,13 @@ +from networkx.linalg.attrmatrix import * +from networkx.linalg import attrmatrix +from networkx.linalg.spectrum import * +from networkx.linalg import spectrum +from networkx.linalg.graphmatrix import * +from networkx.linalg import graphmatrix +from networkx.linalg.laplacianmatrix import * +from networkx.linalg import laplacianmatrix +from networkx.linalg.algebraicconnectivity import * +from networkx.linalg.modularitymatrix import * +from networkx.linalg import modularitymatrix +from networkx.linalg.bethehessianmatrix import * +from networkx.linalg import bethehessianmatrix diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/algebraicconnectivity.py b/.venv/lib/python3.12/site-packages/networkx/linalg/algebraicconnectivity.py new file mode 100644 index 00000000..c94972a5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/algebraicconnectivity.py @@ -0,0 +1,657 @@ +""" +Algebraic connectivity and Fiedler vectors of undirected graphs. +""" + +from functools import partial + +import networkx as nx +from networkx.utils import ( + not_implemented_for, + np_random_state, + reverse_cuthill_mckee_ordering, +) + +__all__ = [ + "algebraic_connectivity", + "fiedler_vector", + "spectral_ordering", + "spectral_bisection", +] + + +class _PCGSolver: + """Preconditioned conjugate gradient method. + + To solve Ax = b: + M = A.diagonal() # or some other preconditioner + solver = _PCGSolver(lambda x: A * x, lambda x: M * x) + x = solver.solve(b) + + The inputs A and M are functions which compute + matrix multiplication on the argument. + A - multiply by the matrix A in Ax=b + M - multiply by M, the preconditioner surrogate for A + + Warning: There is no limit on number of iterations. + """ + + def __init__(self, A, M): + self._A = A + self._M = M + + def solve(self, B, tol): + import numpy as np + + # Densifying step - can this be kept sparse? + B = np.asarray(B) + X = np.ndarray(B.shape, order="F") + for j in range(B.shape[1]): + X[:, j] = self._solve(B[:, j], tol) + return X + + def _solve(self, b, tol): + import numpy as np + import scipy as sp + + A = self._A + M = self._M + tol *= sp.linalg.blas.dasum(b) + # Initialize. + x = np.zeros(b.shape) + r = b.copy() + z = M(r) + rz = sp.linalg.blas.ddot(r, z) + p = z.copy() + # Iterate. + while True: + Ap = A(p) + alpha = rz / sp.linalg.blas.ddot(p, Ap) + x = sp.linalg.blas.daxpy(p, x, a=alpha) + r = sp.linalg.blas.daxpy(Ap, r, a=-alpha) + if sp.linalg.blas.dasum(r) < tol: + return x + z = M(r) + beta = sp.linalg.blas.ddot(r, z) + beta, rz = beta / rz, beta + p = sp.linalg.blas.daxpy(p, z, a=beta) + + +class _LUSolver: + """LU factorization. + + To solve Ax = b: + solver = _LUSolver(A) + x = solver.solve(b) + + optional argument `tol` on solve method is ignored but included + to match _PCGsolver API. 
+ """ + + def __init__(self, A): + import scipy as sp + + self._LU = sp.sparse.linalg.splu( + A, + permc_spec="MMD_AT_PLUS_A", + diag_pivot_thresh=0.0, + options={"Equil": True, "SymmetricMode": True}, + ) + + def solve(self, B, tol=None): + import numpy as np + + B = np.asarray(B) + X = np.ndarray(B.shape, order="F") + for j in range(B.shape[1]): + X[:, j] = self._LU.solve(B[:, j]) + return X + + +def _preprocess_graph(G, weight): + """Compute edge weights and eliminate zero-weight edges.""" + if G.is_directed(): + H = nx.MultiGraph() + H.add_nodes_from(G) + H.add_weighted_edges_from( + ((u, v, e.get(weight, 1.0)) for u, v, e in G.edges(data=True) if u != v), + weight=weight, + ) + G = H + if not G.is_multigraph(): + edges = ( + (u, v, abs(e.get(weight, 1.0))) for u, v, e in G.edges(data=True) if u != v + ) + else: + edges = ( + (u, v, sum(abs(e.get(weight, 1.0)) for e in G[u][v].values())) + for u, v in G.edges() + if u != v + ) + H = nx.Graph() + H.add_nodes_from(G) + H.add_weighted_edges_from((u, v, e) for u, v, e in edges if e != 0) + return H + + +def _rcm_estimate(G, nodelist): + """Estimate the Fiedler vector using the reverse Cuthill-McKee ordering.""" + import numpy as np + + G = G.subgraph(nodelist) + order = reverse_cuthill_mckee_ordering(G) + n = len(nodelist) + index = dict(zip(nodelist, range(n))) + x = np.ndarray(n, dtype=float) + for i, u in enumerate(order): + x[index[u]] = i + x -= (n - 1) / 2.0 + return x + + +def _tracemin_fiedler(L, X, normalized, tol, method): + """Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm. + + The Fiedler vector of a connected undirected graph is the eigenvector + corresponding to the second smallest eigenvalue of the Laplacian matrix + of the graph. This function starts with the Laplacian L, not the Graph. + + Parameters + ---------- + L : Laplacian of a possibly weighted or normalized, but undirected graph + + X : Initial guess for a solution. Usually a matrix of random numbers. + This function allows more than one column in X to identify more than + one eigenvector if desired. + + normalized : bool + Whether the normalized Laplacian matrix is used. + + tol : float + Tolerance of relative residual in eigenvalue computation. + Warning: There is no limit on number of iterations. + + method : string + Should be 'tracemin_pcg' or 'tracemin_lu'. + Otherwise exception is raised. + + Returns + ------- + sigma, X : Two NumPy arrays of floats. + The lowest eigenvalues and corresponding eigenvectors of L. + The size of input X determines the size of these outputs. + As this is for Fiedler vectors, the zero eigenvalue (and + constant eigenvector) are avoided. + """ + import numpy as np + import scipy as sp + + n = X.shape[0] + + if normalized: + # Form the normalized Laplacian matrix and determine the eigenvector of + # its nullspace. 
+ e = np.sqrt(L.diagonal()) + # TODO: rm csr_array wrapper when spdiags array creation becomes available + D = sp.sparse.csr_array(sp.sparse.spdiags(1 / e, 0, n, n, format="csr")) + L = D @ L @ D + e *= 1.0 / np.linalg.norm(e, 2) + + if normalized: + + def project(X): + """Make X orthogonal to the nullspace of L.""" + X = np.asarray(X) + for j in range(X.shape[1]): + X[:, j] -= (X[:, j] @ e) * e + + else: + + def project(X): + """Make X orthogonal to the nullspace of L.""" + X = np.asarray(X) + for j in range(X.shape[1]): + X[:, j] -= X[:, j].sum() / n + + if method == "tracemin_pcg": + D = L.diagonal().astype(float) + solver = _PCGSolver(lambda x: L @ x, lambda x: D * x) + elif method == "tracemin_lu": + # Convert A to CSC to suppress SparseEfficiencyWarning. + A = sp.sparse.csc_array(L, dtype=float, copy=True) + # Force A to be nonsingular. Since A is the Laplacian matrix of a + # connected graph, its rank deficiency is one, and thus one diagonal + # element needs to modified. Changing to infinity forces a zero in the + # corresponding element in the solution. + i = (A.indptr[1:] - A.indptr[:-1]).argmax() + A[i, i] = np.inf + solver = _LUSolver(A) + else: + raise nx.NetworkXError(f"Unknown linear system solver: {method}") + + # Initialize. + Lnorm = abs(L).sum(axis=1).flatten().max() + project(X) + W = np.ndarray(X.shape, order="F") + + while True: + # Orthonormalize X. + X = np.linalg.qr(X)[0] + # Compute iteration matrix H. + W[:, :] = L @ X + H = X.T @ W + sigma, Y = sp.linalg.eigh(H, overwrite_a=True) + # Compute the Ritz vectors. + X = X @ Y + # Test for convergence exploiting the fact that L * X == W * Y. + res = sp.linalg.blas.dasum(W @ Y[:, 0] - sigma[0] * X[:, 0]) / Lnorm + if res < tol: + break + # Compute X = L \ X / (X' * (L \ X)). + # L \ X can have an arbitrary projection on the nullspace of L, + # which will be eliminated. + W[:, :] = solver.solve(X, tol) + X = (sp.linalg.inv(W.T @ X) @ W.T).T # Preserves Fortran storage order. 
+ project(X) + + return sigma, np.asarray(X) + + +def _get_fiedler_func(method): + """Returns a function that solves the Fiedler eigenvalue problem.""" + import numpy as np + + if method == "tracemin": # old style keyword <v2.1 + method = "tracemin_pcg" + if method in ("tracemin_pcg", "tracemin_lu"): + + def find_fiedler(L, x, normalized, tol, seed): + q = 1 if method == "tracemin_pcg" else min(4, L.shape[0] - 1) + X = np.asarray(seed.normal(size=(q, L.shape[0]))).T + sigma, X = _tracemin_fiedler(L, X, normalized, tol, method) + return sigma[0], X[:, 0] + + elif method == "lanczos" or method == "lobpcg": + + def find_fiedler(L, x, normalized, tol, seed): + import scipy as sp + + L = sp.sparse.csc_array(L, dtype=float) + n = L.shape[0] + if normalized: + # TODO: rm csc_array wrapping when spdiags array becomes available + D = sp.sparse.csc_array( + sp.sparse.spdiags( + 1.0 / np.sqrt(L.diagonal()), [0], n, n, format="csc" + ) + ) + L = D @ L @ D + if method == "lanczos" or n < 10: + # Avoid LOBPCG when n < 10 due to + # https://github.com/scipy/scipy/issues/3592 + # https://github.com/scipy/scipy/pull/3594 + sigma, X = sp.sparse.linalg.eigsh( + L, 2, which="SM", tol=tol, return_eigenvectors=True + ) + return sigma[1], X[:, 1] + else: + X = np.asarray(np.atleast_2d(x).T) + # TODO: rm csr_array wrapping when spdiags array becomes available + M = sp.sparse.csr_array(sp.sparse.spdiags(1.0 / L.diagonal(), 0, n, n)) + Y = np.ones(n) + if normalized: + Y /= D.diagonal() + sigma, X = sp.sparse.linalg.lobpcg( + L, X, M=M, Y=np.atleast_2d(Y).T, tol=tol, maxiter=n, largest=False + ) + return sigma[0], X[:, 0] + + else: + raise nx.NetworkXError(f"unknown method {method!r}.") + + return find_fiedler + + +@not_implemented_for("directed") +@np_random_state(5) +@nx._dispatchable(edge_attrs="weight") +def algebraic_connectivity( + G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None +): + r"""Returns the algebraic connectivity of an undirected graph. + + The algebraic connectivity of a connected undirected graph is the second + smallest eigenvalue of its Laplacian matrix. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + weight : object, optional (default: None) + The data key used to determine the weight of each edge. If None, then + each edge has unit weight. + + normalized : bool, optional (default: False) + Whether the normalized Laplacian matrix is used. + + tol : float, optional (default: 1e-8) + Tolerance of relative residual in eigenvalue computation. + + method : string, optional (default: 'tracemin_pcg') + Method of eigenvalue computation. It must be one of the tracemin + options shown below (TraceMIN), 'lanczos' (Lanczos iteration) + or 'lobpcg' (LOBPCG). + + The TraceMIN algorithm uses a linear system solver. The following + values allow specifying the solver to be used. + + =============== ======================================== + Value Solver + =============== ======================================== + 'tracemin_pcg' Preconditioned conjugate gradient method + 'tracemin_lu' LU factorization + =============== ======================================== + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness<randomness>`. + + Returns + ------- + algebraic_connectivity : float + Algebraic connectivity. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + NetworkXError + If G has less than two nodes. 
+ + Notes + ----- + Edge weights are interpreted by their absolute values. For MultiGraph's, + weights of parallel edges are summed. Zero-weighted edges are ignored. + + See Also + -------- + laplacian_matrix + + Examples + -------- + For undirected graphs algebraic connectivity can tell us if a graph is connected or not + `G` is connected iff ``algebraic_connectivity(G) > 0``: + + >>> G = nx.complete_graph(5) + >>> nx.algebraic_connectivity(G) > 0 + True + >>> G.add_node(10) # G is no longer connected + >>> nx.algebraic_connectivity(G) > 0 + False + + """ + if len(G) < 2: + raise nx.NetworkXError("graph has less than two nodes.") + G = _preprocess_graph(G, weight) + if not nx.is_connected(G): + return 0.0 + + L = nx.laplacian_matrix(G) + if L.shape[0] == 2: + return 2.0 * float(L[0, 0]) if not normalized else 2.0 + + find_fiedler = _get_fiedler_func(method) + x = None if method != "lobpcg" else _rcm_estimate(G, G) + sigma, fiedler = find_fiedler(L, x, normalized, tol, seed) + return float(sigma) + + +@not_implemented_for("directed") +@np_random_state(5) +@nx._dispatchable(edge_attrs="weight") +def fiedler_vector( + G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None +): + """Returns the Fiedler vector of a connected undirected graph. + + The Fiedler vector of a connected undirected graph is the eigenvector + corresponding to the second smallest eigenvalue of the Laplacian matrix + of the graph. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + weight : object, optional (default: None) + The data key used to determine the weight of each edge. If None, then + each edge has unit weight. + + normalized : bool, optional (default: False) + Whether the normalized Laplacian matrix is used. + + tol : float, optional (default: 1e-8) + Tolerance of relative residual in eigenvalue computation. + + method : string, optional (default: 'tracemin_pcg') + Method of eigenvalue computation. It must be one of the tracemin + options shown below (TraceMIN), 'lanczos' (Lanczos iteration) + or 'lobpcg' (LOBPCG). + + The TraceMIN algorithm uses a linear system solver. The following + values allow specifying the solver to be used. + + =============== ======================================== + Value Solver + =============== ======================================== + 'tracemin_pcg' Preconditioned conjugate gradient method + 'tracemin_lu' LU factorization + =============== ======================================== + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness<randomness>`. + + Returns + ------- + fiedler_vector : NumPy array of floats. + Fiedler vector. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + NetworkXError + If G has less than two nodes or is not connected. + + Notes + ----- + Edge weights are interpreted by their absolute values. For MultiGraph's, + weights of parallel edges are summed. Zero-weighted edges are ignored. + + See Also + -------- + laplacian_matrix + + Examples + -------- + Given a connected graph the signs of the values in the Fiedler vector can be + used to partition the graph into two components. + + >>> G = nx.barbell_graph(5, 0) + >>> nx.fiedler_vector(G, normalized=True, seed=1) + array([-0.32864129, -0.32864129, -0.32864129, -0.32864129, -0.26072899, + 0.26072899, 0.32864129, 0.32864129, 0.32864129, 0.32864129]) + + The connected components are the two 5-node cliques of the barbell graph. 
+ """ + import numpy as np + + if len(G) < 2: + raise nx.NetworkXError("graph has less than two nodes.") + G = _preprocess_graph(G, weight) + if not nx.is_connected(G): + raise nx.NetworkXError("graph is not connected.") + + if len(G) == 2: + return np.array([1.0, -1.0]) + + find_fiedler = _get_fiedler_func(method) + L = nx.laplacian_matrix(G) + x = None if method != "lobpcg" else _rcm_estimate(G, G) + sigma, fiedler = find_fiedler(L, x, normalized, tol, seed) + return fiedler + + +@np_random_state(5) +@nx._dispatchable(edge_attrs="weight") +def spectral_ordering( + G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None +): + """Compute the spectral_ordering of a graph. + + The spectral ordering of a graph is an ordering of its nodes where nodes + in the same weakly connected components appear contiguous and ordered by + their corresponding elements in the Fiedler vector of the component. + + Parameters + ---------- + G : NetworkX graph + A graph. + + weight : object, optional (default: None) + The data key used to determine the weight of each edge. If None, then + each edge has unit weight. + + normalized : bool, optional (default: False) + Whether the normalized Laplacian matrix is used. + + tol : float, optional (default: 1e-8) + Tolerance of relative residual in eigenvalue computation. + + method : string, optional (default: 'tracemin_pcg') + Method of eigenvalue computation. It must be one of the tracemin + options shown below (TraceMIN), 'lanczos' (Lanczos iteration) + or 'lobpcg' (LOBPCG). + + The TraceMIN algorithm uses a linear system solver. The following + values allow specifying the solver to be used. + + =============== ======================================== + Value Solver + =============== ======================================== + 'tracemin_pcg' Preconditioned conjugate gradient method + 'tracemin_lu' LU factorization + =============== ======================================== + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness<randomness>`. + + Returns + ------- + spectral_ordering : NumPy array of floats. + Spectral ordering of nodes. + + Raises + ------ + NetworkXError + If G is empty. + + Notes + ----- + Edge weights are interpreted by their absolute values. For MultiGraph's, + weights of parallel edges are summed. Zero-weighted edges are ignored. + + See Also + -------- + laplacian_matrix + """ + if len(G) == 0: + raise nx.NetworkXError("graph is empty.") + G = _preprocess_graph(G, weight) + + find_fiedler = _get_fiedler_func(method) + order = [] + for component in nx.connected_components(G): + size = len(component) + if size > 2: + L = nx.laplacian_matrix(G, component) + x = None if method != "lobpcg" else _rcm_estimate(G, component) + sigma, fiedler = find_fiedler(L, x, normalized, tol, seed) + sort_info = zip(fiedler, range(size), component) + order.extend(u for x, c, u in sorted(sort_info)) + else: + order.extend(component) + + return order + + +@nx._dispatchable(edge_attrs="weight") +def spectral_bisection( + G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None +): + """Bisect the graph using the Fiedler vector. + + This method uses the Fiedler vector to bisect a graph. + The partition is defined by the nodes which are associated with + either positive or negative values in the vector. + + Parameters + ---------- + G : NetworkX Graph + + weight : str, optional (default: weight) + The data key used to determine the weight of each edge. 
If None, then + each edge has unit weight. + + normalized : bool, optional (default: False) + Whether the normalized Laplacian matrix is used. + + tol : float, optional (default: 1e-8) + Tolerance of relative residual in eigenvalue computation. + + method : string, optional (default: 'tracemin_pcg') + Method of eigenvalue computation. It must be one of the tracemin + options shown below (TraceMIN), 'lanczos' (Lanczos iteration) + or 'lobpcg' (LOBPCG). + + The TraceMIN algorithm uses a linear system solver. The following + values allow specifying the solver to be used. + + =============== ======================================== + Value Solver + =============== ======================================== + 'tracemin_pcg' Preconditioned conjugate gradient method + 'tracemin_lu' LU factorization + =============== ======================================== + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness<randomness>`. + + Returns + ------- + bisection : tuple of sets + Sets with the bisection of nodes + + Examples + -------- + >>> G = nx.barbell_graph(3, 0) + >>> nx.spectral_bisection(G) + ({0, 1, 2}, {3, 4, 5}) + + References + ---------- + .. [1] M. E. J Newman 'Networks: An Introduction', pages 364-370 + Oxford University Press 2011. + """ + import numpy as np + + v = nx.fiedler_vector(G, weight, normalized, tol, method, seed) + nodes = np.array(list(G)) + pos_vals = v >= 0 + + return set(nodes[~pos_vals].tolist()), set(nodes[pos_vals].tolist()) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/attrmatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/attrmatrix.py new file mode 100644 index 00000000..b5a70492 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/attrmatrix.py @@ -0,0 +1,465 @@ +""" +Functions for constructing matrix-like objects from graph attributes. +""" + +import networkx as nx + +__all__ = ["attr_matrix", "attr_sparse_matrix"] + + +def _node_value(G, node_attr): + """Returns a function that returns a value from G.nodes[u]. + + We return a function expecting a node as its sole argument. Then, in the + simplest scenario, the returned function will return G.nodes[u][node_attr]. + However, we also handle the case when `node_attr` is None or when it is a + function itself. + + Parameters + ---------- + G : graph + A NetworkX graph + + node_attr : {None, str, callable} + Specification of how the value of the node attribute should be obtained + from the node attribute dictionary. + + Returns + ------- + value : function + A function expecting a node as its sole argument. The function will + returns a value from G.nodes[u] that depends on `edge_attr`. + + """ + if node_attr is None: + + def value(u): + return u + + elif not callable(node_attr): + # assume it is a key for the node attribute dictionary + def value(u): + return G.nodes[u][node_attr] + + else: + # Advanced: Allow users to specify something else. + # + # For example, + # node_attr = lambda u: G.nodes[u].get('size', .5) * 3 + # + value = node_attr + + return value + + +def _edge_value(G, edge_attr): + """Returns a function that returns a value from G[u][v]. + + Suppose there exists an edge between u and v. Then we return a function + expecting u and v as arguments. For Graph and DiGraph, G[u][v] is + the edge attribute dictionary, and the function (essentially) returns + G[u][v][edge_attr]. However, we also handle cases when `edge_attr` is None + and when it is a function itself. 
For MultiGraph and MultiDiGraph, G[u][v] + is a dictionary of all edges between u and v. In this case, the returned + function sums the value of `edge_attr` for every edge between u and v. + + Parameters + ---------- + G : graph + A NetworkX graph + + edge_attr : {None, str, callable} + Specification of how the value of the edge attribute should be obtained + from the edge attribute dictionary, G[u][v]. For multigraphs, G[u][v] + is a dictionary of all the edges between u and v. This allows for + special treatment of multiedges. + + Returns + ------- + value : function + A function expecting two nodes as parameters. The nodes should + represent the from- and to- node of an edge. The function will + return a value from G[u][v] that depends on `edge_attr`. + + """ + + if edge_attr is None: + # topological count of edges + + if G.is_multigraph(): + + def value(u, v): + return len(G[u][v]) + + else: + + def value(u, v): + return 1 + + elif not callable(edge_attr): + # assume it is a key for the edge attribute dictionary + + if edge_attr == "weight": + # provide a default value + if G.is_multigraph(): + + def value(u, v): + return sum(d.get(edge_attr, 1) for d in G[u][v].values()) + + else: + + def value(u, v): + return G[u][v].get(edge_attr, 1) + + else: + # otherwise, the edge attribute MUST exist for each edge + if G.is_multigraph(): + + def value(u, v): + return sum(d[edge_attr] for d in G[u][v].values()) + + else: + + def value(u, v): + return G[u][v][edge_attr] + + else: + # Advanced: Allow users to specify something else. + # + # Alternative default value: + # edge_attr = lambda u,v: G[u][v].get('thickness', .5) + # + # Function on an attribute: + # edge_attr = lambda u,v: abs(G[u][v]['weight']) + # + # Handle Multi(Di)Graphs differently: + # edge_attr = lambda u,v: numpy.prod([d['size'] for d in G[u][v].values()]) + # + # Ignore multiple edges + # edge_attr = lambda u,v: 1 if len(G[u][v]) else 0 + # + value = edge_attr + + return value + + +@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr") +def attr_matrix( + G, + edge_attr=None, + node_attr=None, + normalized=False, + rc_order=None, + dtype=None, + order=None, +): + """Returns the attribute matrix using attributes from `G` as a numpy array. + + If only `G` is passed in, then the adjacency matrix is constructed. + + Let A be a discrete set of values for the node attribute `node_attr`. Then + the elements of A represent the rows and columns of the constructed matrix. + Now, iterate through every edge e=(u,v) in `G` and consider the value + of the edge attribute `edge_attr`. If ua and va are the values of the + node attribute `node_attr` for u and v, respectively, then the value of + the edge attribute is added to the matrix element at (ua, va). + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the attribute matrix. + + edge_attr : str, optional + Each element of the matrix represents a running total of the + specified edge attribute for edges whose node attributes correspond + to the rows/cols of the matrix. The attribute must be present for + all edges in the graph. If no attribute is specified, then we + just count the number of edges whose node attributes correspond + to the matrix element. + + node_attr : str, optional + Each row and column in the matrix represents a particular value + of the node attribute. The attribute must be present for all nodes + in the graph. Note, the values of this attribute should be reliably + hashable. So, float values are not recommended. 
If no attribute is + specified, then the rows and columns will be the nodes of the graph. + + normalized : bool, optional + If True, then each row is normalized by the summation of its values. + + rc_order : list, optional + A list of the node attribute values. This list specifies the ordering + of rows and columns of the array. If no ordering is provided, then + the ordering will be random (and also, a return value). + + Other Parameters + ---------------- + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. Keep in mind certain + dtypes can yield unexpected results if the array is to be normalized. + The parameter is passed to numpy.zeros(). If unspecified, the NumPy + default is used. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. This parameter is passed to + numpy.zeros(). If unspecified, the NumPy default is used. + + Returns + ------- + M : 2D NumPy ndarray + The attribute matrix. + + ordering : list + If `rc_order` was specified, then only the attribute matrix is returned. + However, if `rc_order` was None, then the ordering used to construct + the matrix is returned as well. + + Examples + -------- + Construct an adjacency matrix: + + >>> G = nx.Graph() + >>> G.add_edge(0, 1, thickness=1, weight=3) + >>> G.add_edge(0, 2, thickness=2) + >>> G.add_edge(1, 2, thickness=3) + >>> nx.attr_matrix(G, rc_order=[0, 1, 2]) + array([[0., 1., 1.], + [1., 0., 1.], + [1., 1., 0.]]) + + Alternatively, we can obtain the matrix describing edge thickness. + + >>> nx.attr_matrix(G, edge_attr="thickness", rc_order=[0, 1, 2]) + array([[0., 1., 2.], + [1., 0., 3.], + [2., 3., 0.]]) + + We can also color the nodes and ask for the probability distribution over + all edges (u,v) describing: + + Pr(v has color Y | u has color X) + + >>> G.nodes[0]["color"] = "red" + >>> G.nodes[1]["color"] = "red" + >>> G.nodes[2]["color"] = "blue" + >>> rc = ["red", "blue"] + >>> nx.attr_matrix(G, node_attr="color", normalized=True, rc_order=rc) + array([[0.33333333, 0.66666667], + [1. , 0. ]]) + + For example, the above tells us that for all edges (u,v): + + Pr( v is red | u is red) = 1/3 + Pr( v is blue | u is red) = 2/3 + + Pr( v is red | u is blue) = 1 + Pr( v is blue | u is blue) = 0 + + Finally, we can obtain the total weights listed by the node colors. + + >>> nx.attr_matrix(G, edge_attr="weight", node_attr="color", rc_order=rc) + array([[3., 2.], + [2., 0.]]) + + Thus, the total weight over all edges (u,v) with u and v having colors: + + (red, red) is 3 # the sole contribution is from edge (0,1) + (red, blue) is 2 # contributions from edges (0,2) and (1,2) + (blue, red) is 2 # same as (red, blue) since graph is undirected + (blue, blue) is 0 # there are no edges with blue endpoints + + """ + import numpy as np + + edge_value = _edge_value(G, edge_attr) + node_value = _node_value(G, node_attr) + + if rc_order is None: + ordering = list({node_value(n) for n in G}) + else: + ordering = rc_order + + N = len(ordering) + undirected = not G.is_directed() + index = dict(zip(ordering, range(N))) + M = np.zeros((N, N), dtype=dtype, order=order) + + seen = set() + for u, nbrdict in G.adjacency(): + for v in nbrdict: + # Obtain the node attribute values. 
+ i, j = index[node_value(u)], index[node_value(v)] + if v not in seen: + M[i, j] += edge_value(u, v) + if undirected: + M[j, i] = M[i, j] + + if undirected: + seen.add(u) + + if normalized: + M /= M.sum(axis=1).reshape((N, 1)) + + if rc_order is None: + return M, ordering + else: + return M + + +@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr") +def attr_sparse_matrix( + G, edge_attr=None, node_attr=None, normalized=False, rc_order=None, dtype=None +): + """Returns a SciPy sparse array using attributes from G. + + If only `G` is passed in, then the adjacency matrix is constructed. + + Let A be a discrete set of values for the node attribute `node_attr`. Then + the elements of A represent the rows and columns of the constructed matrix. + Now, iterate through every edge e=(u,v) in `G` and consider the value + of the edge attribute `edge_attr`. If ua and va are the values of the + node attribute `node_attr` for u and v, respectively, then the value of + the edge attribute is added to the matrix element at (ua, va). + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy matrix. + + edge_attr : str, optional + Each element of the matrix represents a running total of the + specified edge attribute for edges whose node attributes correspond + to the rows/cols of the matrix. The attribute must be present for + all edges in the graph. If no attribute is specified, then we + just count the number of edges whose node attributes correspond + to the matrix element. + + node_attr : str, optional + Each row and column in the matrix represents a particular value + of the node attribute. The attribute must be present for all nodes + in the graph. Note, the values of this attribute should be reliably + hashable. So, float values are not recommended. If no attribute is + specified, then the rows and columns will be the nodes of the graph. + + normalized : bool, optional + If True, then each row is normalized by the summation of its values. + + rc_order : list, optional + A list of the node attribute values. This list specifies the ordering + of rows and columns of the array. If no ordering is provided, then + the ordering will be random (and also, a return value). + + Other Parameters + ---------------- + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. Keep in mind certain + dtypes can yield unexpected results if the array is to be normalized. + The parameter is passed to numpy.zeros(). If unspecified, the NumPy + default is used. + + Returns + ------- + M : SciPy sparse array + The attribute matrix. + + ordering : list + If `rc_order` was specified, then only the matrix is returned. + However, if `rc_order` was None, then the ordering used to construct + the matrix is returned as well. + + Examples + -------- + Construct an adjacency matrix: + + >>> G = nx.Graph() + >>> G.add_edge(0, 1, thickness=1, weight=3) + >>> G.add_edge(0, 2, thickness=2) + >>> G.add_edge(1, 2, thickness=3) + >>> M = nx.attr_sparse_matrix(G, rc_order=[0, 1, 2]) + >>> M.toarray() + array([[0., 1., 1.], + [1., 0., 1.], + [1., 1., 0.]]) + + Alternatively, we can obtain the matrix describing edge thickness. 
+ + >>> M = nx.attr_sparse_matrix(G, edge_attr="thickness", rc_order=[0, 1, 2]) + >>> M.toarray() + array([[0., 1., 2.], + [1., 0., 3.], + [2., 3., 0.]]) + + We can also color the nodes and ask for the probability distribution over + all edges (u,v) describing: + + Pr(v has color Y | u has color X) + + >>> G.nodes[0]["color"] = "red" + >>> G.nodes[1]["color"] = "red" + >>> G.nodes[2]["color"] = "blue" + >>> rc = ["red", "blue"] + >>> M = nx.attr_sparse_matrix(G, node_attr="color", normalized=True, rc_order=rc) + >>> M.toarray() + array([[0.33333333, 0.66666667], + [1. , 0. ]]) + + For example, the above tells us that for all edges (u,v): + + Pr( v is red | u is red) = 1/3 + Pr( v is blue | u is red) = 2/3 + + Pr( v is red | u is blue) = 1 + Pr( v is blue | u is blue) = 0 + + Finally, we can obtain the total weights listed by the node colors. + + >>> M = nx.attr_sparse_matrix(G, edge_attr="weight", node_attr="color", rc_order=rc) + >>> M.toarray() + array([[3., 2.], + [2., 0.]]) + + Thus, the total weight over all edges (u,v) with u and v having colors: + + (red, red) is 3 # the sole contribution is from edge (0,1) + (red, blue) is 2 # contributions from edges (0,2) and (1,2) + (blue, red) is 2 # same as (red, blue) since graph is undirected + (blue, blue) is 0 # there are no edges with blue endpoints + + """ + import numpy as np + import scipy as sp + + edge_value = _edge_value(G, edge_attr) + node_value = _node_value(G, node_attr) + + if rc_order is None: + ordering = list({node_value(n) for n in G}) + else: + ordering = rc_order + + N = len(ordering) + undirected = not G.is_directed() + index = dict(zip(ordering, range(N))) + M = sp.sparse.lil_array((N, N), dtype=dtype) + + seen = set() + for u, nbrdict in G.adjacency(): + for v in nbrdict: + # Obtain the node attribute values. + i, j = index[node_value(u)], index[node_value(v)] + if v not in seen: + M[i, j] += edge_value(u, v) + if undirected: + M[j, i] = M[i, j] + + if undirected: + seen.add(u) + + if normalized: + M *= 1 / M.sum(axis=1)[:, np.newaxis] # in-place mult preserves sparse + + if rc_order is None: + return M, ordering + else: + return M diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/bethehessianmatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/bethehessianmatrix.py new file mode 100644 index 00000000..3d42fc6d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/bethehessianmatrix.py @@ -0,0 +1,79 @@ +"""Bethe Hessian or deformed Laplacian matrix of graphs.""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["bethe_hessian_matrix"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable +def bethe_hessian_matrix(G, r=None, nodelist=None): + r"""Returns the Bethe Hessian matrix of G. + + The Bethe Hessian is a family of matrices parametrized by r, defined as + H(r) = (r^2 - 1) I - r A + D where A is the adjacency matrix, D is the + diagonal matrix of node degrees, and I is the identify matrix. It is equal + to the graph laplacian when the regularizer r = 1. + + The default choice of regularizer should be the ratio [2]_ + + .. math:: + r_m = \left(\sum k_i \right)^{-1}\left(\sum k_i^2 \right) - 1 + + Parameters + ---------- + G : Graph + A NetworkX graph + r : float + Regularizer parameter + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by ``G.nodes()``. 
+ + Returns + ------- + H : scipy.sparse.csr_array + The Bethe Hessian matrix of `G`, with parameter `r`. + + Examples + -------- + >>> k = [3, 2, 2, 1, 0] + >>> G = nx.havel_hakimi_graph(k) + >>> H = nx.bethe_hessian_matrix(G) + >>> H.toarray() + array([[ 3.5625, -1.25 , -1.25 , -1.25 , 0. ], + [-1.25 , 2.5625, -1.25 , 0. , 0. ], + [-1.25 , -1.25 , 2.5625, 0. , 0. ], + [-1.25 , 0. , 0. , 1.5625, 0. ], + [ 0. , 0. , 0. , 0. , 0.5625]]) + + See Also + -------- + bethe_hessian_spectrum + adjacency_matrix + laplacian_matrix + + References + ---------- + .. [1] A. Saade, F. Krzakala and L. Zdeborová + "Spectral Clustering of Graphs with the Bethe Hessian", + Advances in Neural Information Processing Systems, 2014. + .. [2] C. M. Le, E. Levina + "Estimating the number of communities in networks by spectral methods" + arXiv:1507.00827, 2015. + """ + import scipy as sp + + if nodelist is None: + nodelist = list(G) + if r is None: + r = sum(d**2 for v, d in nx.degree(G)) / sum(d for v, d in nx.degree(G)) - 1 + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, format="csr") + n, m = A.shape + # TODO: Rm csr_array wrapper when spdiags array creation becomes available + D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, m, n, format="csr")) + # TODO: Rm csr_array wrapper when eye array creation becomes available + I = sp.sparse.csr_array(sp.sparse.eye(m, n, format="csr")) + return (r**2 - 1) * I - r * A + D diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/graphmatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/graphmatrix.py new file mode 100644 index 00000000..9f477bcc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/graphmatrix.py @@ -0,0 +1,168 @@ +""" +Adjacency matrix and incidence matrix of graphs. +""" + +import networkx as nx + +__all__ = ["incidence_matrix", "adjacency_matrix"] + + +@nx._dispatchable(edge_attrs="weight") +def incidence_matrix( + G, nodelist=None, edgelist=None, oriented=False, weight=None, *, dtype=None +): + """Returns incidence matrix of G. + + The incidence matrix assigns each row to a node and each column to an edge. + For a standard incidence matrix a 1 appears wherever a row's node is + incident on the column's edge. For an oriented incidence matrix each + edge is assigned an orientation (arbitrarily for undirected and aligning to + direction for directed). A -1 appears for the source (tail) of an edge and + 1 for the destination (head) of the edge. The elements are zero otherwise. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list, optional (default= all nodes in G) + The rows are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + edgelist : list, optional (default= all edges in G) + The columns are ordered according to the edges in edgelist. + If edgelist is None, then the ordering is produced by G.edges(). + + oriented: bool, optional (default=False) + If True, matrix elements are +1 or -1 for the head or tail node + respectively of each edge. If False, +1 occurs at both nodes. + + weight : string or None, optional (default=None) + The edge data key used to provide each value in the matrix. + If None, then each edge has weight 1. Edge weights, if used, + should be positive so that the orientation can provide the sign. + + dtype : a NumPy dtype or None (default=None) + The dtype of the output sparse array. This type should be a compatible + type of the weight argument, eg. 
if weight would return a float this + argument should also be a float. + If None, then the default for SciPy is used. + + Returns + ------- + A : SciPy sparse array + The incidence matrix of G. + + Notes + ----- + For MultiGraph/MultiDiGraph, the edges in edgelist should be + (u,v,key) 3-tuples. + + "Networks are the best discrete model for so many problems in + applied mathematics" [1]_. + + References + ---------- + .. [1] Gil Strang, Network applications: A = incidence matrix, + http://videolectures.net/mit18085f07_strang_lec03/ + """ + import scipy as sp + + if nodelist is None: + nodelist = list(G) + if edgelist is None: + if G.is_multigraph(): + edgelist = list(G.edges(keys=True)) + else: + edgelist = list(G.edges()) + A = sp.sparse.lil_array((len(nodelist), len(edgelist)), dtype=dtype) + node_index = {node: i for i, node in enumerate(nodelist)} + for ei, e in enumerate(edgelist): + (u, v) = e[:2] + if u == v: + continue # self loops give zero column + try: + ui = node_index[u] + vi = node_index[v] + except KeyError as err: + raise nx.NetworkXError( + f"node {u} or {v} in edgelist but not in nodelist" + ) from err + if weight is None: + wt = 1 + else: + if G.is_multigraph(): + ekey = e[2] + wt = G[u][v][ekey].get(weight, 1) + else: + wt = G[u][v].get(weight, 1) + if oriented: + A[ui, ei] = -wt + A[vi, ei] = wt + else: + A[ui, ei] = wt + A[vi, ei] = wt + return A.asformat("csc") + + +@nx._dispatchable(edge_attrs="weight") +def adjacency_matrix(G, nodelist=None, dtype=None, weight="weight"): + """Returns adjacency matrix of `G`. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If ``nodelist=None`` (the default), then the ordering is produced by + ``G.nodes()``. + + dtype : NumPy data-type, optional + The desired data-type for the array. + If `None`, then the NumPy default is used. + + weight : string or None, optional (default='weight') + The edge data key used to provide each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + A : SciPy sparse array + Adjacency matrix representation of G. + + Notes + ----- + For directed graphs, entry ``i, j`` corresponds to an edge from ``i`` to ``j``. + + If you want a pure Python adjacency matrix representation try + :func:`~networkx.convert.to_dict_of_dicts` which will return a + dictionary-of-dictionaries format that can be addressed as a + sparse matrix. + + For multigraphs with parallel edges the weights are summed. + See :func:`networkx.convert_matrix.to_numpy_array` for other options. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the edge weight attribute + (or the number 1 if the edge has no weight attribute). 
If the + alternate convention of doubling the edge weight is desired the + resulting SciPy sparse array can be modified as follows:: + + >>> G = nx.Graph([(1, 1)]) + >>> A = nx.adjacency_matrix(G) + >>> A.toarray() + array([[1]]) + >>> A.setdiag(A.diagonal() * 2) + >>> A.toarray() + array([[2]]) + + See Also + -------- + to_numpy_array + to_scipy_sparse_array + to_dict_of_dicts + adjacency_spectrum + """ + return nx.to_scipy_sparse_array(G, nodelist=nodelist, dtype=dtype, weight=weight) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/laplacianmatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/laplacianmatrix.py new file mode 100644 index 00000000..d49ae491 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/laplacianmatrix.py @@ -0,0 +1,617 @@ +"""Laplacian matrix of graphs. + +All calculations here are done using the out-degree. For Laplacians using +in-degree, use `G.reverse(copy=False)` instead of `G` and take the transpose. + +The `laplacian_matrix` function provides an unnormalized matrix, +while `normalized_laplacian_matrix`, `directed_laplacian_matrix`, +and `directed_combinatorial_laplacian_matrix` are all normalized. +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "laplacian_matrix", + "normalized_laplacian_matrix", + "total_spanning_tree_weight", + "directed_laplacian_matrix", + "directed_combinatorial_laplacian_matrix", +] + + +@nx._dispatchable(edge_attrs="weight") +def laplacian_matrix(G, nodelist=None, weight="weight"): + """Returns the Laplacian matrix of G. + + The graph Laplacian is the matrix L = D - A, where + A is the adjacency matrix and D is the diagonal matrix of node degrees. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + L : SciPy sparse array + The Laplacian matrix of G. + + Notes + ----- + For MultiGraph, the edges weights are summed. + + This returns an unnormalized matrix. For a normalized output, + use `normalized_laplacian_matrix`, `directed_laplacian_matrix`, + or `directed_combinatorial_laplacian_matrix`. + + This calculation uses the out-degree of the graph `G`. To use the + in-degree for calculations instead, use `G.reverse(copy=False)` and + take the transpose. + + See Also + -------- + :func:`~networkx.convert_matrix.to_numpy_array` + normalized_laplacian_matrix + directed_laplacian_matrix + directed_combinatorial_laplacian_matrix + :func:`~networkx.linalg.spectrum.laplacian_spectrum` + + Examples + -------- + For graphs with multiple connected components, L is permutation-similar + to a block diagonal matrix where each block is the respective Laplacian + matrix for each component. + + >>> G = nx.Graph([(1, 2), (2, 3), (4, 5)]) + >>> print(nx.laplacian_matrix(G).toarray()) + [[ 1 -1 0 0 0] + [-1 2 -1 0 0] + [ 0 -1 1 0 0] + [ 0 0 0 1 -1] + [ 0 0 0 -1 1]] + + >>> edges = [ + ... (1, 2), + ... (2, 1), + ... (2, 4), + ... (4, 3), + ... (3, 4), + ... ] + >>> DiG = nx.DiGraph(edges) + >>> print(nx.laplacian_matrix(DiG).toarray()) + [[ 1 -1 0 0] + [-1 2 -1 0] + [ 0 0 1 -1] + [ 0 0 -1 1]] + + Notice that node 4 is represented by the third column and row. 
This is because + by default the row/column order is the order of `G.nodes` (i.e. the node added + order -- in the edgelist, 4 first appears in (2, 4), before node 3 in edge (4, 3).) + To control the node order of the matrix, use the `nodelist` argument. + + >>> print(nx.laplacian_matrix(DiG, nodelist=[1, 2, 3, 4]).toarray()) + [[ 1 -1 0 0] + [-1 2 0 -1] + [ 0 0 1 -1] + [ 0 0 -1 1]] + + This calculation uses the out-degree of the graph `G`. To use the + in-degree for calculations instead, use `G.reverse(copy=False)` and + take the transpose. + + >>> print(nx.laplacian_matrix(DiG.reverse(copy=False)).toarray().T) + [[ 1 -1 0 0] + [-1 1 -1 0] + [ 0 0 2 -1] + [ 0 0 -1 1]] + + References + ---------- + .. [1] Langville, Amy N., and Carl D. Meyer. Google’s PageRank and Beyond: + The Science of Search Engine Rankings. Princeton University Press, 2006. + + """ + import scipy as sp + + if nodelist is None: + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") + n, m = A.shape + # TODO: rm csr_array wrapper when spdiags can produce arrays + D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, m, n, format="csr")) + return D - A + + +@nx._dispatchable(edge_attrs="weight") +def normalized_laplacian_matrix(G, nodelist=None, weight="weight"): + r"""Returns the normalized Laplacian matrix of G. + + The normalized graph Laplacian is the matrix + + .. math:: + + N = D^{-1/2} L D^{-1/2} + + where `L` is the graph Laplacian and `D` is the diagonal matrix of + node degrees [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + N : SciPy sparse array + The normalized Laplacian matrix of G. + + Notes + ----- + For MultiGraph, the edges weights are summed. + See :func:`to_numpy_array` for other options. + + If the Graph contains selfloops, D is defined as ``diag(sum(A, 1))``, where A is + the adjacency matrix [2]_. + + This calculation uses the out-degree of the graph `G`. To use the + in-degree for calculations instead, use `G.reverse(copy=False)` and + take the transpose. + + For an unnormalized output, use `laplacian_matrix`. + + Examples + -------- + + >>> import numpy as np + >>> edges = [ + ... (1, 2), + ... (2, 1), + ... (2, 4), + ... (4, 3), + ... (3, 4), + ... ] + >>> DiG = nx.DiGraph(edges) + >>> print(nx.normalized_laplacian_matrix(DiG).toarray()) + [[ 1. -0.70710678 0. 0. ] + [-0.70710678 1. -0.70710678 0. ] + [ 0. 0. 1. -1. ] + [ 0. 0. -1. 1. ]] + + Notice that node 4 is represented by the third column and row. This is because + by default the row/column order is the order of `G.nodes` (i.e. the node added + order -- in the edgelist, 4 first appears in (2, 4), before node 3 in edge (4, 3).) + To control the node order of the matrix, use the `nodelist` argument. + + >>> print(nx.normalized_laplacian_matrix(DiG, nodelist=[1, 2, 3, 4]).toarray()) + [[ 1. -0.70710678 0. 0. ] + [-0.70710678 1. 0. -0.70710678] + [ 0. 0. 1. -1. ] + [ 0. 0. -1. 1. ]] + >>> G = nx.Graph(edges) + >>> print(nx.normalized_laplacian_matrix(G).toarray()) + [[ 1. -0.70710678 0. 0. ] + [-0.70710678 1. -0.5 0. ] + [ 0. -0.5 1. -0.70710678] + [ 0. 0. -0.70710678 1. 
]] + + See Also + -------- + laplacian_matrix + normalized_laplacian_spectrum + directed_laplacian_matrix + directed_combinatorial_laplacian_matrix + + References + ---------- + .. [1] Fan Chung-Graham, Spectral Graph Theory, + CBMS Regional Conference Series in Mathematics, Number 92, 1997. + .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized + Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98, + March 2007. + .. [3] Langville, Amy N., and Carl D. Meyer. Google’s PageRank and Beyond: + The Science of Search Engine Rankings. Princeton University Press, 2006. + """ + import numpy as np + import scipy as sp + + if nodelist is None: + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") + n, _ = A.shape + diags = A.sum(axis=1) + # TODO: rm csr_array wrapper when spdiags can produce arrays + D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, n, n, format="csr")) + L = D - A + with np.errstate(divide="ignore"): + diags_sqrt = 1.0 / np.sqrt(diags) + diags_sqrt[np.isinf(diags_sqrt)] = 0 + # TODO: rm csr_array wrapper when spdiags can produce arrays + DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, n, n, format="csr")) + return DH @ (L @ DH) + + +@nx._dispatchable(edge_attrs="weight") +def total_spanning_tree_weight(G, weight=None, root=None): + """ + Returns the total weight of all spanning trees of `G`. + + Kirchoff's Tree Matrix Theorem [1]_, [2]_ states that the determinant of any + cofactor of the Laplacian matrix of a graph is the number of spanning trees + in the graph. For a weighted Laplacian matrix, it is the sum across all + spanning trees of the multiplicative weight of each tree. That is, the + weight of each tree is the product of its edge weights. + + For unweighted graphs, the total weight equals the number of spanning trees in `G`. + + For directed graphs, the total weight follows by summing over all directed + spanning trees in `G` that start in the `root` node [3]_. + + .. deprecated:: 3.3 + + ``total_spanning_tree_weight`` is deprecated and will be removed in v3.5. + Use ``nx.number_of_spanning_trees(G)`` instead. + + Parameters + ---------- + G : NetworkX Graph + + weight : string or None, optional (default=None) + The key for the edge attribute holding the edge weight. + If None, then each edge has weight 1. + + root : node (only required for directed graphs) + A node in the directed graph `G`. + + Returns + ------- + total_weight : float + Undirected graphs: + The sum of the total multiplicative weights for all spanning trees in `G`. + Directed graphs: + The sum of the total multiplicative weights for all spanning trees of `G`, + rooted at node `root`. + + Raises + ------ + NetworkXPointlessConcept + If `G` does not contain any nodes. + + NetworkXError + If the graph `G` is not (weakly) connected, + or if `G` is directed and the root node is not specified or not in G. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> round(nx.total_spanning_tree_weight(G)) + 125 + + >>> G = nx.Graph() + >>> G.add_edge(1, 2, weight=2) + >>> G.add_edge(1, 3, weight=1) + >>> G.add_edge(2, 3, weight=1) + >>> round(nx.total_spanning_tree_weight(G, "weight")) + 5 + + Notes + ----- + Self-loops are excluded. Multi-edges are contracted in one edge + equal to the sum of the weights. + + References + ---------- + .. [1] Wikipedia + "Kirchhoff's theorem." + https://en.wikipedia.org/wiki/Kirchhoff%27s_theorem + .. [2] Kirchhoff, G. R. 
+ Über die Auflösung der Gleichungen, auf welche man + bei der Untersuchung der linearen Vertheilung + Galvanischer Ströme geführt wird + Annalen der Physik und Chemie, vol. 72, pp. 497-508, 1847. + .. [3] Margoliash, J. + "Matrix-Tree Theorem for Directed Graphs" + https://www.math.uchicago.edu/~may/VIGRE/VIGRE2010/REUPapers/Margoliash.pdf + """ + import warnings + + warnings.warn( + ( + "\n\ntotal_spanning_tree_weight is deprecated and will be removed in v3.5.\n" + "Use `nx.number_of_spanning_trees(G)` instead." + ), + category=DeprecationWarning, + stacklevel=3, + ) + + return nx.number_of_spanning_trees(G, weight=weight, root=root) + + +############################################################################### +# Code based on work from https://github.com/bjedwards + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight") +def directed_laplacian_matrix( + G, nodelist=None, weight="weight", walk_type=None, alpha=0.95 +): + r"""Returns the directed Laplacian matrix of G. + + The graph directed Laplacian is the matrix + + .. math:: + + L = I - \frac{1}{2} \left (\Phi^{1/2} P \Phi^{-1/2} + \Phi^{-1/2} P^T \Phi^{1/2} \right ) + + where `I` is the identity matrix, `P` is the transition matrix of the + graph, and `\Phi` a matrix with the Perron vector of `P` in the diagonal and + zeros elsewhere [1]_. + + Depending on the value of walk_type, `P` can be the transition matrix + induced by a random walk, a lazy random walk, or a random walk with + teleportation (PageRank). + + Parameters + ---------- + G : DiGraph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + walk_type : string or None, optional (default=None) + One of ``"random"``, ``"lazy"``, or ``"pagerank"``. If ``walk_type=None`` + (the default), then a value is selected according to the properties of `G`: + - ``walk_type="random"`` if `G` is strongly connected and aperiodic + - ``walk_type="lazy"`` if `G` is strongly connected but not aperiodic + - ``walk_type="pagerank"`` for all other cases. + + alpha : real + (1 - alpha) is the teleportation probability used with pagerank + + Returns + ------- + L : NumPy matrix + Normalized Laplacian of G. + + Notes + ----- + Only implemented for DiGraphs + + The result is always a symmetric matrix. + + This calculation uses the out-degree of the graph `G`. To use the + in-degree for calculations instead, use `G.reverse(copy=False)` and + take the transpose. + + See Also + -------- + laplacian_matrix + normalized_laplacian_matrix + directed_combinatorial_laplacian_matrix + + References + ---------- + .. [1] Fan Chung (2005). + Laplacians and the Cheeger inequality for directed graphs. + Annals of Combinatorics, 9(1), 2005 + """ + import numpy as np + import scipy as sp + + # NOTE: P has type ndarray if walk_type=="pagerank", else csr_array + P = _transition_matrix( + G, nodelist=nodelist, weight=weight, walk_type=walk_type, alpha=alpha + ) + + n, m = P.shape + + evals, evecs = sp.sparse.linalg.eigs(P.T, k=1) + v = evecs.flatten().real + p = v / v.sum() + # p>=0 by Perron-Frobenius Thm. 
Use abs() to fix roundoff across zero gh-6865 + sqrtp = np.sqrt(np.abs(p)) + Q = ( + # TODO: rm csr_array wrapper when spdiags creates arrays + sp.sparse.csr_array(sp.sparse.spdiags(sqrtp, 0, n, n)) + @ P + # TODO: rm csr_array wrapper when spdiags creates arrays + @ sp.sparse.csr_array(sp.sparse.spdiags(1.0 / sqrtp, 0, n, n)) + ) + # NOTE: This could be sparsified for the non-pagerank cases + I = np.identity(len(G)) + + return I - (Q + Q.T) / 2.0 + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight") +def directed_combinatorial_laplacian_matrix( + G, nodelist=None, weight="weight", walk_type=None, alpha=0.95 +): + r"""Return the directed combinatorial Laplacian matrix of G. + + The graph directed combinatorial Laplacian is the matrix + + .. math:: + + L = \Phi - \frac{1}{2} \left (\Phi P + P^T \Phi \right) + + where `P` is the transition matrix of the graph and `\Phi` a matrix + with the Perron vector of `P` in the diagonal and zeros elsewhere [1]_. + + Depending on the value of walk_type, `P` can be the transition matrix + induced by a random walk, a lazy random walk, or a random walk with + teleportation (PageRank). + + Parameters + ---------- + G : DiGraph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + walk_type : string or None, optional (default=None) + One of ``"random"``, ``"lazy"``, or ``"pagerank"``. If ``walk_type=None`` + (the default), then a value is selected according to the properties of `G`: + - ``walk_type="random"`` if `G` is strongly connected and aperiodic + - ``walk_type="lazy"`` if `G` is strongly connected but not aperiodic + - ``walk_type="pagerank"`` for all other cases. + + alpha : real + (1 - alpha) is the teleportation probability used with pagerank + + Returns + ------- + L : NumPy matrix + Combinatorial Laplacian of G. + + Notes + ----- + Only implemented for DiGraphs + + The result is always a symmetric matrix. + + This calculation uses the out-degree of the graph `G`. To use the + in-degree for calculations instead, use `G.reverse(copy=False)` and + take the transpose. + + See Also + -------- + laplacian_matrix + normalized_laplacian_matrix + directed_laplacian_matrix + + References + ---------- + .. [1] Fan Chung (2005). + Laplacians and the Cheeger inequality for directed graphs. + Annals of Combinatorics, 9(1), 2005 + """ + import scipy as sp + + P = _transition_matrix( + G, nodelist=nodelist, weight=weight, walk_type=walk_type, alpha=alpha + ) + + n, m = P.shape + + evals, evecs = sp.sparse.linalg.eigs(P.T, k=1) + v = evecs.flatten().real + p = v / v.sum() + # NOTE: could be improved by not densifying + # TODO: Rm csr_array wrapper when spdiags array creation becomes available + Phi = sp.sparse.csr_array(sp.sparse.spdiags(p, 0, n, n)).toarray() + + return Phi - (Phi @ P + P.T @ Phi) / 2.0 + + +def _transition_matrix(G, nodelist=None, weight="weight", walk_type=None, alpha=0.95): + """Returns the transition matrix of G. + + This is a row stochastic giving the transition probabilities while + performing a random walk on the graph. Depending on the value of walk_type, + P can be the transition matrix induced by a random walk, a lazy random walk, + or a random walk with teleportation (PageRank). 
+ + Parameters + ---------- + G : DiGraph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + walk_type : string or None, optional (default=None) + One of ``"random"``, ``"lazy"``, or ``"pagerank"``. If ``walk_type=None`` + (the default), then a value is selected according to the properties of `G`: + - ``walk_type="random"`` if `G` is strongly connected and aperiodic + - ``walk_type="lazy"`` if `G` is strongly connected but not aperiodic + - ``walk_type="pagerank"`` for all other cases. + + alpha : real + (1 - alpha) is the teleportation probability used with pagerank + + Returns + ------- + P : numpy.ndarray + transition matrix of G. + + Raises + ------ + NetworkXError + If walk_type not specified or alpha not in valid range + """ + import numpy as np + import scipy as sp + + if walk_type is None: + if nx.is_strongly_connected(G): + if nx.is_aperiodic(G): + walk_type = "random" + else: + walk_type = "lazy" + else: + walk_type = "pagerank" + + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, dtype=float) + n, m = A.shape + if walk_type in ["random", "lazy"]: + # TODO: Rm csr_array wrapper when spdiags array creation becomes available + DI = sp.sparse.csr_array(sp.sparse.spdiags(1.0 / A.sum(axis=1), 0, n, n)) + if walk_type == "random": + P = DI @ A + else: + # TODO: Rm csr_array wrapper when identity array creation becomes available + I = sp.sparse.csr_array(sp.sparse.identity(n)) + P = (I + DI @ A) / 2.0 + + elif walk_type == "pagerank": + if not (0 < alpha < 1): + raise nx.NetworkXError("alpha must be between 0 and 1") + # this is using a dense representation. NOTE: This should be sparsified! + A = A.toarray() + # add constant to dangling nodes' row + A[A.sum(axis=1) == 0, :] = 1 / n + # normalize + A = A / A.sum(axis=1)[np.newaxis, :].T + P = alpha * A + (1 - alpha) / n + else: + raise nx.NetworkXError("walk_type must be random, lazy, or pagerank") + + return P diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/modularitymatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/modularitymatrix.py new file mode 100644 index 00000000..0287910b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/modularitymatrix.py @@ -0,0 +1,166 @@ +"""Modularity matrix of graphs.""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["modularity_matrix", "directed_modularity_matrix"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight") +def modularity_matrix(G, nodelist=None, weight=None): + r"""Returns the modularity matrix of G. + + The modularity matrix is the matrix B = A - <A>, where A is the adjacency + matrix and <A> is the average adjacency matrix, assuming that the graph + is described by the configuration model. + + More specifically, the element B_ij of B is defined as + + .. math:: + A_{ij} - {k_i k_j \over 2 m} + + where k_i is the degree of node i, and where m is the number of edges + in the graph. When weight is set to a name of an attribute edge, Aij, k_i, + k_j and m are computed using its value. 
+ + Parameters + ---------- + G : Graph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used for + the edge weight. If None then all edge weights are 1. + + Returns + ------- + B : Numpy array + The modularity matrix of G. + + Examples + -------- + >>> k = [3, 2, 2, 1, 0] + >>> G = nx.havel_hakimi_graph(k) + >>> B = nx.modularity_matrix(G) + + + See Also + -------- + to_numpy_array + modularity_spectrum + adjacency_matrix + directed_modularity_matrix + + References + ---------- + .. [1] M. E. J. Newman, "Modularity and community structure in networks", + Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006. + """ + import numpy as np + + if nodelist is None: + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") + k = A.sum(axis=1) + m = k.sum() * 0.5 + # Expected adjacency matrix + X = np.outer(k, k) / (2 * m) + + return A - X + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatchable(edge_attrs="weight") +def directed_modularity_matrix(G, nodelist=None, weight=None): + """Returns the directed modularity matrix of G. + + The modularity matrix is the matrix B = A - <A>, where A is the adjacency + matrix and <A> is the expected adjacency matrix, assuming that the graph + is described by the configuration model. + + More specifically, the element B_ij of B is defined as + + .. math:: + B_{ij} = A_{ij} - k_i^{out} k_j^{in} / m + + where :math:`k_i^{in}` is the in degree of node i, and :math:`k_j^{out}` is the out degree + of node j, with m the number of edges in the graph. When weight is set + to a name of an attribute edge, Aij, k_i, k_j and m are computed using + its value. + + Parameters + ---------- + G : DiGraph + A NetworkX DiGraph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used for + the edge weight. If None then all edge weights are 1. + + Returns + ------- + B : Numpy array + The modularity matrix of G. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edges_from( + ... ( + ... (1, 2), + ... (1, 3), + ... (3, 1), + ... (3, 2), + ... (3, 5), + ... (4, 5), + ... (4, 6), + ... (5, 4), + ... (5, 6), + ... (6, 4), + ... ) + ... ) + >>> B = nx.directed_modularity_matrix(G) + + + Notes + ----- + NetworkX defines the element A_ij of the adjacency matrix as 1 if there + is a link going from node i to node j. Leicht and Newman use the opposite + definition. This explains the different expression for B_ij. + + See Also + -------- + to_numpy_array + modularity_spectrum + adjacency_matrix + modularity_matrix + + References + ---------- + .. [1] E. A. Leicht, M. E. J. Newman, + "Community structure in directed networks", + Phys. Rev Lett., vol. 100, no. 11, p. 118703, 2008. 
+ """ + import numpy as np + + if nodelist is None: + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") + k_in = A.sum(axis=0) + k_out = A.sum(axis=1) + m = k_in.sum() + # Expected adjacency matrix + X = np.outer(k_out, k_in) / m + + return A - X diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/spectrum.py b/.venv/lib/python3.12/site-packages/networkx/linalg/spectrum.py new file mode 100644 index 00000000..079b1855 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/spectrum.py @@ -0,0 +1,186 @@ +""" +Eigenvalue spectrum of graphs. +""" + +import networkx as nx + +__all__ = [ + "laplacian_spectrum", + "adjacency_spectrum", + "modularity_spectrum", + "normalized_laplacian_spectrum", + "bethe_hessian_spectrum", +] + + +@nx._dispatchable(edge_attrs="weight") +def laplacian_spectrum(G, weight="weight"): + """Returns eigenvalues of the Laplacian of G + + Parameters + ---------- + G : graph + A NetworkX graph + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + evals : NumPy array + Eigenvalues + + Notes + ----- + For MultiGraph/MultiDiGraph, the edges weights are summed. + See :func:`~networkx.convert_matrix.to_numpy_array` for other options. + + See Also + -------- + laplacian_matrix + + Examples + -------- + The multiplicity of 0 as an eigenvalue of the laplacian matrix is equal + to the number of connected components of G. + + >>> G = nx.Graph() # Create a graph with 5 nodes and 3 connected components + >>> G.add_nodes_from(range(5)) + >>> G.add_edges_from([(0, 2), (3, 4)]) + >>> nx.laplacian_spectrum(G) + array([0., 0., 0., 2., 2.]) + + """ + import scipy as sp + + return sp.linalg.eigvalsh(nx.laplacian_matrix(G, weight=weight).todense()) + + +@nx._dispatchable(edge_attrs="weight") +def normalized_laplacian_spectrum(G, weight="weight"): + """Return eigenvalues of the normalized Laplacian of G + + Parameters + ---------- + G : graph + A NetworkX graph + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + evals : NumPy array + Eigenvalues + + Notes + ----- + For MultiGraph/MultiDiGraph, the edges weights are summed. + See to_numpy_array for other options. + + See Also + -------- + normalized_laplacian_matrix + """ + import scipy as sp + + return sp.linalg.eigvalsh( + nx.normalized_laplacian_matrix(G, weight=weight).todense() + ) + + +@nx._dispatchable(edge_attrs="weight") +def adjacency_spectrum(G, weight="weight"): + """Returns eigenvalues of the adjacency matrix of G. + + Parameters + ---------- + G : graph + A NetworkX graph + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + evals : NumPy array + Eigenvalues + + Notes + ----- + For MultiGraph/MultiDiGraph, the edges weights are summed. + See to_numpy_array for other options. + + See Also + -------- + adjacency_matrix + """ + import scipy as sp + + return sp.linalg.eigvals(nx.adjacency_matrix(G, weight=weight).todense()) + + +@nx._dispatchable +def modularity_spectrum(G): + """Returns eigenvalues of the modularity matrix of G. 
+ + Parameters + ---------- + G : Graph + A NetworkX Graph or DiGraph + + Returns + ------- + evals : NumPy array + Eigenvalues + + See Also + -------- + modularity_matrix + + References + ---------- + .. [1] M. E. J. Newman, "Modularity and community structure in networks", + Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006. + """ + import scipy as sp + + if G.is_directed(): + return sp.linalg.eigvals(nx.directed_modularity_matrix(G)) + else: + return sp.linalg.eigvals(nx.modularity_matrix(G)) + + +@nx._dispatchable +def bethe_hessian_spectrum(G, r=None): + """Returns eigenvalues of the Bethe Hessian matrix of G. + + Parameters + ---------- + G : Graph + A NetworkX Graph or DiGraph + + r : float + Regularizer parameter + + Returns + ------- + evals : NumPy array + Eigenvalues + + See Also + -------- + bethe_hessian_matrix + + References + ---------- + .. [1] A. Saade, F. Krzakala and L. Zdeborová + "Spectral clustering of graphs with the bethe hessian", + Advances in Neural Information Processing Systems. 2014. + """ + import scipy as sp + + return sp.linalg.eigvalsh(nx.bethe_hessian_matrix(G, r).todense()) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__init__.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/__init__.py diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_algebraic_connectivity.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_algebraic_connectivity.py new file mode 100644 index 00000000..089d917a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_algebraic_connectivity.py @@ -0,0 +1,402 @@ +from math import sqrt + +import pytest + +np = pytest.importorskip("numpy") + + +import networkx as nx + +methods = ("tracemin_pcg", "tracemin_lu", "lanczos", "lobpcg") + + +def test_algebraic_connectivity_tracemin_chol(): + """Test that "tracemin_chol" raises an exception.""" + pytest.importorskip("scipy") + G = nx.barbell_graph(5, 4) + with pytest.raises(nx.NetworkXError): + nx.algebraic_connectivity(G, method="tracemin_chol") + + +def test_fiedler_vector_tracemin_chol(): + """Test that "tracemin_chol" raises an exception.""" + pytest.importorskip("scipy") + G = nx.barbell_graph(5, 4) + with pytest.raises(nx.NetworkXError): + nx.fiedler_vector(G, method="tracemin_chol") + + +def test_spectral_ordering_tracemin_chol(): + """Test that "tracemin_chol" raises an exception.""" + pytest.importorskip("scipy") + G = nx.barbell_graph(5, 4) + with pytest.raises(nx.NetworkXError): + nx.spectral_ordering(G, method="tracemin_chol") + + +def test_fiedler_vector_tracemin_unknown(): + """Test that "tracemin_unknown" raises an exception.""" + pytest.importorskip("scipy") + G = nx.barbell_graph(5, 4) + L = nx.laplacian_matrix(G) + X = np.asarray(np.random.normal(size=(1, L.shape[0]))).T + with pytest.raises(nx.NetworkXError, match="Unknown linear system solver"): + nx.linalg.algebraicconnectivity._tracemin_fiedler( + L, X, normalized=False, tol=1e-8, method="tracemin_unknown" + ) + + +def test_spectral_bisection(): + pytest.importorskip("scipy") + G = nx.barbell_graph(3, 0) + C = nx.spectral_bisection(G) + assert C == ({0, 1, 2}, {3, 4, 5}) + + mapping = dict(enumerate("badfec")) + G = nx.relabel_nodes(G, mapping) + C = nx.spectral_bisection(G) + assert C == ( + {mapping[0], mapping[1], mapping[2]}, + {mapping[3], mapping[4], mapping[5]}, + ) + + +def 
check_eigenvector(A, l, x): + nx = np.linalg.norm(x) + # Check zeroness. + assert nx != pytest.approx(0, abs=1e-07) + y = A @ x + ny = np.linalg.norm(y) + # Check collinearity. + assert x @ y == pytest.approx(nx * ny, abs=1e-7) + # Check eigenvalue. + assert ny == pytest.approx(l * nx, abs=1e-7) + + +class TestAlgebraicConnectivity: + @pytest.mark.parametrize("method", methods) + def test_directed(self, method): + G = nx.DiGraph() + pytest.raises( + nx.NetworkXNotImplemented, nx.algebraic_connectivity, G, method=method + ) + pytest.raises(nx.NetworkXNotImplemented, nx.fiedler_vector, G, method=method) + + @pytest.mark.parametrize("method", methods) + def test_null_and_singleton(self, method): + G = nx.Graph() + pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G, method=method) + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method) + G.add_edge(0, 0) + pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G, method=method) + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method) + + @pytest.mark.parametrize("method", methods) + def test_disconnected(self, method): + G = nx.Graph() + G.add_nodes_from(range(2)) + assert nx.algebraic_connectivity(G) == 0 + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method) + G.add_edge(0, 1, weight=0) + assert nx.algebraic_connectivity(G) == 0 + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method) + + def test_unrecognized_method(self): + pytest.importorskip("scipy") + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G, method="unknown") + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method="unknown") + + @pytest.mark.parametrize("method", methods) + def test_two_nodes(self, method): + pytest.importorskip("scipy") + G = nx.Graph() + G.add_edge(0, 1, weight=1) + A = nx.laplacian_matrix(G) + assert nx.algebraic_connectivity(G, tol=1e-12, method=method) == pytest.approx( + 2, abs=1e-7 + ) + x = nx.fiedler_vector(G, tol=1e-12, method=method) + check_eigenvector(A, 2, x) + + @pytest.mark.parametrize("method", methods) + def test_two_nodes_multigraph(self, method): + pytest.importorskip("scipy") + G = nx.MultiGraph() + G.add_edge(0, 0, spam=1e8) + G.add_edge(0, 1, spam=1) + G.add_edge(0, 1, spam=-2) + A = -3 * nx.laplacian_matrix(G, weight="spam") + assert nx.algebraic_connectivity( + G, weight="spam", tol=1e-12, method=method + ) == pytest.approx(6, abs=1e-7) + x = nx.fiedler_vector(G, weight="spam", tol=1e-12, method=method) + check_eigenvector(A, 6, x) + + def test_abbreviation_of_method(self): + pytest.importorskip("scipy") + G = nx.path_graph(8) + A = nx.laplacian_matrix(G) + sigma = 2 - sqrt(2 + sqrt(2)) + ac = nx.algebraic_connectivity(G, tol=1e-12, method="tracemin") + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method="tracemin") + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize("method", methods) + def test_path(self, method): + pytest.importorskip("scipy") + G = nx.path_graph(8) + A = nx.laplacian_matrix(G) + sigma = 2 - sqrt(2 + sqrt(2)) + ac = nx.algebraic_connectivity(G, tol=1e-12, method=method) + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method=method) + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize("method", methods) + def test_problematic_graph_issue_2381(self, method): + pytest.importorskip("scipy") + G = nx.path_graph(4) + G.add_edges_from([(4, 2), (5, 1)]) + A = nx.laplacian_matrix(G) + sigma = 0.438447187191 + ac = 
nx.algebraic_connectivity(G, tol=1e-12, method=method) + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method=method) + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize("method", methods) + def test_cycle(self, method): + pytest.importorskip("scipy") + G = nx.cycle_graph(8) + A = nx.laplacian_matrix(G) + sigma = 2 - sqrt(2) + ac = nx.algebraic_connectivity(G, tol=1e-12, method=method) + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method=method) + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize("method", methods) + def test_seed_argument(self, method): + pytest.importorskip("scipy") + G = nx.cycle_graph(8) + A = nx.laplacian_matrix(G) + sigma = 2 - sqrt(2) + ac = nx.algebraic_connectivity(G, tol=1e-12, method=method, seed=1) + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method=method, seed=1) + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize( + ("normalized", "sigma", "laplacian_fn"), + ( + (False, 0.2434017461399311, nx.laplacian_matrix), + (True, 0.08113391537997749, nx.normalized_laplacian_matrix), + ), + ) + @pytest.mark.parametrize("method", methods) + def test_buckminsterfullerene(self, normalized, sigma, laplacian_fn, method): + pytest.importorskip("scipy") + G = nx.Graph( + [ + (1, 10), + (1, 41), + (1, 59), + (2, 12), + (2, 42), + (2, 60), + (3, 6), + (3, 43), + (3, 57), + (4, 8), + (4, 44), + (4, 58), + (5, 13), + (5, 56), + (5, 57), + (6, 10), + (6, 31), + (7, 14), + (7, 56), + (7, 58), + (8, 12), + (8, 32), + (9, 23), + (9, 53), + (9, 59), + (10, 15), + (11, 24), + (11, 53), + (11, 60), + (12, 16), + (13, 14), + (13, 25), + (14, 26), + (15, 27), + (15, 49), + (16, 28), + (16, 50), + (17, 18), + (17, 19), + (17, 54), + (18, 20), + (18, 55), + (19, 23), + (19, 41), + (20, 24), + (20, 42), + (21, 31), + (21, 33), + (21, 57), + (22, 32), + (22, 34), + (22, 58), + (23, 24), + (25, 35), + (25, 43), + (26, 36), + (26, 44), + (27, 51), + (27, 59), + (28, 52), + (28, 60), + (29, 33), + (29, 34), + (29, 56), + (30, 51), + (30, 52), + (30, 53), + (31, 47), + (32, 48), + (33, 45), + (34, 46), + (35, 36), + (35, 37), + (36, 38), + (37, 39), + (37, 49), + (38, 40), + (38, 50), + (39, 40), + (39, 51), + (40, 52), + (41, 47), + (42, 48), + (43, 49), + (44, 50), + (45, 46), + (45, 54), + (46, 55), + (47, 54), + (48, 55), + ] + ) + A = laplacian_fn(G) + try: + assert nx.algebraic_connectivity( + G, normalized=normalized, tol=1e-12, method=method + ) == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, normalized=normalized, tol=1e-12, method=method) + check_eigenvector(A, sigma, x) + except nx.NetworkXError as err: + if err.args not in ( + ("Cholesky solver unavailable.",), + ("LU solver unavailable.",), + ): + raise + + +class TestSpectralOrdering: + _graphs = (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph) + + @pytest.mark.parametrize("graph", _graphs) + def test_nullgraph(self, graph): + G = graph() + pytest.raises(nx.NetworkXError, nx.spectral_ordering, G) + + @pytest.mark.parametrize("graph", _graphs) + def test_singleton(self, graph): + G = graph() + G.add_node("x") + assert nx.spectral_ordering(G) == ["x"] + G.add_edge("x", "x", weight=33) + G.add_edge("x", "x", weight=33) + assert nx.spectral_ordering(G) == ["x"] + + def test_unrecognized_method(self): + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, nx.spectral_ordering, G, method="unknown") + + @pytest.mark.parametrize("method", methods) + def test_three_nodes(self, 
method): + pytest.importorskip("scipy") + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1)], weight="spam") + order = nx.spectral_ordering(G, weight="spam", method=method) + assert set(order) == set(G) + assert {1, 3} in (set(order[:-1]), set(order[1:])) + + @pytest.mark.parametrize("method", methods) + def test_three_nodes_multigraph(self, method): + pytest.importorskip("scipy") + G = nx.MultiDiGraph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1), (2, 3, 2)]) + order = nx.spectral_ordering(G, method=method) + assert set(order) == set(G) + assert {2, 3} in (set(order[:-1]), set(order[1:])) + + @pytest.mark.parametrize("method", methods) + def test_path(self, method): + pytest.importorskip("scipy") + path = list(range(10)) + np.random.shuffle(path) + G = nx.Graph() + nx.add_path(G, path) + order = nx.spectral_ordering(G, method=method) + assert order in [path, list(reversed(path))] + + @pytest.mark.parametrize("method", methods) + def test_seed_argument(self, method): + pytest.importorskip("scipy") + path = list(range(10)) + np.random.shuffle(path) + G = nx.Graph() + nx.add_path(G, path) + order = nx.spectral_ordering(G, method=method, seed=1) + assert order in [path, list(reversed(path))] + + @pytest.mark.parametrize("method", methods) + def test_disconnected(self, method): + pytest.importorskip("scipy") + G = nx.Graph() + nx.add_path(G, range(0, 10, 2)) + nx.add_path(G, range(1, 10, 2)) + order = nx.spectral_ordering(G, method=method) + assert set(order) == set(G) + seqs = [ + list(range(0, 10, 2)), + list(range(8, -1, -2)), + list(range(1, 10, 2)), + list(range(9, -1, -2)), + ] + assert order[:5] in seqs + assert order[5:] in seqs + + @pytest.mark.parametrize( + ("normalized", "expected_order"), + ( + (False, [[1, 2, 0, 3, 4, 5, 6, 9, 7, 8], [8, 7, 9, 6, 5, 4, 3, 0, 2, 1]]), + (True, [[1, 2, 3, 0, 4, 5, 9, 6, 7, 8], [8, 7, 6, 9, 5, 4, 0, 3, 2, 1]]), + ), + ) + @pytest.mark.parametrize("method", methods) + def test_cycle(self, normalized, expected_order, method): + pytest.importorskip("scipy") + path = list(range(10)) + G = nx.Graph() + nx.add_path(G, path, weight=5) + G.add_edge(path[-1], path[0], weight=1) + A = nx.laplacian_matrix(G).todense() + order = nx.spectral_ordering(G, normalized=normalized, method=method) + assert order in expected_order diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_attrmatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_attrmatrix.py new file mode 100644 index 00000000..01574bb3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_attrmatrix.py @@ -0,0 +1,108 @@ +import pytest + +np = pytest.importorskip("numpy") + +import networkx as nx + + +def test_attr_matrix(): + G = nx.Graph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + + def node_attr(u): + return G.nodes[u].get("size", 0.5) * 3 + + def edge_attr(u, v): + return G[u][v].get("thickness", 0.5) + + M = nx.attr_matrix(G, edge_attr=edge_attr, node_attr=node_attr) + np.testing.assert_equal(M[0], np.array([[6.0]])) + assert M[1] == [1.5] + + +def test_attr_matrix_directed(): + G = nx.DiGraph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + M = nx.attr_matrix(G, rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 1., 1.], + [0., 0., 1.], + [0., 0., 0.]] + ) + # fmt: on + 
np.testing.assert_equal(M, np.array(data)) + + +def test_attr_matrix_multigraph(): + G = nx.MultiGraph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + M = nx.attr_matrix(G, rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 3., 1.], + [3., 0., 1.], + [1., 1., 0.]] + ) + # fmt: on + np.testing.assert_equal(M, np.array(data)) + M = nx.attr_matrix(G, edge_attr="weight", rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 9., 1.], + [9., 0., 1.], + [1., 1., 0.]] + ) + # fmt: on + np.testing.assert_equal(M, np.array(data)) + M = nx.attr_matrix(G, edge_attr="thickness", rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 3., 2.], + [3., 0., 3.], + [2., 3., 0.]] + ) + # fmt: on + np.testing.assert_equal(M, np.array(data)) + + +def test_attr_sparse_matrix(): + pytest.importorskip("scipy") + G = nx.Graph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + M = nx.attr_sparse_matrix(G) + mtx = M[0] + data = np.ones((3, 3), float) + np.fill_diagonal(data, 0) + np.testing.assert_equal(mtx.todense(), np.array(data)) + assert M[1] == [0, 1, 2] + + +def test_attr_sparse_matrix_directed(): + pytest.importorskip("scipy") + G = nx.DiGraph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + M = nx.attr_sparse_matrix(G, rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 1., 1.], + [0., 0., 1.], + [0., 0., 0.]] + ) + # fmt: on + np.testing.assert_equal(M.todense(), np.array(data)) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_bethehessian.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_bethehessian.py new file mode 100644 index 00000000..339fe1be --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_bethehessian.py @@ -0,0 +1,41 @@ +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.generators.degree_seq import havel_hakimi_graph + + +class TestBetheHessian: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = havel_hakimi_graph(deg) + cls.P = nx.path_graph(3) + + def test_bethe_hessian(self): + "Bethe Hessian matrix" + # fmt: off + H = np.array([[4, -2, 0], + [-2, 5, -2], + [0, -2, 4]]) + # fmt: on + permutation = [2, 0, 1] + # Bethe Hessian gives expected form + np.testing.assert_equal(nx.bethe_hessian_matrix(self.P, r=2).todense(), H) + # nodelist is correctly implemented + np.testing.assert_equal( + nx.bethe_hessian_matrix(self.P, r=2, nodelist=permutation).todense(), + H[np.ix_(permutation, permutation)], + ) + # Equal to Laplacian matrix when r=1 + np.testing.assert_equal( + nx.bethe_hessian_matrix(self.G, r=1).todense(), + nx.laplacian_matrix(self.G).todense(), + ) + # Correct default for the regularizer r + np.testing.assert_equal( + nx.bethe_hessian_matrix(self.G).todense(), + nx.bethe_hessian_matrix(self.G, r=1.25).todense(), + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_graphmatrix.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_graphmatrix.py new file mode 100644 index 00000000..519198bc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_graphmatrix.py @@ -0,0 +1,276 @@ +import pytest + +np = pytest.importorskip("numpy") 
+pytest.importorskip("scipy") + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.generators.degree_seq import havel_hakimi_graph + + +def test_incidence_matrix_simple(): + deg = [3, 2, 2, 1, 0] + G = havel_hakimi_graph(deg) + deg = [(1, 0), (1, 0), (1, 0), (2, 0), (1, 0), (2, 1), (0, 1), (0, 1)] + MG = nx.random_clustered_graph(deg, seed=42) + + I = nx.incidence_matrix(G, dtype=int).todense() + # fmt: off + expected = np.array( + [[1, 1, 1, 0], + [0, 1, 0, 1], + [1, 0, 0, 1], + [0, 0, 1, 0], + [0, 0, 0, 0]] + ) + # fmt: on + np.testing.assert_equal(I, expected) + + I = nx.incidence_matrix(MG, dtype=int).todense() + # fmt: off + expected = np.array( + [[1, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 0, 1]] + ) + # fmt: on + np.testing.assert_equal(I, expected) + + with pytest.raises(NetworkXError): + nx.incidence_matrix(G, nodelist=[0, 1]) + + +class TestGraphMatrix: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = havel_hakimi_graph(deg) + # fmt: off + cls.OI = np.array( + [[-1, -1, -1, 0], + [1, 0, 0, -1], + [0, 1, 0, 1], + [0, 0, 1, 0], + [0, 0, 0, 0]] + ) + cls.A = np.array( + [[0, 1, 1, 1, 0], + [1, 0, 1, 0, 0], + [1, 1, 0, 0, 0], + [1, 0, 0, 0, 0], + [0, 0, 0, 0, 0]] + ) + # fmt: on + cls.WG = havel_hakimi_graph(deg) + cls.WG.add_edges_from( + (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.G.edges() + ) + # fmt: off + cls.WA = np.array( + [[0, 0.5, 0.5, 0.5, 0], + [0.5, 0, 0.5, 0, 0], + [0.5, 0.5, 0, 0, 0], + [0.5, 0, 0, 0, 0], + [0, 0, 0, 0, 0]] + ) + # fmt: on + cls.MG = nx.MultiGraph(cls.G) + cls.MG2 = cls.MG.copy() + cls.MG2.add_edge(0, 1) + # fmt: off + cls.MG2A = np.array( + [[0, 2, 1, 1, 0], + [2, 0, 1, 0, 0], + [1, 1, 0, 0, 0], + [1, 0, 0, 0, 0], + [0, 0, 0, 0, 0]] + ) + cls.MGOI = np.array( + [[-1, -1, -1, -1, 0], + [1, 1, 0, 0, -1], + [0, 0, 1, 0, 1], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 0]] + ) + # fmt: on + cls.no_edges_G = nx.Graph([(1, 2), (3, 2, {"weight": 8})]) + cls.no_edges_A = np.array([[0, 0], [0, 0]]) + + def test_incidence_matrix(self): + "Conversion to incidence matrix" + I = nx.incidence_matrix( + self.G, + nodelist=sorted(self.G), + edgelist=sorted(self.G.edges()), + oriented=True, + dtype=int, + ).todense() + np.testing.assert_equal(I, self.OI) + + I = nx.incidence_matrix( + self.G, + nodelist=sorted(self.G), + edgelist=sorted(self.G.edges()), + oriented=False, + dtype=int, + ).todense() + np.testing.assert_equal(I, np.abs(self.OI)) + + I = nx.incidence_matrix( + self.MG, + nodelist=sorted(self.MG), + edgelist=sorted(self.MG.edges()), + oriented=True, + dtype=int, + ).todense() + np.testing.assert_equal(I, self.OI) + + I = nx.incidence_matrix( + self.MG, + nodelist=sorted(self.MG), + edgelist=sorted(self.MG.edges()), + oriented=False, + dtype=int, + ).todense() + np.testing.assert_equal(I, np.abs(self.OI)) + + I = nx.incidence_matrix( + self.MG2, + nodelist=sorted(self.MG2), + edgelist=sorted(self.MG2.edges()), + oriented=True, + dtype=int, + ).todense() + np.testing.assert_equal(I, self.MGOI) + + I = nx.incidence_matrix( + self.MG2, + nodelist=sorted(self.MG), + edgelist=sorted(self.MG2.edges()), + oriented=False, + dtype=int, + ).todense() + np.testing.assert_equal(I, np.abs(self.MGOI)) + + I = nx.incidence_matrix(self.G, dtype=np.uint8) + assert I.dtype == np.uint8 + + def test_weighted_incidence_matrix(self): + I = nx.incidence_matrix( + self.WG, + 
nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=True, + dtype=int, + ).todense() + np.testing.assert_equal(I, self.OI) + + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=False, + dtype=int, + ).todense() + np.testing.assert_equal(I, np.abs(self.OI)) + + # np.testing.assert_equal(nx.incidence_matrix(self.WG,oriented=True, + # weight='weight').todense(),0.5*self.OI) + # np.testing.assert_equal(nx.incidence_matrix(self.WG,weight='weight').todense(), + # np.abs(0.5*self.OI)) + # np.testing.assert_equal(nx.incidence_matrix(self.WG,oriented=True,weight='other').todense(), + # 0.3*self.OI) + + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=True, + weight="weight", + ).todense() + np.testing.assert_equal(I, 0.5 * self.OI) + + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=False, + weight="weight", + ).todense() + np.testing.assert_equal(I, np.abs(0.5 * self.OI)) + + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=True, + weight="other", + ).todense() + np.testing.assert_equal(I, 0.3 * self.OI) + + # WMG=nx.MultiGraph(self.WG) + # WMG.add_edge(0,1,weight=0.5,other=0.3) + # np.testing.assert_equal(nx.incidence_matrix(WMG,weight='weight').todense(), + # np.abs(0.5*self.MGOI)) + # np.testing.assert_equal(nx.incidence_matrix(WMG,weight='weight',oriented=True).todense(), + # 0.5*self.MGOI) + # np.testing.assert_equal(nx.incidence_matrix(WMG,weight='other',oriented=True).todense(), + # 0.3*self.MGOI) + + WMG = nx.MultiGraph(self.WG) + WMG.add_edge(0, 1, weight=0.5, other=0.3) + + I = nx.incidence_matrix( + WMG, + nodelist=sorted(WMG), + edgelist=sorted(WMG.edges(keys=True)), + oriented=True, + weight="weight", + ).todense() + np.testing.assert_equal(I, 0.5 * self.MGOI) + + I = nx.incidence_matrix( + WMG, + nodelist=sorted(WMG), + edgelist=sorted(WMG.edges(keys=True)), + oriented=False, + weight="weight", + ).todense() + np.testing.assert_equal(I, np.abs(0.5 * self.MGOI)) + + I = nx.incidence_matrix( + WMG, + nodelist=sorted(WMG), + edgelist=sorted(WMG.edges(keys=True)), + oriented=True, + weight="other", + ).todense() + np.testing.assert_equal(I, 0.3 * self.MGOI) + + def test_adjacency_matrix(self): + "Conversion to adjacency matrix" + np.testing.assert_equal(nx.adjacency_matrix(self.G).todense(), self.A) + np.testing.assert_equal(nx.adjacency_matrix(self.MG).todense(), self.A) + np.testing.assert_equal(nx.adjacency_matrix(self.MG2).todense(), self.MG2A) + np.testing.assert_equal( + nx.adjacency_matrix(self.G, nodelist=[0, 1]).todense(), self.A[:2, :2] + ) + np.testing.assert_equal(nx.adjacency_matrix(self.WG).todense(), self.WA) + np.testing.assert_equal( + nx.adjacency_matrix(self.WG, weight=None).todense(), self.A + ) + np.testing.assert_equal( + nx.adjacency_matrix(self.MG2, weight=None).todense(), self.MG2A + ) + np.testing.assert_equal( + nx.adjacency_matrix(self.WG, weight="other").todense(), 0.6 * self.WA + ) + np.testing.assert_equal( + nx.adjacency_matrix(self.no_edges_G, nodelist=[1, 3]).todense(), + self.no_edges_A, + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_laplacian.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_laplacian.py new file mode 100644 index 00000000..23f1b28e --- /dev/null +++ 
b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_laplacian.py @@ -0,0 +1,336 @@ +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.generators.degree_seq import havel_hakimi_graph +from networkx.generators.expanders import margulis_gabber_galil_graph + + +class TestLaplacian: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = havel_hakimi_graph(deg) + cls.WG = nx.Graph( + (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.G.edges() + ) + cls.WG.add_node(4) + cls.MG = nx.MultiGraph(cls.G) + + # Graph with clsloops + cls.Gsl = cls.G.copy() + for node in cls.Gsl.nodes(): + cls.Gsl.add_edge(node, node) + + # Graph used as an example in Sec. 4.1 of Langville and Meyer, + # "Google's PageRank and Beyond". + cls.DiG = nx.DiGraph() + cls.DiG.add_edges_from( + ( + (1, 2), + (1, 3), + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ) + ) + cls.DiMG = nx.MultiDiGraph(cls.DiG) + cls.DiWG = nx.DiGraph( + (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.DiG.edges() + ) + cls.DiGsl = cls.DiG.copy() + for node in cls.DiGsl.nodes(): + cls.DiGsl.add_edge(node, node) + + def test_laplacian(self): + "Graph Laplacian" + # fmt: off + NL = np.array([[ 3, -1, -1, -1, 0], + [-1, 2, -1, 0, 0], + [-1, -1, 2, 0, 0], + [-1, 0, 0, 1, 0], + [ 0, 0, 0, 0, 0]]) + # fmt: on + WL = 0.5 * NL + OL = 0.3 * NL + # fmt: off + DiNL = np.array([[ 2, -1, -1, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0], + [-1, -1, 3, -1, 0, 0], + [ 0, 0, 0, 2, -1, -1], + [ 0, 0, 0, -1, 2, -1], + [ 0, 0, 0, 0, -1, 1]]) + # fmt: on + DiWL = 0.5 * DiNL + DiOL = 0.3 * DiNL + np.testing.assert_equal(nx.laplacian_matrix(self.G).todense(), NL) + np.testing.assert_equal(nx.laplacian_matrix(self.MG).todense(), NL) + np.testing.assert_equal( + nx.laplacian_matrix(self.G, nodelist=[0, 1]).todense(), + np.array([[1, -1], [-1, 1]]), + ) + np.testing.assert_equal(nx.laplacian_matrix(self.WG).todense(), WL) + np.testing.assert_equal(nx.laplacian_matrix(self.WG, weight=None).todense(), NL) + np.testing.assert_equal( + nx.laplacian_matrix(self.WG, weight="other").todense(), OL + ) + + np.testing.assert_equal(nx.laplacian_matrix(self.DiG).todense(), DiNL) + np.testing.assert_equal(nx.laplacian_matrix(self.DiMG).todense(), DiNL) + np.testing.assert_equal( + nx.laplacian_matrix(self.DiG, nodelist=[1, 2]).todense(), + np.array([[1, -1], [0, 0]]), + ) + np.testing.assert_equal(nx.laplacian_matrix(self.DiWG).todense(), DiWL) + np.testing.assert_equal( + nx.laplacian_matrix(self.DiWG, weight=None).todense(), DiNL + ) + np.testing.assert_equal( + nx.laplacian_matrix(self.DiWG, weight="other").todense(), DiOL + ) + + def test_normalized_laplacian(self): + "Generalized Graph Laplacian" + # fmt: off + G = np.array([[ 1. , -0.408, -0.408, -0.577, 0.], + [-0.408, 1. , -0.5 , 0. , 0.], + [-0.408, -0.5 , 1. , 0. , 0.], + [-0.577, 0. , 0. , 1. , 0.], + [ 0. , 0. , 0. , 0. , 0.]]) + GL = np.array([[ 1. , -0.408, -0.408, -0.577, 0. ], + [-0.408, 1. , -0.5 , 0. , 0. ], + [-0.408, -0.5 , 1. , 0. , 0. ], + [-0.577, 0. , 0. , 1. , 0. ], + [ 0. , 0. , 0. , 0. , 0. ]]) + Lsl = np.array([[ 0.75 , -0.2887, -0.2887, -0.3536, 0. ], + [-0.2887, 0.6667, -0.3333, 0. , 0. ], + [-0.2887, -0.3333, 0.6667, 0. , 0. ], + [-0.3536, 0. , 0. , 0.5 , 0. ], + [ 0. , 0. , 0. , 0. , 0. ]]) + + DiG = np.array([[ 1. , 0. , -0.4082, 0. , 0. , 0. ], + [ 0. , 0. , 0. , 0. , 0. , 0. ], + [-0.4082, 0. , 1. , 0. , -0.4082, 0. ], + [ 0. , 0. , 0. , 1. , -0.5 , -0.7071], + [ 0. , 0. 
, 0. , -0.5 , 1. , -0.7071], + [ 0. , 0. , 0. , -0.7071, 0. , 1. ]]) + DiGL = np.array([[ 1. , 0. , -0.4082, 0. , 0. , 0. ], + [ 0. , 0. , 0. , 0. , 0. , 0. ], + [-0.4082, 0. , 1. , -0.4082, 0. , 0. ], + [ 0. , 0. , 0. , 1. , -0.5 , -0.7071], + [ 0. , 0. , 0. , -0.5 , 1. , -0.7071], + [ 0. , 0. , 0. , 0. , -0.7071, 1. ]]) + DiLsl = np.array([[ 0.6667, -0.5774, -0.2887, 0. , 0. , 0. ], + [ 0. , 0. , 0. , 0. , 0. , 0. ], + [-0.2887, -0.5 , 0.75 , -0.2887, 0. , 0. ], + [ 0. , 0. , 0. , 0.6667, -0.3333, -0.4082], + [ 0. , 0. , 0. , -0.3333, 0.6667, -0.4082], + [ 0. , 0. , 0. , 0. , -0.4082, 0.5 ]]) + # fmt: on + + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.G, nodelist=range(5)).todense(), + G, + decimal=3, + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.G).todense(), GL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.MG).todense(), GL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.WG).todense(), GL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.WG, weight="other").todense(), + GL, + decimal=3, + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.Gsl).todense(), Lsl, decimal=3 + ) + + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix( + self.DiG, + nodelist=range(1, 1 + 6), + ).todense(), + DiG, + decimal=3, + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.DiG).todense(), DiGL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.DiMG).todense(), DiGL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.DiWG).todense(), DiGL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.DiWG, weight="other").todense(), + DiGL, + decimal=3, + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.DiGsl).todense(), DiLsl, decimal=3 + ) + + +def test_directed_laplacian(): + "Directed Laplacian" + # Graph used as an example in Sec. 4.1 of Langville and Meyer, + # "Google's PageRank and Beyond". The graph contains dangling nodes, so + # the pagerank random walk is selected by directed_laplacian + G = nx.DiGraph() + G.add_edges_from( + ( + (1, 2), + (1, 3), + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ) + ) + # fmt: off + GL = np.array([[ 0.9833, -0.2941, -0.3882, -0.0291, -0.0231, -0.0261], + [-0.2941, 0.8333, -0.2339, -0.0536, -0.0589, -0.0554], + [-0.3882, -0.2339, 0.9833, -0.0278, -0.0896, -0.0251], + [-0.0291, -0.0536, -0.0278, 0.9833, -0.4878, -0.6675], + [-0.0231, -0.0589, -0.0896, -0.4878, 0.9833, -0.2078], + [-0.0261, -0.0554, -0.0251, -0.6675, -0.2078, 0.9833]]) + # fmt: on + L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G)) + np.testing.assert_almost_equal(L, GL, decimal=3) + + # Make the graph strongly connected, so we can use a random and lazy walk + G.add_edges_from(((2, 5), (6, 1))) + # fmt: off + GL = np.array([[ 1. , -0.3062, -0.4714, 0. , 0. , -0.3227], + [-0.3062, 1. , -0.1443, 0. , -0.3162, 0. ], + [-0.4714, -0.1443, 1. , 0. , -0.0913, 0. ], + [ 0. , 0. , 0. , 1. , -0.5 , -0.5 ], + [ 0. , -0.3162, -0.0913, -0.5 , 1. , -0.25 ], + [-0.3227, 0. , 0. , -0.5 , -0.25 , 1. ]]) + # fmt: on + L = nx.directed_laplacian_matrix( + G, alpha=0.9, nodelist=sorted(G), walk_type="random" + ) + np.testing.assert_almost_equal(L, GL, decimal=3) + + # fmt: off + GL = np.array([[ 0.5 , -0.1531, -0.2357, 0. 
, 0. , -0.1614], + [-0.1531, 0.5 , -0.0722, 0. , -0.1581, 0. ], + [-0.2357, -0.0722, 0.5 , 0. , -0.0456, 0. ], + [ 0. , 0. , 0. , 0.5 , -0.25 , -0.25 ], + [ 0. , -0.1581, -0.0456, -0.25 , 0.5 , -0.125 ], + [-0.1614, 0. , 0. , -0.25 , -0.125 , 0.5 ]]) + # fmt: on + L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G), walk_type="lazy") + np.testing.assert_almost_equal(L, GL, decimal=3) + + # Make a strongly connected periodic graph + G = nx.DiGraph() + G.add_edges_from(((1, 2), (2, 4), (4, 1), (1, 3), (3, 4))) + # fmt: off + GL = np.array([[ 0.5 , -0.176, -0.176, -0.25 ], + [-0.176, 0.5 , 0. , -0.176], + [-0.176, 0. , 0.5 , -0.176], + [-0.25 , -0.176, -0.176, 0.5 ]]) + # fmt: on + L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G)) + np.testing.assert_almost_equal(L, GL, decimal=3) + + +def test_directed_combinatorial_laplacian(): + "Directed combinatorial Laplacian" + # Graph used as an example in Sec. 4.1 of Langville and Meyer, + # "Google's PageRank and Beyond". The graph contains dangling nodes, so + # the pagerank random walk is selected by directed_laplacian + G = nx.DiGraph() + G.add_edges_from( + ( + (1, 2), + (1, 3), + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ) + ) + # fmt: off + GL = np.array([[ 0.0366, -0.0132, -0.0153, -0.0034, -0.0020, -0.0027], + [-0.0132, 0.0450, -0.0111, -0.0076, -0.0062, -0.0069], + [-0.0153, -0.0111, 0.0408, -0.0035, -0.0083, -0.0027], + [-0.0034, -0.0076, -0.0035, 0.3688, -0.1356, -0.2187], + [-0.0020, -0.0062, -0.0083, -0.1356, 0.2026, -0.0505], + [-0.0027, -0.0069, -0.0027, -0.2187, -0.0505, 0.2815]]) + # fmt: on + + L = nx.directed_combinatorial_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G)) + np.testing.assert_almost_equal(L, GL, decimal=3) + + # Make the graph strongly connected, so we can use a random and lazy walk + G.add_edges_from(((2, 5), (6, 1))) + + # fmt: off + GL = np.array([[ 0.1395, -0.0349, -0.0465, 0. , 0. , -0.0581], + [-0.0349, 0.093 , -0.0116, 0. , -0.0465, 0. ], + [-0.0465, -0.0116, 0.0698, 0. , -0.0116, 0. ], + [ 0. , 0. , 0. , 0.2326, -0.1163, -0.1163], + [ 0. , -0.0465, -0.0116, -0.1163, 0.2326, -0.0581], + [-0.0581, 0. , 0. , -0.1163, -0.0581, 0.2326]]) + # fmt: on + + L = nx.directed_combinatorial_laplacian_matrix( + G, alpha=0.9, nodelist=sorted(G), walk_type="random" + ) + np.testing.assert_almost_equal(L, GL, decimal=3) + + # fmt: off + GL = np.array([[ 0.0698, -0.0174, -0.0233, 0. , 0. , -0.0291], + [-0.0174, 0.0465, -0.0058, 0. , -0.0233, 0. ], + [-0.0233, -0.0058, 0.0349, 0. , -0.0058, 0. ], + [ 0. , 0. , 0. , 0.1163, -0.0581, -0.0581], + [ 0. , -0.0233, -0.0058, -0.0581, 0.1163, -0.0291], + [-0.0291, 0. , 0. , -0.0581, -0.0291, 0.1163]]) + # fmt: on + + L = nx.directed_combinatorial_laplacian_matrix( + G, alpha=0.9, nodelist=sorted(G), walk_type="lazy" + ) + np.testing.assert_almost_equal(L, GL, decimal=3) + + E = nx.DiGraph(margulis_gabber_galil_graph(2)) + L = nx.directed_combinatorial_laplacian_matrix(E) + # fmt: off + expected = np.array( + [[ 0.16666667, -0.08333333, -0.08333333, 0. ], + [-0.08333333, 0.16666667, 0. , -0.08333333], + [-0.08333333, 0. , 0.16666667, -0.08333333], + [ 0. 
, -0.08333333, -0.08333333, 0.16666667]] + ) + # fmt: on + np.testing.assert_almost_equal(L, expected, decimal=6) + + with pytest.raises(nx.NetworkXError): + nx.directed_combinatorial_laplacian_matrix(G, walk_type="pagerank", alpha=100) + with pytest.raises(nx.NetworkXError): + nx.directed_combinatorial_laplacian_matrix(G, walk_type="silly") diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_modularity.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_modularity.py new file mode 100644 index 00000000..9f94ff4d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_modularity.py @@ -0,0 +1,87 @@ +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.generators.degree_seq import havel_hakimi_graph + + +class TestModularity: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = havel_hakimi_graph(deg) + # Graph used as an example in Sec. 4.1 of Langville and Meyer, + # "Google's PageRank and Beyond". (Used for test_directed_laplacian) + cls.DG = nx.DiGraph() + cls.DG.add_edges_from( + ( + (1, 2), + (1, 3), + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ) + ) + + def test_modularity(self): + "Modularity matrix" + # fmt: off + B = np.array([[-1.125, 0.25, 0.25, 0.625, 0.], + [0.25, -0.5, 0.5, -0.25, 0.], + [0.25, 0.5, -0.5, -0.25, 0.], + [0.625, -0.25, -0.25, -0.125, 0.], + [0., 0., 0., 0., 0.]]) + # fmt: on + + permutation = [4, 0, 1, 2, 3] + np.testing.assert_equal(nx.modularity_matrix(self.G), B) + np.testing.assert_equal( + nx.modularity_matrix(self.G, nodelist=permutation), + B[np.ix_(permutation, permutation)], + ) + + def test_modularity_weight(self): + "Modularity matrix with weights" + # fmt: off + B = np.array([[-1.125, 0.25, 0.25, 0.625, 0.], + [0.25, -0.5, 0.5, -0.25, 0.], + [0.25, 0.5, -0.5, -0.25, 0.], + [0.625, -0.25, -0.25, -0.125, 0.], + [0., 0., 0., 0., 0.]]) + # fmt: on + + G_weighted = self.G.copy() + for n1, n2 in G_weighted.edges(): + G_weighted.edges[n1, n2]["weight"] = 0.5 + # The following test would fail in networkx 1.1 + np.testing.assert_equal(nx.modularity_matrix(G_weighted), B) + # The following test that the modularity matrix get rescaled accordingly + np.testing.assert_equal( + nx.modularity_matrix(G_weighted, weight="weight"), 0.5 * B + ) + + def test_directed_modularity(self): + "Directed Modularity matrix" + # fmt: off + B = np.array([[-0.2, 0.6, 0.8, -0.4, -0.4, -0.4], + [0., 0., 0., 0., 0., 0.], + [0.7, 0.4, -0.3, -0.6, 0.4, -0.6], + [-0.2, -0.4, -0.2, -0.4, 0.6, 0.6], + [-0.2, -0.4, -0.2, 0.6, -0.4, 0.6], + [-0.1, -0.2, -0.1, 0.8, -0.2, -0.2]]) + # fmt: on + node_permutation = [5, 1, 2, 3, 4, 6] + idx_permutation = [4, 0, 1, 2, 3, 5] + mm = nx.directed_modularity_matrix(self.DG, nodelist=sorted(self.DG)) + np.testing.assert_equal(mm, B) + np.testing.assert_equal( + nx.directed_modularity_matrix(self.DG, nodelist=node_permutation), + B[np.ix_(idx_permutation, idx_permutation)], + ) diff --git a/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_spectrum.py b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_spectrum.py new file mode 100644 index 00000000..e9101303 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/networkx/linalg/tests/test_spectrum.py @@ -0,0 +1,71 @@ +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.generators.degree_seq import havel_hakimi_graph + + +class 
TestSpectrum: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = havel_hakimi_graph(deg) + cls.P = nx.path_graph(3) + cls.WG = nx.Graph( + (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.G.edges() + ) + cls.WG.add_node(4) + cls.DG = nx.DiGraph() + nx.add_path(cls.DG, [0, 1, 2]) + + def test_laplacian_spectrum(self): + "Laplacian eigenvalues" + evals = np.array([0, 0, 1, 3, 4]) + e = sorted(nx.laplacian_spectrum(self.G)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.laplacian_spectrum(self.WG, weight=None)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.laplacian_spectrum(self.WG)) + np.testing.assert_almost_equal(e, 0.5 * evals) + e = sorted(nx.laplacian_spectrum(self.WG, weight="other")) + np.testing.assert_almost_equal(e, 0.3 * evals) + + def test_normalized_laplacian_spectrum(self): + "Normalized Laplacian eigenvalues" + evals = np.array([0, 0, 0.7712864461218, 1.5, 1.7287135538781]) + e = sorted(nx.normalized_laplacian_spectrum(self.G)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.normalized_laplacian_spectrum(self.WG, weight=None)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.normalized_laplacian_spectrum(self.WG)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.normalized_laplacian_spectrum(self.WG, weight="other")) + np.testing.assert_almost_equal(e, evals) + + def test_adjacency_spectrum(self): + "Adjacency eigenvalues" + evals = np.array([-np.sqrt(2), 0, np.sqrt(2)]) + e = sorted(nx.adjacency_spectrum(self.P)) + np.testing.assert_almost_equal(e, evals) + + def test_modularity_spectrum(self): + "Modularity eigenvalues" + evals = np.array([-1.5, 0.0, 0.0]) + e = sorted(nx.modularity_spectrum(self.P)) + np.testing.assert_almost_equal(e, evals) + # Directed modularity eigenvalues + evals = np.array([-0.5, 0.0, 0.0]) + e = sorted(nx.modularity_spectrum(self.DG)) + np.testing.assert_almost_equal(e, evals) + + def test_bethe_hessian_spectrum(self): + "Bethe Hessian eigenvalues" + evals = np.array([0.5 * (9 - np.sqrt(33)), 4, 0.5 * (9 + np.sqrt(33))]) + e = sorted(nx.bethe_hessian_spectrum(self.P, r=2)) + np.testing.assert_almost_equal(e, evals) + # Collapses back to Laplacian: + e1 = sorted(nx.bethe_hessian_spectrum(self.P, r=1)) + e2 = sorted(nx.laplacian_spectrum(self.P)) + np.testing.assert_almost_equal(e1, e2) |
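The files added in this diff together expose a small spectral-analysis API: Laplacian, normalized Laplacian, adjacency, modularity and Bethe Hessian spectra (spectrum.py), modularity matrices (modularitymatrix.py), directed Laplacians (laplacianmatrix.py), and algebraic connectivity / Fiedler vectors with spectral ordering and bisection (algebraicconnectivity.py), all exercised by the test files above. Below is a minimal usage sketch, not part of the diff itself: it assumes NumPy and SciPy are available next to this NetworkX build, and the example graphs and parameter values (path graph, the small DiGraph, r=2, alpha=0.9/0.95, seed=1) are illustrative choices, not values taken from the files above.

import networkx as nx

# Undirected example graph.
G = nx.path_graph(6)

# Eigenvalues of the combinatorial and normalized Laplacians (spectrum.py).
print(nx.laplacian_spectrum(G))
print(nx.normalized_laplacian_spectrum(G))

# Algebraic connectivity and Fiedler vector (algebraicconnectivity.py);
# "tracemin_pcg" is one of the solver methods exercised in the tests above.
print(nx.algebraic_connectivity(G, method="tracemin_pcg", tol=1e-12))
print(nx.fiedler_vector(G, seed=1))

# Spectral ordering and bisection derived from the Fiedler vector.
print(nx.spectral_ordering(G))
print(nx.spectral_bisection(G))

# Modularity matrix and its spectrum (modularitymatrix.py, spectrum.py).
print(nx.modularity_matrix(G))
print(nx.modularity_spectrum(G))

# Directed Laplacians (laplacianmatrix.py). This graph has a dangling node,
# so the PageRank walk is selected automatically; alpha only matters there.
D = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 3)])
print(nx.directed_laplacian_matrix(D, alpha=0.95))
print(nx.directed_combinatorial_laplacian_matrix(D, walk_type="pagerank", alpha=0.9))

# Bethe Hessian matrix and spectrum (spectrum.py and the Bethe Hessian tests).
print(nx.bethe_hessian_matrix(G, r=2).todense())
print(nx.bethe_hessian_spectrum(G, r=2))

As the tests above illustrate, the spectrum helpers densify the sparse matrices and call scipy.linalg eigensolvers, so they are intended for graphs small enough to hold as dense arrays; for large graphs, work with the sparse matrices directly.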