@@ -33,7 +33,7 @@ jobs:
 conda install -c conda-forge python-graphblas scipy pandas \
 pytest-cov pytest-randomly black flake8-comprehensions flake8-bugbear
 # matplotlib lxml pygraphviz pydot sympy # Extra networkx deps we don't need yet
-pip install git+https://github.com/jim22k/networkx.git@nx-sparse --no-deps
+pip install git+https://github.com/networkx/networkx.git@main --no-deps
 pip install -e . --no-deps
 - name: Style checks
 run: |
@@ -2,6 +2,8 @@
 #
 # Before first use: `pre-commit install`
 # To run: `pre-commit run --all-files`
+# To update: `pre-commit autoupdate`
+# - &flake8_dependencies below needs updated manually
 fail_fast: true
 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
@@ -26,7 +28,7 @@ repos:
 - id: isort
 language_version: python3
 - repo: https://github.com/asottile/pyupgrade
-rev: v3.1.0
+rev: v3.2.2
 hooks:
 - id: pyupgrade
 args: [--py38-plus]
@@ -45,9 +47,10 @@ repos:
 hooks:
 - id: flake8
 additional_dependencies: &flake8_dependencies
+# These versions need updated manually
 - flake8==5.0.4
-- flake8-comprehensions==3.10.0
-- flake8-bugbear==22.9.23
+- flake8-comprehensions==3.10.1
+- flake8-bugbear==22.10.27
 - repo: https://github.com/asottile/yesqa
 rev: v1.4.0
 hooks:
@@ -241,7 +241,7 @@ def square_clustering(G, node_ids=None):
 A, degrees = G.get_properties("A degrees+")  # TODO: how to handle self-edges?
 # P2 from https://arxiv.org/pdf/2007.11111.pdf; we'll also use it as scratch
 if node_ids is not None:
-v = Vector.from_values(node_ids, True, size=degrees.size)
+v = Vector.from_coo(node_ids, True, size=degrees.size)
 Asubset = binary.second(v & A).new(name="A_subset")
 else:
 Asubset = A
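Note: `from_coo` is the python-graphblas >=2022.11.0 name for the old `from_values` constructor; both build a sparse object from COO-style index arrays. A minimal sketch with toy data (not from the PR):

    from graphblas import Vector

    # Hypothetical subset {0, 3} of a 5-node graph, as a sparse boolean mask.
    node_ids = [0, 3]
    v = Vector.from_coo(node_ids, True, size=5)
    print(v.nvals)  # 2 stored entries; all other positions are implicit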
@@ -298,10 +298,10 @@ def generalized_degree(G, *, mask=None):
 else:
 Tri(A.S) << 0
 Tri(Tri.S, binary.second) << plus_pair(Tri @ A.T)
-rows, cols, vals = Tri.to_values()
+rows, cols, vals = Tri.to_coo()
 # The column index indicates the number of triangles an edge participates in.
 # The largest this can be is `A.ncols - 1`. Values is count of edges.
-return Matrix.from_values(
+return Matrix.from_coo(
 rows,
 vals,
 np.ones(vals.size, dtype=int),
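Note: the `Matrix.from_coo` call above (cut off by the fold) builds a histogram: row = node id, column = triangle count, and duplicate (row, column) pairs are combined. A hedged sketch of the same construction with toy arrays, assuming duplicates are summed via `dup_op=binary.plus` as the old `from_values` allowed:

    import numpy as np
    from graphblas import Matrix, binary

    rows = np.array([0, 0, 1])
    cols = np.array([2, 2, 0])
    # The duplicate (0, 2) entries are summed, giving a count of 2 there.
    M = Matrix.from_coo(rows, cols, np.ones(rows.size, dtype=int), dup_op=binary.plus)
    print(M[0, 2].new())  # 2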
@@ -28,7 +28,7 @@ def k_truss(G: Graph, k) -> Graph:
 S = C
 
 # Remove isolate nodes
-indices, _ = C.reduce_rowwise(monoid.any).to_values()
+indices, _ = C.reduce_rowwise(monoid.any).to_coo()
 Ktruss = C[indices, indices].new()
 
 # Convert back to networkx graph with correct node ids
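Note: the changed line implements the "Remove isolate nodes" step: reducing each row with the `any` monoid leaves one entry per non-empty row, and `to_coo()` returns those row ids for a compacting extraction. A standalone sketch with toy data:

    from graphblas import Matrix, monoid

    # Toy 3x3 adjacency where node 1 is isolated.
    C = Matrix.from_coo([0, 2], [2, 0], True, nrows=3, ncols=3)
    indices, _ = C.reduce_rowwise(monoid.any).to_coo()  # ids of non-empty rows
    Ktruss = C[indices, indices].new()                  # 2x2; node 1 is dropped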
@@ -10,7 +10,7 @@ def descendants(G, source):
 raise KeyError(f"The node {source} is not in the graph")
 index = G._key_to_id[source]
 A = G._A
-q = Vector.from_values(index, True, size=A.nrows, name="q")
+q = Vector.from_coo(index, True, size=A.nrows, name="q")
 rv = q.dup(name="descendants")
 for _ in range(A.nrows):
 q(~rv.S, replace) << lor_pair(q @ A)
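Note: the loop above is a level-synchronous BFS in linear-algebra form: `q` is the frontier, `rv` the visited set, and the masked assignment `q(~rv.S, replace) << ...` keeps only newly reached nodes. A hedged sketch of the pattern, using the built-in `lor_land` semiring in place of the module's `lor_pair` helper (on boolean inputs they agree):

    from graphblas import Matrix, Vector, replace, semiring

    A = Matrix.from_coo([0, 1], [1, 2], True, nrows=3, ncols=3)  # path 0 -> 1 -> 2
    q = Vector.from_coo(0, True, size=A.nrows)  # frontier: the start node
    rv = q.dup()                                # everything reached so far
    for _ in range(A.nrows):
        q(~rv.S, replace) << semiring.lor_land(q @ A)  # expand; keep unseen only
        if q.nvals == 0:
            break
        rv(q.S) << True  # fold the new frontier into the result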
@@ -26,7 +26,7 @@ def ancestors(G, source):
 raise KeyError(f"The node {source} is not in the graph")
 index = G._key_to_id[source]
 A = G._A
-q = Vector.from_values(index, True, size=A.nrows, name="q")
+q = Vector.from_coo(index, True, size=A.nrows, name="q")
 rv = q.dup(name="descendants")
 for _ in range(A.nrows):
 q(~rv.S, replace) << lor_pair(A @ q)
@@ -11,9 +11,9 @@ def has_path(G, source, target):
 if src == dst:
 return True
 A = G._A
-q_src = Vector.from_values(src, True, size=A.nrows, name="q_src")
+q_src = Vector.from_coo(src, True, size=A.nrows, name="q_src")
 seen_src = q_src.dup(name="seen_src")
-q_dst = Vector.from_values(dst, True, size=A.nrows, name="q_dst")
+q_dst = Vector.from_coo(dst, True, size=A.nrows, name="q_dst")
 seen_dst = q_dst.dup(name="seen_dst")
 for _ in range(A.nrows // 2):
 q_src(~seen_src.S, replace) << lor_pair(q_src @ A)
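Note: `has_path` runs that BFS from both endpoints simultaneously (hence the two frontier/seen pairs), so each side only needs about `nrows // 2` rounds. The meeting test is below the fold, but conceptually it is an elementwise intersection of the two frontiers; a hypothetical standalone check, not the PR's exact code:

    from graphblas import Vector, binary

    q_src = Vector.from_coo([1, 2], True, size=4)  # toy frontier from the source
    q_dst = Vector.from_coo([2, 3], True, size=4)  # toy frontier from the target
    overlap = binary.land(q_src & q_dst).new()     # nonempty iff frontiers meet
    print(overlap.nvals > 0)                       # True: they meet at node 2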
@@ -16,7 +16,7 @@ def is_simple_path(G, nodes):
 if len(indices) != len(nodes) or len(indices) > len(set(indices)):
 return False
 # Check all steps in path at once
-P = Matrix.from_values(indices[:-1], indices[1:], True, nrows=A.nrows, ncols=A.ncols)
+P = Matrix.from_coo(indices[:-1], indices[1:], True, nrows=A.nrows, ncols=A.ncols)
 P << binary.second(A & P)
 return P.nvals == len(indices) - 1
 # Alternative
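Note: this validates every step of the path with one elementwise multiply: `P` holds an entry per consecutive node pair, `binary.second(A & P)` keeps exactly the pairs that are real edges, and comparing `nvals` against the number of steps checks the whole path at once. A self-contained sketch:

    from graphblas import Matrix, binary

    A = Matrix.from_coo([0, 1], [1, 2], True, nrows=3, ncols=3)  # edges 0->1, 1->2
    nodes = [0, 1, 2]  # candidate path
    P = Matrix.from_coo(nodes[:-1], nodes[1:], True, nrows=A.nrows, ncols=A.ncols)
    P << binary.second(A & P)         # keep only the steps that exist in A
    print(P.nvals == len(nodes) - 1)  # True: every step is a real edge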
@@ -15,7 +15,7 @@ def is_tournament(G):
 
 def score_sequence(G):
 degrees = G.get_property("row_degrees+")
-_, values = degrees.to_values(indices=False, sort=False)
+_, values = degrees.to_coo(indices=False, sort=False)
 values.sort()
 if degrees.nvals != degrees.size:
 values = np.pad(values, (degrees.size - degrees.nvals, 0))
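Note: `to_coo(indices=False, sort=False)` returns just the stored values (the index array comes back as `None`), and the `np.pad` accounts for nodes with no stored degree, which are implicit zeros. With a toy degree vector:

    import numpy as np
    from graphblas import Vector

    degrees = Vector.from_coo([0, 2], [1, 3], size=4)  # nodes 1 and 3 are empty
    _, values = degrees.to_coo(indices=False, sort=False)
    values.sort()
    values = np.pad(values, (degrees.size - degrees.nvals, 0))
    print(values)  # [0 0 1 3]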
@@ -52,7 +52,7 @@ def __getitem__(self, key):
 def __iter__(self):
 # Slow if we iterate over one; fast if we iterate over all
 return map(
-self.id_to_key.__getitem__, self.vector.to_values(values=False, sort=False)[0].tolist()
+self.id_to_key.__getitem__, self.vector.to_coo(values=False, sort=False)[0].tolist()
 )
 
 def __len__(self):
@@ -123,7 +123,7 @@ def __getitem__(self, key):
 
 def __iter__(self):
 # Slow if we iterate over one; fast if we iterate over all
-return iter(self.vector.to_values(values=False, sort=False)[0].tolist())
+return iter(self.vector.to_coo(values=False, sort=False)[0].tolist())
 
 def __len__(self):
 return self.vector.nvals
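Note: these `__iter__` methods use the mirror-image call: `to_coo(values=False, sort=False)[0]` returns only the index array, and `.tolist()` converts it in bulk, which is much faster than iterating stored entries one by one. For example:

    from graphblas import Vector

    v = Vector.from_coo([0, 2, 5], True, size=8)
    indices, _ = v.to_coo(values=False, sort=False)  # values come back as None
    print(indices.tolist())  # [0, 2, 5] (order not guaranteed with sort=False)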
@@ -232,7 +232,7 @@ def __iter__(self):
 # Slow if we iterate over one; fast if we iterate over all
 return map(
 self.id_to_key.__getitem__,
-self._get_rows().to_values(values=False, sort=False)[0].tolist(),
+self._get_rows().to_coo(values=False, sort=False)[0].tolist(),
 )
 
 def __len__(self):
@@ -46,7 +46,7 @@ def __contains__(self, x):
 def __iter__(self):
 # Slow if we iterate over one; fast if we iterate over all
 return map(
-self.id_to_key.__getitem__, self.vector.to_values(values=False, sort=False)[0].tolist()
+self.id_to_key.__getitem__, self.vector.to_coo(values=False, sort=False)[0].tolist()
 )
 
 def __len__(self):
@@ -1,5 +1,3 @@
-import pytest
-
 from . import nxapi
 
 
@@ -93,6 +91,10 @@ def convert_to_nx(obj, *, name=None):
 
 @staticmethod
 def on_start_tests(items):
+try:
+import pytest
+except ImportError:  # pragma: no cover (import)
+return
 skip = [
 ("test_attributes", {"TestBoruvka", "test_mst.py"}),
 ("test_weight_attribute", {"TestBoruvka", "test_mst.py"}),
@@ -21,7 +21,7 @@ def edge_boundary(G, nbunch1, nbunch2=None, data=False, keys=False, default=None
 v1 = G.set_to_vector(nbunch1, ignore_extra=True)
 v2 = G.set_to_vector(nbunch2, ignore_extra=True)
 result = algorithms.edge_boundary(G, v1, v2, is_weighted=is_multigraph or data)
-rows, cols, vals = result.to_values(values=is_multigraph or data)
+rows, cols, vals = result.to_coo(values=is_multigraph or data)
 id_to_key = G.id_to_key
 if data:
 it = zip(
@@ -95,7 +95,7 @@ def _split(L, k):
 # TODO: should this move into algorithms?
 def _square_clustering_split(G, node_ids=None, *, nsplits):
 if node_ids is None:
-node_ids = G._A.reduce_rowwise(monoid.any).to_values()[0]
+node_ids = G._A.reduce_rowwise(monoid.any).to_coo()[0]
 result = None
 for chunk_ids in _split(node_ids, nsplits):
 res = algorithms.square_clustering(G, chunk_ids)
@@ -28,26 +28,17 @@
 
 def isdispatched(func):
 """Can this NetworkX function dispatch to other backends?"""
-# Haha, there should be a better way to know this
-registered_algorithms = backends._registered_algorithms
-try:
-return (
-func.__globals__.get("_registered_algorithms") is registered_algorithms
-and func.__module__.startswith("networkx")
-and func.__module__ != "networkx.classes.backends"
-and set(func.__code__.co_freevars) == {"func", "name"}
-)
-except Exception:
-return False
+return (
+callable(func) and hasattr(func, "dispatchname") and func.__module__.startswith("networkx")
+)
 
 
 def dispatchname(func):
 """The dispatched name of the dispatchable NetworkX function"""
-# Haha, there should be a better way to get this
 if not isdispatched(func):
 raise ValueError(f"Function is not dispatched in NetworkX: {func.__name__}")
-index = func.__code__.co_freevars.index("name")
-return func.__closure__[index].cell_contents
+return func.dispatchname
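Note: the rewrite relies on newer NetworkX dispatching, where a decorated function carries its registered name as a `dispatchname` attribute, so the closure introspection above is no longer needed. A hedged usage sketch (which functions are dispatched depends on the installed NetworkX version; `nx.pagerank` is just an illustrative guess):

    import networkx as nx

    func = nx.pagerank
    if callable(func) and hasattr(func, "dispatchname"):
        print(func.dispatchname)  # the name it was registered under
    else:
        print("not dispatched in this NetworkX version")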


 def fullname(func):
@@ -1 +1 @@
-python-graphblas >=2022.10.1
+python-graphblas >=2022.11.0
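Note: this version floor is what makes the renames safe: `from_coo`/`to_coo` come with python-graphblas 2022.11.0, which this line now requires. A defensive sketch for code that must straddle both releases (hypothetical, not part of this PR):

    from graphblas import Vector

    if hasattr(Vector, "from_coo"):  # python-graphblas >= 2022.11.0
        v = Vector.from_coo([0, 1], True, size=3)
    else:                            # older releases use the from_values name
        v = Vector.from_values([0, 1], True, size=3)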
@@ -178,14 +178,14 @@ def main(filename, backend, time, n, verify, alpha, tol, _get_result=False):
 
 start = timeit.default_timer()
 df = pd.read_csv(filename, delimiter="\t", names=["row", "col"])
-G = Matrix.from_values(df["row"].values, df["col"].values, 1)
+G = Matrix.from_coo(df["row"].values, df["col"].values, 1)
 stop = timeit.default_timer()
 num_nodes = G.nrows
 num_edges = G.nvals
 if _get_result:
 result = pagerank(G, alpha=alpha, tol=tol)
 result(~result.S) << 0  # Densify just in case
-return result.to_values()[1]
+return result.to_coo()[1]
 
 elif backend == "scipy":
 import pandas as pd
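Note: the benchmark's load step, for reference: a two-column TSV edge list goes straight into `Matrix.from_coo`, with the scalar `1` broadcast as the value of every edge. A standalone sketch (hypothetical file name):

    import pandas as pd
    from graphblas import Matrix

    df = pd.read_csv("edges.tsv", delimiter="\t", names=["row", "col"])
    G = Matrix.from_coo(df["row"].values, df["col"].values, 1)  # 1 broadcast to all
    print(G.nrows, G.nvals)  # num_nodes, num_edges as in the benchmark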