Remove unnecessary CUDA utilities (#4855)

This PR removes some utilities that were updated in #4830 but are no longer needed.

xref: rapidsai/build-planning#117

Authors:
  - Bradley Dice (https://github.com/bdice)

Approvers:
  - James Lamb (https://github.com/jameslamb)
  - Rick Ratzel (https://github.com/rlratzel)

URL: #4855
bdice authored Jan 9, 2025
1 parent cddd69e commit e46ff65
Showing 4 changed files with 3 additions and 61 deletions.
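
For context, each removed helper wrapped the same few `cuda.bindings.runtime` calls. Below is a minimal, self-contained sketch of the device-capability check they provided, reconstructed from the deleted code shown in this diff; the function name is illustrative, not a cugraph API:

from cuda.bindings import runtime


def device_version_less_than(min_version):
    # Return True if the current device's compute capability is below
    # min_version, e.g. (7, 0) for Volta. Mirrors the deleted
    # cugraph.utilities.utils.is_device_version_less_than.
    status, device_id = runtime.cudaGetDevice()
    if status != runtime.cudaError_t.cudaSuccess:
        raise RuntimeError("Could not get CUDA device.")
    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
    if status != runtime.cudaError_t.cudaSuccess:
        raise RuntimeError("Could not get CUDA device properties.")
    return (device_prop.major, device_prop.minor) < min_version


# Usage, equivalent to the benchmark decorator this PR deletes:
# @pytest.mark.skipif(device_version_less_than((7, 0)), reason="Not supported on Pascal")
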
benchmarks/cugraph/pytest-based/bench_algos.py (6 changes: 1 addition & 5 deletions)
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2024, NVIDIA CORPORATION.
+# Copyright (c) 2020-2025, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -42,7 +42,6 @@ def setFixtureParamNames(*args, **kwargs):
from cugraph.structure.number_map import NumberMap
from cugraph.generators import rmat
from cugraph.testing import utils, mg_utils
-from cugraph.utilities.utils import is_device_version_less_than

from cugraph_benchmarking.params import (
    directed_datasets,
@@ -362,9 +361,6 @@ def bench_sorensen(gpubenchmark, unweighted_graph):
    gpubenchmark(sorensen, G, vert_pairs)


-@pytest.mark.skipif(
-    is_device_version_less_than((7, 0)), reason="Not supported on Pascal"
-)
def bench_louvain(gpubenchmark, graph):
    louvain = dask_cugraph.louvain if is_graph_distributed(graph) else cugraph.louvain
    gpubenchmark(louvain, graph)
ci/notebook_list.py (15 changes: 1 addition & 14 deletions)
@@ -41,16 +41,7 @@ def _get_cuda_version_string():
    minor //= 10
    return f"{major}.{minor}"


-def _is_ampere_or_newer():
-    status, device_id = runtime.cudaGetDevice()
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device.")
-    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device properties.")
-    return (device_prop.major, device_prop.minor) >= (8, 0)

cuda_version_string = _get_cuda_version_string()

parser = argparse.ArgumentParser(description="Condition for running the notebook tests")
parser.add_argument("runtype", type=str)
@@ -86,10 +77,6 @@ def _is_ampere_or_newer():
            )
            skip = True
            break
-        elif _is_ampere_or_newer() and re.search("# Does not run on Ampere", line):
-            print(f"SKIPPING {filename} (does not run on Ampere)", file=sys.stderr)
-            skip = True
-            break
        elif re.search("# Does not run on CUDA ", line) and (
            cuda_version_string in line
        ):
python/cugraph/cugraph/traversal/ms_bfs.py (3 changes: 1 addition & 2 deletions)
@@ -1,4 +1,4 @@
-# Copyright (c) 2021-2023, NVIDIA CORPORATION.
+# Copyright (c) 2021-2025, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -56,7 +56,6 @@ def _get_feasibility(G, sources, components=None, depth_limit=None):

    # Fixme not implemented in RMM yet
    # using 96GB upper bound for now
-    # mem = get_device_memory_info()
    mem = 9.6e10
    n_sources = sources.size
    V = G.number_of_vertices()
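
If a caller still needs the actual device total instead of the fixed 9.6e10-byte bound, the query that the removed helper performed is short enough to inline. A sketch under the same `cuda.bindings` assumption, with an illustrative name:

from cuda.bindings import runtime


def total_device_memory_bytes():
    # Total global memory of the current device, in bytes; mirrors the
    # deleted get_device_memory_info() shown in the next file.
    status, device_id = runtime.cudaGetDevice()
    if status != runtime.cudaError_t.cudaSuccess:
        raise RuntimeError("Could not get CUDA device.")
    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
    if status != runtime.cudaError_t.cudaSuccess:
        raise RuntimeError("Could not get CUDA device properties.")
    return device_prop.totalGlobalMem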
python/cugraph/cugraph/utilities/utils.py (40 changes: 0 additions & 40 deletions)
@@ -18,8 +18,6 @@
import cudf
from cudf.core.column import as_column

-from cuda.bindings import runtime
-
from warnings import warn

# optional dependencies
@@ -207,44 +205,6 @@ def get_traversed_path_list(df, id):
    return answer


-def is_cuda_version_less_than(min_version):
-    """
-    Returns True if the version of CUDA being used is less than min_version
-    """
-    status, version = runtime.getLocalRuntimeVersion()
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA runtime version.")
-    major = version // 1000
-    minor = (version % 1000) // 10
-    return (major, minor) < min_version
-
-
-def is_device_version_less_than(min_version):
-    """
-    Returns True if the compute capability of the device is less than min_version
-    """
-    status, device_id = runtime.cudaGetDevice()
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device.")
-    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device properties.")
-    return (device_prop.major, device_prop.minor) < min_version
-
-
-def get_device_memory_info():
-    """
-    Returns the total amount of global memory on the device in bytes
-    """
-    status, device_id = runtime.cudaGetDevice()
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device.")
-    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device properties.")
-    return device_prop.totalGlobalMem


# FIXME: if G is a Nx type, the weight attribute is assumed to be "weight", if
# set. An additional optional parameter for the weight attr name when accepting
# Nx graphs may be needed. From the Nx docs:
