
Commit 1f9a4b8

Use RAPIDS 24.06 for the build (#109)
1 parent d285e04 commit 1f9a4b8

File tree

4 files changed: +11 −7

.github/workflows/github-actions.yml

+2 −1

@@ -49,7 +49,8 @@ jobs:
           echo " - python=3.10" >> environment-test-linux-cuda11.8-compilers-openmpi-ucx.yaml;
           mamba env create -n legate -f environment-test-linux-cuda11.8-compilers-openmpi-ucx.yaml;
           mamba activate legate;
-          mamba install -y build scikit-learn hypothesis 'pytest<8' notebook;
+          # Downgrade NumPy; as of 2024-07-01 it triggers a mypy bug
+          mamba install -y build scikit-learn hypothesis 'pytest<8' notebook 'numpy<2';
           pip install matplotlib seaborn xgboost
           export LEGATE_CORE_ARCH='arch-linux-py-relwithdebinfo';
           ./configure --with-python --cuda-arch 70 --with-cuda --build-type=relwithdebinfo --with-nccl --with-ucx && make && pip install .;
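The in-diff comment explains the pin: NumPy 2.x trips a mypy bug as of 2024-07-01, so the CI environment stays on 1.x. If a similar guard were wanted at runtime rather than at environment creation, a minimal sketch might be (illustrative only, not part of this commit):

import numpy as np

# Fail fast if a NumPy 2.x build slipped past the environment pin above.
major = int(np.__version__.split(".")[0])
assert major < 2, f"this build expects numpy<2, found {np.__version__}"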

cmake/thirdparty/fetch_rapids.cmake

+1 −1

@@ -12,7 +12,7 @@
 # the License.

 # Use this variable to update RAPIDS and RAFT versions
-set(RAPIDS_VERSION "24.02")
+set(RAPIDS_VERSION "24.06")

 if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/RAFT_RAPIDS.cmake)
   file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-${RAPIDS_VERSION}/RAPIDS.cmake

legateboost/test/test_with_hypothesis.py

+7 −5

@@ -2,6 +2,7 @@
 from hypothesis import HealthCheck, Verbosity, assume, given, settings, strategies as st
 from sklearn.preprocessing import StandardScaler

+import cunumeric as cn
 import legateboost as lb
 from legate.core import TaskTarget, get_legate_runtime

@@ -25,11 +26,11 @@
 @st.composite
 def tree_strategy(draw):
     if get_legate_runtime().machine.count(TaskTarget.GPU) > 0:
-        max_depth = draw(st.integers(1, 12))
+        max_depth = draw(st.integers(1, 8))
     else:
         max_depth = draw(st.integers(1, 6))
     alpha = draw(st.floats(0.0, 1.0))
-    split_samples = draw(st.integers(1, 1000))
+    split_samples = draw(st.integers(1, 500))
     return lb.models.Tree(max_depth=max_depth, alpha=alpha, split_samples=split_samples)


@@ -143,6 +144,7 @@ def regression_dataset_strategy(draw):
     regression_param_strategy,
     regression_dataset_strategy(),
 )
+@cn.errstate(divide="raise", invalid="raise")
 def test_regressor(model_params, regression_params, regression_dataset):
     X, y, w = regression_dataset
     eval_result = {}
@@ -156,7 +158,7 @@ def test_regressor(model_params, regression_params, regression_dataset):
     )
     model.predict(X)
     loss = next(iter(eval_result["train"].values()))
-    assert non_increasing(loss, tol=1e-2)
+    assert non_increasing(loss, tol=1e-1)
     sanity_check_models(model)


@@ -238,12 +240,12 @@ def classification_dataset_strategy(draw):
     classification_param_strategy,
     classification_dataset_strategy(),
 )
+@cn.errstate(divide="raise", invalid="raise")
 def test_classifier(
     model_params: dict, classification_params: dict, classification_dataset: tuple
 ) -> None:
     X, y, w, name = classification_dataset
     eval_result = {}
-    model_params["n_estimators"] = 3
     model = lb.LBClassifier(**model_params, **classification_params).fit(
         X, y, sample_weight=w, eval_result=eval_result
     )
@@ -253,4 +255,4 @@ def test_classifier(
     loss = next(iter(eval_result["train"].values()))
     # multiclass models with higher learning rates don't always converge
     if len(model.classes_) == 2:
-        assert non_increasing(loss, 1e-2)
+        assert non_increasing(loss, 1e-1)
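Two patterns in this diff may be worth spelling out. The composite strategy draws bounded hyperparameters (the GPU max_depth bound drops from 12 to 8 and split_samples from 1000 to 500, shrinking the search space), and the new cn.errstate(divide="raise", invalid="raise") decorator, like NumPy's errstate, promotes silent divide-by-zero and invalid-value floating-point warnings into FloatingPointError for the duration of the test; the loosened tol=1e-1 then gives the loss check more slack. A minimal sketch of the pattern with plain NumPy standing in for cunumeric; the dict-returning strategy and the test body are illustrative, not the legateboost API:

import numpy as np
from hypothesis import given, settings, strategies as st

@st.composite
def tree_strategy(draw):
    # Bounded hyperparameter draws, mirroring the GPU branch above.
    max_depth = draw(st.integers(1, 8))
    alpha = draw(st.floats(0.0, 1.0))
    split_samples = draw(st.integers(1, 500))
    return {"max_depth": max_depth, "alpha": alpha, "split_samples": split_samples}

@settings(max_examples=10, deadline=None)
@given(params=tree_strategy())
@np.errstate(divide="raise", invalid="raise")  # warnings become FloatingPointError
def test_tree_params(params):
    # Any np.log(0.0) or 0.0/0.0 reached in here now raises instead of warning.
    assert 1 <= params["max_depth"] <= 8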

legateboost/utils.py

+1
@@ -92,6 +92,7 @@ def preround(x: cn.ndarray) -> cn.ndarray:
     m = cn.sum(cn.abs(x))
     n = x.size
     delta = cn.floor(m / (one - two * n * eps))
+    delta = cn.maximum(delta, one)
     M = two ** cn.ceil(cn.log2(delta))
     return (x + M) - M
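The added line guards a degenerate input: if x sums to zero (for example, an all-zero gradient vector), delta is 0 and cn.log2(delta) divides by zero, which the tests above now promote to FloatingPointError via errstate. Clamping delta to at least one keeps log2 finite and leaves a zero vector unchanged. A NumPy sketch of the surrounding function, assuming one, two, and eps are dtype-matched scalars with eps the machine epsilon (names taken from the visible context):

import numpy as np

def preround(x: np.ndarray) -> np.ndarray:
    # NumPy stand-in for the cunumeric version above. Adding and then
    # subtracting the power-of-two M rounds each element to a common grid,
    # the classic (x + M) - M trick.
    eps = np.finfo(x.dtype).eps              # assumption: machine epsilon of x's dtype
    one, two = x.dtype.type(1), x.dtype.type(2)
    m = np.sum(np.abs(x))
    n = x.size
    delta = np.floor(m / (one - two * n * eps))
    delta = np.maximum(delta, one)           # the fix: keeps log2(delta) finite
    M = two ** np.ceil(np.log2(delta))
    return (x + M) - M

with np.errstate(divide="raise", invalid="raise"):
    print(preround(np.zeros(4)))             # [0. 0. 0. 0.]; log2(0) raised before the fix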
