@@ -2,6 +2,7 @@
 from hypothesis import HealthCheck, Verbosity, assume, given, settings, strategies as st
 from sklearn.preprocessing import StandardScaler
 
+import cunumeric as cn
 import legateboost as lb
 from legate.core import TaskTarget, get_legate_runtime
 
@@ -25,11 +26,11 @@
 @st.composite
 def tree_strategy(draw):
     if get_legate_runtime().machine.count(TaskTarget.GPU) > 0:
-        max_depth = draw(st.integers(1, 12))
+        max_depth = draw(st.integers(1, 8))
     else:
         max_depth = draw(st.integers(1, 6))
     alpha = draw(st.floats(0.0, 1.0))
-    split_samples = draw(st.integers(1, 1000))
+    split_samples = draw(st.integers(1, 500))
     return lb.models.Tree(max_depth=max_depth, alpha=alpha, split_samples=split_samples)
 
 
@@ -143,6 +144,7 @@ def regression_dataset_strategy(draw):
     regression_param_strategy,
     regression_dataset_strategy(),
 )
+@cn.errstate(divide="raise", invalid="raise")
 def test_regressor(model_params, regression_params, regression_dataset):
     X, y, w = regression_dataset
     eval_result = {}
@@ -156,7 +158,7 @@ def test_regressor(model_params, regression_params, regression_dataset):
     )
     model.predict(X)
     loss = next(iter(eval_result["train"].values()))
-    assert non_increasing(loss, tol=1e-2)
+    assert non_increasing(loss, tol=1e-1)
     sanity_check_models(model)
 
 
@@ -238,12 +240,12 @@ def classification_dataset_strategy(draw):
     classification_param_strategy,
     classification_dataset_strategy(),
 )
+@cn.errstate(divide="raise", invalid="raise")
 def test_classifier(
     model_params: dict, classification_params: dict, classification_dataset: tuple
 ) -> None:
     X, y, w, name = classification_dataset
     eval_result = {}
-    model_params["n_estimators"] = 3
     model = lb.LBClassifier(**model_params, **classification_params).fit(
         X, y, sample_weight=w, eval_result=eval_result
     )
@@ -253,4 +255,4 @@ def test_classifier(
     loss = next(iter(eval_result["train"].values()))
     # multiclass models with higher learning rates don't always converge
     if len(model.classes_) == 2:
-        assert non_increasing(loss, 1e-2)
+        assert non_increasing(loss, 1e-1)
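
The `@cn.errstate(divide="raise", invalid="raise")` decorators added above make the property tests fail loudly instead of letting NaN or inf propagate silently through a fit. A minimal sketch of the mechanism, assuming cunumeric's `errstate` follows NumPy's semantics (since NumPy 1.17, `errstate` doubles as a decorator and converts the matching floating-point warnings into `FloatingPointError`); the example below uses NumPy directly so it runs anywhere:

import numpy as np


@np.errstate(divide="raise", invalid="raise")
def unstable_loss(pred):
    # log(0) would normally yield -inf with only a RuntimeWarning;
    # under errstate(divide="raise") it raises FloatingPointError instead.
    return -np.log(pred)


try:
    unstable_loss(np.array([0.5, 0.0]))
except FloatingPointError as e:
    print(f"caught: {e}")  # e.g. "divide by zero encountered in log"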
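
The tightened bounds in `tree_strategy` (`max_depth` up to 8 on GPU, `split_samples` up to 500) keep generated trees small enough for the tests to run in reasonable time. For reference, a standalone sketch of how a Hypothesis composite strategy like this drives a test; `tree_params` and `test_bounds` are hypothetical stand-ins that return a plain dict rather than `lb.models.Tree`, so the example runs without legateboost installed:

from hypothesis import given, strategies as st


@st.composite
def tree_params(draw):
    # draw() pulls one example from each sub-strategy per generated test case
    max_depth = draw(st.integers(1, 8))
    alpha = draw(st.floats(0.0, 1.0))
    split_samples = draw(st.integers(1, 500))
    return {"max_depth": max_depth, "alpha": alpha, "split_samples": split_samples}


@given(tree_params())
def test_bounds(params):
    assert 1 <= params["max_depth"] <= 8
    assert 0.0 <= params["alpha"] <= 1.0


test_bounds()  # Hypothesis calls this many times with generated parameter sets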