2
2
from typing import List , Optional
3
3
from unittest .mock import Mock , patch
4
4
5
+ import kubeflow .katib as katib
5
6
import kubeflow .katib .katib_api_pb2 as katib_api_pb2
6
7
import pytest
8
+ import transformers
7
9
from kubeflow .katib import (
8
10
KatibClient ,
9
11
V1beta1AlgorithmSpec ,
16
18
V1beta1TrialTemplate ,
17
19
)
18
20
from kubeflow .katib .constants import constants
21
+ from kubeflow .storage_initializer .hugging_face import (
22
+ HuggingFaceDatasetParams ,
23
+ HuggingFaceModelParams ,
24
+ HuggingFaceTrainerParams ,
25
+ )
19
26
from kubernetes .client import V1ObjectMeta
20
27
28
# Sentinel namespace name: the mocked CoreV1Api raises on PVC creation and
# returns a mismatched PVC name on listing when tune() targets this namespace.
PVC_FAILED = "pvc creation failed"

# Marker used in parametrized test data for cases expected to succeed
# (the alternative is an exception class the call must raise).
TEST_RESULT_SUCCESS = "success"
22
31
23
32
@@ -57,6 +66,27 @@ def get_observation_log_response(*args, **kwargs):
57
66
)
58
67
59
68
69
def create_namespaced_persistent_volume_claim_response(*args, **kwargs):
    """Mock side effect for CoreV1Api.create_namespaced_persistent_volume_claim.

    Raises when called with the PVC-failure sentinel namespace; otherwise
    returns a minimal PVC-like payload.
    """
    if kwargs.get("namespace") == PVC_FAILED:
        raise Exception("PVC creation failed")
    return {"metadata": {"name": "tune_test"}}
76
def list_namespaced_persistent_volume_claim_response(*args, **kwargs):
    """Mock side effect for CoreV1Api.list_namespaced_persistent_volume_claim.

    Returns a Mock PVC list with a single item whose name matches the
    expected PVC ("tune_test") for normal namespaces, or a mismatched name
    ("pvc_failed") for the failure-sentinel namespace.
    """
    # Both branches build the same structure; only the PVC name differs.
    failed = kwargs.get("namespace") == PVC_FAILED
    mock_pvc = Mock()
    mock_pvc.metadata.name = "pvc_failed" if failed else "tune_test"
    mock_list = Mock()
    mock_list.items = [mock_pvc]
    return mock_list
88
+
89
+
60
90
def generate_trial_template () -> V1beta1TrialTemplate :
61
91
trial_spec = {
62
92
"apiVersion" : "batch/v1" ,
@@ -270,6 +300,215 @@ def create_experiment(
270
300
]
271
301
272
302
303
# Parametrized cases for test_tune. Each entry is a triple:
#   (test_name, kwargs passed to KatibClient.tune, expected_output)
# where expected_output is TEST_RESULT_SUCCESS for cases that must succeed,
# or the exception type tune() must raise for invalid/failing input.
test_tune_data = [
    (
        "missing name",
        {
            "name": None,
            "objective": lambda x: print(f"a={x}"),
            "parameters": {"a": katib.search.int(min=10, max=100)},
        },
        ValueError,
    ),
    # Custom-objective and external-model arguments are mutually exclusive.
    (
        "invalid hybrid parameters - objective and model_provider_parameters",
        {
            "name": "tune_test",
            "objective": lambda x: print(f"a={x}"),
            "model_provider_parameters": HuggingFaceModelParams(
                model_uri="hf://google-bert/bert-base-cased",
                transformer_type=transformers.AutoModelForSequenceClassification,
                num_labels=5,
            ),
        },
        ValueError,
    ),
    (
        "missing parameters - no custom objective or external model tuning",
        {
            "name": "tune_test",
        },
        ValueError,
    ),
    (
        "missing parameters in custom objective tuning - lack parameters",
        {
            "name": "tune_test",
            "objective": lambda x: print(f"a={x}"),
        },
        ValueError,
    ),
    (
        "missing parameters in custom objective tuning - lack objective",
        {
            "name": "tune_test",
            "parameters": {"a": katib.search.int(min=10, max=100)},
        },
        ValueError,
    ),
    # External model tuning requires model, dataset, and trainer parameters
    # together; each of the next three cases omits two of the three.
    (
        "missing parameters in external model tuning - lack dataset_provider_parameters "
        "and trainer_parameters",
        {
            "name": "tune_test",
            "model_provider_parameters": HuggingFaceModelParams(
                model_uri="hf://google-bert/bert-base-cased",
                transformer_type=transformers.AutoModelForSequenceClassification,
                num_labels=5,
            ),
        },
        ValueError,
    ),
    (
        "missing parameters in external model tuning - lack model_provider_parameters "
        "and trainer_parameters",
        {
            "name": "tune_test",
            "dataset_provider_parameters": HuggingFaceDatasetParams(
                repo_id="yelp_review_full",
                split="train[:3000]",
            ),
        },
        ValueError,
    ),
    (
        "missing parameters in external model tuning - lack model_provider_parameters "
        "and dataset_provider_parameters",
        {
            "name": "tune_test",
            "trainer_parameters": HuggingFaceTrainerParams(
                training_parameters=transformers.TrainingArguments(
                    output_dir="test_tune_api",
                    learning_rate=katib.search.double(min=1e-05, max=5e-05),
                ),
            ),
        },
        ValueError,
    ),
    (
        "invalid env_per_trial",
        {
            "name": "tune_test",
            "objective": lambda x: print(f"a={x}"),
            "parameters": {"a": katib.search.int(min=10, max=100)},
            "env_per_trial": "invalid",
        },
        ValueError,
    ),
    # Type validation: each provider/trainer argument in turn is replaced by
    # a plain string that is not the expected params object.
    (
        "invalid model_provider_parameters",
        {
            "name": "tune_test",
            "model_provider_parameters": "invalid",
            "dataset_provider_parameters": HuggingFaceDatasetParams(
                repo_id="yelp_review_full",
                split="train[:3000]",
            ),
            "trainer_parameters": HuggingFaceTrainerParams(
                training_parameters=transformers.TrainingArguments(
                    output_dir="test_tune_api",
                    learning_rate=katib.search.double(min=1e-05, max=5e-05),
                ),
            ),
        },
        ValueError,
    ),
    (
        "invalid dataset_provider_parameters",
        {
            "name": "tune_test",
            "model_provider_parameters": HuggingFaceModelParams(
                model_uri="hf://google-bert/bert-base-cased",
                transformer_type=transformers.AutoModelForSequenceClassification,
                num_labels=5,
            ),
            "dataset_provider_parameters": "invalid",
            "trainer_parameters": HuggingFaceTrainerParams(
                training_parameters=transformers.TrainingArguments(
                    output_dir="test_tune_api",
                    learning_rate=katib.search.double(min=1e-05, max=5e-05),
                ),
            ),
        },
        ValueError,
    ),
    (
        "invalid trainer_parameters",
        {
            "name": "tune_test",
            "model_provider_parameters": HuggingFaceModelParams(
                model_uri="hf://google-bert/bert-base-cased",
                transformer_type=transformers.AutoModelForSequenceClassification,
                num_labels=5,
            ),
            "dataset_provider_parameters": HuggingFaceDatasetParams(
                repo_id="yelp_review_full",
                split="train[:3000]",
            ),
            "trainer_parameters": "invalid",
        },
        ValueError,
    ),
    # The mocked CoreV1Api raises on PVC creation for the PVC_FAILED
    # namespace and lists a mismatched PVC name, so tune() cannot recover.
    (
        "pvc creation failed",
        {
            "name": "tune_test",
            "namespace": PVC_FAILED,
            "model_provider_parameters": HuggingFaceModelParams(
                model_uri="hf://google-bert/bert-base-cased",
                transformer_type=transformers.AutoModelForSequenceClassification,
                num_labels=5,
            ),
            "dataset_provider_parameters": HuggingFaceDatasetParams(
                repo_id="yelp_review_full",
                split="train[:3000]",
            ),
            "trainer_parameters": HuggingFaceTrainerParams(
                training_parameters=transformers.TrainingArguments(
                    output_dir="test_tune_api",
                    learning_rate=katib.search.double(min=1e-05, max=5e-05),
                ),
            ),
        },
        RuntimeError,
    ),
    (
        "valid flow with custom objective tuning",
        {
            "name": "tune_test",
            "objective": lambda x: print(f"a={x}"),
            "parameters": {"a": katib.search.int(min=10, max=100)},
            "objective_metric_name": "a",
        },
        TEST_RESULT_SUCCESS,
    ),
    (
        "valid flow with external model tuning",
        {
            "name": "tune_test",
            "model_provider_parameters": HuggingFaceModelParams(
                model_uri="hf://google-bert/bert-base-cased",
                transformer_type=transformers.AutoModelForSequenceClassification,
                num_labels=5,
            ),
            "dataset_provider_parameters": HuggingFaceDatasetParams(
                repo_id="yelp_review_full",
                split="train[:3000]",
            ),
            "trainer_parameters": HuggingFaceTrainerParams(
                training_parameters=transformers.TrainingArguments(
                    output_dir="test_tune_api",
                    learning_rate=katib.search.double(min=1e-05, max=5e-05),
                ),
            ),
            "objective_metric_name": "train_loss",
            "objective_type": "minimize",
        },
        TEST_RESULT_SUCCESS,
    ),
]
510
+
511
+
273
512
@pytest .fixture
274
513
def katib_client ():
275
514
with patch (
@@ -284,6 +523,16 @@ def katib_client():
284
523
return_value = Mock (
285
524
GetObservationLog = Mock (side_effect = get_observation_log_response )
286
525
),
526
+ ), patch (
527
+ "kubernetes.client.CoreV1Api" ,
528
+ return_value = Mock (
529
+ create_namespaced_persistent_volume_claim = Mock (
530
+ side_effect = create_namespaced_persistent_volume_claim_response
531
+ ),
532
+ list_namespaced_persistent_volume_claim = Mock (
533
+ side_effect = list_namespaced_persistent_volume_claim_response
534
+ ),
535
+ ),
287
536
):
288
537
client = KatibClient ()
289
538
yield client
@@ -320,3 +569,90 @@ def test_get_trial_metrics(katib_client, test_name, kwargs, expected_output):
320
569
except Exception as e :
321
570
assert type (e ) is expected_output
322
571
print ("test execution complete" )
572
+
573
+
574
def _verify_custom_objective_experiment(experiment):
    """Assert the Experiment built for the custom-objective flow is correct."""
    # The trial container's args must embed the substituted search parameter.
    args_content = "".join(
        experiment.spec.trial_template.trial_spec.spec.template.spec.containers[
            0
        ].args
    )
    assert "'a': '${trialParameters.a}'" in args_content
    # Trial parameters.
    assert experiment.spec.trial_template.trial_parameters == [
        V1beta1TrialParameterSpec(name="a", reference="a"),
    ]
    # Experiment search space.
    assert experiment.spec.parameters == [
        V1beta1ParameterSpec(
            name="a",
            parameter_type="int",
            feasible_space=V1beta1FeasibleSpace(min="10", max="100"),
        ),
    ]
    # Objective spec (type defaults to "maximize" when not given).
    assert experiment.spec.objective == V1beta1ObjectiveSpec(
        type="maximize",
        objective_metric_name="a",
        additional_metric_names=[],
    )


def _verify_external_model_experiment(experiment):
    """Assert the Experiment built for the external-model flow is correct."""
    # External model tuning runs a PyTorchJob; check the Master replica args.
    args_content = "".join(
        experiment.spec.trial_template.trial_spec.spec.pytorch_replica_specs[
            "Master"
        ]
        .template.spec.containers[0]
        .args
    )
    assert '"learning_rate": "${trialParameters.learning_rate}"' in args_content
    # Trial parameters.
    assert experiment.spec.trial_template.trial_parameters == [
        V1beta1TrialParameterSpec(name="learning_rate", reference="learning_rate"),
    ]
    # Experiment search space.
    assert experiment.spec.parameters == [
        V1beta1ParameterSpec(
            name="learning_rate",
            parameter_type="double",
            feasible_space=V1beta1FeasibleSpace(min="1e-05", max="5e-05"),
        ),
    ]
    # Objective spec.
    assert experiment.spec.objective == V1beta1ObjectiveSpec(
        type="minimize",
        objective_metric_name="train_loss",
        additional_metric_names=[],
    )


@pytest.mark.parametrize("test_name,kwargs,expected_output", test_tune_data)
def test_tune(katib_client, test_name, kwargs, expected_output):
    """Test the tune function of the Katib client.

    ``expected_output`` is either ``TEST_RESULT_SUCCESS`` or the exception
    type ``tune`` must raise for the given kwargs.

    NOTE: the previous try/except + ``assert type(e) is expected_output``
    pattern passed vacuously when an expected exception was never raised;
    ``pytest.raises`` makes such cases fail as they should.
    """
    print("\n\nExecuting test:", test_name)

    with patch.object(
        katib_client, "create_experiment", return_value=Mock()
    ) as mock_create_experiment:
        if expected_output == TEST_RESULT_SUCCESS:
            katib_client.tune(**kwargs)
            mock_create_experiment.assert_called_once()
            # Inspect the Experiment object handed to create_experiment.
            experiment = mock_create_experiment.call_args[0][0]

            if test_name == "valid flow with custom objective tuning":
                _verify_custom_objective_experiment(experiment)
            elif test_name == "valid flow with external model tuning":
                _verify_external_model_experiment(experiment)
        else:
            # Failure cases must actually raise the expected exception type.
            with pytest.raises(expected_output):
                katib_client.tune(**kwargs)

    print("test execution complete")
0 commit comments