@@ -174,7 +174,7 @@ def test_auto_ml_v2_attach(problem_type, job_name_fixture_key, sagemaker_session
     assert desc["AutoMLJobName"] == job_name
     assert desc["AutoMLJobStatus"] in ["InProgress", "Completed"]
     assert desc["AutoMLJobSecondaryStatus"] != "Failed"
-    assert desc["ProblemConfig"] == auto_ml_v2_utils.PROBLEM_CONFIGS[problem_type]
+    assert sorted(desc["AutoMLProblemTypeConfig"]) == sorted(auto_ml_v2_utils.PROBLEM_CONFIGS[problem_type])
     assert desc["OutputDataConfig"] == expected_default_output_config
@@ -251,6 +251,8 @@ def test_list_candidates(
         candidates = auto_ml.list_candidates(job_name=job_name)
         assert len(candidates) == num_candidates
+    else:
+        pytest.skip("The job hasn't finished yet")


 @pytest.mark.skipif(
@@ -320,6 +322,8 @@ def test_best_candidate(
         best_candidate = auto_ml.best_candidate(job_name=job_name)
         assert len(best_candidate["InferenceContainers"]) == num_containers
         assert best_candidate["CandidateStatus"] == "Completed"
+    else:
+        pytest.skip("The job hasn't finished yet")


 @pytest.mark.skipif(
@@ -411,6 +415,8 @@ def test_deploy_best_candidate(
         )["EndpointStatus"]
         assert endpoint_status == "InService"
         sagemaker_session.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
+    else:
+        pytest.skip("The job hasn't finished yet")


 @pytest.mark.skipif(
@@ -482,3 +488,5 @@ def test_candidate_estimator_get_steps(
         candidate_estimator = CandidateEstimator(candidate, sagemaker_session)
         steps = candidate_estimator.get_steps()
         assert len(steps) == num_steps
+    else:
+        pytest.skip("The job hasn't finished yet")
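The hunks above only show fragments, so here is a minimal sketch of the guard pattern each one introduces: assert on results only when the AutoML job has completed, and skip (rather than fail) while it is still running. The fixture names and the `describe_auto_ml_job` call are assumptions based on the surrounding test module, not shown in this diff.

```python
import pytest


def test_list_candidates(auto_ml, job_name, num_candidates):
    # Assumed: the test resolves job status before inspecting candidates.
    desc = auto_ml.describe_auto_ml_job(job_name=job_name)
    if desc["AutoMLJobStatus"] == "Completed":
        # Job finished: the candidate list should be complete and countable.
        candidates = auto_ml.list_candidates(job_name=job_name)
        assert len(candidates) == num_candidates
    else:
        # Job still in progress: skip instead of reporting a false failure.
        pytest.skip("The job hasn't finished yet")
```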