@@ -836,6 +836,7 @@ def test_predictor_with_component_name(sagemaker_session, component_name):
     assert predictor._get_component_name() == component_name


+@pytest.mark.skip(reason="Hyperpod recipe code unavailable")
 def test_training_recipe_for_cpu(sagemaker_session):
     container_log_level = '"logging.INFO"'

@@ -864,17 +865,18 @@ def test_training_recipe_for_cpu(sagemaker_session):
         instance_type=INSTANCE_TYPE,
         base_job_name="job",
         container_log_level=container_log_level,
-        training_recipe="training/llama/hf_llama3_8b_seq8192_gpu",
+        training_recipe="training/llama/hf_llama3_8b_seq8k_gpu_p5x16_pretrain",
         recipe_overrides=recipe_overrides,
     )


+@pytest.mark.skip(reason="Hyperpod recipe code unavailable")
 @pytest.mark.parametrize(
     "recipe, model",
     [
-        ("hf_llama3_8b_seq8192_gpu", "llama"),
-        ("hf_mistral_gpu", "mistral"),
-        ("hf_mixtral_gpu", "mixtral"),
+        ("hf_llama3_8b_seq8k_gpu_p5x16_pretrain", "llama"),
+        ("hf_mistral_7b_seq8k_gpu_p5x16_pretrain", "mistral"),
+        ("hf_mixtral_8x7b_seq8k_gpu_p5x16_pretrain", "mixtral"),
     ],
 )
 def test_training_recipe_for_gpu(sagemaker_session, recipe, model):
@@ -925,6 +927,7 @@ def test_training_recipe_for_gpu(sagemaker_session, recipe, model):
     assert pytorch.distribution.items() == expected_distribution.items()


+@pytest.mark.skip(reason="Hyperpod recipe code unavailable")
 def test_training_recipe_with_override(sagemaker_session):
     container_log_level = '"logging.INFO"'

@@ -953,7 +956,7 @@ def test_training_recipe_with_override(sagemaker_session):
         instance_type=INSTANCE_TYPE_GPU,
         base_job_name="job",
         container_log_level=container_log_level,
-        training_recipe="training/llama/hf_llama3_8b_seq8192_gpu",
+        training_recipe="training/llama/hf_llama3_8b_seq8k_gpu_p5x16_pretrain",
         recipe_overrides=recipe_overrides,
     )

@@ -962,6 +965,7 @@ def test_training_recipe_with_override(sagemaker_session):
     assert pytorch.image_uri == IMAGE_URI


+@pytest.mark.skip(reason="Hyperpod recipe code unavailable")
 def test_training_recipe_gpu_custom_source_dir(sagemaker_session):
     container_log_level = '"logging.INFO"'

@@ -992,7 +996,7 @@ def test_training_recipe_gpu_custom_source_dir(sagemaker_session):
         instance_type=INSTANCE_TYPE_GPU,
         base_job_name="job",
         container_log_level=container_log_level,
-        training_recipe="training/llama/hf_llama3_8b_seq8192_gpu",
+        training_recipe="training/llama/hf_llama3_8b_seq8k_gpu_p5x16_pretrain",
         recipe_overrides=recipe_overrides,
     )
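For reference, a minimal standalone sketch of the decorator stacking applied throughout this diff: `@pytest.mark.skip` placed above `@pytest.mark.parametrize` causes every parametrized case to be collected but reported as skipped with the given reason. The test body and assertion below are illustrative placeholders, not the SDK's actual estimator setup.

```python
import pytest


# Sketch only: skip applies to all three parametrized cases.
@pytest.mark.skip(reason="Hyperpod recipe code unavailable")
@pytest.mark.parametrize(
    "recipe, model",
    [
        ("hf_llama3_8b_seq8k_gpu_p5x16_pretrain", "llama"),
        ("hf_mistral_7b_seq8k_gpu_p5x16_pretrain", "mistral"),
        ("hf_mixtral_8x7b_seq8k_gpu_p5x16_pretrain", "mixtral"),
    ],
)
def test_recipe_name_contains_model(recipe, model):
    # Placeholder assertion; the real tests construct a PyTorch estimator
    # with training_recipe and check its distribution/image settings.
    assert model in recipe
```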