Commit 6235d7e

reformat files

1 parent 3099e13 commit 6235d7e
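
The changes below are consistent with an auto-formatter pass in the style of black; that is an assumption on my part, since the commit message says only "reformat files". The recurring rewrites are mechanical: single-quoted strings become double-quoted, a backslash-continued `with` statement is joined onto one line, long call arguments are re-wrapped one per line, blank lines carrying trailing whitespace are stripped, and src/sagemaker/__init__.py gets an apparently whitespace-only touch (its removed and added lines render identically below). A minimal before/after sketch of the dominant call-wrapping rewrite (illustrative only, not a line from this commit):

from omegaconf import OmegaConf

# Before: the dict literal opens on the call line
cfg = OmegaConf.create({
    "trainer": {"num_nodes": 1},
})

# After: the dict is wrapped as a standalone argument, one level deeper
cfg = OmegaConf.create(
    {
        "trainer": {"num_nodes": 1},
    }
)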

4 files changed, +63 -51 lines changed

src/sagemaker/__init__.py
Lines changed: 1 addition & 1 deletion

@@ -63,7 +63,7 @@
 from sagemaker.automl.automl import AutoML, AutoMLJob, AutoMLInput  # noqa: F401
 from sagemaker.automl.candidate_estimator import CandidateEstimator, CandidateStep  # noqa: F401
 from sagemaker.automl.automlv2 import (  # noqa: F401
-    AutoMLV2,
+    AutoMLV2,
     AutoMLJobV2,
     LocalAutoMLDataChannel,
     AutoMLDataChannel,
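
For context, the touched block is the package-level re-export list; `# noqa: F401` tells flake8 not to flag names that are imported only so users can reach them from the top-level package. A sketch of the pattern, with hypothetical module and class names:

# mypkg/__init__.py  (hypothetical package illustrating the re-export pattern)
from mypkg.core import (  # noqa: F401
    Engine,
    EngineJob,
)
# Consumers can now write `from mypkg import Engine` without knowing about mypkg.core.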

tests/unit/sagemaker/modules/train/sm_recipes/test_utils.py
Lines changed: 9 additions & 9 deletions

@@ -110,24 +110,24 @@ def test_load_base_recipe_types(
 
     if recipe_type == "sagemaker":
         # Mock the file check to simulate recipe exists
-        with patch("os.path.isfile", return_value=True), \
-            patch("shutil.copy") as mock_copy:
+        with patch("os.path.isfile", return_value=True), patch("shutil.copy") as mock_copy:
             # Create a temporary recipe file for the copy operation
             import tempfile
             import os
-            with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
+
+            with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f:
                 yaml.dump({"trainer": {"num_nodes": 1}}, f)
                 temp_path = f.name
-
+
             def copy_side_effect(src, dst):
                 # Read from temp file and write to destination
-                with open(temp_path, 'r') as src_file:
+                with open(temp_path, "r") as src_file:
                     content = src_file.read()
-                with open(dst, 'w') as dst_file:
+                with open(dst, "w") as dst_file:
                     dst_file.write(content)
-
+
             mock_copy.side_effect = copy_side_effect
-
+
             load_recipe = _load_base_recipe(
                 training_recipe="training/llama/p4_hf_llama3_70b_seq8k_gpu",
                 recipe_overrides=None,

@@ -136,7 +136,7 @@ def copy_side_effect(src, dst):
             assert load_recipe is not None
             assert "trainer" in load_recipe
             assert mock_clone.call_args.args[0] == training_recipes_cfg.get("launcher_repo")
-
+
             # Clean up
             os.unlink(temp_path)
 
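Quote style aside, the substantive reformat here joins the backslash-continued `with` onto one line; behavior is unchanged. The pattern being reformatted is `unittest.mock`'s callable `side_effect`: the patched `shutil.copy` delegates to a real function so the copy destination actually gets written. A standalone sketch of that mechanism (file names are hypothetical, not from the commit):

import shutil
import tempfile
from unittest.mock import patch

# Stage a real file whose contents the fake copy will serve.
with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f:
    f.write("trainer:\n  num_nodes: 1\n")
    temp_path = f.name

def copy_side_effect(src, dst):
    # Ignore the requested source; always copy from the staged temp file.
    with open(temp_path, "r") as src_file, open(dst, "w") as dst_file:
        dst_file.write(src_file.read())

with patch("shutil.copy", side_effect=copy_side_effect):
    shutil.copy("recipe-that-does-not-exist.yaml", "resolved.yaml")
# resolved.yaml now holds the staged YAML even though the source never existed.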

tests/unit/sagemaker/modules/train/test_model_trainer.py
Lines changed: 8 additions & 6 deletions

@@ -934,13 +934,15 @@ def mock_upload_data(path, bucket, key_prefix):
 @patch("sagemaker.modules.train.model_trainer._load_base_recipe")
 def test_model_trainer_gpu_recipe_full_init(mock_load_recipe, modules_session):
     from omegaconf import OmegaConf
-
+
     # Mock the recipe loading to return a valid GPU recipe structure
-    mock_load_recipe.return_value = OmegaConf.create({
-        "trainer": {"num_nodes": 2},
-        "model": {"model_type": "llama_v3"},
-    })
-
+    mock_load_recipe.return_value = OmegaConf.create(
+        {
+            "trainer": {"num_nodes": 2},
+            "model": {"model_type": "llama_v3"},
+        }
+    )
+
     training_recipe = "training/llama/p4_hf_llama3_70b_seq8k_gpu"
     recipe_overrides = {"run": {"results_dir": "/opt/ml/model"}}
     compute = Compute(instance_type="ml.p4d.24xlarge", instance_count="2")
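
The re-wrapped `OmegaConf.create(...)` call is semantically identical to the deleted one; only the wrapping changed. For reference, `OmegaConf.create` accepts a plain dict and returns a config that supports both attribute and key access, which is why the tests can hand it in wherever a loaded YAML recipe is expected:

from omegaconf import OmegaConf

recipe = OmegaConf.create(
    {
        "trainer": {"num_nodes": 2},
        "model": {"model_type": "llama_v3"},
    }
)
assert recipe.trainer.num_nodes == 2                # attribute access
assert recipe["model"]["model_type"] == "llama_v3"  # key access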

tests/unit/test_pytorch.py
Lines changed: 45 additions & 35 deletions

@@ -900,23 +900,27 @@ def test_training_recipe_for_cpu(sagemaker_session):
 )
 @patch("sagemaker.pytorch.estimator.PyTorch._recipe_load")
 @patch("sagemaker.pytorch.estimator._get_training_recipe_gpu_script")
-def test_training_recipe_for_gpu(mock_gpu_script, mock_recipe_load, sagemaker_session, recipe, model):
+def test_training_recipe_for_gpu(
+    mock_gpu_script, mock_recipe_load, sagemaker_session, recipe, model
+):
     from omegaconf import OmegaConf
-
+
     # Mock the GPU script function to return the expected entry point
     mock_gpu_script.return_value = f"{model}_pretrain.py"
-
+
     # Mock the recipe structure that would be loaded
-    mock_recipe = OmegaConf.create({
-        "trainer": {
-            "num_nodes": 1,
-        },
-        "model": {
-            "model_type": model,
-        },
-    })
+    mock_recipe = OmegaConf.create(
+        {
+            "trainer": {
+                "num_nodes": 1,
+            },
+            "model": {
+                "model_type": model,
+            },
+        }
+    )
     mock_recipe_load.return_value = (recipe, mock_recipe)
-
+
     container_log_level = '"logging.INFO"'
 
     recipe_overrides = {

@@ -968,21 +972,23 @@ def test_training_recipe_for_gpu(mock_gpu_script, mock_recipe_load, sagemaker_se
 @patch("sagemaker.pytorch.estimator._get_training_recipe_gpu_script")
 def test_training_recipe_with_override(mock_gpu_script, mock_recipe_load, sagemaker_session):
     from omegaconf import OmegaConf
-
+
     # Mock the GPU script function to return the expected entry point
     mock_gpu_script.return_value = "mistral_pretrain.py"
-
+
     # Mock the recipe structure that would be loaded
-    mock_recipe = OmegaConf.create({
-        "trainer": {
-            "num_nodes": 1,
-        },
-        "model": {
-            "model_type": "mistral",
-        },
-    })
+    mock_recipe = OmegaConf.create(
+        {
+            "trainer": {
+                "num_nodes": 1,
+            },
+            "model": {
+                "model_type": "mistral",
+            },
+        }
+    )
     mock_recipe_load.return_value = ("hf_llama3_8b_seq8k_gpu_p5x16_pretrain", mock_recipe)
-
+
     container_log_level = '"logging.INFO"'
 
     recipe_overrides = {

@@ -1021,23 +1027,27 @@ def test_training_recipe_with_override(mock_gpu_script, mock_recipe_load, sagema
 
 @patch("sagemaker.pytorch.estimator.PyTorch._recipe_load")
 @patch("sagemaker.pytorch.estimator._get_training_recipe_gpu_script")
-def test_training_recipe_gpu_custom_source_dir(mock_gpu_script, mock_recipe_load, sagemaker_session):
+def test_training_recipe_gpu_custom_source_dir(
+    mock_gpu_script, mock_recipe_load, sagemaker_session
+):
     from omegaconf import OmegaConf
-
+
     # Mock the GPU script function to return the expected entry point
     mock_gpu_script.return_value = "mistral_pretrain.py"
-
+
     # Mock the recipe structure that would be loaded
-    mock_recipe = OmegaConf.create({
-        "trainer": {
-            "num_nodes": 1,
-        },
-        "model": {
-            "model_type": "mistral",
-        },
-    })
+    mock_recipe = OmegaConf.create(
+        {
+            "trainer": {
+                "num_nodes": 1,
+            },
+            "model": {
+                "model_type": "mistral",
+            },
+        }
+    )
     mock_recipe_load.return_value = ("hf_llama3_8b_seq8k_gpu_p5x16_pretrain", mock_recipe)
-
+
     container_log_level = '"logging.INFO"'
 
     recipe_overrides = {
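
A reading aid for the re-wrapped signatures above: stacked `@patch` decorators apply bottom-up, so the decorator nearest the function supplies the first injected mock. That is why `_get_training_recipe_gpu_script`, the lower decorator in each test, arrives as `mock_gpu_script`, the first parameter. A minimal self-contained sketch:

import os
from unittest.mock import patch

@patch("os.path.isfile")  # outer decorator -> second mock argument
@patch("os.path.isdir")   # nearest the function -> first mock argument
def check(mock_isdir, mock_isfile):
    mock_isdir.return_value = True
    mock_isfile.return_value = False
    assert os.path.isdir("anywhere") and not os.path.isfile("anywhere")

check()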
