 @patch("sagemaker_pytorch_serving_container.torchserve._install_requirements")
 @patch("os.path.exists", return_value=True)
 @patch("sagemaker_pytorch_serving_container.torchserve._create_torchserve_config_file")
-@patch("sagemaker_pytorch_serving_container.torchserve._adapt_to_ts_format")
+@patch("sagemaker_pytorch_serving_container.torchserve._set_python_path")
 def test_start_torchserve_default_service_handler(
-    adapt,
+    set_python_path,
     create_config,
     exists,
     install_requirements,
@@ -47,9 +47,8 @@ def test_start_torchserve_default_service_handler(
 ):
     torchserve.start_torchserve()
 
-    adapt.assert_called_once_with(torchserve.DEFAULT_HANDLER_SERVICE)
-    create_config.assert_called_once_with()
-    exists.assert_called_once_with(REQUIREMENTS_PATH)
+    set_python_path.assert_called_once_with()
+    create_config.assert_called_once_with(torchserve.DEFAULT_HANDLER_SERVICE)
     install_requirements.assert_called_once_with()
 
     ts_model_server_cmd = [
@@ -62,7 +61,7 @@ def test_start_torchserve_default_service_handler(
         "--log-config",
         torchserve.DEFAULT_TS_LOG_FILE,
         "--models",
-        "model.mar"
+        "model=/opt/ml/model"
     ]
 
     subprocess_popen.assert_called_once_with(ts_model_server_cmd)
@@ -76,9 +75,9 @@ def test_start_torchserve_default_service_handler(
 @patch("sagemaker_pytorch_serving_container.torchserve._install_requirements")
 @patch("os.path.exists", return_value=True)
 @patch("sagemaker_pytorch_serving_container.torchserve._create_torchserve_config_file")
-@patch("sagemaker_pytorch_serving_container.torchserve._adapt_to_ts_format")
+@patch("sagemaker_pytorch_serving_container.torchserve._set_python_path")
 def test_start_torchserve_default_service_handler_multi_model(
-    adapt,
+    set_python_path,
     create_config,
     exists,
     install_requirements,
@@ -90,7 +89,9 @@ def test_start_torchserve_default_service_handler_multi_model(
     torchserve.ENABLE_MULTI_MODEL = True
     torchserve.start_torchserve()
     torchserve.ENABLE_MULTI_MODEL = False
-    create_config.assert_called_once_with()
+
+    set_python_path.assert_called_once_with()
+    create_config.assert_called_once_with(torchserve.DEFAULT_HANDLER_SERVICE)
     exists.assert_called_once_with(REQUIREMENTS_PATH)
     install_requirements.assert_called_once_with()
 
@@ -104,74 +105,13 @@ def test_start_torchserve_default_service_handler_multi_model(
         "--log-config",
         torchserve.DEFAULT_TS_LOG_FILE,
         "--models",
-        "model.mar"
+        "model=/opt/ml/model"
     ]
 
     subprocess_popen.assert_called_once_with(ts_model_server_cmd)
     sigterm.assert_called_once_with(retrieve.return_value)
 
 
-@patch("subprocess.call")
-@patch("subprocess.Popen")
-@patch("sagemaker_pytorch_serving_container.torchserve._retrieve_ts_server_process")
-@patch("sagemaker_pytorch_serving_container.torchserve._add_sigterm_handler")
-@patch("sagemaker_pytorch_serving_container.torchserve._create_torchserve_config_file")
-@patch("sagemaker_pytorch_serving_container.torchserve._adapt_to_ts_format")
-def test_start_torchserve_custom_handler_service(
-    adapt, create_config, sigterm, retrieve, subprocess_popen, subprocess_call
-):
-    handler_service = Mock()
-
-    torchserve.start_torchserve(handler_service)
-
-    adapt.assert_called_once_with(handler_service)
-
-
-@patch("sagemaker_pytorch_serving_container.torchserve._set_python_path")
-@patch("subprocess.check_call")
-@patch("os.makedirs")
-@patch("os.path.exists", return_value=False)
-def test_adapt_to_ts_format(path_exists, make_dir, subprocess_check_call, set_python_path):
-    handler_service = Mock()
-
-    torchserve._adapt_to_ts_format(handler_service)
-
-    path_exists.assert_called_once_with(torchserve.DEFAULT_TS_MODEL_DIRECTORY)
-    make_dir.assert_called_once_with(torchserve.DEFAULT_TS_MODEL_DIRECTORY)
-
-    model_archiver_cmd = [
-        "torch-model-archiver",
-        "--model-name",
-        torchserve.DEFAULT_TS_MODEL_NAME,
-        "--handler",
-        handler_service,
-        "--export-path",
-        torchserve.DEFAULT_TS_MODEL_DIRECTORY,
-        "--version",
-        "1",
-        "--extra-files",
-        environment.model_dir
-    ]
-
-    subprocess_check_call.assert_called_once_with(model_archiver_cmd)
-    set_python_path.assert_called_once_with()
-
-
-@patch("sagemaker_pytorch_serving_container.torchserve._set_python_path")
-@patch("subprocess.check_call")
-@patch("os.makedirs")
-@patch("os.path.exists", return_value=True)
-def test_adapt_to_ts_format_existing_path(
-    path_exists, make_dir, subprocess_check_call, set_python_path
-):
-    handler_service = Mock()
-
-    torchserve._adapt_to_ts_format(handler_service)
-
-    path_exists.assert_called_once_with(torchserve.DEFAULT_TS_MODEL_DIRECTORY)
-    make_dir.assert_not_called()
-
-
 @patch.dict(os.environ, {torchserve.PYTHON_PATH_ENV: PYTHON_PATH}, clear=True)
 def test_set_existing_python_path():
     torchserve._set_python_path()
@@ -193,7 +133,7 @@ def test_new_python_path():
 @patch("sagemaker_pytorch_serving_container.torchserve._generate_ts_config_properties")
 @patch("sagemaker_inference.utils.write_file")
 def test_create_torchserve_config_file(write_file, generate_ts_config_props):
-    torchserve._create_torchserve_config_file()
+    torchserve._create_torchserve_config_file(torchserve.DEFAULT_HANDLER_SERVICE)
 
     write_file.assert_called_once_with(
         torchserve.TS_CONFIG_FILE, generate_ts_config_props.return_value
@@ -211,7 +151,7 @@ def test_generate_ts_config_properties(env, read_file):
     env.return_value.model_sever_workerse = model_server_workers
     env.return_value.inference_http_port = http_port
 
-    ts_config_properties = torchserve._generate_ts_config_properties()
+    ts_config_properties = torchserve._generate_ts_config_properties(torchserve.DEFAULT_HANDLER_SERVICE)
 
     inference_address = "inference_address=http://0.0.0.0:{}\n".format(http_port)
     server_timeout = "default_response_timeout={}\n".format(model_server_timeout)
@@ -228,7 +168,7 @@ def test_generate_ts_config_properties(env, read_file):
 def test_generate_ts_config_properties_default_workers(env, read_file):
     env.return_value.model_server_workers = None
 
-    ts_config_properties = torchserve._generate_ts_config_properties()
+    ts_config_properties = torchserve._generate_ts_config_properties(torchserve.DEFAULT_HANDLER_SERVICE)
 
     workers = "default_workers_per_model={}".format(None)
 
@@ -244,7 +184,7 @@ def test_generate_ts_config_properties_multi_model(env, read_file):
     env.return_value.model_server_workers = None
 
     torchserve.ENABLE_MULTI_MODEL = True
-    ts_config_properties = torchserve._generate_ts_config_properties()
+    ts_config_properties = torchserve._generate_ts_config_properties(torchserve.DEFAULT_HANDLER_SERVICE)
     torchserve.ENABLE_MULTI_MODEL = False
 
     workers = "default_workers_per_model={}".format(None)
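Note on the renamed mock parameters in the hunks above: stacked @patch decorators are applied bottom-up, so the bottom-most patch target (_adapt_to_ts_format before this change, _set_python_path after it) is the mock that arrives as the first positional argument of each test. A minimal, self-contained sketch of that ordering, using only the standard library and hypothetical patch targets rather than code from this repository:

# Standalone illustration (assumed example, not part of this repo): stacked
# @patch decorators are applied bottom-up, so the bottom-most patch becomes
# the first mock argument handed to the decorated test function.
import os
from unittest.mock import patch

@patch("os.path.exists", return_value=True)    # top decorator    -> second argument
@patch("os.getcwd", return_value="/tmp/fake")  # bottom decorator -> first argument
def demo(getcwd_mock, exists_mock):
    assert os.getcwd() == "/tmp/fake"
    assert os.path.exists("anything")
    getcwd_mock.assert_called_once_with()
    exists_mock.assert_called_once_with("anything")

demo()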