diff --git a/src/sagemaker_pytorch_serving_container/torchserve.py b/src/sagemaker_pytorch_serving_container/torchserve.py
index 048a06b4..529e4c85 100644
--- a/src/sagemaker_pytorch_serving_container/torchserve.py
+++ b/src/sagemaker_pytorch_serving_container/torchserve.py
@@ -136,18 +136,18 @@ def _generate_ts_config_properties(handler_service):
 
     ts_env = ts_environment.TorchServeEnvironment()
     if ts_env.is_env_set() and not ENABLE_MULTI_MODEL:
-        models_string = f'''{{\\
-        "{DEFAULT_TS_MODEL_NAME}": {{\\
-        "1.0": {{\\
-        "defaultVersion": true,\\
-        "marName": "{DEFAULT_TS_MODEL_NAME}.mar",\\
-        "minWorkers": {ts_env._min_workers},\\
-        "maxWorkers": {ts_env._max_workers},\\
-        "batchSize": {ts_env._batch_size},\\
-        "maxBatchDelay": {ts_env._max_batch_delay},\\
-        "responseTimeout": {ts_env._response_timeout}\\
-        }}\\
-        }}\\
+        models_string = f'''{{\\\n
+        "{DEFAULT_TS_MODEL_NAME}": {{\\\n
+        "1.0": {{\\\n
+        "defaultVersion": true,\\\n
+        "marName": "{DEFAULT_TS_MODEL_NAME}.mar",\\\n
+        "minWorkers": {ts_env._min_workers},\\\n
+        "maxWorkers": {ts_env._max_workers},\\\n
+        "batchSize": {ts_env._batch_size},\\\n
+        "maxBatchDelay": {ts_env._max_batch_delay},\\\n
+        "responseTimeout": {ts_env._response_timeout}\\\n
+        }}\\\n
+        }}\\\n
         }}'''
         user_defined_configuration["models"] = models_string
         logger.warn("Sagemaker TS environment variables have been set and will be used "
diff --git a/test/container/1.10.2/Dockerfile.dlc.cpu b/test/container/1.10.2/Dockerfile.dlc.cpu
index df81de2f..d3c7315c 100644
--- a/test/container/1.10.2/Dockerfile.dlc.cpu
+++ b/test/container/1.10.2/Dockerfile.dlc.cpu
@@ -1,9 +1,6 @@
 ARG region
 FROM 763104351884.dkr.ecr.$region.amazonaws.com/pytorch-inference:1.10.2-cpu-py38-ubuntu20.04-sagemaker
 
-RUN pip uninstall torchserve -y && \
-    pip install torchserve-nightly==2022.3.23.post2
-
 COPY dist/sagemaker_pytorch_inference-*.tar.gz /sagemaker_pytorch_inference.tar.gz
 RUN pip install --upgrade --no-cache-dir /sagemaker_pytorch_inference.tar.gz && \
     rm /sagemaker_pytorch_inference.tar.gz
diff --git a/test/container/1.10.2/Dockerfile.dlc.gpu b/test/container/1.10.2/Dockerfile.dlc.gpu
index 2970d718..192b6662 100644
--- a/test/container/1.10.2/Dockerfile.dlc.gpu
+++ b/test/container/1.10.2/Dockerfile.dlc.gpu
@@ -1,9 +1,6 @@
 ARG region
 FROM 763104351884.dkr.ecr.$region.amazonaws.com/pytorch-inference:1.10.2-gpu-py38-cu113-ubuntu20.04-sagemaker
 
-RUN pip uninstall torchserve -y && \
-    pip install torchserve-nightly==2022.3.23.post2
-
 COPY dist/sagemaker_pytorch_inference-*.tar.gz /sagemaker_pytorch_inference.tar.gz
 RUN pip install --upgrade --no-cache-dir /sagemaker_pytorch_inference.tar.gz && \
     rm /sagemaker_pytorch_inference.tar.gz