diff --git a/src/sagemaker/serve/builder/model_builder.py b/src/sagemaker/serve/builder/model_builder.py
index b24f30fb3e..1fe75065d5 100644
--- a/src/sagemaker/serve/builder/model_builder.py
+++ b/src/sagemaker/serve/builder/model_builder.py
@@ -169,7 +169,7 @@ class ModelBuilder(Triton, DJL, JumpStart, TGI, Transformers, TensorflowServing,
             in order for model builder to build the artifacts correctly (according
             to the model server). Possible values for this argument are
             ``TORCHSERVE``, ``MMS``, ``TENSORFLOW_SERVING``, ``DJL_SERVING``,
-            ``TRITON``, and``TGI``.
+            ``TRITON``, ``TGI``, and ``TEI``.
         model_metadata (Optional[Dict[str, Any]): Dictionary used to override model metadata.
             Currently, ``HF_TASK`` is overridable for HuggingFace model. HF_TASK should be set
             for new models without task metadata in the Hub, adding unsupported task types will throw
diff --git a/src/sagemaker/serve/builder/tei_builder.py b/src/sagemaker/serve/builder/tei_builder.py
index 50d3866468..6aba3c9da2 100644
--- a/src/sagemaker/serve/builder/tei_builder.py
+++ b/src/sagemaker/serve/builder/tei_builder.py
@@ -25,7 +25,7 @@
     _get_nb_instance,
 )
 from sagemaker.serve.model_server.tgi.prepare import _create_dir_structure
-from sagemaker.serve.utils.predictors import TgiLocalModePredictor
+from sagemaker.serve.utils.predictors import TeiLocalModePredictor
 from sagemaker.serve.utils.types import ModelServer
 from sagemaker.serve.mode.function_pointers import Mode
 from sagemaker.serve.utils.telemetry_logger import _capture_telemetry
@@ -74,16 +74,16 @@ def _prepare_for_mode(self):
     def _get_client_translators(self):
         """Placeholder docstring"""
 
-    def _set_to_tgi(self):
+    def _set_to_tei(self):
         """Placeholder docstring"""
-        if self.model_server != ModelServer.TGI:
+        if self.model_server != ModelServer.TEI:
             messaging = (
                 "HuggingFace Model ID support on model server: "
                 f"{self.model_server} is not currently supported. "
-                f"Defaulting to {ModelServer.TGI}"
+                f"Defaulting to {ModelServer.TEI}"
             )
             logger.warning(messaging)
-            self.model_server = ModelServer.TGI
+            self.model_server = ModelServer.TEI
 
     def _create_tei_model(self, **kwargs) -> Type[Model]:
         """Placeholder docstring"""
@@ -142,7 +142,7 @@ def _tei_model_builder_deploy_wrapper(self, *args, **kwargs) -> Type[PredictorBa
         if self.mode == Mode.LOCAL_CONTAINER:
             timeout = kwargs.get("model_data_download_timeout")
 
-            predictor = TgiLocalModePredictor(
+            predictor = TeiLocalModePredictor(
                 self.modes[str(Mode.LOCAL_CONTAINER)], serializer, deserializer
             )
 
@@ -180,7 +180,9 @@ def _tei_model_builder_deploy_wrapper(self, *args, **kwargs) -> Type[PredictorBa
         if "endpoint_logging" not in kwargs:
             kwargs["endpoint_logging"] = True
-        if not self.nb_instance_type and "instance_type" not in kwargs:
+        if self.nb_instance_type and "instance_type" not in kwargs:
+            kwargs.update({"instance_type": self.nb_instance_type})
+        elif not self.nb_instance_type and "instance_type" not in kwargs:
            raise ValueError(
                "Instance type must be provided when deploying "
                "to SageMaker Endpoint mode."
            )
 
@@ -216,7 +218,7 @@ def _build_for_tei(self):
         """Placeholder docstring"""
         self.secret_key = None
 
-        self._set_to_tgi()
+        self._set_to_tei()
 
         self.pysdk_model = self._build_for_hf_tei()
         return self.pysdk_model
diff --git a/src/sagemaker/serve/mode/local_container_mode.py b/src/sagemaker/serve/mode/local_container_mode.py
index f940e2959c..f040c61c1d 100644
--- a/src/sagemaker/serve/mode/local_container_mode.py
+++ b/src/sagemaker/serve/mode/local_container_mode.py
@@ -21,6 +21,7 @@
 from sagemaker.serve.model_server.djl_serving.server import LocalDJLServing
 from sagemaker.serve.model_server.triton.server import LocalTritonServer
 from sagemaker.serve.model_server.tgi.server import LocalTgiServing
+from sagemaker.serve.model_server.tei.server import LocalTeiServing
 from sagemaker.serve.model_server.multi_model_server.server import LocalMultiModelServer
 from sagemaker.session import Session
 
@@ -69,6 +70,7 @@ def __init__(
         self.container = None
         self.secret_key = None
         self._ping_container = None
+        self._invoke_serving = None
 
     def load(self, model_path: str = None):
         """Placeholder docstring"""
@@ -156,6 +158,19 @@ def create_server(
                 env_vars=env_vars if env_vars else self.env_vars,
             )
             self._ping_container = self._tensorflow_serving_deep_ping
+        elif self.model_server == ModelServer.TEI:
+            tei_serving = LocalTeiServing()
+            tei_serving._start_tei_serving(
+                client=self.client,
+                image=image,
+                model_path=model_path if model_path else self.model_path,
+                secret_key=secret_key,
+                env_vars=env_vars if env_vars else self.env_vars,
+            )
+            tei_serving.schema_builder = self.schema_builder
+            self.container = tei_serving.container
+            self._ping_container = tei_serving._tei_deep_ping
+            self._invoke_serving = tei_serving._invoke_tei_serving
 
         # allow some time for container to be ready
         time.sleep(10)
diff --git a/src/sagemaker/serve/mode/sagemaker_endpoint_mode.py b/src/sagemaker/serve/mode/sagemaker_endpoint_mode.py
index 24acfc6a2f..b8f1d0529b 100644
--- a/src/sagemaker/serve/mode/sagemaker_endpoint_mode.py
+++ b/src/sagemaker/serve/mode/sagemaker_endpoint_mode.py
@@ -6,6 +6,7 @@
 import logging
 from typing import Type
 
+from sagemaker.serve.model_server.tei.server import SageMakerTeiServing
 from sagemaker.serve.model_server.tensorflow_serving.server import SageMakerTensorflowServing
 from sagemaker.session import Session
 from sagemaker.serve.utils.types import ModelServer
@@ -37,6 +38,8 @@ def __init__(self, inference_spec: Type[InferenceSpec], model_server: ModelServe
         self.inference_spec = inference_spec
         self.model_server = model_server
 
+        self._tei_serving = SageMakerTeiServing()
+
     def load(self, model_path: str):
         """Placeholder docstring"""
         path = Path(model_path)
@@ -66,8 +69,9 @@ def prepare(
                 + "session to be created or supply `sagemaker_session` into @serve.invoke."
            ) from e
 
+        upload_artifacts = None
         if self.model_server == ModelServer.TORCHSERVE:
-            return self._upload_torchserve_artifacts(
+            upload_artifacts = self._upload_torchserve_artifacts(
                 model_path=model_path,
                 sagemaker_session=sagemaker_session,
                 secret_key=secret_key,
@@ -76,7 +80,7 @@
             )
 
         if self.model_server == ModelServer.TRITON:
-            return self._upload_triton_artifacts(
+            upload_artifacts = self._upload_triton_artifacts(
                 model_path=model_path,
                 sagemaker_session=sagemaker_session,
                 secret_key=secret_key,
@@ -85,7 +89,7 @@
             )
 
         if self.model_server == ModelServer.DJL_SERVING:
-            return self._upload_djl_artifacts(
+            upload_artifacts = self._upload_djl_artifacts(
                 model_path=model_path,
                 sagemaker_session=sagemaker_session,
                 s3_model_data_url=s3_model_data_url,
@@ -93,7 +97,7 @@
             )
 
         if self.model_server == ModelServer.TGI:
-            return self._upload_tgi_artifacts(
+            upload_artifacts = self._upload_tgi_artifacts(
                 model_path=model_path,
                 sagemaker_session=sagemaker_session,
                 s3_model_data_url=s3_model_data_url,
@@ -102,7 +106,7 @@
             )
 
         if self.model_server == ModelServer.MMS:
-            return self._upload_server_artifacts(
+            upload_artifacts = self._upload_server_artifacts(
                 model_path=model_path,
                 sagemaker_session=sagemaker_session,
                 s3_model_data_url=s3_model_data_url,
@@ -110,7 +114,7 @@
             )
 
         if self.model_server == ModelServer.TENSORFLOW_SERVING:
-            return self._upload_tensorflow_serving_artifacts(
+            upload_artifacts = self._upload_tensorflow_serving_artifacts(
                 model_path=model_path,
                 sagemaker_session=sagemaker_session,
                 secret_key=secret_key,
@@ -118,4 +122,15 @@
                 image=image,
             )
 
+        if self.model_server == ModelServer.TEI:
+            upload_artifacts = self._tei_serving._upload_tei_artifacts(
+                model_path=model_path,
+                sagemaker_session=sagemaker_session,
+                s3_model_data_url=s3_model_data_url,
+                image=image,
+            )
+
+        if upload_artifacts:
+            return upload_artifacts
+
         raise ValueError("%s model server is not supported" % self.model_server)
diff --git a/src/sagemaker/serve/model_server/tei/__init__.py b/src/sagemaker/serve/model_server/tei/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/src/sagemaker/serve/model_server/tei/server.py b/src/sagemaker/serve/model_server/tei/server.py
new file mode 100644
index 0000000000..67fca0e847
--- /dev/null
+++ b/src/sagemaker/serve/model_server/tei/server.py
@@ -0,0 +1,158 @@
+"""Module for Local TEI Serving"""
+
+from __future__ import absolute_import
+
+import requests
+import logging
+from pathlib import Path
+from docker.types import DeviceRequest
+from sagemaker import Session, fw_utils
+from sagemaker.serve.utils.exceptions import LocalModelInvocationException
+from sagemaker.base_predictor import PredictorBase
+from sagemaker.s3_utils import determine_bucket_and_prefix, parse_s3_url, s3_path_join
+from sagemaker.s3 import S3Uploader
+from sagemaker.local.utils import get_docker_host
+
+
+MODE_DIR_BINDING = "/opt/ml/model/"
+_SHM_SIZE = "2G"
+_DEFAULT_ENV_VARS = {
+    "TRANSFORMERS_CACHE": "/opt/ml/model/",
+    "HUGGINGFACE_HUB_CACHE": "/opt/ml/model/",
+}
+
+logger = logging.getLogger(__name__)
+
+
+class LocalTeiServing:
+    """LocalTeiServing class"""
+
+    def _start_tei_serving(
+        self, client: object, image: str, model_path: str, secret_key: str, env_vars: dict
+    ):
+        """Starts a local TEI serving container.
+
+        Args:
+            client: Docker client
+            image: Image to use
+            model_path: Path to the model
+            secret_key: Secret key to use for authentication
+            env_vars: Environment variables to set
+        """
+        if env_vars and secret_key:
+            env_vars["SAGEMAKER_SERVE_SECRET_KEY"] = secret_key
+
+        self.container = client.containers.run(
+            image,
+            shm_size=_SHM_SIZE,
+            device_requests=[DeviceRequest(count=-1, capabilities=[["gpu"]])],
+            network_mode="host",
+            detach=True,
+            auto_remove=True,
+            volumes={
+                Path(model_path).joinpath("code"): {
+                    "bind": MODE_DIR_BINDING,
+                    "mode": "rw",
+                },
+            },
+            environment=_update_env_vars(env_vars),
+        )
+
+    def _invoke_tei_serving(self, request: object, content_type: str, accept: str):
+        """Invokes a local TEI serving container.
+
+        Args:
+            request: Request to send
+            content_type: Content type to use
+            accept: Accept header to use
+        """
+        try:
+            response = requests.post(
+                f"http://{get_docker_host()}:8080/invocations",
+                data=request,
+                headers={"Content-Type": content_type, "Accept": accept},
+                timeout=600,
+            )
+            response.raise_for_status()
+            return response.content
+        except Exception as e:
+            raise Exception("Unable to send request to the local container server") from e
+
+    def _tei_deep_ping(self, predictor: PredictorBase):
+        """Checks if the local TEI serving container is up and running.
+
+        If the container is not up and running, it will raise an exception.
+        """
+        response = None
+        try:
+            response = predictor.predict(self.schema_builder.sample_input)
+            return (True, response)
+        # pylint: disable=broad-except
+        except Exception as e:
+            if "422 Client Error: Unprocessable Entity for url" in str(e):
+                raise LocalModelInvocationException(str(e))
+            return (False, response)
+
+
+class SageMakerTeiServing:
+    """SageMakerTeiServing class"""
+
+    def _upload_tei_artifacts(
+        self,
+        model_path: str,
+        sagemaker_session: Session,
+        s3_model_data_url: str = None,
+        image: str = None,
+        env_vars: dict = None,
+    ):
+        """Uploads the model artifacts to S3.
+
+        Args:
+            model_path: Path to the model
+            sagemaker_session: SageMaker session
+            s3_model_data_url: S3 model data URL
+            image: Image to use
+            env_vars: Environment variables to set
+        """
+        if s3_model_data_url:
+            bucket, key_prefix = parse_s3_url(url=s3_model_data_url)
+        else:
+            bucket, key_prefix = None, None
+
+        code_key_prefix = fw_utils.model_code_key_prefix(key_prefix, None, image)
+
+        bucket, code_key_prefix = determine_bucket_and_prefix(
+            bucket=bucket, key_prefix=code_key_prefix, sagemaker_session=sagemaker_session
+        )
+
+        code_dir = Path(model_path).joinpath("code")
+
+        s3_location = s3_path_join("s3://", bucket, code_key_prefix, "code")
+
+        logger.debug("Uploading TEI Model Resources uncompressed to: %s", s3_location)
+
+        model_data_url = S3Uploader.upload(
+            str(code_dir),
+            s3_location,
+            None,
+            sagemaker_session,
+        )
+
+        model_data = {
+            "S3DataSource": {
+                "CompressionType": "None",
+                "S3DataType": "S3Prefix",
+                "S3Uri": model_data_url + "/",
+            }
+        }
+
+        return (model_data, _update_env_vars(env_vars))
+
+
+def _update_env_vars(env_vars: dict) -> dict:
+    """Placeholder docstring"""
+    updated_env_vars = {}
+    updated_env_vars.update(_DEFAULT_ENV_VARS)
+    if env_vars:
+        updated_env_vars.update(env_vars)
+    return updated_env_vars
diff --git a/src/sagemaker/serve/utils/predictors.py b/src/sagemaker/serve/utils/predictors.py
index 866167c2c6..25a995eb48 100644
--- a/src/sagemaker/serve/utils/predictors.py
+++ b/src/sagemaker/serve/utils/predictors.py
@@ -209,6 +209,49 @@ def delete_predictor(self):
         self._mode_obj.destroy_server()
 
 
+class TeiLocalModePredictor(PredictorBase):
+    """Lightweight TEI predictor for local deployment in LOCAL_CONTAINER mode"""
+
+    def __init__(
+        self,
+        mode_obj: Type[LocalContainerMode],
+        serializer=JSONSerializer(),
+        deserializer=JSONDeserializer(),
+    ):
+        self._mode_obj = mode_obj
+        self.serializer = serializer
+        self.deserializer = deserializer
+
+    def predict(self, data):
+        """Placeholder docstring"""
+        return [
+            self.deserializer.deserialize(
+                io.BytesIO(
+                    self._mode_obj._invoke_serving(
+                        self.serializer.serialize(data),
+                        self.content_type,
+                        self.deserializer.ACCEPT[0],
+                    )
+                ),
+                self.content_type,
+            )
+        ]
+
+    @property
+    def content_type(self):
+        """The MIME type of the data sent to the inference endpoint."""
+        return self.serializer.CONTENT_TYPE
+
+    @property
+    def accept(self):
+        """The content type(s) that are expected from the inference endpoint."""
+        return self.deserializer.ACCEPT
+
+    def delete_predictor(self):
+        """Shut down and remove the container that you created in LOCAL_CONTAINER mode"""
+        self._mode_obj.destroy_server()
+
+
 class TensorflowServingLocalPredictor(PredictorBase):
     """Lightweight predictor for local deployment in LOCAL_CONTAINER modes"""
diff --git a/src/sagemaker/serve/utils/telemetry_logger.py b/src/sagemaker/serve/utils/telemetry_logger.py
index 8983a4b5c9..99aeb4ff26 100644
--- a/src/sagemaker/serve/utils/telemetry_logger.py
+++ b/src/sagemaker/serve/utils/telemetry_logger.py
@@ -58,6 +58,7 @@
     str(ModelServer.DJL_SERVING): 4,
     str(ModelServer.TRITON): 5,
     str(ModelServer.TGI): 6,
+    str(ModelServer.TEI): 7,
 }
 
 MLFLOW_MODEL_PATH_CODE = {
diff --git a/src/sagemaker/serve/utils/types.py b/src/sagemaker/serve/utils/types.py
index 661093f249..3ac80aa7ea 100644
--- a/src/sagemaker/serve/utils/types.py
+++ b/src/sagemaker/serve/utils/types.py
@@ -18,6 +18,7 @@ def __str__(self):
     DJL_SERVING = 4
     TRITON = 5
     TGI = 6
+    TEI = 7
 
 
 class _DjlEngine(Enum):
diff --git a/tests/integ/sagemaker/serve/test_serve_tei.py b/tests/integ/sagemaker/serve/test_serve_tei.py
index 19ee0b57de..5cf1a3635c 100644
--- a/tests/integ/sagemaker/serve/test_serve_tei.py
+++ b/tests/integ/sagemaker/serve/test_serve_tei.py
@@ -28,47 +28,14 @@
 
 logger = logging.getLogger(__name__)
 
-sample_input = {
-    "inputs": "The man worked as a [MASK].",
-}
-
-loaded_response = [
-    {
-        "score": 0.0974755585193634,
-        "token": 10533,
-        "token_str": "carpenter",
-        "sequence": "the man worked as a carpenter.",
-    },
-    {
-        "score": 0.052383411675691605,
-        "token": 15610,
-        "token_str": "waiter",
-        "sequence": "the man worked as a waiter.",
-    },
-    {
-        "score": 0.04962712526321411,
-        "token": 13362,
-        "token_str": "barber",
-        "sequence": "the man worked as a barber.",
-    },
-    {
-        "score": 0.0378861166536808,
-        "token": 15893,
-        "token_str": "mechanic",
-        "sequence": "the man worked as a mechanic.",
-    },
-    {
-        "score": 0.037680838257074356,
-        "token": 18968,
-        "token_str": "salesman",
-        "sequence": "the man worked as a salesman.",
-    },
-]
+sample_input = {"inputs": "What is Deep Learning?"}
+
+loaded_response = []
 
 
 @pytest.fixture
 def model_input():
-    return {"inputs": "The man worked as a [MASK]."}
+    return {"inputs": "What is Deep Learning?"}
 
 
 @pytest.fixture
@@ -77,9 +44,6 @@ def model_builder_model_schema_builder():
     return ModelBuilder(
         model_path=HF_DIR,
         model="BAAI/bge-m3",
         schema_builder=SchemaBuilder(sample_input, loaded_response),
-        model_metadata={
-            "HF_TASK": "sentence-similarity",
-        },
     )
 
diff --git a/tests/unit/sagemaker/serve/builder/test_tei_builder.py b/tests/unit/sagemaker/serve/builder/test_tei_builder.py
index 79a8f23324..4a75174bfc 100644
--- a/tests/unit/sagemaker/serve/builder/test_tei_builder.py
+++ b/tests/unit/sagemaker/serve/builder/test_tei_builder.py
@@ -18,7 +18,7 @@
 from sagemaker.serve.mode.function_pointers import Mode
 from tests.unit.sagemaker.serve.constants import MOCK_VPC_CONFIG
 
-from sagemaker.serve.utils.predictors import TgiLocalModePredictor
+from sagemaker.serve.utils.predictors import TeiLocalModePredictor
 
 mock_model_id = "bert-base-uncased"
 mock_prompt = "The man worked as a [MASK]."
@@ -96,7 +96,7 @@ def test_build_deploy_for_tei_local_container_and_remote_container(
         assert model.vpc_config == MOCK_VPC_CONFIG
         assert builder.env_vars["MODEL_LOADING_TIMEOUT"] == "1800"
 
-        assert isinstance(predictor, TgiLocalModePredictor)
+        assert isinstance(predictor, TeiLocalModePredictor)
 
         assert builder.nb_instance_type == "ml.g5.24xlarge"
 
@@ -139,7 +139,7 @@ def test_image_uri_override(
         assert builder.image_uri == MOCK_IMAGE_CONFIG
         assert builder.env_vars["MODEL_LOADING_TIMEOUT"] == "1800"
 
-        assert isinstance(predictor, TgiLocalModePredictor)
+        assert isinstance(predictor, TeiLocalModePredictor)
 
         assert builder.nb_instance_type == "ml.g5.24xlarge"
 
diff --git a/tests/unit/sagemaker/serve/model_server/tei/__init__.py b/tests/unit/sagemaker/serve/model_server/tei/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/unit/sagemaker/serve/model_server/tei/test_server.py b/tests/unit/sagemaker/serve/model_server/tei/test_server.py
new file mode 100644
index 0000000000..16dcf12b5a
--- /dev/null
+++ b/tests/unit/sagemaker/serve/model_server/tei/test_server.py
@@ -0,0 +1,150 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+#     http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+from __future__ import absolute_import
+
+from pathlib import PosixPath
+from unittest import TestCase
+from unittest.mock import Mock, patch
+
+from docker.types import DeviceRequest
+from sagemaker.serve.model_server.tei.server import LocalTeiServing, SageMakerTeiServing
+from sagemaker.serve.utils.exceptions import LocalModelInvocationException
+
+TEI_IMAGE = (
+    "246618743249.dkr.ecr.us-west-2.amazonaws.com/tei:2.0.1-tei1.2.3-gpu-py310-cu122-ubuntu22.04"
+)
+MODEL_PATH = "model_path"
+ENV_VAR = {"KEY": "VALUE"}
+PAYLOAD = {
+    "inputs": {
+        "sourceSentence": "How cute your dog is!",
+        "sentences": ["The mitochondria is the powerhouse of the cell.", "Your dog is so cute."],
+    }
+}
+S3_URI = "s3://mock_model_data_uri"
+SECRET_KEY = "secret_key"
+INFER_RESPONSE = []
+
+
+class TeiServerTests(TestCase):
+    @patch("sagemaker.serve.model_server.tei.server.requests")
+    def test_start_invoke_destroy_local_tei_server(self, mock_requests):
+        mock_container = Mock()
+        mock_docker_client = Mock()
+        mock_docker_client.containers.run.return_value = mock_container
+
+        local_tei_server = LocalTeiServing()
+        mock_schema_builder = Mock()
+        mock_schema_builder.input_serializer.serialize.return_value = PAYLOAD
+        local_tei_server.schema_builder = mock_schema_builder
+
+        local_tei_server._start_tei_serving(
+            client=mock_docker_client,
+            model_path=MODEL_PATH,
+            secret_key=SECRET_KEY,
+            image=TEI_IMAGE,
+            env_vars=ENV_VAR,
+        )
+
+        mock_docker_client.containers.run.assert_called_once_with(
+            TEI_IMAGE,
+            shm_size="2G",
+            device_requests=[DeviceRequest(count=-1, capabilities=[["gpu"]])],
+            network_mode="host",
+            detach=True,
+            auto_remove=True,
+            volumes={PosixPath("model_path/code"): {"bind": "/opt/ml/model/", "mode": "rw"}},
+            environment={
+                "TRANSFORMERS_CACHE": "/opt/ml/model/",
+                "HUGGINGFACE_HUB_CACHE": "/opt/ml/model/",
+                "KEY": "VALUE",
+                "SAGEMAKER_SERVE_SECRET_KEY": "secret_key",
+            },
+        )
+
+        mock_response = Mock()
+        mock_requests.post.side_effect = lambda *args, **kwargs: mock_response
+        mock_response.content = INFER_RESPONSE
+
+        res = local_tei_server._invoke_tei_serving(
+            request=PAYLOAD, content_type="application/json", accept="application/json"
+        )
+
+        self.assertEqual(res, INFER_RESPONSE)
+
+    def test_tei_deep_ping(self):
+        mock_predictor = Mock()
+        mock_response = Mock()
+        mock_schema_builder = Mock()
+
+        mock_predictor.predict.side_effect = lambda *args, **kwargs: mock_response
+        mock_schema_builder.sample_input = PAYLOAD
+
+        local_tei_server = LocalTeiServing()
+        local_tei_server.schema_builder = mock_schema_builder
+        res = local_tei_server._tei_deep_ping(mock_predictor)
+
+        self.assertEqual(res, (True, mock_response))
+
+    def test_tei_deep_ping_invoke_ex(self):
+        mock_predictor = Mock()
+        mock_schema_builder = Mock()
+
+        mock_predictor.predict.side_effect = lambda *args, **kwargs: exec(
+            'raise(ValueError("422 Client Error: Unprocessable Entity for url:"))'
+        )
+        mock_schema_builder.sample_input = PAYLOAD
+
+        local_tei_server = LocalTeiServing()
+        local_tei_server.schema_builder = mock_schema_builder
+
+        self.assertRaises(
+            LocalModelInvocationException, lambda: local_tei_server._tei_deep_ping(mock_predictor)
+        )
+
+    def test_tei_deep_ping_ex(self):
+        mock_predictor = Mock()
+        mock_predictor.predict.side_effect = Exception("mock invocation error")
+
+        local_tei_server = LocalTeiServing()
+        local_tei_server.schema_builder = Mock()
+        res = local_tei_server._tei_deep_ping(mock_predictor)
+
+        self.assertEqual(res, (False, None))
+
+    @patch("sagemaker.serve.model_server.tei.server.S3Uploader")
+    def test_upload_artifacts_sagemaker_tei_server(self, mock_uploader):
+        mock_session = Mock()
+        mock_uploader.upload.side_effect = (
+            lambda *args, **kwargs: "s3://sagemaker-us-west-2-123456789123/tei-2024-05-20-16-05-36-027/code"
+        )
+
+        s3_upload_path, env_vars = SageMakerTeiServing()._upload_tei_artifacts(
+            model_path=MODEL_PATH,
+            sagemaker_session=mock_session,
+            s3_model_data_url=S3_URI,
+            image=TEI_IMAGE,
+        )
+
+        mock_uploader.upload.assert_called_once()
+        self.assertEqual(
+            s3_upload_path,
+            {
+                "S3DataSource": {
+                    "CompressionType": "None",
+                    "S3DataType": "S3Prefix",
+                    "S3Uri": "s3://sagemaker-us-west-2-123456789123/tei-2024-05-20-16-05-36-027/code/",
+                }
+            },
+        )
+        self.assertIsNotNone(env_vars)
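
Usage sketch (reviewer note, not part of the patch): the snippet below shows how the new TEI path would be exercised end to end, mirroring the integration test above. It assumes a Docker host with a GPU for LOCAL_CONTAINER mode and network access to pull the image and model weights; the sample input/output pair is only used to seed the SchemaBuilder, and the empty response list matches the integration test fixture.

from sagemaker.serve.builder.model_builder import ModelBuilder
from sagemaker.serve.builder.schema_builder import SchemaBuilder
from sagemaker.serve.mode.function_pointers import Mode
from sagemaker.serve.utils.types import ModelServer

# Sample request/response pair used only to derive the (de)serialization schema.
sample_input = {"inputs": "What is Deep Learning?"}
loaded_response = []

model_builder = ModelBuilder(
    model="BAAI/bge-m3",  # embedding model id from the integration test
    schema_builder=SchemaBuilder(sample_input, loaded_response),
    model_server=ModelServer.TEI,  # enum value added by this patch
    mode=Mode.LOCAL_CONTAINER,  # served by LocalTeiServing in a local container
)

model = model_builder.build()
predictor = model.deploy()  # a TeiLocalModePredictor in LOCAL_CONTAINER mode
embeddings = predictor.predict(sample_input)
predictor.delete_predictor()  # stops and removes the local TEI container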