
Commit 516c2a6

Litellm remove circular imports (#7232)
* fix(utils.py): initial commit to remove circular imports - moves llmproviders to utils.py
* fix(router.py): fix 'litellm.EmbeddingResponse' import from router.py
* refactor: fix litellm.ModelResponse import on pass through endpoints
* refactor(litellm_logging.py): fix circular import for custom callbacks literal
* fix(factory.py): fix circular imports inside prompt factory
* fix(cost_calculator.py): fix circular import for 'litellm.Usage'
* fix(proxy_server.py): fix potential circular import with `litellm.Router`
* fix(proxy/utils.py): fix potential circular import in `litellm.Router`
* fix: remove circular imports in 'auth_checks' and 'guardrails/'
* fix(prompt_injection_detection.py): fix router import
* fix(vertex_passthrough_logging_handler.py): fix potential circular imports in vertex pass through
* fix(anthropic_pass_through_logging_handler.py): fix potential circular imports
* fix(slack_alerting.py + ollama_chat.py): fix modelresponse import
* fix(base.py): fix potential circular import
* fix(handler.py): fix potential circular ref in codestral + cohere handlers
* fix(azure.py): fix potential circular imports
* fix(gpt_transformation.py): fix modelresponse import
* fix(litellm_logging.py): add logging base class - simplified typing makes it easy for other files to type-check the logging obj without introducing circular imports
* fix(azure_ai/embed): fix potential circular import on handler.py
* fix(databricks/): fix potential circular imports in databricks/
* fix(vertex_ai/): fix potential circular imports on vertex ai embeddings
* fix(vertex_ai/image_gen): fix import
* fix(watsonx + bedrock): clean up imports
* refactor(anthropic-pass-through + petals): clean up imports
* refactor(huggingface/): clean up imports
* fix(ollama + clarifai): clean up circular imports
* fix(openai_like/): fix import
* fix(openai_like/): fix embedding handler, clean up imports
* refactor(openai.py): clean up imports
* fix(sagemaker/transformation.py): fix import
* ci(config.yml): add circular import test to CI/CD
1 parent 0dbf712 commit 516c2a6
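
The recurring fix throughout this commit is to stop referencing classes through the `litellm` package root (e.g. `litellm.ModelResponse`, `litellm.Usage`, `litellm.Router`) and instead import them directly from the leaf module that defines them. A minimal sketch of the pattern, using a hypothetical `translate` helper that is not part of this diff:

```python
# Before: the annotation forces `litellm/__init__.py` to be fully
# importable first, which creates a cycle if the package root
# (directly or transitively) imports this module.
#
#   import litellm
#   def translate(response: "litellm.ModelResponse") -> dict: ...

# After: depend only on the leaf module that defines the class.
from litellm.types.utils import ModelResponse


def translate(response: ModelResponse) -> dict:
    # Hypothetical helper for illustration only; ModelResponse is a
    # pydantic model, so it serializes via model_dump().
    return response.model_dump()
```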

File tree

48 files changed: +489, -256 lines


.circleci/config.yml

Lines changed: 1 addition & 0 deletions
@@ -817,6 +817,7 @@ jobs:
       - run: python ./tests/documentation_tests/test_api_docs.py
       - run: python ./tests/code_coverage_tests/ensure_async_clients_test.py
       - run: python ./tests/code_coverage_tests/enforce_llms_folder_style.py
+      - run: python ./tests/documentation_tests/test_circular_imports.py
       - run: helm lint ./deploy/charts/litellm-helm

   db_migration_disable_update_check:
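
The new CI step runs `tests/documentation_tests/test_circular_imports.py`, whose contents are not shown in this diff. A plausible minimal sketch of such a check (a hypothetical stand-in, not the repo's actual test) is to import every submodule and let any import-time cycle surface as an error:

```python
# Hypothetical sketch only -- not the repo's test_circular_imports.py.
import importlib
import pkgutil

import litellm


def test_no_circular_imports() -> None:
    # Importing every module in the package makes any import-time cycle
    # fail loudly; a real check would likely skip modules guarded by
    # optional dependencies.
    for module_info in pkgutil.walk_packages(litellm.__path__, prefix="litellm."):
        importlib.import_module(module_info.name)
```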

enterprise/enterprise_hooks/secret_detection.py

Lines changed: 0 additions & 3 deletions
@@ -474,12 +474,9 @@ async def async_pre_call_hook(
         from detect_secrets import SecretsCollection
         from detect_secrets.settings import default_settings

-        print("INSIDE SECRET DETECTION PRE-CALL HOOK!")
-
         if await self.should_run_check(user_api_key_dict) is False:
             return

-        print("RUNNING CHECK!")
         if "messages" in data and isinstance(data["messages"], list):
             for message in data["messages"]:
                 if "content" in message and isinstance(message["content"], str):

litellm/__init__.py

Lines changed: 1 addition & 66 deletions
@@ -32,7 +32,7 @@
     KeyManagementSettings,
     LiteLLM_UpperboundKeyGenerateParams,
 )
-from litellm.types.utils import StandardKeyGenerationConfig
+from litellm.types.utils import StandardKeyGenerationConfig, LlmProviders
 import httpx
 import dotenv
 from enum import Enum
@@ -838,71 +838,6 @@ def add_known_models():
     )


-class LlmProviders(str, Enum):
-    OPENAI = "openai"
-    OPENAI_LIKE = "openai_like"  # embedding only
-    JINA_AI = "jina_ai"
-    XAI = "xai"
-    CUSTOM_OPENAI = "custom_openai"
-    TEXT_COMPLETION_OPENAI = "text-completion-openai"
-    COHERE = "cohere"
-    COHERE_CHAT = "cohere_chat"
-    CLARIFAI = "clarifai"
-    ANTHROPIC = "anthropic"
-    ANTHROPIC_TEXT = "anthropic_text"
-    REPLICATE = "replicate"
-    HUGGINGFACE = "huggingface"
-    TOGETHER_AI = "together_ai"
-    OPENROUTER = "openrouter"
-    VERTEX_AI = "vertex_ai"
-    VERTEX_AI_BETA = "vertex_ai_beta"
-    GEMINI = "gemini"
-    AI21 = "ai21"
-    BASETEN = "baseten"
-    AZURE = "azure"
-    AZURE_TEXT = "azure_text"
-    AZURE_AI = "azure_ai"
-    SAGEMAKER = "sagemaker"
-    SAGEMAKER_CHAT = "sagemaker_chat"
-    BEDROCK = "bedrock"
-    VLLM = "vllm"
-    NLP_CLOUD = "nlp_cloud"
-    PETALS = "petals"
-    OOBABOOGA = "oobabooga"
-    OLLAMA = "ollama"
-    OLLAMA_CHAT = "ollama_chat"
-    DEEPINFRA = "deepinfra"
-    PERPLEXITY = "perplexity"
-    MISTRAL = "mistral"
-    GROQ = "groq"
-    NVIDIA_NIM = "nvidia_nim"
-    CEREBRAS = "cerebras"
-    AI21_CHAT = "ai21_chat"
-    VOLCENGINE = "volcengine"
-    CODESTRAL = "codestral"
-    TEXT_COMPLETION_CODESTRAL = "text-completion-codestral"
-    DEEPSEEK = "deepseek"
-    SAMBANOVA = "sambanova"
-    MARITALK = "maritalk"
-    VOYAGE = "voyage"
-    CLOUDFLARE = "cloudflare"
-    XINFERENCE = "xinference"
-    FIREWORKS_AI = "fireworks_ai"
-    FRIENDLIAI = "friendliai"
-    WATSONX = "watsonx"
-    WATSONX_TEXT = "watsonx_text"
-    TRITON = "triton"
-    PREDIBASE = "predibase"
-    DATABRICKS = "databricks"
-    EMPOWER = "empower"
-    GITHUB = "github"
-    CUSTOM = "custom"
-    LITELLM_PROXY = "litellm_proxy"
-    HOSTED_VLLM = "hosted_vllm"
-    LM_STUDIO = "lm_studio"
-    GALADRIEL = "galadriel"
-
 provider_list: List[Union[LlmProviders, str]] = list(LlmProviders)

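
Because `litellm/__init__.py` now re-imports the enum from `litellm.types.utils`, both import paths should keep resolving to the same object, so existing callers of `litellm.LlmProviders` presumably continue to work. A quick sanity check, assuming the re-export shown in the hunk above:

```python
import litellm
from litellm.types.utils import LlmProviders

# Same enum whether accessed via the package root or the leaf module.
assert litellm.LlmProviders is LlmProviders
assert LlmProviders.OPENAI.value == "openai"
```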

litellm/adapters/anthropic_adapter.py

Lines changed: 2 additions & 2 deletions
@@ -18,7 +18,7 @@
     AnthropicResponse,
     ContentBlockDelta,
 )
-from litellm.types.utils import AdapterCompletionStreamWrapper
+from litellm.types.utils import AdapterCompletionStreamWrapper, ModelResponse


 class AnthropicAdapter(CustomLogger):
@@ -41,7 +41,7 @@ def translate_completion_input_params(
         return translated_body

     def translate_completion_output_params(
-        self, response: litellm.ModelResponse
+        self, response: ModelResponse
     ) -> Optional[AnthropicResponse]:

        return litellm.AnthropicExperimentalPassThroughConfig().translate_openai_response_to_anthropic(

litellm/cost_calculator.py

Lines changed: 2 additions & 2 deletions
@@ -484,15 +484,15 @@ def completion_cost( # noqa: PLR0915
     completion_characters: Optional[int] = None
     cache_creation_input_tokens: Optional[int] = None
     cache_read_input_tokens: Optional[int] = None
-    cost_per_token_usage_object: Optional[litellm.Usage] = _get_usage_object(
+    cost_per_token_usage_object: Optional[Usage] = _get_usage_object(
         completion_response=completion_response
     )
     if completion_response is not None and (
         isinstance(completion_response, BaseModel)
         or isinstance(completion_response, dict)
     ):  # tts returns a custom class

-        usage_obj: Optional[Union[dict, litellm.Usage]] = completion_response.get(  # type: ignore
+        usage_obj: Optional[Union[dict, Usage]] = completion_response.get(  # type: ignore
             "usage", {}
         )
         if isinstance(usage_obj, BaseModel) and not isinstance(

litellm/integrations/SlackAlerting/slack_alerting.py

Lines changed: 2 additions & 1 deletion
@@ -39,6 +39,7 @@
     VirtualKeyEvent,
     WebhookEvent,
 )
+from litellm.router import Router
 from litellm.types.integrations.slack_alerting import *
 from litellm.types.router import LiteLLM_Params

@@ -93,7 +94,7 @@ def update_values(
         alert_types: Optional[List[AlertType]] = None,
         alert_to_webhook_url: Optional[Dict[AlertType, Union[List[str], str]]] = None,
         alerting_args: Optional[Dict] = None,
-        llm_router: Optional[litellm.Router] = None,
+        llm_router: Optional[Router] = None,
     ):
         if alerting is not None:
             self.alerting = alerting

litellm/litellm_core_utils/litellm_logging.py

Lines changed: 5 additions & 3 deletions
@@ -18,6 +18,7 @@

 import litellm
 from litellm import (
+    _custom_logger_compatible_callbacks_literal,
     json_logs,
     log_raw_request_response,
     turn_off_message_logging,
@@ -41,6 +42,7 @@
     CallTypes,
     EmbeddingResponse,
     ImageResponse,
+    LiteLLMLoggingBaseClass,
     ModelResponse,
     StandardCallbackDynamicParams,
     StandardLoggingAdditionalHeaders,
@@ -190,7 +192,7 @@ def set_cache(self, credentials: dict, service_name: str, logging_obj: Any) -> N
 in_memory_dynamic_logger_cache = DynamicLoggingCache()


-class Logging:
+class Logging(LiteLLMLoggingBaseClass):
     global supabaseClient, promptLayerLogger, weightsBiasesLogger, logfireLogger, capture_exception, add_breadcrumb, lunaryLogger, logfireLogger, prometheusLogger, slack_app
     custom_pricing: bool = False
     stream_options = None
@@ -2142,7 +2144,7 @@ def set_callbacks(callback_list, function_id=None): # noqa: PLR0915


 def _init_custom_logger_compatible_class(  # noqa: PLR0915
-    logging_integration: litellm._custom_logger_compatible_callbacks_literal,
+    logging_integration: _custom_logger_compatible_callbacks_literal,
     internal_usage_cache: Optional[DualCache],
     llm_router: Optional[
         Any
@@ -2362,7 +2364,7 @@ def _init_custom_logger_compatible_class( # noqa: PLR0915


 def get_custom_logger_compatible_class(  # noqa: PLR0915
-    logging_integration: litellm._custom_logger_compatible_callbacks_literal,
+    logging_integration: _custom_logger_compatible_callbacks_literal,
 ) -> Optional[CustomLogger]:
     if logging_integration == "lago":
         for callback in _in_memory_loggers:
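
The commit message describes `LiteLLMLoggingBaseClass` as a typing shim: modules that only need to annotate a logging object can import the lightweight base class from `litellm.types.utils` instead of the heavy `litellm_logging` module. A minimal sketch of that pattern, with stand-in names and simplified method signatures that are assumptions rather than the real interface:

```python
from typing import Any, Optional


class LoggingBase:
    """Stand-in for LiteLLMLoggingBaseClass: lives in a leaf types module."""

    def pre_call(self, input: Any, api_key: Optional[str] = None) -> None:
        ...


class Logging(LoggingBase):
    """Stand-in for the heavy Logging class, which subclasses the shim."""

    def pre_call(self, input: Any, api_key: Optional[str] = None) -> None:
        print(f"pre_call with input={input!r}")


def some_handler(logging_obj: LoggingBase) -> None:
    # Annotating against the base class means this module never has to
    # import litellm_core_utils.litellm_logging, breaking the cycle while
    # still getting type-checked access to the logging interface.
    logging_obj.pre_call(input="hello")
```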

litellm/litellm_core_utils/prompt_templates/factory.py

Lines changed: 16 additions & 20 deletions
@@ -13,7 +13,6 @@
 import litellm
 import litellm.types
 import litellm.types.llms
-import litellm.types.llms.vertex_ai
 from litellm import verbose_logger
 from litellm.llms.custom_httpx.http_handler import HTTPHandler
 from litellm.types.completion import (
@@ -40,6 +39,9 @@
     ChatCompletionUserMessage,
     OpenAIMessageContentListBlock,
 )
+from litellm.types.llms.vertex_ai import FunctionCall as VertexFunctionCall
+from litellm.types.llms.vertex_ai import FunctionResponse as VertexFunctionResponse
+from litellm.types.llms.vertex_ai import PartType as VertexPartType
 from litellm.types.utils import GenericImageParsingChunk

 from .common_utils import convert_content_list_to_str, is_non_content_values_set
@@ -965,11 +967,11 @@ def infer_protocol_value(

 def _gemini_tool_call_invoke_helper(
     function_call_params: ChatCompletionToolCallFunctionChunk,
-) -> Optional[litellm.types.llms.vertex_ai.FunctionCall]:
+) -> Optional[VertexFunctionCall]:
     name = function_call_params.get("name", "") or ""
     arguments = function_call_params.get("arguments", "")
     arguments_dict = json.loads(arguments)
-    function_call = litellm.types.llms.vertex_ai.FunctionCall(
+    function_call = VertexFunctionCall(
         name=name,
         args=arguments_dict,
     )
@@ -978,7 +980,7 @@ def _gemini_tool_call_invoke_helper(

 def convert_to_gemini_tool_call_invoke(
     message: ChatCompletionAssistantMessage,
-) -> List[litellm.types.llms.vertex_ai.PartType]:
+) -> List[VertexPartType]:
     """
     OpenAI tool invokes:
     {
@@ -1019,22 +1021,20 @@ def convert_to_gemini_tool_call_invoke(
     - json.load the arguments
     """
     try:
-        _parts_list: List[litellm.types.llms.vertex_ai.PartType] = []
+        _parts_list: List[VertexPartType] = []
         tool_calls = message.get("tool_calls", None)
         function_call = message.get("function_call", None)
         if tool_calls is not None:
             for tool in tool_calls:
                 if "function" in tool:
-                    gemini_function_call: Optional[
-                        litellm.types.llms.vertex_ai.FunctionCall
-                    ] = _gemini_tool_call_invoke_helper(
-                        function_call_params=tool["function"]
+                    gemini_function_call: Optional[VertexFunctionCall] = (
+                        _gemini_tool_call_invoke_helper(
+                            function_call_params=tool["function"]
+                        )
                     )
                     if gemini_function_call is not None:
                         _parts_list.append(
-                            litellm.types.llms.vertex_ai.PartType(
-                                function_call=gemini_function_call
-                            )
+                            VertexPartType(function_call=gemini_function_call)
                         )
                     else:  # don't silently drop params. Make it clear to user what's happening.
                         raise Exception(
@@ -1047,11 +1047,7 @@ def convert_to_gemini_tool_call_invoke(
                 function_call_params=function_call
             )
             if gemini_function_call is not None:
-                _parts_list.append(
-                    litellm.types.llms.vertex_ai.PartType(
-                        function_call=gemini_function_call
-                    )
-                )
+                _parts_list.append(VertexPartType(function_call=gemini_function_call))
             else:  # don't silently drop params. Make it clear to user what's happening.
                 raise Exception(
                     "function_call missing. Received tool call with 'type': 'function'. No function call in argument - {}".format(
@@ -1070,7 +1066,7 @@ def convert_to_gemini_tool_call_invoke(
 def convert_to_gemini_tool_call_result(
     message: Union[ChatCompletionToolMessage, ChatCompletionFunctionMessage],
     last_message_with_tool_calls: Optional[dict],
-) -> litellm.types.llms.vertex_ai.PartType:
+) -> VertexPartType:
     """
     OpenAI message with a tool result looks like:
     {
@@ -1119,11 +1115,11 @@ def convert_to_gemini_tool_call_result(

     # We can't determine from openai message format whether it's a successful or
     # error call result so default to the successful result template
-    _function_response = litellm.types.llms.vertex_ai.FunctionResponse(
+    _function_response = VertexFunctionResponse(
         name=name, response={"content": content_str}  # type: ignore
     )

-    _part = litellm.types.llms.vertex_ai.PartType(function_response=_function_response)
+    _part = VertexPartType(function_response=_function_response)

     return _part
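
The aliased imports above (`FunctionCall as VertexFunctionCall`, etc.) replace fully qualified `litellm.types.llms.vertex_ai.X` references: the alias keeps the provider name visible at each call site without repeating the module path. Both spellings should resolve to the same class:

```python
from litellm.types.llms import vertex_ai
from litellm.types.llms.vertex_ai import FunctionCall as VertexFunctionCall

# The alias is just another name bound to the identical class object.
assert VertexFunctionCall is vertex_ai.FunctionCall
```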

litellm/llms/anthropic/experimental_pass_through/transformation.py

Lines changed: 11 additions & 11 deletions
@@ -5,6 +5,12 @@
 from openai.types.chat.chat_completion_chunk import Choice as OpenAIStreamingChoice

 import litellm
+from litellm.litellm_core_utils.prompt_templates.factory import (
+    anthropic_messages_pt,
+    custom_prompt,
+    prompt_factory,
+)
+from litellm.litellm_core_utils.streaming_handler import CustomStreamWrapper
 from litellm.types.llms.anthropic import (
     AllAnthropicToolsValues,
     AnthopicMessagesAssistantMessageParam,
@@ -53,15 +59,9 @@
     ChatCompletionUserMessage,
     OpenAIMessageContent,
 )
-from litellm.types.utils import Choices, GenericStreamingChunk
-from litellm.utils import CustomStreamWrapper, ModelResponse, Usage
+from litellm.types.utils import Choices, GenericStreamingChunk, ModelResponse, Usage

 from ...base import BaseLLM
-from litellm.litellm_core_utils.prompt_templates.factory import (
-    anthropic_messages_pt,
-    custom_prompt,
-    prompt_factory,
-)


 class AnthropicExperimentalPassThroughConfig:
@@ -338,7 +338,7 @@ def _translate_openai_finish_reason_to_anthropic(
         return "end_turn"

     def translate_openai_response_to_anthropic(
-        self, response: litellm.ModelResponse
+        self, response: ModelResponse
     ) -> AnthropicResponse:
         ## translate content block
         anthropic_content = self._translate_openai_content_to_anthropic(choices=response.choices)  # type: ignore
@@ -347,7 +347,7 @@ def translate_openai_response_to_anthropic(
             openai_finish_reason=response.choices[0].finish_reason  # type: ignore
         )
         # extract usage
-        usage: litellm.Usage = getattr(response, "usage")
+        usage: Usage = getattr(response, "usage")
         anthropic_usage = AnthropicResponseUsageBlock(
             input_tokens=usage.prompt_tokens or 0,
             output_tokens=usage.completion_tokens or 0,
@@ -393,7 +393,7 @@ def _translate_streaming_openai_chunk_to_anthropic(
         return "text_delta", ContentTextBlockDelta(type="text_delta", text=text)

     def translate_streaming_openai_response_to_anthropic(
-        self, response: litellm.ModelResponse
+        self, response: ModelResponse
     ) -> Union[ContentBlockDelta, MessageBlockDelta]:
         ## base case - final chunk w/ finish reason
         if response.choices[0].finish_reason is not None:
@@ -403,7 +403,7 @@ def translate_streaming_openai_response_to_anthropic(
             ),
         )
         if getattr(response, "usage", None) is not None:
-            litellm_usage_chunk: Optional[litellm.Usage] = response.usage  # type: ignore
+            litellm_usage_chunk: Optional[Usage] = response.usage  # type: ignore
         elif (
             hasattr(response, "_hidden_params")
             and "usage" in response._hidden_params
