64 changes: 54 additions & 10 deletions libs/partners/openai/langchain_openai/chat_models/base.py
@@ -190,8 +190,13 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
)
if audio := _dict.get("audio"):
additional_kwargs["audio"] = audio
# Handle OpenRouter reasoning_details (provider-specific metadata)
# Store as-is in additional_kwargs, do not convert to content blocks
reasoning_details = _dict.get("reasoning_details")
if isinstance(reasoning_details, dict):
additional_kwargs["reasoning_details"] = reasoning_details
return AIMessage(
content=content,
content=content, # type: ignore[arg-type]
additional_kwargs=additional_kwargs,
name=name,
id=id_,
@@ -371,7 +376,7 @@ def _convert_delta_to_message_chunk(
"""Convert to a LangChain message chunk."""
id_ = _dict.get("id")
role = cast(str, _dict.get("role"))
content = cast(str, _dict.get("content") or "")
content: Any = _dict.get("content") or ""
additional_kwargs: dict = {}
if _dict.get("function_call"):
function_call = dict(_dict["function_call"])
@@ -392,6 +397,13 @@
]
except KeyError:
pass
# Handle OpenRouter reasoning_details in streaming (provider-specific metadata)
# Store as-is in additional_kwargs, do not convert to content blocks.
# When chunks are merged via AIMessageChunk.__add__(), merge_dicts() will
# automatically handle nested dict/list merging for reasoning_details.
reasoning_details = _dict.get("reasoning_details")
if isinstance(reasoning_details, dict):
additional_kwargs["reasoning_details"] = reasoning_details

if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content, id=id_)
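A minimal sketch (not part of this diff) of the merge behavior the new comment relies on: adding `AIMessageChunk` objects combines their `additional_kwargs` via `merge_dicts()`, which recurses into nested dicts and concatenates lists.

```python
from langchain_core.messages import AIMessageChunk

c1 = AIMessageChunk(
    content="Step 1",
    additional_kwargs={"reasoning_details": {"steps": ["step1"]}},
)
c2 = AIMessageChunk(
    content=" Step 2",
    additional_kwargs={"reasoning_details": {"steps": ["step2"], "final": True}},
)

merged = c1 + c2
assert merged.content == "Step 1 Step 2"
assert merged.additional_kwargs["reasoning_details"] == {
    "steps": ["step1", "step2"],  # lists are concatenated
    "final": True,  # keys present in only one chunk are carried over
}
```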
Expand Down Expand Up @@ -1447,12 +1459,6 @@ def _create_chat_result(
response_dict = (
response if isinstance(response, dict) else response.model_dump()
)
# Sometimes the AI Model calling will get error, we should raise it (this is
# typically followed by a null value for `choices`, which we raise for
# separately below).
if response_dict.get("error"):
raise ValueError(response_dict.get("error"))

# Raise informative error messages for non-OpenAI chat completions APIs
# that return malformed responses.
try:
Expand All @@ -1462,14 +1468,52 @@ def _create_chat_result(
raise KeyError(msg) from e

if choices is None:
msg = "Received response with null value for `choices`."
# Provide more diagnostic information for null choices
# This can happen with some OpenAI-compatible APIs (e.g., vLLM) that
# return null choices in error cases or edge cases
error_info = response_dict.get("error")
if error_info:
# If there's an error field, raise it with more context
if isinstance(error_info, dict):
error_msg = error_info.get("message", str(error_info))
error_type = error_info.get("type", "UnknownError")
msg = (
f"API returned error with null choices: "
f"{error_type}: {error_msg}. "
f"Full response keys: {list(response_dict.keys())}"
)
else:
msg = (
f"API returned error with null choices: {error_info}. "
f"Full response keys: {list(response_dict.keys())}"
)
raise ValueError(msg) from None
# No explicit error field, but choices is null - provide diagnostic info
msg = (
"Received response with null value for `choices`. "
"This may indicate an API error or incompatibility with the "
"OpenAI-compatible API. "
f"Response keys: {list(response_dict.keys())}. "
"If using a custom base_url, ensure the API endpoint is "
"fully OpenAI-compatible."
)
raise TypeError(msg)

# Some OpenAI-compatible APIs return a top-level `error` field even when
# `choices` is non-null; raise it here (checked after validating `choices`
# so the null-choices path above can produce a more informative message).
if response_dict.get("error"):
raise ValueError(response_dict.get("error"))

token_usage = response_dict.get("usage")
service_tier = response_dict.get("service_tier")

for res in choices:
message = _convert_dict_to_message(res["message"])
message_dict = dict(res["message"])
# Handle OpenRouter reasoning_details at response level
reasoning_details = response_dict.get("reasoning_details")
if isinstance(reasoning_details, dict):
message_dict["reasoning_details"] = reasoning_details
message = _convert_dict_to_message(message_dict)
if token_usage and isinstance(message, AIMessage):
message.usage_metadata = _create_usage_metadata(
token_usage, service_tier
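For review convenience, a condensed standalone sketch of the null-`choices` control flow added above (illustrative only; the helper name `_validate_choices` is not part of the diff):

```python
from typing import Any


def _validate_choices(response_dict: dict[str, Any]) -> None:
    """Illustrative mirror of the branching in `_create_chat_result`."""
    if response_dict.get("choices") is not None:
        return  # normal path; a top-level `error` is still raised afterwards
    error_info = response_dict.get("error")
    if isinstance(error_info, dict):
        raise ValueError(
            f"API returned error with null choices: "
            f"{error_info.get('type', 'UnknownError')}: "
            f"{error_info.get('message', error_info)}"
        )
    if error_info:
        raise ValueError(f"API returned error with null choices: {error_info}")
    raise TypeError("Received response with null value for `choices`.")
```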
190 changes: 190 additions & 0 deletions libs/partners/openai/tests/unit_tests/chat_models/test_base.py
@@ -3188,6 +3188,196 @@ def test_gpt_5_1_temperature_with_reasoning_effort_none(
assert "temperature" not in payload


def test_openrouter_reasoning_details() -> None:
"""Test OpenRouter reasoning_details support in AIMessage.

This test verifies that OpenRouter's reasoning_details field is correctly
stored as provider-specific metadata in additional_kwargs, without
modifying the content structure.
"""
from langchain_openai.chat_models.base import (
_convert_delta_to_message_chunk,
_convert_dict_to_message,
)

# Test non-streaming: reasoning_details in message
response_dict = {
"role": "assistant",
"content": "The answer is 42.",
"reasoning_details": {
"tokens": [{"token": "Let", "logprob": -0.1}],
"steps": ["step1", "step2"],
},
}
message = _convert_dict_to_message(response_dict)
assert isinstance(message, AIMessage)
# Content should remain unchanged (string, not converted to blocks)
assert message.content == "The answer is 42."
# reasoning_details should be in additional_kwargs
assert "reasoning_details" in message.additional_kwargs
assert message.additional_kwargs["reasoning_details"] == {
"tokens": [{"token": "Let", "logprob": -0.1}],
"steps": ["step1", "step2"],
}

# Test non-streaming: reasoning_details with content as list
response_dict_list = {
"role": "assistant",
"content": [{"type": "text", "text": "Final answer."}],
"reasoning_details": {
"reasoning": "Thinking process...",
"id": "rs_123",
},
}
message_list = _convert_dict_to_message(response_dict_list)
assert isinstance(message_list, AIMessage)
# Content should remain unchanged (list, not modified)
assert isinstance(message_list.content, list)
content_list = cast(list[dict[str, Any]], message_list.content)
assert len(content_list) == 1
assert content_list[0]["type"] == "text"
# reasoning_details should be in additional_kwargs
assert "reasoning_details" in message_list.additional_kwargs

# Test streaming: reasoning_details in delta
delta_dict = {
"role": "assistant",
"content": "Streaming response.",
"reasoning_details": {
"steps": ["step1"],
},
}
chunk = _convert_delta_to_message_chunk(delta_dict, AIMessageChunk)
assert isinstance(chunk, AIMessageChunk)
# Content should remain unchanged (string, not converted to blocks)
assert chunk.content == "Streaming response."
# reasoning_details should be in additional_kwargs
assert "reasoning_details" in chunk.additional_kwargs
assert chunk.additional_kwargs["reasoning_details"] == {"steps": ["step1"]}

# Test reasoning_details with complex structure
response_dict_complex = {
"role": "assistant",
"content": "Answer",
"reasoning_details": {
"tokens": [{"token": "test", "logprob": -0.5}],
"metadata": {"provider": "openrouter"},
},
}
message_complex = _convert_dict_to_message(response_dict_complex)
assert isinstance(message_complex, AIMessage)
assert message_complex.content == "Answer"
assert "reasoning_details" in message_complex.additional_kwargs
assert "tokens" in message_complex.additional_kwargs["reasoning_details"]
assert "metadata" in message_complex.additional_kwargs["reasoning_details"]

# Test streaming delta merging via AIMessageChunk.__add__()
chunk1 = _convert_delta_to_message_chunk(
{
"role": "assistant",
"content": "Step 1",
"reasoning_details": {"steps": ["step1"], "tokens": [{"token": "a"}]},
},
AIMessageChunk,
)
chunk2 = _convert_delta_to_message_chunk(
{
"role": "assistant",
"content": " Step 2",
"reasoning_details": {"steps": ["step2"], "tokens": [{"token": "b"}]},
},
AIMessageChunk,
)
chunk3 = _convert_delta_to_message_chunk(
{
"role": "assistant",
"content": " Done",
"reasoning_details": {"final": True},
},
AIMessageChunk,
)
merged = chunk1 + chunk2 + chunk3
assert isinstance(merged, AIMessageChunk)
assert "reasoning_details" in merged.additional_kwargs
merged_details = merged.additional_kwargs["reasoning_details"]
# Lists should be merged
assert "steps" in merged_details
assert len(merged_details["steps"]) == 2
assert "step1" in merged_details["steps"]
assert "step2" in merged_details["steps"]
# Dict values should be merged
assert "final" in merged_details
assert merged_details["final"] is True


def test_model_prefers_responses_api() -> None:
assert _model_prefers_responses_api("gpt-5.2-pro")
assert not _model_prefers_responses_api("gpt-5.1")


def test_create_chat_result_null_choices_with_error() -> None:
"""Test that null choices with error field raises ValueError with context."""
llm = ChatOpenAI(model="test-model", api_key=SecretStr("test-key"))
response_dict = {
"id": "chatcmpl-123",
"object": "chat.completion",
"created": 1234567890,
"model": "test-model",
"choices": None,
"error": {
"message": "Rate limit exceeded",
"type": "rate_limit_error",
"code": "rate_limit_exceeded",
},
}

with pytest.raises(ValueError) as excinfo:
llm._create_chat_result(response_dict)

error_msg = str(excinfo.value)
assert "API returned error with null choices" in error_msg
assert "rate_limit_error" in error_msg
assert "Rate limit exceeded" in error_msg
assert "Full response keys" in error_msg


def test_create_chat_result_null_choices_without_error() -> None:
"""Test null choices without error field raises TypeError with diagnostic info."""
llm = ChatOpenAI(model="test-model", api_key=SecretStr("test-key"))
response_dict = {
"id": "chatcmpl-123",
"object": "chat.completion",
"created": 1234567890,
"model": "test-model",
"choices": None,
"usage": {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15},
}

with pytest.raises(TypeError) as excinfo:
llm._create_chat_result(response_dict)

error_msg = str(excinfo.value)
assert "Received response with null value for `choices`" in error_msg
assert "API error or incompatibility" in error_msg
assert "Response keys" in error_msg
assert "custom base_url" in error_msg


def test_create_chat_result_null_choices_with_string_error() -> None:
"""Test that null choices with string error field raises ValueError."""
llm = ChatOpenAI(model="test-model", api_key=SecretStr("test-key"))
response_dict = {
"id": "chatcmpl-123",
"object": "chat.completion",
"created": 1234567890,
"model": "test-model",
"choices": None,
"error": "Internal server error",
}

with pytest.raises(ValueError) as excinfo:
llm._create_chat_result(response_dict)

error_msg = str(excinfo.value)
assert "API returned error with null choices" in error_msg
assert "Internal server error" in error_msg