Skip to content

Commit a2f3a09

Browse files
authored
fix: various fixes to make gpt-5 work better [LET-4138] (#4344)
* fix: patch gpt-5 compat
* feat: add verbosity level dropdown selector to llm config panel in ade
* fix: patch duplicated logic
* fix: make default verbosity None, just stage just publish
* fix: missing files
1 parent d2f658c commit a2f3a09

File tree

2 files changed

+13
-6
lines changed

2 files changed

+13
-6
lines changed

letta/llm_api/openai_client.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ def is_openai_reasoning_model(model: str) -> bool:
4747
"""Utility function to check if the model is a 'reasoner'"""
4848

4949
# NOTE: needs to be updated with new model releases
50-
is_reasoning = model.startswith("o1") or model.startswith("o3") or model.startswith("o4")
50+
is_reasoning = model.startswith("o1") or model.startswith("o3") or model.startswith("o4") or model.startswith("gpt-5")
5151
return is_reasoning
5252

5353

letta/schemas/llm_config.py

Lines changed: 12 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ class LLMConfig(BaseModel):
7171
)
7272
compatibility_type: Optional[Literal["gguf", "mlx"]] = Field(None, description="The framework compatibility type for the model.")
7373
verbosity: Optional[Literal["low", "medium", "high"]] = Field(
74-
"medium",
74+
None,
7575
description="Soft control for how verbose model output should be, used for GPT-5 models.",
7676
)
7777
tier: Optional[str] = Field(None, description="The cost tier for the model (cloud only).")
@@ -206,6 +206,7 @@ def default_config(cls, model_name: str):
206206
model_endpoint="https://api.openai.com/v1",
207207
model_wrapper=None,
208208
context_window=128000,
209+
reasoning_effort="medium",
209210
verbosity="medium",
210211
max_tokens=16384,
211212
)
@@ -228,9 +229,9 @@ def pretty_print(self) -> str:
228229

229230
@classmethod
230231
def is_openai_reasoning_model(cls, config: "LLMConfig") -> bool:
231-
return config.model_endpoint_type == "openai" and (
232-
config.model.startswith("o1") or config.model.startswith("o3") or config.model.startswith("o4")
233-
)
232+
from letta.llm_api.openai_client import is_openai_reasoning_model
233+
234+
return config.model_endpoint_type == "openai" and is_openai_reasoning_model(config.model)
234235

235236
@classmethod
236237
def is_anthropic_reasoning_model(cls, config: "LLMConfig") -> bool:
@@ -261,11 +262,14 @@ def supports_verbosity(cls, config: "LLMConfig") -> bool:
261262
def apply_reasoning_setting_to_config(cls, config: "LLMConfig", reasoning: bool):
262263
if not reasoning:
263264
if cls.is_openai_reasoning_model(config):
264-
logger.warning("Reasoning cannot be disabled for OpenAI o1/o3 models")
265+
logger.warning("Reasoning cannot be disabled for OpenAI o1/o3/gpt-5 models")
265266
config.put_inner_thoughts_in_kwargs = False
266267
config.enable_reasoner = True
267268
if config.reasoning_effort is None:
268269
config.reasoning_effort = "medium"
270+
# Set verbosity for GPT-5 models
271+
if config.model.startswith("gpt-5") and config.verbosity is None:
272+
config.verbosity = "medium"
269273
elif config.model.startswith("gemini-2.5-pro"):
270274
logger.warning("Reasoning cannot be disabled for Gemini 2.5 Pro model")
271275
# Handle as non-reasoner until we support summary
@@ -292,6 +296,9 @@ def apply_reasoning_setting_to_config(cls, config: "LLMConfig", reasoning: bool)
292296
config.put_inner_thoughts_in_kwargs = False
293297
if config.reasoning_effort is None:
294298
config.reasoning_effort = "medium"
299+
# Set verbosity for GPT-5 models
300+
if config.model.startswith("gpt-5") and config.verbosity is None:
301+
config.verbosity = "medium"
295302
else:
296303
config.put_inner_thoughts_in_kwargs = True
297304

0 commit comments

Comments
 (0)