
Commit 73e8491

🐛 Fix OpenAI thinking model token parameter handling
Add special handling for OpenAI thinking models to use the correct token parameters:

- Add is_openai_thinking_model() helper function to detect thinking models
- Skip setting max_tokens via the builder for OpenAI thinking models
- Convert max_tokens to max_completion_tokens in get_combined_config()
- Ensure proper parameter handling for models starting with 'o'

OpenAI thinking models require max_completion_tokens instead of the max_tokens parameter, which was causing configuration issues.
1 parent 0a202d6 commit 73e8491

1 file changed: +23 −1 lines changed

src/llm.rs

Lines changed: 23 additions & 1 deletion
```diff
@@ -61,7 +61,12 @@ where
         }
 
         // Set max tokens if specified in additional params, otherwise use 4096 as default
-        if let Some(max_tokens) = provider_config.additional_params.get("max_tokens") {
+        // For OpenAI thinking models, don't set max_tokens via builder since they use max_completion_tokens
+        if is_openai_thinking_model(&provider_config.model) && provider_name.to_lowercase() == "openai"
+        {
+            // For thinking models, max_completion_tokens should be handled via additional_params
+            // Don't set max_tokens via the builder for these models
+        } else if let Some(max_tokens) = provider_config.additional_params.get("max_tokens") {
             if let Ok(mt_val) = max_tokens.parse::<u32>() {
                 builder = builder.max_tokens(mt_val);
             }
```
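For context, the guard added here can be read in isolation: max_tokens is deliberately left unset on the builder when both conditions hold, because the value is renamed later in get_combined_config(). A minimal sketch of that decision as a standalone predicate (the function name and test values below are illustrative assumptions, not part of the commit):

```rust
// Hypothetical extraction of the hunk's guard condition for illustration.
fn should_skip_builder_max_tokens(provider_name: &str, model: &str) -> bool {
    // Mirrors the diff: the model must look like a thinking model ('o' prefix)
    // AND the provider must be "openai" (case-insensitive).
    model.to_lowercase().starts_with('o') && provider_name.to_lowercase() == "openai"
}

fn main() {
    assert!(should_skip_builder_max_tokens("OpenAI", "o1")); // skipped; renamed later
    assert!(!should_skip_builder_max_tokens("openai", "gpt-4")); // set via the builder
    assert!(!should_skip_builder_max_tokens("anthropic", "opus")); // other providers unaffected
}
```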
```diff
@@ -275,6 +280,12 @@ fn requires_api_key(backend: &LLMBackend) -> bool {
     !matches!(backend, LLMBackend::Ollama | LLMBackend::Phind)
 }
 
+/// Helper function: check if the model is an `OpenAI` thinking model
+fn is_openai_thinking_model(model: &str) -> bool {
+    let model_lower = model.to_lowercase();
+    model_lower.starts_with('o')
+}
+
 /// Validates the provider configuration
 pub fn validate_provider_config(config: &Config, provider_name: &str) -> Result<()> {
     if provider_requires_api_key(provider_name) {
```
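Since the helper is a pure function, its behavior is easy to check in isolation. A minimal test sketch (the commit itself ships no tests; the model names below are illustrative assumptions):

```rust
// Copied from the diff above; detects thinking models by an 'o' prefix.
fn is_openai_thinking_model(model: &str) -> bool {
    let model_lower = model.to_lowercase();
    model_lower.starts_with('o')
}

fn main() {
    assert!(is_openai_thinking_model("o1")); // reasoning-style names match
    assert!(is_openai_thinking_model("O3-mini")); // matching is case-insensitive
    assert!(!is_openai_thinking_model("gpt-4o")); // an 'o' elsewhere does not match
    // Caveat of the prefix heuristic: any model name starting with 'o'
    // matches, whether or not it is actually a thinking model.
    assert!(is_openai_thinking_model("open-mistral-7b"));
}
```

The provider check in the diff (`provider_name.to_lowercase() == "openai"`) is what keeps this broad prefix match from affecting non-OpenAI models.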
```diff
@@ -324,6 +335,17 @@ pub fn get_combined_config<S: ::std::hash::BuildHasher>(
         }
     }
 
+    // Handle OpenAI thinking models: convert max_tokens to max_completion_tokens
+    if provider_name.to_lowercase() == "openai" {
+        if let Some(model) = combined_params.get("model") {
+            if is_openai_thinking_model(model) {
+                if let Some(max_tokens) = combined_params.remove("max_tokens") {
+                    combined_params.insert("max_completion_tokens".to_string(), max_tokens);
+                }
+            }
+        }
+    }
+
     combined_params
 }
```
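The key-rename step can also be exercised on its own. A self-contained sketch of the rewrite (the helper name `rewrite_for_thinking_model` and the parameter values are assumptions for illustration, not names from the commit):

```rust
use std::collections::HashMap;

// Hypothetical extraction of the rename performed in get_combined_config():
// move the value from "max_tokens" to "max_completion_tokens".
fn rewrite_for_thinking_model(params: &mut HashMap<String, String>) {
    if let Some(max_tokens) = params.remove("max_tokens") {
        params.insert("max_completion_tokens".to_string(), max_tokens);
    }
}

fn main() {
    let mut params = HashMap::new();
    params.insert("model".to_string(), "o1".to_string());
    params.insert("max_tokens".to_string(), "4096".to_string());

    rewrite_for_thinking_model(&mut params);

    // The value carries over under the new key; the old key is removed,
    // so a thinking-model request never sends max_tokens.
    assert_eq!(
        params.get("max_completion_tokens").map(String::as_str),
        Some("4096")
    );
    assert!(!params.contains_key("max_tokens"));
}
```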
