// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
22
33import { APIResource } from '../../core/resource' ;
4+ import * as ModelsAPI from './models' ;
45import * as EmbeddingsAPI from './embeddings' ;
56import { EmbeddingListResponse , Embeddings } from './embeddings' ;
67import { APIPromise } from '../../core/api-promise' ;
@@ -11,7 +12,11 @@ export class Models extends APIResource {
1112
1213 /**
1314 * List available LLM models using the asynchronous implementation for improved
14- * performance
15+ * performance.
16+ *
17+ * Returns Model format which extends LLMConfig with additional metadata fields.
18+ * Legacy LLMConfig fields are marked as deprecated but still available for
19+ * backward compatibility.
1520 */
1621 list (
1722 query : ModelListParams | null | undefined = { } ,
@@ -251,7 +256,158 @@ export type ProviderType =
251256 | 'vllm'
252257 | 'xai' ;
253258
export type ModelListResponse = Array<ModelListResponse.ModelListResponseItem>;

export namespace ModelListResponse {
  /**
   * A single model entry returned by the models list endpoint.
   *
   * Extends the legacy LLMConfig shape with additional metadata fields
   * (`name`, `provider_type`, `max_context_window`). The legacy LLMConfig
   * fields are retained but marked @deprecated for backward compatibility.
   */
  export interface ModelListResponseItem {
    /**
     * @deprecated Deprecated: Use 'max_context_window' field instead. The context
     * window size for the model.
     */
    context_window: number;

    /**
     * The maximum context window for the model
     */
    max_context_window: number;

    /**
     * @deprecated Deprecated: Use 'name' field instead. LLM model name.
     */
    model: string;

    /**
     * @deprecated Deprecated: Use 'provider_type' field instead. The endpoint type for
     * the model.
     */
    model_endpoint_type:
      | 'openai'
      | 'anthropic'
      | 'google_ai'
      | 'google_vertex'
      | 'azure'
      | 'groq'
      | 'ollama'
      | 'webui'
      | 'webui-legacy'
      | 'lmstudio'
      | 'lmstudio-legacy'
      | 'lmstudio-chatcompletions'
      | 'llamacpp'
      | 'koboldcpp'
      | 'vllm'
      | 'hugging-face'
      | 'mistral'
      | 'together'
      | 'bedrock'
      | 'deepseek'
      | 'xai';

    /**
     * The actual model name used by the provider
     */
    name: string;

    /**
     * The type of the provider
     */
    provider_type: ModelsAPI.ProviderType;

    /**
     * @deprecated Deprecated: The framework compatibility type for the model.
     */
    compatibility_type?: 'gguf' | 'mlx' | null;

    /**
     * A human-friendly display name for the model.
     */
    display_name?: string | null;

    /**
     * @deprecated Deprecated: Whether or not the model should use extended thinking if
     * it is a 'reasoning' style model.
     */
    enable_reasoner?: boolean;

    /**
     * @deprecated Deprecated: Positive values penalize new tokens based on their
     * existing frequency in the text so far.
     */
    frequency_penalty?: number | null;

    /**
     * The handle for this config, in the format provider/model-name.
     */
    handle?: string | null;

    /**
     * @deprecated Deprecated: Configurable thinking budget for extended thinking.
     */
    max_reasoning_tokens?: number;

    /**
     * @deprecated Deprecated: The maximum number of tokens to generate.
     */
    max_tokens?: number | null;

    /**
     * @deprecated Deprecated: The endpoint for the model.
     */
    model_endpoint?: string | null;

    /**
     * Type of model (llm or embedding)
     */
    model_type?: 'llm';

    /**
     * @deprecated Deprecated: The wrapper for the model.
     */
    model_wrapper?: string | null;

    /**
     * @deprecated Deprecated: If set to True, enables parallel tool calling.
     */
    parallel_tool_calls?: boolean | null;

    /**
     * @deprecated Deprecated: The provider category for the model.
     */
    provider_category?: ModelsAPI.ProviderCategory | null;

    /**
     * The provider name for the model.
     */
    provider_name?: string | null;

    /**
     * @deprecated Deprecated: Puts 'inner_thoughts' as a kwarg in the function call.
     */
    put_inner_thoughts_in_kwargs?: boolean | null;

    /**
     * @deprecated Deprecated: The reasoning effort to use when generating text
     * reasoning models.
     */
    reasoning_effort?: 'minimal' | 'low' | 'medium' | 'high' | null;

    /**
     * @deprecated Deprecated: The temperature to use when generating text with the
     * model.
     */
    temperature?: number;

    /**
     * @deprecated Deprecated: The cost tier for the model (cloud only).
     */
    tier?: string | null;

    /**
     * @deprecated Deprecated: Soft control for how verbose model output should be.
     */
    verbosity?: 'low' | 'medium' | 'high' | null;
  }
}
255411
256412export interface ModelListParams {
257413 provider_category ?: Array < ProviderCategory > | null ;
0 commit comments