Skip to content

Commit 137bff3

Browse files
feat: return new Model and EmbeddingModel objects for list model/embedding endpoints [LET-6090]
1 parent a394300 commit 137bff3

File tree

3 files changed

+265
-6
lines changed

3 files changed

+265
-6
lines changed

.stats.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 110
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/letta-ai%2Fletta-sdk-779ee7a7a34dc46d2e6b71e0c87eb3b31520e439b27542a8b40963480ad63f74.yml
3-
openapi_spec_hash: d03e9b957df56ce2ea87653feb9bdb6a
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/letta-ai%2Fletta-sdk-34adfb02a9eb7951deedaaa659c0447f6e3be98751d86865c414d9ea2786efd8.yml
3+
openapi_spec_hash: d4b59a8d1256d3d5ee9b6a2b5b31f1f2
44
config_hash: a4d193a3723a422d482f909aef0b9d33

src/resources/models/embeddings.ts

Lines changed: 105 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,14 +8,117 @@ import { RequestOptions } from '../../internal/request-options';
88
export class Embeddings extends APIResource {
  /**
   * List the embedding models currently available, served by the asynchronous
   * implementation for improved performance.
   *
   * The response uses the EmbeddingModel shape, which extends EmbeddingConfig
   * with extra metadata fields; the legacy EmbeddingConfig fields remain
   * present (flagged `@deprecated`) for backward compatibility.
   */
  list(options?: RequestOptions): APIPromise<EmbeddingListResponse> {
    // Endpoint path kept as a named constant for readability.
    const path = '/v1/models/embedding';
    return this._client.get(path, options);
  }
}
1721

18-
export type EmbeddingListResponse = Array<ModelsAPI.EmbeddingConfig>;
22+
export type EmbeddingListResponse = Array<EmbeddingListResponse.EmbeddingListResponseItem>;
23+
24+
export namespace EmbeddingListResponse {
25+
export interface EmbeddingListResponseItem {
26+
/**
27+
* Display name for the model shown in UI
28+
*/
29+
display_name: string;
30+
31+
/**
32+
* The dimension of the embedding
33+
*/
34+
embedding_dim: number;
35+
36+
/**
37+
* @deprecated Deprecated: Use 'provider_type' field instead. The endpoint type for
38+
* the embedding model.
39+
*/
40+
embedding_endpoint_type:
41+
| 'openai'
42+
| 'anthropic'
43+
| 'bedrock'
44+
| 'google_ai'
45+
| 'google_vertex'
46+
| 'azure'
47+
| 'groq'
48+
| 'ollama'
49+
| 'webui'
50+
| 'webui-legacy'
51+
| 'lmstudio'
52+
| 'lmstudio-legacy'
53+
| 'llamacpp'
54+
| 'koboldcpp'
55+
| 'vllm'
56+
| 'hugging-face'
57+
| 'mistral'
58+
| 'together'
59+
| 'pinecone';
60+
61+
/**
62+
* @deprecated Deprecated: Use 'name' field instead. Embedding model name.
63+
*/
64+
embedding_model: string;
65+
66+
/**
67+
* The actual model name used by the provider
68+
*/
69+
name: string;
70+
71+
/**
72+
* The name of the provider
73+
*/
74+
provider_name: string;
75+
76+
/**
77+
* The type of the provider
78+
*/
79+
provider_type: ModelsAPI.ProviderType;
80+
81+
/**
82+
* @deprecated Deprecated: The Azure deployment for the model.
83+
*/
84+
azure_deployment?: string | null;
85+
86+
/**
87+
* @deprecated Deprecated: The Azure endpoint for the model.
88+
*/
89+
azure_endpoint?: string | null;
90+
91+
/**
92+
* @deprecated Deprecated: The Azure version for the model.
93+
*/
94+
azure_version?: string | null;
95+
96+
/**
97+
* @deprecated Deprecated: The maximum batch size for processing embeddings.
98+
*/
99+
batch_size?: number;
100+
101+
/**
102+
* @deprecated Deprecated: The chunk size of the embedding.
103+
*/
104+
embedding_chunk_size?: number | null;
105+
106+
/**
107+
* @deprecated Deprecated: The endpoint for the model.
108+
*/
109+
embedding_endpoint?: string | null;
110+
111+
/**
112+
* The handle for this config, in the format provider/model-name.
113+
*/
114+
handle?: string | null;
115+
116+
/**
117+
* Type of model (llm or embedding)
118+
*/
119+
model_type?: 'embedding';
120+
}
121+
}
19122

20123
export declare namespace Embeddings {
21124
export { type EmbeddingListResponse as EmbeddingListResponse };

src/resources/models/models.ts

Lines changed: 158 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
22

33
import { APIResource } from '../../core/resource';
4+
import * as ModelsAPI from './models';
45
import * as EmbeddingsAPI from './embeddings';
56
import { EmbeddingListResponse, Embeddings } from './embeddings';
67
import { APIPromise } from '../../core/api-promise';
@@ -11,7 +12,11 @@ export class Models extends APIResource {
1112

1213
/**
1314
* List available LLM models using the asynchronous implementation for improved
14-
* performance
15+
* performance.
16+
*
17+
* Returns Model format which extends LLMConfig with additional metadata fields.
18+
* Legacy LLMConfig fields are marked as deprecated but still available for
19+
* backward compatibility.
1520
*/
1621
list(
1722
query: ModelListParams | null | undefined = {},
@@ -251,7 +256,158 @@ export type ProviderType =
251256
| 'vllm'
252257
| 'xai';
253258

254-
export type ModelListResponse = Array<LlmConfig>;
259+
export type ModelListResponse = Array<ModelListResponse.ModelListResponseItem>;
260+
261+
export namespace ModelListResponse {
262+
export interface ModelListResponseItem {
263+
/**
264+
* @deprecated Deprecated: Use 'max_context_window' field instead. The context
265+
* window size for the model.
266+
*/
267+
context_window: number;
268+
269+
/**
270+
* The maximum context window for the model
271+
*/
272+
max_context_window: number;
273+
274+
/**
275+
* @deprecated Deprecated: Use 'name' field instead. LLM model name.
276+
*/
277+
model: string;
278+
279+
/**
280+
* @deprecated Deprecated: Use 'provider_type' field instead. The endpoint type for
281+
* the model.
282+
*/
283+
model_endpoint_type:
284+
| 'openai'
285+
| 'anthropic'
286+
| 'google_ai'
287+
| 'google_vertex'
288+
| 'azure'
289+
| 'groq'
290+
| 'ollama'
291+
| 'webui'
292+
| 'webui-legacy'
293+
| 'lmstudio'
294+
| 'lmstudio-legacy'
295+
| 'lmstudio-chatcompletions'
296+
| 'llamacpp'
297+
| 'koboldcpp'
298+
| 'vllm'
299+
| 'hugging-face'
300+
| 'mistral'
301+
| 'together'
302+
| 'bedrock'
303+
| 'deepseek'
304+
| 'xai';
305+
306+
/**
307+
* The actual model name used by the provider
308+
*/
309+
name: string;
310+
311+
/**
312+
* The type of the provider
313+
*/
314+
provider_type: ModelsAPI.ProviderType;
315+
316+
/**
317+
* @deprecated Deprecated: The framework compatibility type for the model.
318+
*/
319+
compatibility_type?: 'gguf' | 'mlx' | null;
320+
321+
/**
322+
* A human-friendly display name for the model.
323+
*/
324+
display_name?: string | null;
325+
326+
/**
327+
* @deprecated Deprecated: Whether or not the model should use extended thinking if
328+
* it is a 'reasoning' style model.
329+
*/
330+
enable_reasoner?: boolean;
331+
332+
/**
333+
* @deprecated Deprecated: Positive values penalize new tokens based on their
334+
* existing frequency in the text so far.
335+
*/
336+
frequency_penalty?: number | null;
337+
338+
/**
339+
* The handle for this config, in the format provider/model-name.
340+
*/
341+
handle?: string | null;
342+
343+
/**
344+
* @deprecated Deprecated: Configurable thinking budget for extended thinking.
345+
*/
346+
max_reasoning_tokens?: number;
347+
348+
/**
349+
* @deprecated Deprecated: The maximum number of tokens to generate.
350+
*/
351+
max_tokens?: number | null;
352+
353+
/**
354+
* @deprecated Deprecated: The endpoint for the model.
355+
*/
356+
model_endpoint?: string | null;
357+
358+
/**
359+
* Type of model (llm or embedding)
360+
*/
361+
model_type?: 'llm';
362+
363+
/**
364+
* @deprecated Deprecated: The wrapper for the model.
365+
*/
366+
model_wrapper?: string | null;
367+
368+
/**
369+
* @deprecated Deprecated: If set to True, enables parallel tool calling.
370+
*/
371+
parallel_tool_calls?: boolean | null;
372+
373+
/**
374+
* @deprecated Deprecated: The provider category for the model.
375+
*/
376+
provider_category?: ModelsAPI.ProviderCategory | null;
377+
378+
/**
379+
* The provider name for the model.
380+
*/
381+
provider_name?: string | null;
382+
383+
/**
384+
* @deprecated Deprecated: Puts 'inner_thoughts' as a kwarg in the function call.
385+
*/
386+
put_inner_thoughts_in_kwargs?: boolean | null;
387+
388+
/**
389+
* @deprecated Deprecated: The reasoning effort to use when generating text
390+
* reasoning models.
391+
*/
392+
reasoning_effort?: 'minimal' | 'low' | 'medium' | 'high' | null;
393+
394+
/**
395+
* @deprecated Deprecated: The temperature to use when generating text with the
396+
* model.
397+
*/
398+
temperature?: number;
399+
400+
/**
401+
* @deprecated Deprecated: The cost tier for the model (cloud only).
402+
*/
403+
tier?: string | null;
404+
405+
/**
406+
* @deprecated Deprecated: Soft control for how verbose model output should be.
407+
*/
408+
verbosity?: 'low' | 'medium' | 'high' | null;
409+
}
410+
}
255411

256412
export interface ModelListParams {
257413
provider_category?: Array<ProviderCategory> | null;

0 commit comments

Comments
 (0)