Skip to content

Commit 6f231db

Browse files
authored
fix(providers): always use optional instead of a mix of nullish for providerOptions (vercel#6122)
## Background We were using nullish in some places (due to AI autocompletion). ## Summary It is now unified to use `.optional()` everywhere. ## Verification Checked the JSDoc to make sure we're not breaking anything.
1 parent a662dea commit 6f231db

File tree

6 files changed

+25
-16
lines changed

6 files changed

+25
-16
lines changed

.changeset/weak-moles-nail.md

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
---
2+
'@ai-sdk/amazon-bedrock': patch
3+
'@ai-sdk/mistral': patch
4+
'@ai-sdk/cohere': patch
5+
'@ai-sdk/openai': patch
6+
'@ai-sdk/groq': patch
7+
---
8+
9+
fix(providers): always use optional instead of a mix of nullish for providerOptions

packages/amazon-bedrock/src/bedrock-chat-options.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -44,10 +44,10 @@ export const bedrockProviderOptions = z.object({
4444
additionalModelRequestFields: z.record(z.any()).optional(),
4545
reasoningConfig: z
4646
.object({
47-
type: z.union([z.literal('enabled'), z.literal('disabled')]).nullish(),
48-
budgetTokens: z.number().nullish(),
47+
type: z.union([z.literal('enabled'), z.literal('disabled')]).optional(),
48+
budgetTokens: z.number().optional(),
4949
})
50-
.nullish(),
50+
.optional(),
5151
});
5252

5353
export type BedrockProviderOptions = z.infer<typeof bedrockProviderOptions>;

packages/cohere/src/cohere-embedding-options.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ export const cohereEmbeddingOptions = z.object({
2121
*/
2222
inputType: z
2323
.enum(['search_document', 'search_query', 'classification', 'clustering'])
24-
.nullish(),
24+
.optional(),
2525

2626
/**
2727
* Specifies how the API will handle inputs longer than the maximum token length.
@@ -31,7 +31,7 @@ export const cohereEmbeddingOptions = z.object({
3131
* - "START": Will discard the start of the input until the remaining input is exactly the maximum input token length for the model.
3232
* - "END": Will discard the end of the input until the remaining input is exactly the maximum input token length for the model.
3333
*/
34-
truncate: z.enum(['NONE', 'START', 'END']).nullish(),
34+
truncate: z.enum(['NONE', 'START', 'END']).optional(),
3535
});
3636

3737
export type CohereEmbeddingOptions = z.infer<typeof cohereEmbeddingOptions>;

packages/groq/src/groq-chat-options.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,18 +20,18 @@ export type GroqChatModelId =
2020
| (string & {});
2121

2222
export const groqProviderOptions = z.object({
23-
reasoningFormat: z.enum(['parsed', 'raw', 'hidden']).nullish(),
23+
reasoningFormat: z.enum(['parsed', 'raw', 'hidden']).optional(),
2424

2525
/**
2626
* Whether to enable parallel function calling during tool use. Default to true.
2727
*/
28-
parallelToolCalls: z.boolean().nullish(),
28+
parallelToolCalls: z.boolean().optional(),
2929

3030
/**
3131
* A unique identifier representing your end-user, which can help OpenAI to
3232
* monitor and detect abuse. Learn more.
3333
*/
34-
user: z.string().nullish(),
34+
user: z.string().optional(),
3535
});
3636

3737
export type GroqProviderOptions = z.infer<typeof groqProviderOptions>;

packages/mistral/src/mistral-chat-options.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,10 +22,10 @@ Whether to inject a safety prompt before all conversations.
2222
2323
Defaults to `false`.
2424
*/
25-
safePrompt: z.boolean().nullish(),
25+
safePrompt: z.boolean().optional(),
2626

27-
documentImageLimit: z.number().nullish(),
28-
documentPageLimit: z.number().nullish(),
27+
documentImageLimit: z.number().optional(),
28+
documentPageLimit: z.number().optional(),
2929
});
3030

3131
export type MistralProviderOptions = z.infer<typeof mistralProviderOptions>;

packages/openai/src/openai-transcription-options.ts

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,23 +12,23 @@ export const openAITranscriptionProviderOptions = z.object({
1212
* Additional information to include in the transcription response.
1313
*/
1414

15-
include: z.array(z.string()).nullish(),
15+
include: z.array(z.string()).optional(),
1616

1717
/**
1818
* The language of the input audio in ISO-639-1 format.
1919
*/
20-
language: z.string().nullish(),
20+
language: z.string().optional(),
2121

2222
/**
2323
* An optional text to guide the model's style or continue a previous audio segment.
2424
*/
25-
prompt: z.string().nullish(),
25+
prompt: z.string().optional(),
2626

2727
/**
2828
* The sampling temperature, between 0 and 1.
2929
* @default 0
3030
*/
31-
temperature: z.number().min(0).max(1).default(0).nullish(),
31+
temperature: z.number().min(0).max(1).default(0).optional(),
3232

3333
/**
3434
* The timestamp granularities to populate for this transcription.
@@ -37,7 +37,7 @@ export const openAITranscriptionProviderOptions = z.object({
3737
timestampGranularities: z
3838
.array(z.enum(['word', 'segment']))
3939
.default(['segment'])
40-
.nullish(),
40+
.optional(),
4141
});
4242

4343
export type OpenAITranscriptionProviderOptions = z.infer<

0 commit comments

Comments (0)