Skip to content

Commit f3aa5a3

Browse files
committed
refactor(api/chat): update for changes in the ai dependency (v4)
1 parent a7e645a commit f3aa5a3

File tree

1 file changed

+23
-22
lines changed

1 file changed

+23
-22
lines changed

app/api/chat/route.ts

Lines changed: 23 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -1,9 +1,9 @@
1-
import { AzureKeyCredential, OpenAIClient } from '@azure/openai';
2-
import { OpenAIStream, StreamingTextResponse } from 'ai';
1+
import { createAzure } from '@ai-sdk/azure';
2+
import { streamText } from 'ai';
33

44
// destructure env vars we need
55
const {
6-
AZURE_OPENAI_BASE_PATH,
6+
AZURE_OPENAI_DEPLOYMENT_NAME,
77
AZURE_OPENAI_API_KEY,
88
AZURE_OPENAI_MODEL_DEPLOYMENT,
99
AZURE_OPENAI_GPT4_DEPLOYMENT,
@@ -16,7 +16,7 @@ const {
1616
// make sure env vars are set
1717
if (
1818
!AZURE_OPENAI_API_KEY ||
19-
!AZURE_OPENAI_BASE_PATH ||
19+
!AZURE_OPENAI_DEPLOYMENT_NAME ||
2020
!AZURE_OPENAI_MODEL_DEPLOYMENT ||
2121
!AZURE_OPENAI_API_VERSION
2222
) {
@@ -87,15 +87,14 @@ export async function POST(req: Request) {
8787
chatMessages = [systemPrompt, ...messages];
8888
}
8989

90-
const openai = new OpenAIClient(
91-
AZURE_OPENAI_BASE_PATH,
92-
new AzureKeyCredential(AZURE_OPENAI_API_KEY),
93-
{
94-
apiVersion: AZURE_OPENAI_API_VERSION,
95-
}
96-
);
90+
// create azure client
91+
const azure = createAzure({
92+
resourceName: AZURE_OPENAI_DEPLOYMENT_NAME,
93+
apiKey: AZURE_OPENAI_API_KEY,
94+
});
9795

98-
const response = await openai.streamChatCompletions(
96+
// instantiate azure openai model
97+
const openai = azure(
9998
model === 'gpt-35-turbo' && AZURE_OPENAI_GPT35_DEPLOYMENT
10099
? AZURE_OPENAI_GPT35_DEPLOYMENT
101100
: model === 'gpt-4' && AZURE_OPENAI_GPT4_DEPLOYMENT
@@ -107,20 +106,22 @@ export async function POST(req: Request) {
107106
: model === 'gpt-4o-mini' && AZURE_OPENAI_GPT4O_MINI_DEPLOYMENT
108107
? AZURE_OPENAI_GPT4O_MINI_DEPLOYMENT
109108
: AZURE_OPENAI_GPT4O_DEPLOYMENT,
110-
chatMessages,
111109
{
112-
frequencyPenalty: frequency_penalty,
113-
maxTokens: max_tokens,
114-
presencePenalty: presence_penalty,
115-
temperature,
116-
topP: top_p,
117110
user,
118111
}
119112
);
120113

121-
// convert the response into a friendly text-stream
122-
const stream = OpenAIStream(response);
114+
// send the request and store the response
115+
const response = streamText({
116+
model: openai,
117+
messages: chatMessages,
118+
temperature,
119+
topP: top_p,
120+
frequencyPenalty: frequency_penalty,
121+
presencePenalty: presence_penalty,
122+
maxTokens: max_tokens,
123+
});
123124

124-
// send the stream back to the client
125-
return new StreamingTextResponse(stream);
125+
// convert the response into a friendly text-stream and return to client
126+
return response.toDataStreamResponse();
126127
}

0 commit comments

Comments
 (0)