@@ -10,6 +10,7 @@ import {
1010 isAIMessageChunk ,
1111 isBaseMessage ,
1212 isAIMessage ,
13+ MessageOutputVersion ,
1314} from "../messages/index.js" ;
1415import {
1516 convertToOpenAIImageBlock ,
@@ -113,7 +114,7 @@ export type BaseChatModelParams = BaseLanguageModelParams & {
113114 *
114115 * @default "v0"
115116 */
116- outputVersion ?: "v0" | "v1" ;
117+ outputVersion ?: MessageOutputVersion ;
117118} ;
118119
119120/**
@@ -135,6 +136,22 @@ export type BaseChatModelCallOptions = BaseLanguageModelCallOptions & {
135136 * if used with an unsupported model.
136137 */
137138 tool_choice ?: ToolChoice ;
139+ /**
140+ * Version of `AIMessage` output format to store in message content.
141+ *
142+ * `AIMessage.contentBlocks` will lazily parse the contents of `content` into a
143+ * standard format. This flag can be used to additionally store the standard format
144+ * as the message content, e.g., for serialization purposes.
145+ *
146+ * - "v0": provider-specific format in content (can lazily parse with `.contentBlocks`)
147+ * - "v1": standardized format in content (consistent with `.contentBlocks`)
148+ *
 149+ * You can also set the `LC_OUTPUT_VERSION` environment variable to `"v1"` to
 150+ * enable this by default.
151+ *
152+ * @default "v0"
153+ */
154+ outputVersion ?: MessageOutputVersion ;
138155} ;
139156
140157function _formatForTracing ( messages : BaseMessage [ ] ) : BaseMessage [ ] {
@@ -202,7 +219,7 @@ export abstract class BaseChatModel<
202219
203220 disableStreaming = false ;
204221
205- outputVersion ?: "v0" | "v1" ;
222+ outputVersion ?: MessageOutputVersion ;
206223
207224 constructor ( fields : BaseChatModelParams ) {
208225 super ( fields ) ;
@@ -308,6 +325,7 @@ export abstract class BaseChatModel<
308325 invocation_params : this ?. invocationParams ( callOptions ) ,
309326 batch_size : 1 ,
310327 } ;
328+ const outputVersion = callOptions . outputVersion ?? this . outputVersion ;
311329 const runManagers = await callbackManager_ ?. handleChatModelStart (
312330 this . toJSON ( ) ,
313331 [ _formatForTracing ( messages ) ] ,
@@ -335,7 +353,7 @@ export abstract class BaseChatModel<
335353 ...chunk . generationInfo ,
336354 ...chunk . message . response_metadata ,
337355 } ;
338- if ( this . outputVersion === "v1" ) {
356+ if ( outputVersion === "v1" ) {
339357 yield castStandardMessageContent (
340358 chunk . message
341359 ) as OutputMessageType ;
@@ -440,6 +458,7 @@ export abstract class BaseChatModel<
440458 handledOptions . runName
441459 ) ;
442460 }
461+ const outputVersion = parsedOptions . outputVersion ?? this . outputVersion ;
443462 const generations : ChatGeneration [ ] [ ] = [ ] ;
444463 const llmOutputs : LLMResult [ "llmOutput" ] [ ] = [ ] ;
445464 // Even if stream is not explicitly called, check if model is implicitly
@@ -508,7 +527,7 @@ export abstract class BaseChatModel<
508527 { ...parsedOptions , promptIndex : i } ,
509528 runManagers ?. [ i ]
510529 ) ;
511- if ( this . outputVersion === "v1" ) {
530+ if ( outputVersion === "v1" ) {
512531 for ( const generation of generateResults . generations ) {
513532 generation . message = castStandardMessageContent (
514533 generation . message
@@ -650,6 +669,7 @@ export abstract class BaseChatModel<
650669 ) ;
651670
652671 // Handle results and call run managers
672+ const outputVersion = parsedOptions . outputVersion ?? this . outputVersion ;
653673 const generations : Generation [ ] [ ] = [ ] ;
654674 await Promise . all (
655675 cachedResults . map ( async ( { result : promiseResult , runManager } , i ) => {
@@ -666,7 +686,7 @@ export abstract class BaseChatModel<
666686 output_tokens : 0 ,
667687 total_tokens : 0 ,
668688 } ;
669- if ( this . outputVersion === "v1" ) {
689+ if ( outputVersion === "v1" ) {
670690 result . message = castStandardMessageContent ( result . message ) ;
671691 }
672692 }
0 commit comments