@@ -5,7 +5,7 @@ import { RunnableSequence } from '@langchain/core/runnables'
 import { BaseChatModel } from '@langchain/core/language_models/chat_models'
 import { ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
 import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
-import { getBaseClasses, transformBracesWithColon } from '../../../src/utils'
+import { getBaseClasses, transformBracesWithColon, convertChatHistoryToText, convertBaseMessagetoIMessage } from '../../../src/utils'
 import { type ToolsAgentStep } from 'langchain/agents/openai/output_parser'
 import {
     FlowiseMemory,
@@ -23,8 +23,10 @@ import { Moderation, checkInputs, streamResponse } from '../../moderation/Modera
 import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 import type { Document } from '@langchain/core/documents'
 import { BaseRetriever } from '@langchain/core/retrievers'
-import { RESPONSE_TEMPLATE } from '../../chains/ConversationalRetrievalQAChain/prompts'
+import { RESPONSE_TEMPLATE, REPHRASE_TEMPLATE } from '../../chains/ConversationalRetrievalQAChain/prompts'
 import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
+import { StringOutputParser } from '@langchain/core/output_parsers'
+import { Tool } from '@langchain/core/tools'
 
 class ConversationalRetrievalToolAgent_Agents implements INode {
     label: string
@@ -42,7 +44,7 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'Conversational Retrieval Tool Agent'
         this.name = 'conversationalRetrievalToolAgent'
-        this.author = 'niztal(falkor)'
+        this.author = 'niztal(falkor) and nikitas-novatix'
         this.version = 1.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
@@ -79,6 +81,26 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
                 optional: true,
                 default: RESPONSE_TEMPLATE
             },
+            {
+                label: 'Rephrase Prompt',
+                name: 'rephrasePrompt',
+                type: 'string',
+                description: 'Using previous chat history, rephrase question into a standalone question',
+                warning: 'Prompt must include input variables: {chat_history} and {question}',
+                rows: 4,
+                additionalParams: true,
+                optional: true,
+                default: REPHRASE_TEMPLATE
+            },
+            {
+                label: 'Rephrase Model',
+                name: 'rephraseModel',
+                type: 'BaseChatModel',
+                description:
+                    'Optional: Use a different (faster/cheaper) model for rephrasing. If not specified, uses the main Tool Calling Chat Model.',
+                optional: true,
+                additionalParams: true
+            },
             {
                 label: 'Input Moderation',
                 description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
@@ -103,8 +125,9 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
         this.sessionId = fields?.sessionId
     }
 
-    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
-        return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
+    // The agent will be prepared in run() with the correct user message - it needs the actual runtime input for rephrasing
+    async init(_nodeData: INodeData, _input: string, _options: ICommonObject): Promise<any> {
+        return null
     }
 
     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
@@ -148,6 +171,23 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
                 sseStreamer.streamUsedToolsEvent(chatId, res.usedTools)
                 usedTools = res.usedTools
             }
+
+            // If the tool is set to returnDirect, stream the output to the client
+            if (res.usedTools && res.usedTools.length) {
+                let inputTools = nodeData.inputs?.tools
+                inputTools = flatten(inputTools)
+                for (const tool of res.usedTools) {
+                    const inputTool = inputTools.find((inputTool: Tool) => inputTool.name === tool.tool)
+                    if (inputTool && (inputTool as any).returnDirect && shouldStreamResponse) {
+                        sseStreamer.streamTokenEvent(chatId, tool.toolOutput)
+                        // Prevent CustomChainHandler from streaming the same output again
+                        if (res.output === tool.toolOutput) {
+                            res.output = ''
+                        }
+                    }
+                }
+            }
+            // The CustomChainHandler will send the stream end event
         } else {
             res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
             if (res.sourceDocuments) {
@@ -210,16 +250,21 @@ const prepareAgent = async (
     flowObj: { sessionId?: string; chatId?: string; input?: string }
 ) => {
     const model = nodeData.inputs?.model as BaseChatModel
+    const rephraseModel = (nodeData.inputs?.rephraseModel as BaseChatModel) || model // Use main model if not specified
     const maxIterations = nodeData.inputs?.maxIterations as string
     const memory = nodeData.inputs?.memory as FlowiseMemory
     let systemMessage = nodeData.inputs?.systemMessage as string
+    let rephrasePrompt = nodeData.inputs?.rephrasePrompt as string
     let tools = nodeData.inputs?.tools
     tools = flatten(tools)
     const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
     const inputKey = memory.inputKey ? memory.inputKey : 'input'
     const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever
 
     systemMessage = transformBracesWithColon(systemMessage)
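+    // Apply the same brace transformation used for the system message to the optional rephrase prompt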
+    if (rephrasePrompt) {
+        rephrasePrompt = transformBracesWithColon(rephrasePrompt)
+    }
 
     const prompt = ChatPromptTemplate.fromMessages([
         ['system', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
@@ -263,6 +308,37 @@ const prepareAgent = async (
 
     const modelWithTools = model.bindTools(tools)
 
+    // Function to get standalone question (either rephrased or original)
+    const getStandaloneQuestion = async (input: string): Promise<string> => {
+        // If no rephrase prompt, return the original input
+        if (!rephrasePrompt) {
+            return input
+        }
+
+        // Get chat history (use empty string if none)
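+        // The rephrase prompt expects plain-text {chat_history} and {question} inputs, so the BaseMessage history is converted to a text transcript below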
+        const messages = (await memory.getChatMessages(flowObj?.sessionId, true)) as BaseMessage[]
+        const iMessages = convertBaseMessagetoIMessage(messages)
+        const chatHistoryString = convertChatHistoryToText(iMessages)
+
+        // Always rephrase to normalize/expand user queries for better retrieval
+        try {
+            const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(rephrasePrompt)
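+            // Pipe prompt -> rephrase model -> StringOutputParser to get the standalone question as a plain string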
+            const condenseQuestionChain = RunnableSequence.from([CONDENSE_QUESTION_PROMPT, rephraseModel, new StringOutputParser()])
+            const res = await condenseQuestionChain.invoke({
+                question: input,
+                chat_history: chatHistoryString
+            })
+            return res
+        } catch (error) {
+            console.error('Error rephrasing question:', error)
+            // On error, fall back to original input
+            return input
+        }
+    }
+
+    // Get standalone question before creating runnable
+    const standaloneQuestion = await getStandaloneQuestion(flowObj?.input || '')
+
     const runnableAgent = RunnableSequence.from([
         {
             [inputKey]: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
@@ -272,7 +348,9 @@ const prepareAgent = async (
                 return messages ?? []
             },
             context: async (i: { input: string; chatHistory?: string }) => {
-                const relevantDocs = await vectorStoreRetriever.invoke(i.input)
+                // Use the standalone question (rephrased or original) for retrieval
+                const retrievalQuery = standaloneQuestion || i.input
+                const relevantDocs = await vectorStoreRetriever.invoke(retrievalQuery)
                 const formattedDocs = formatDocs(relevantDocs)
                 return formattedDocs
             }
@@ -295,4 +373,6 @@ const prepareAgent = async (
     return executor
 }
 
-module.exports = { nodeClass: ConversationalRetrievalToolAgent_Agents }
+module.exports = {
+    nodeClass: ConversationalRetrievalToolAgent_Agents
+}