 from pipecat.adapters.schemas.function_schema import FunctionSchema
 from pipecat.adapters.schemas.tools_schema import ToolsSchema
 from pipecat.audio.vad.silero import SileroVADAnalyzer
-from pipecat.frames.frames import LLMRunFrame, LLMSetToolsFrame, TranscriptionMessage
+from pipecat.frames.frames import LLMRunFrame, LLMSetToolsFrame
 from pipecat.observers.loggers.transcription_log_observer import TranscriptionLogObserver
 from pipecat.pipeline.pipeline import Pipeline
 from pipecat.pipeline.runner import PipelineRunner
 from pipecat.pipeline.task import PipelineParams, PipelineTask
 from pipecat.processors.aggregators.llm_context import LLMContext
-from pipecat.processors.aggregators.llm_response_universal import LLMContextAggregatorPair
-from pipecat.processors.transcript_processor import TranscriptProcessor
+from pipecat.processors.aggregators.llm_response_universal import (
+    AssistantTurnStoppedMessage,
+    LLMContextAggregatorPair,
+    UserTurnStoppedMessage,
+)
 from pipecat.runner.types import RunnerArguments
 from pipecat.runner.utils import create_transport
 from pipecat.services.llm_service import FunctionCallParams
@@ -177,8 +180,6 @@ async def run_bot(transport: BaseTransport, runner_args: RunnerArguments):
     llm.register_function("get_restaurant_recommendation", fetch_restaurant_recommendation)
     llm.register_function("get_news", get_news)

-    transcript = TranscriptProcessor()
-
     # Create a standard OpenAI LLM context object using the normal messages format. The
     # OpenAIRealtimeLLMService will convert this internally to messages that the
     # openai WebSocket API can understand.
@@ -189,15 +190,16 @@ async def run_bot(transport: BaseTransport, runner_args: RunnerArguments):

     context_aggregator = LLMContextAggregatorPair(context)

+    user_aggregator = context_aggregator.user()
+    assistant_aggregator = context_aggregator.assistant()
+
     pipeline = Pipeline(
         [
             transport.input(),  # Transport user input
-            context_aggregator.user(),
-            transcript.user(),  # LLM pushes TranscriptionFrames upstream
+            user_aggregator,
             llm,  # LLM
             transport.output(),  # Transport bot output
-            transcript.assistant(),  # After the transcript output, to time with the audio output
-            context_aggregator.assistant(),
+            assistant_aggregator,
         ]
     )

@@ -238,14 +240,18 @@ async def on_client_disconnected(transport, client):
        logger.info(f"Client disconnected")
        await task.cancel()

-    # Register event handler for transcript updates
-    @transcript.event_handler("on_transcript_update")
-    async def on_transcript_update(processor, frame):
-        for msg in frame.messages:
-            if isinstance(msg, TranscriptionMessage):
-                timestamp = f"[{msg.timestamp}] " if msg.timestamp else ""
-                line = f"{timestamp}{msg.role}: {msg.content}"
-                logger.info(f"Transcript: {line}")
+    # Log transcript updates
+    @user_aggregator.event_handler("on_user_turn_stopped")
+    async def on_user_turn_stopped(aggregator, strategy, message: UserTurnStoppedMessage):
+        timestamp = f"[{message.timestamp}] " if message.timestamp else ""
+        line = f"{timestamp}user: {message.content}"
+        logger.info(f"Transcript: {line}")
+
+    @assistant_aggregator.event_handler("on_assistant_turn_stopped")
+    async def on_assistant_turn_stopped(aggregator, message: AssistantTurnStoppedMessage):
+        timestamp = f"[{message.timestamp}] " if message.timestamp else ""
+        line = f"{timestamp}assistant: {message.content}"
+        logger.info(f"Transcript: {line}")

     runner = PipelineRunner(handle_sigint=runner_args.handle_sigint)

0 commit comments