@@ -105,15 +105,18 @@ async def _reset():
105105 try :
106106 with open (filename , "r" ) as file :
107107 messages = json .load (file )
108- messages .append (
109- {
110- "role" : "user" ,
111- "content" : f"{ AWSNovaSonicLLMService .AWAIT_TRIGGER_ASSISTANT_RESPONSE_INSTRUCTION } " ,
112- }
113- )
108+ # HACK: if using the older Nova Sonic (pre-2) model, you need a special way of
109+ # triggering the first assistant response. The call to trigger_assistant_response(),
110+ # commented out below, is part of this.
111+ # messages.append(
112+ # {
113+ # "role": "user",
114+ # "content": f"{AWSNovaSonicLLMService.AWAIT_TRIGGER_ASSISTANT_RESPONSE_INSTRUCTION}",
115+ # }
116+ # )
114117 params .context .set_messages (messages )
115118 await params .llm .reset_conversation ()
116- await params .llm .trigger_assistant_response ()
119+ # await params.llm.trigger_assistant_response()
117120 except Exception as e :
118121 await params .result_callback ({"success" : False , "error" : str (e )})
119122
@@ -199,14 +202,14 @@ async def run_bot(transport: BaseTransport, runner_args: RunnerArguments):
199202 logger .info (f"Starting bot" )
200203
201204 # Specify initial system instruction.
202- # HACK: note that, for now, we need to inject a special bit of text into this instruction to
203- # allow the first assistant response to be programmatically triggered (which happens in the
204- # on_client_connected handler, below)
205205 system_instruction = (
206206 "You are a friendly assistant. The user and you will engage in a spoken dialog exchanging "
207207 "the transcripts of a natural real-time conversation. Keep your responses short, generally "
208208 "two or three sentences for chatty scenarios. "
209- f"{ AWSNovaSonicLLMService .AWAIT_TRIGGER_ASSISTANT_RESPONSE_INSTRUCTION } "
209+ # HACK: if using the older Nova Sonic (pre-2) model, note that you need to inject a special
210+ # bit of text into this instruction to allow the first assistant response to be
211+ # programmatically triggered (which happens in the on_client_connected handler)
212+ # f"{AWSNovaSonicLLMService.AWAIT_TRIGGER_ASSISTANT_RESPONSE_INSTRUCTION}"
210213 )
211214
212215 llm = AWSNovaSonicLLMService (
@@ -228,6 +231,7 @@ async def run_bot(transport: BaseTransport, runner_args: RunnerArguments):
228231 context = LLMContext (
229232 messages = [
230233 {"role" : "system" , "content" : f"{ system_instruction } " },
234+ {"role" : "user" , "content" : "Hello!" },
231235 ],
232236 tools = tools ,
233237 )
@@ -257,10 +261,10 @@ async def on_client_connected(transport, client):
257261 logger .info (f"Client connected" )
258262 # Kick off the conversation.
259263 await task .queue_frames ([LLMRunFrame ()])
260- # HACK: for now, we need this special way of triggering the first assistant response in AWS
261- # Nova Sonic. Note that this trigger requires a special corresponding bit of text in the
262- # system instruction. In the future, simply queueing the context frame should be sufficient.
263- await llm .trigger_assistant_response ()
264+ # HACK: if using the older Nova Sonic (pre-2) model, you need this special way of
265+ # triggering the first assistant response. Note that this trigger requires a special
266+ # corresponding bit of text in the system instruction.
267+ # await llm.trigger_assistant_response()
264268
265269 @transport .event_handler ("on_client_disconnected" )
266270 async def on_client_disconnected (transport , client ):
0 commit comments