Skip to content

Commit fdb71a5

Browse files
committed
fix(agent): populate the "parsed" attribute in the "Agent" class.
1 parent 0fbb186 commit fdb71a5

File tree

2 files changed

+177
-11
lines changed

2 files changed

+177
-11
lines changed

agentle/agents/agent.py

Lines changed: 175 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1733,6 +1733,9 @@ async def _stream_direct_response() -> AsyncIterator[
17331733
parsed=generation_chunk.parsed
17341734
if hasattr(generation_chunk, "parsed")
17351735
else cast(T_Schema, None),
1736+
generation_text=generation_chunk.text
1737+
if generation_chunk
1738+
else "",
17361739
is_streaming_chunk=True,
17371740
is_final_chunk=False,
17381741
performance_metrics=partial_metrics,
@@ -1837,6 +1840,9 @@ async def _stream_direct_response() -> AsyncIterator[
18371840
generation=final_generation,
18381841
context=context,
18391842
parsed=final_generation.parsed,
1843+
generation_text=final_generation.text
1844+
if final_generation
1845+
else "",
18401846
is_streaming_chunk=False,
18411847
is_final_chunk=True,
18421848
performance_metrics=performance_metrics,
@@ -1953,6 +1959,7 @@ async def _stream_direct_response() -> AsyncIterator[
19531959
generation=generation,
19541960
context=context,
19551961
parsed=generation.parsed,
1962+
generation_text=generation.text if generation else "",
19561963
performance_metrics=performance_metrics,
19571964
)
19581965

@@ -2152,6 +2159,9 @@ async def _stream_with_tools() -> AsyncIterator[AgentRunOutput[T_Schema]]:
21522159
parsed=generation_chunk.parsed
21532160
if hasattr(generation_chunk, "parsed")
21542161
else cast(T_Schema, None),
2162+
generation_text=generation_chunk.text
2163+
if generation_chunk
2164+
else "",
21552165
is_streaming_chunk=True,
21562166
is_final_chunk=False,
21572167
performance_metrics=partial_metrics,
@@ -2698,8 +2708,9 @@ async def _stream_with_tools() -> AsyncIterator[AgentRunOutput[T_Schema]]:
26982708
generation=None,
26992709
context=context,
27002710
parsed=cast(T_Schema, None),
2711+
generation_text="",
27012712
is_suspended=True,
2702-
suspension_reason=suspension_error.reason,
2713+
suspension_reason=suspension_reason,
27032714
resumption_token=resumption_token,
27042715
performance_metrics=performance_metrics,
27052716
)
@@ -3065,7 +3076,8 @@ async def _stream_with_tools() -> AsyncIterator[AgentRunOutput[T_Schema]]:
30653076
step_metrics=step_metrics,
30663077
average_generation_time_ms=generation_time_total
30673078
/ max(
3068-
1, len([s for s in step_metrics if s.step_type == "generation"])
3079+
1,
3080+
len([s for s in step_metrics if s.step_type == "generation"]),
30693081
),
30703082
average_tool_execution_time_ms=tool_execution_time_total
30713083
/ max(1, tool_calls_count),
@@ -3377,6 +3389,7 @@ async def _stream_with_tools() -> AsyncIterator[AgentRunOutput[T_Schema]]:
33773389
generation=None,
33783390
context=context,
33793391
parsed=cast(T_Schema, None),
3392+
generation_text="",
33803393
is_suspended=True,
33813394
suspension_reason=suspension_error.reason,
33823395
resumption_token=resumption_token,
@@ -3802,7 +3815,9 @@ def input2context(
38023815
return Context(
38033816
message_history=[
38043817
developer_message,
3805-
UserMessage(parts=[TextPart(text=f"```json\n{text}\n```")]),
3818+
UserMessage(
3819+
parts=[TextPart(text=f"```json\n{text}\n```")],
3820+
),
38063821
]
38073822
)
38083823
except (ImportError, AttributeError):
@@ -3986,8 +4001,24 @@ async def _continue_execution_from_context(
39864001
generation=None,
39874002
context=context,
39884003
parsed=cast(T_Schema, None),
3989-
is_suspended=False,
3990-
suspension_reason=denial_message,
4004+
generation_text="",
4005+
performance_metrics=PerformanceMetrics(
4006+
total_execution_time_ms=0.0,
4007+
input_processing_time_ms=0.0,
4008+
static_knowledge_processing_time_ms=0.0,
4009+
mcp_tools_preparation_time_ms=0.0,
4010+
generation_time_ms=0.0,
4011+
tool_execution_time_ms=0.0,
4012+
final_response_processing_time_ms=0.0,
4013+
iteration_count=0,
4014+
tool_calls_count=0,
4015+
total_tokens_processed=0,
4016+
cache_hit_rate=0.0,
4017+
average_generation_time_ms=0.0,
4018+
average_tool_execution_time_ms=0.0,
4019+
longest_step_duration_ms=0.0,
4020+
shortest_step_duration_ms=0.0,
4021+
),
39914022
)
39924023

39934024
try:
@@ -4010,7 +4041,24 @@ async def _continue_execution_from_context(
40104041
generation=None,
40114042
context=context,
40124043
parsed=cast(T_Schema, None),
4013-
is_suspended=False,
4044+
generation_text="",
4045+
performance_metrics=PerformanceMetrics(
4046+
total_execution_time_ms=0.0,
4047+
input_processing_time_ms=0.0,
4048+
static_knowledge_processing_time_ms=0.0,
4049+
mcp_tools_preparation_time_ms=0.0,
4050+
generation_time_ms=0.0,
4051+
tool_execution_time_ms=0.0,
4052+
final_response_processing_time_ms=0.0,
4053+
iteration_count=0,
4054+
tool_calls_count=0,
4055+
total_tokens_processed=0,
4056+
cache_hit_rate=0.0,
4057+
average_generation_time_ms=0.0,
4058+
average_tool_execution_time_ms=0.0,
4059+
longest_step_duration_ms=0.0,
4060+
shortest_step_duration_ms=0.0,
4061+
),
40144062
)
40154063

40164064
suspension_type = suspension_state.get("type", "unknown")
@@ -4050,8 +4098,24 @@ async def _continue_execution_from_context(
40504098
generation=None,
40514099
context=context,
40524100
parsed=cast(T_Schema, None),
4053-
is_suspended=False,
4054-
suspension_reason=error_message,
4101+
generation_text="",
4102+
performance_metrics=PerformanceMetrics(
4103+
total_execution_time_ms=0.0,
4104+
input_processing_time_ms=0.0,
4105+
static_knowledge_processing_time_ms=0.0,
4106+
mcp_tools_preparation_time_ms=0.0,
4107+
generation_time_ms=0.0,
4108+
tool_execution_time_ms=0.0,
4109+
final_response_processing_time_ms=0.0,
4110+
iteration_count=0,
4111+
tool_calls_count=0,
4112+
total_tokens_processed=0,
4113+
cache_hit_rate=0.0,
4114+
average_generation_time_ms=0.0,
4115+
average_tool_execution_time_ms=0.0,
4116+
longest_step_duration_ms=0.0,
4117+
shortest_step_duration_ms=0.0,
4118+
),
40554119
)
40564120

40574121
async def _resume_from_tool_suspension(
@@ -4063,6 +4127,7 @@ async def _resume_from_tool_suspension(
40634127
This handles the most common suspension scenario where a tool
40644128
raised ToolSuspensionError and required approval.
40654129
"""
4130+
execution_start_time = time.perf_counter()
40664131
_logger = Maybe(logger if self.debug else None)
40674132

40684133
# Extract suspension state
@@ -4198,9 +4263,30 @@ async def _resume_from_tool_suspension(
41984263
generation=None,
41994264
context=context,
42004265
parsed=cast(T_Schema, None),
4266+
generation_text="",
42014267
is_suspended=True,
42024268
suspension_reason=suspension_error.reason,
42034269
resumption_token=resumption_token,
4270+
performance_metrics=PerformanceMetrics(
4271+
total_execution_time_ms=(time.perf_counter() - execution_start_time)
4272+
* 1000
4273+
if execution_start_time
4274+
else 24,
4275+
input_processing_time_ms=0.0,
4276+
static_knowledge_processing_time_ms=0.0,
4277+
mcp_tools_preparation_time_ms=0.0,
4278+
generation_time_ms=0.0,
4279+
tool_execution_time_ms=0.0,
4280+
final_response_processing_time_ms=0.0,
4281+
iteration_count=0,
4282+
tool_calls_count=0,
4283+
total_tokens_processed=0,
4284+
cache_hit_rate=0.0,
4285+
average_generation_time_ms=0.0,
4286+
average_tool_execution_time_ms=0.0,
4287+
longest_step_duration_ms=0.0,
4288+
shortest_step_duration_ms=0.0,
4289+
),
42044290
)
42054291
except Exception as e:
42064292
# Tool execution failed
@@ -4232,8 +4318,24 @@ async def _resume_from_tool_suspension(
42324318
generation=None,
42334319
context=context,
42344320
parsed=cast(T_Schema, None),
4235-
is_suspended=False,
4236-
suspension_reason=error_message,
4321+
generation_text="",
4322+
performance_metrics=PerformanceMetrics(
4323+
total_execution_time_ms=0.0,
4324+
input_processing_time_ms=0.0,
4325+
static_knowledge_processing_time_ms=0.0,
4326+
mcp_tools_preparation_time_ms=0.0,
4327+
generation_time_ms=0.0,
4328+
tool_execution_time_ms=0.0,
4329+
final_response_processing_time_ms=0.0,
4330+
iteration_count=0,
4331+
tool_calls_count=0,
4332+
total_tokens_processed=0,
4333+
cache_hit_rate=0.0,
4334+
average_generation_time_ms=0.0,
4335+
average_tool_execution_time_ms=0.0,
4336+
longest_step_duration_ms=0.0,
4337+
shortest_step_duration_ms=0.0,
4338+
),
42374339
)
42384340

42394341
# Complete the step and add to context
@@ -4319,6 +4421,26 @@ async def _resume_with_normal_flow(
43194421
generation=generation,
43204422
context=context,
43214423
parsed=generation.parsed,
4424+
generation_text=generation.text if generation else "",
4425+
performance_metrics=PerformanceMetrics(
4426+
total_execution_time_ms=0.0,
4427+
input_processing_time_ms=0.0,
4428+
static_knowledge_processing_time_ms=0.0,
4429+
mcp_tools_preparation_time_ms=0.0,
4430+
generation_time_ms=0.0,
4431+
tool_execution_time_ms=0.0,
4432+
final_response_processing_time_ms=0.0,
4433+
iteration_count=1,
4434+
tool_calls_count=0,
4435+
total_tokens_processed=generation.usage.total_tokens
4436+
if generation
4437+
else 0,
4438+
cache_hit_rate=0.0,
4439+
average_generation_time_ms=0.0,
4440+
average_tool_execution_time_ms=0.0,
4441+
longest_step_duration_ms=0.0,
4442+
shortest_step_duration_ms=0.0,
4443+
),
43224444
)
43234445

43244446
# Has tools, continue with tool execution loop
@@ -4349,6 +4471,7 @@ async def _continue_normal_execution_flow(
43494471
This method continues the standard tool execution loop from where
43504472
the agent left off, handling iterations and tool calls.
43514473
"""
4474+
execution_start_time = time.perf_counter()
43524475
_logger = Maybe(logger if self.debug else None)
43534476
generation_provider = self.generation_provider
43544477

@@ -4473,6 +4596,26 @@ async def _continue_normal_execution_flow(
44734596
generation=generation,
44744597
context=context,
44754598
parsed=generation.parsed,
4599+
generation_text=generation.text if generation else "",
4600+
performance_metrics=PerformanceMetrics(
4601+
total_execution_time_ms=0.0,
4602+
input_processing_time_ms=0.0,
4603+
static_knowledge_processing_time_ms=0.0,
4604+
mcp_tools_preparation_time_ms=0.0,
4605+
generation_time_ms=0.0,
4606+
tool_execution_time_ms=0.0,
4607+
final_response_processing_time_ms=0.0,
4608+
iteration_count=1,
4609+
tool_calls_count=0,
4610+
total_tokens_processed=generation.usage.total_tokens
4611+
if generation
4612+
else 0,
4613+
cache_hit_rate=0.0,
4614+
average_generation_time_ms=0.0,
4615+
average_tool_execution_time_ms=0.0,
4616+
longest_step_duration_ms=0.0,
4617+
shortest_step_duration_ms=0.0,
4618+
),
44764619
)
44774620

44784621
# Execute tools
@@ -4538,9 +4681,30 @@ async def _continue_normal_execution_flow(
45384681
generation=None,
45394682
context=context,
45404683
parsed=cast(T_Schema, None),
4684+
generation_text="",
45414685
is_suspended=True,
45424686
suspension_reason=suspension_error.reason,
45434687
resumption_token=resumption_token,
4688+
performance_metrics=PerformanceMetrics(
4689+
total_execution_time_ms=(
4690+
time.perf_counter() - execution_start_time
4691+
)
4692+
* 1000,
4693+
input_processing_time_ms=0.0,
4694+
static_knowledge_processing_time_ms=0.0,
4695+
mcp_tools_preparation_time_ms=0.0,
4696+
generation_time_ms=0.0,
4697+
tool_execution_time_ms=0.0,
4698+
final_response_processing_time_ms=0.0,
4699+
iteration_count=current_iteration,
4700+
tool_calls_count=0,
4701+
total_tokens_processed=0,
4702+
cache_hit_rate=0.0,
4703+
average_generation_time_ms=0.0,
4704+
average_tool_execution_time_ms=0.0,
4705+
longest_step_duration_ms=0.0,
4706+
shortest_step_duration_ms=0.0,
4707+
),
45444708
)
45454709

45464710
# Complete step and continue
@@ -4625,6 +4789,7 @@ def _build_agent_run_output(
46254789
generation=generation,
46264790
context=context,
46274791
parsed=parsed,
4792+
generation_text=generation.text if generation else "",
46284793
performance_metrics=performance_metrics,
46294794
)
46304795

agentle/agents/agent_run_output.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,8 @@ class AgentRunOutput[T_StructuredOutput](BaseModel):
8787
parsed: T_StructuredOutput
8888
"""
8989
Structured data extracted from the agent's response.
90-
In streaming mode, only available in the final chunk.
90+
In streaming mode, contains incrementally parsed partial data in each chunk,
91+
with complete data available in the final chunk.
9192
"""
9293

9394
generation_text: str = Field(default="")

0 commit comments

Comments (0)