@@ -865,7 +865,15 @@ async def send_message(
     # TODO: This is redundant, remove soon
     agent = await server.agent_manager.get_agent_by_id_async(agent_id, actor, include_relationships=["multi_agent_group"])
     agent_eligible = agent.multi_agent_group is None or agent.multi_agent_group.manager_type in ["sleeptime", "voice_sleeptime"]
-    model_compatible = agent.llm_config.model_endpoint_type in ["anthropic", "openai", "together", "google_ai", "google_vertex", "bedrock"]
+    model_compatible = agent.llm_config.model_endpoint_type in [
+        "anthropic",
+        "openai",
+        "together",
+        "google_ai",
+        "google_vertex",
+        "bedrock",
+        "ollama",
+    ]
 
     # Create a new run for execution tracking
     if settings.track_agent_run:
@@ -999,7 +1007,15 @@ async def send_message_streaming(
     # TODO: This is redundant, remove soon
     agent = await server.agent_manager.get_agent_by_id_async(agent_id, actor, include_relationships=["multi_agent_group"])
     agent_eligible = agent.multi_agent_group is None or agent.multi_agent_group.manager_type in ["sleeptime", "voice_sleeptime"]
-    model_compatible = agent.llm_config.model_endpoint_type in ["anthropic", "openai", "together", "google_ai", "google_vertex", "bedrock"]
+    model_compatible = agent.llm_config.model_endpoint_type in [
+        "anthropic",
+        "openai",
+        "together",
+        "google_ai",
+        "google_vertex",
+        "bedrock",
+        "ollama",
+    ]
     model_compatible_token_streaming = agent.llm_config.model_endpoint_type in ["anthropic", "openai", "bedrock"]
     not_letta_endpoint = agent.llm_config.model_endpoint != LETTA_MODEL_ENDPOINT
 
@@ -1194,6 +1210,7 @@ async def _process_message_background(
         "google_ai",
         "google_vertex",
         "bedrock",
+        "ollama",
     ]
     if agent_eligible and model_compatible:
         if agent.enable_sleeptime and agent.agent_type != AgentType.voice_convo_agent:
@@ -1373,7 +1390,15 @@ async def preview_raw_payload(
     actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
     agent = await server.agent_manager.get_agent_by_id_async(agent_id, actor, include_relationships=["multi_agent_group"])
     agent_eligible = agent.multi_agent_group is None or agent.multi_agent_group.manager_type in ["sleeptime", "voice_sleeptime"]
-    model_compatible = agent.llm_config.model_endpoint_type in ["anthropic", "openai", "together", "google_ai", "google_vertex", "bedrock"]
+    model_compatible = agent.llm_config.model_endpoint_type in [
+        "anthropic",
+        "openai",
+        "together",
+        "google_ai",
+        "google_vertex",
+        "bedrock",
+        "ollama",
+    ]
 
     if agent_eligible and model_compatible:
         if agent.enable_sleeptime:
@@ -1433,7 +1458,15 @@ async def summarize_agent_conversation(
     actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
     agent = await server.agent_manager.get_agent_by_id_async(agent_id, actor, include_relationships=["multi_agent_group"])
     agent_eligible = agent.multi_agent_group is None or agent.multi_agent_group.manager_type in ["sleeptime", "voice_sleeptime"]
-    model_compatible = agent.llm_config.model_endpoint_type in ["anthropic", "openai", "together", "google_ai", "google_vertex", "bedrock"]
+    model_compatible = agent.llm_config.model_endpoint_type in [
+        "anthropic",
+        "openai",
+        "together",
+        "google_ai",
+        "google_vertex",
+        "bedrock",
+        "ollama",
+    ]
 
     if agent_eligible and model_compatible:
         agent = LettaAgent(
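
Below is a minimal, self-contained sketch (not part of the diff) of the compatibility gate these hunks extend. The helper name is hypothetical; the endpoint-type list and the llm_config.model_endpoint_type field are taken from the hunks above, with "ollama" being the newly added entry.

# Sketch only: the set of model endpoint types accepted by this code path.
SUPPORTED_ENDPOINT_TYPES = [
    "anthropic",
    "openai",
    "together",
    "google_ai",
    "google_vertex",
    "bedrock",
    "ollama",  # newly added by this change
]

def is_model_compatible(model_endpoint_type: str) -> bool:
    """Return True if an agent's llm_config.model_endpoint_type is supported."""
    return model_endpoint_type in SUPPORTED_ENDPOINT_TYPES

# Example: an agent configured against an Ollama endpoint now passes the gate.
assert is_model_compatible("ollama")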