Skip to content

Commit 6409f40

Browse files
authored
Merge branch 'main' into 0.8.6-release2
2 parents 2989d20 + 24c6f74 commit 6409f40

33 files changed

+8989
-2154
lines changed

.github/ISSUE_TEMPLATE/bug_report.md

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -11,20 +11,25 @@ assignees: ''
1111
A clear and concise description of what the bug is.
1212

1313
**Please describe your setup**
14-
- [ ] How did you install letta?
15-
- `pip install letta`? `pip install letta-nightly`? `git clone`?
14+
- [ ] How are you running Letta?
15+
- Docker
16+
- pip (legacy)
17+
- From source
18+
- Desktop
1619
- [ ] Describe your setup
1720
- What's your OS (Windows/MacOS/Linux)?
18-
- How are you running `letta`? (`cmd.exe`/Powershell/Anaconda Shell/Terminal)
21+
- What is your `docker run ...` command (if applicable)
1922

2023
**Screenshots**
2124
If applicable, add screenshots to help explain your problem.
2225

2326
**Additional context**
2427
Add any other context about the problem here.
28+
- What model you are using
29+
30+
**Agent File (optional)**
31+
Please attach your `.af` file, as this helps with reproducing issues.
2532

26-
**Letta Config**
27-
Please attach your `~/.letta/config` file or copy paste it below.
2833

2934
---
3035

Lines changed: 287 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,287 @@
1+
import logging
2+
import os
3+
import requests
4+
import socket
5+
import threading
6+
import time
7+
8+
from datetime import datetime, timezone
9+
from typing import Generator
10+
11+
import pytest
12+
from anthropic.types.beta.messages import BetaMessageBatch, BetaMessageBatchRequestCounts
13+
14+
from dotenv import load_dotenv
15+
16+
from letta_client import Letta, AsyncLetta
17+
18+
from letta.schemas.agent import AgentState
19+
20+
from letta.schemas.llm_config import LLMConfig
21+
22+
from letta.services.organization_manager import OrganizationManager
23+
from letta.services.user_manager import UserManager
24+
from letta.settings import tool_settings
25+
26+
27+
def pytest_configure(config):
    """Pytest hook: enable DEBUG-level logging for the whole test session."""
    logging.basicConfig(level=logging.DEBUG)
29+
30+
31+
@pytest.fixture
def disable_e2b_api_key() -> Generator[None, None, None]:
    """
    Temporarily disables the E2B API key by setting `tool_settings.e2b_api_key` to None
    for the duration of the test. Restores the original value afterward.
    """
    # Uses the module-level `tool_settings` import; the previous local
    # re-import of the same name was redundant and has been removed.
    original_api_key = tool_settings.e2b_api_key
    tool_settings.e2b_api_key = None
    yield
    # Teardown: pytest resumes the generator here even when the test fails.
    tool_settings.e2b_api_key = original_api_key
43+
44+
45+
@pytest.fixture
def check_e2b_key_is_set():
    """Guard fixture: errors out before the test runs if no E2B API key is configured."""
    # Uses the module-level `tool_settings` import (consistent with
    # `check_composio_key_set`); the redundant local re-import was removed.
    original_api_key = tool_settings.e2b_api_key
    assert original_api_key is not None, "Missing e2b key! Cannot execute these tests."
    yield
52+
53+
54+
@pytest.fixture
def default_organization():
    """Yield the default organization, creating it via the manager first."""
    yield OrganizationManager().create_default_organization()
60+
61+
62+
@pytest.fixture
def default_user(default_organization):
    """Yield the default user, created inside the default organization."""
    yield UserManager().create_default_user(org_id=default_organization.id)
68+
69+
70+
@pytest.fixture
def check_composio_key_set():
    """Guard fixture: asserts a Composio API key is configured before the test runs."""
    composio_key = tool_settings.composio_api_key
    assert composio_key is not None, "Missing composio key! Cannot execute this test."
    yield
75+
76+
77+
# --- Tool Fixtures ---
78+
@pytest.fixture
def weather_tool_func():
    """Yield a sample tool function that fetches live weather from wttr.in.

    NOTE(review): the inner function's source and docstring are presumably
    serialized as the tool definition (schema + sandboxed source), so they
    are left byte-identical here — confirm before restyling.
    """
    def get_weather(location: str) -> str:
        """
        Fetches the current weather for a given location.

        Parameters:
            location (str): The location to get the weather for.

        Returns:
            str: A formatted string describing the weather in the given location.

        Raises:
            RuntimeError: If the request to fetch weather data fails.
        """
        # Local import: the tool body must be self-contained when executed
        # in a sandbox that only sees this function's source.
        import requests

        url = f"https://wttr.in/{location}?format=%C+%t"

        response = requests.get(url)
        if response.status_code == 200:
            weather_data = response.text
            return f"The weather in {location} is {weather_data}."
        else:
            raise RuntimeError(f"Failed to get weather data, status code: {response.status_code}")

    yield get_weather
105+
106+
107+
@pytest.fixture
def print_tool_func():
    """Fixture to create a tool with default settings and clean up after the test."""

    # Tool body: echoes the message to stdout and returns it unchanged.
    # NOTE(review): source/docstring likely serialized as the tool definition;
    # kept byte-identical.
    def print_tool(message: str):
        """
        Args:
            message (str): The message to print.

        Returns:
            str: The message that was printed.
        """
        print(message)
        return message

    yield print_tool
123+
124+
125+
@pytest.fixture
def roll_dice_tool_func():
    """Yield a deterministic fake dice-roll tool function.

    NOTE(review): the body deliberately returns an impossible result
    ("Rolled a 10!" on a 6-sided die) — presumably so tests can recognize
    genuine tool output; confirm before "fixing" it. The 1s sleep appears
    intended to simulate a slow tool call.
    """
    def roll_dice():
        """
        Rolls a 6 sided die.

        Returns:
            str: The roll result.
        """
        import time

        time.sleep(1)
        return "Rolled a 10!"

    yield roll_dice
140+
141+
142+
@pytest.fixture
def dummy_beta_message_batch() -> BetaMessageBatch:
    """Return a static Anthropic BetaMessageBatch populated with fixed sample data."""
    # All batch timestamps in the fixture share one fixed UTC instant.
    fixed_ts = datetime(2024, 8, 20, 18, 37, 24, 100435, tzinfo=timezone.utc)
    counts = BetaMessageBatchRequestCounts(
        canceled=10,
        errored=30,
        expired=10,
        processing=100,
        succeeded=50,
    )
    return BetaMessageBatch(
        id="msgbatch_013Zva2CMHLNnXjNJJKqJ2EF",
        archived_at=fixed_ts,
        cancel_initiated_at=fixed_ts,
        created_at=fixed_ts,
        ended_at=fixed_ts,
        expires_at=fixed_ts,
        processing_status="in_progress",
        request_counts=counts,
        results_url="https://api.anthropic.com/v1/messages/batches/msgbatch_013Zva2CMHLNnXjNJJKqJ2EF/results",
        type="message_batch",
    )
162+
163+
# --- Model Sweep ---
# Global flag to track server state
_server_started = False  # True once a reachable server has been confirmed
_server_url = None  # cached base URL of that server (set together with the flag)
167+
168+
def _start_server_once() -> str:
    """Start (or discover) the Letta server exactly once and return its base URL.

    Resolution order:
      1. A URL cached by a previous call (module globals).
      2. A server already listening on localhost:8283.
      3. A freshly started in-process server thread (only when
         LETTA_SERVER_URL is unset), polled until its health endpoint answers.

    Returns:
        str: Base URL of the reachable server.

    Raises:
        RuntimeError: If the server is not reachable within the timeout.
    """
    global _server_started, _server_url

    if _server_started and _server_url:
        return _server_url

    url = os.getenv("LETTA_SERVER_URL", "http://localhost:8283")

    # Check if a server is already running on the default port.
    # NOTE(review): this probes localhost:8283 even when LETTA_SERVER_URL
    # points elsewhere — confirm that is intentional.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        if s.connect_ex(('localhost', 8283)) == 0:
            _server_started = True
            _server_url = url
            return url

    # No external server configured: start one in a daemon thread.
    if not os.getenv("LETTA_SERVER_URL"):
        def _run_server():
            load_dotenv()
            from letta.server.rest_api.app import start_server
            start_server(debug=True)

        thread = threading.Thread(target=_run_server, daemon=True)
        thread.start()

    # Poll the health endpoint until the server responds or we time out.
    timeout_seconds = 30
    # monotonic clock: immune to wall-clock jumps during the wait
    deadline = time.monotonic() + timeout_seconds
    while time.monotonic() < deadline:
        try:
            # Per-request timeout so a wedged socket cannot hang past the deadline.
            resp = requests.get(url + "/v1/health", timeout=2)
            if resp.status_code < 500:
                break
        except requests.exceptions.RequestException:
            pass
        time.sleep(0.1)
    else:
        raise RuntimeError(f"Could not reach {url} within {timeout_seconds}s")

    _server_started = True
    _server_url = url
    return url
211+
212+
# ------------------------------
213+
# Fixtures
214+
# ------------------------------
215+
216+
@pytest.fixture(scope="module")
def server_url() -> str:
    """Return the base URL of the shared test server, starting it on first use."""
    return _start_server_once()
220+
221+
@pytest.fixture(scope="module")
def client(server_url: str) -> Generator[Letta, None, None]:
    """
    Creates and returns a synchronous Letta REST client for testing.

    Fixed: this is a generator fixture (it yields), so the return
    annotation is Generator[Letta, None, None] rather than Letta.
    """
    client_instance = Letta(base_url=server_url)
    yield client_instance
228+
229+
230+
@pytest.fixture(scope="function")
def async_client(server_url: str) -> Generator[AsyncLetta, None, None]:
    """
    Creates and returns an asynchronous Letta REST client for testing.

    Function-scoped: a fresh client is built for every test. Fixed: this is
    a generator fixture, so the return annotation is
    Generator[AsyncLetta, None, None] rather than AsyncLetta.
    """
    async_client_instance = AsyncLetta(base_url=server_url)
    yield async_client_instance
237+
238+
239+
@pytest.fixture(scope="module")
def agent_state(client: Letta) -> Generator[AgentState, None, None]:
    """
    Creates and yields an agent state for testing with a pre-configured agent.

    The agent is named 'supervisor' and is configured with only the
    `send_message` tool — base tools are explicitly excluded
    (the previous docstring incorrectly claimed base tools + roll_dice).
    The agent is deleted again when the module's tests finish.
    """
    # Ensure the server-side base tool definitions exist before looking one up.
    client.tools.upsert_base_tools()

    send_message_tool = client.tools.list(name="send_message")[0]
    agent_state_instance = client.agents.create(
        name="supervisor",
        include_base_tools=False,
        tool_ids=[send_message_tool.id],
        model="openai/gpt-4o",
        embedding="letta/letta-free",
        tags=["supervisor"],
    )
    yield agent_state_instance

    # Teardown: remove the agent so repeated module runs start clean.
    client.agents.delete(agent_state_instance.id)
259+
260+
261+
@pytest.fixture(scope="module")
def all_available_llm_configs(client: Letta) -> list[LLMConfig]:
    """
    Returns a list of all available LLM configs.

    Fixed: the annotation `[LLMConfig]` was a list literal containing the
    class, not a type — the correct annotation is `list[LLMConfig]`.
    """
    llm_configs = client.models.list()
    return llm_configs
268+
269+
270+
# Create a throwaway client against the (possibly auto-started) server to list models.
def get_available_llm_configs() -> list[LLMConfig]:
    """Return all available LLM configs, starting the server first if needed.

    Fixed: the annotation `[LLMConfig]` was a list literal containing the
    class, not a type — the correct annotation is `list[LLMConfig]`.
    """
    server_url = _start_server_once()
    temp_client = Letta(base_url=server_url)
    return temp_client.models.list()
276+
277+
# Dynamically insert the llm_config parameter at collection time.
def pytest_generate_tests(metafunc):
    """Parametrize any test requesting `llm_config` over every available model."""
    if "llm_config" not in metafunc.fixturenames:
        return
    configs = get_available_llm_configs()
    if not configs:
        return
    metafunc.parametrize("llm_config", configs, ids=[c.model for c in configs])
Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
{
2+
"Basic": [
3+
"test_greeting_with_assistant_message",
4+
"test_greeting_without_assistant_message",
5+
"test_async_greeting_with_assistant_message",
6+
"test_agent_loop_error",
7+
"test_step_stream_agent_loop_error",
8+
"test_step_streaming_greeting_with_assistant_message",
9+
"test_step_streaming_greeting_without_assistant_message",
10+
"test_step_streaming_tool_call",
11+
"test_tool_call",
12+
"test_auto_summarize"
13+
],
14+
"Token Streaming": [
15+
"test_token_streaming_greeting_with_assistant_message",
16+
"test_token_streaming_greeting_without_assistant_message",
17+
"test_token_streaming_agent_loop_error",
18+
"test_token_streaming_tool_call"
19+
],
20+
"Multimodal": [
21+
"test_base64_image_input",
22+
"test_url_image_input"
23+
]
24+
}

0 commit comments

Comments
 (0)