Commit f8c5e1e

Fix completion stream token snapshots
1 parent 6b0e759 commit f8c5e1e

File tree

2 files changed (+66, -13 lines)

tests/contrib/openai/test_openai_v1.py

Lines changed: 17 additions & 13 deletions
@@ -921,10 +921,19 @@ def test_span_finish_on_stream_error(openai, openai_vcr, snapshot_tracer):
     )


-@pytest.mark.snapshot(
-    token="tests.contrib.openai.test_openai.test_completion_stream",
-    ignores=["metrics.openai.response.usage.completion_tokens", "metrics.openai.response.usage.total_tokens"],
-)
+@pytest.mark.snapshot
+@pytest.mark.skipif(TIKTOKEN_AVAILABLE, reason="This test estimates token counts")
+def test_completion_stream_est_tokens(openai, openai_vcr, mock_metrics, snapshot_tracer):
+    with openai_vcr.use_cassette("completion_streamed.yaml"):
+        with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding:
+            mock_encoding.return_value.encode.side_effect = lambda x: [1, 2]
+            client = openai.OpenAI()
+            resp = client.completions.create(model="ada", prompt="Hello world", stream=True, n=None)
+            _ = [c for c in resp]
+
+
+@pytest.mark.skipif(not TIKTOKEN_AVAILABLE, reason="This test computes token counts using tiktoken")
+@pytest.mark.snapshot(token="tests.contrib.openai.test_openai.test_completion_stream")
 def test_completion_stream(openai, openai_vcr, mock_metrics, snapshot_tracer):
     with openai_vcr.use_cassette("completion_streamed.yaml"):
         with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding:

@@ -934,10 +943,8 @@ def test_completion_stream(openai, openai_vcr, mock_metrics, snapshot_tracer):
             _ = [c for c in resp]


-@pytest.mark.snapshot(
-    token="tests.contrib.openai.test_openai.test_completion_stream",
-    ignores=["metrics.openai.response.usage.completion_tokens", "metrics.openai.response.usage.total_tokens"],
-)
+@pytest.mark.skipif(not TIKTOKEN_AVAILABLE, reason="This test computes token counts using tiktoken")
+@pytest.mark.snapshot(token="tests.contrib.openai.test_openai.test_completion_stream")
 async def test_completion_async_stream(openai, openai_vcr, mock_metrics, snapshot_tracer):
     with openai_vcr.use_cassette("completion_streamed.yaml"):
         with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding:

@@ -948,13 +955,10 @@ async def test_completion_async_stream(openai, openai_vcr, mock_metrics, snapsho


 @pytest.mark.skipif(
-    parse_version(openai_module.version.VERSION) < (1, 6, 0),
+    parse_version(openai_module.version.VERSION) < (1, 6, 0) and not TIKTOKEN_AVAILABLE,
     reason="Streamed response context managers are only available v1.6.0+",
 )
-@pytest.mark.snapshot(
-    token="tests.contrib.openai.test_openai.test_completion_stream",
-    ignores=["metrics.openai.response.usage.completion_tokens", "metrics.openai.response.usage.total_tokens"],
-)
+@pytest.mark.snapshot(token="tests.contrib.openai.test_openai.test_completion_stream")
 def test_completion_stream_context_manager(openai, openai_vcr, mock_metrics, snapshot_tracer):
     with openai_vcr.use_cassette("completion_streamed.yaml"):
         with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding:
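Both skipif branches above key off a TIKTOKEN_AVAILABLE flag whose definition sits outside this diff: when tiktoken is importable, the integration can compute exact token counts via tiktoken.encoding_for_model(model).encode(text), and otherwise it estimates them, which is why the single snapshot test (with its ignores for the usage metrics) is split into an estimated variant and an exact variant. A minimal sketch of how such a flag is commonly defined (the try/except form here is an assumption, not the literal ddtrace code):

# Hypothetical sketch: assumes TIKTOKEN_AVAILABLE simply records whether
# tiktoken can be imported; the real definition is not part of this diff.
try:
    import tiktoken  # noqa: F401

    TIKTOKEN_AVAILABLE = True
except ImportError:
    TIKTOKEN_AVAILABLE = False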
Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
+[[
+  {
+    "name": "openai.request",
+    "service": "tests.contrib.openai",
+    "resource": "createCompletion",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "",
+    "error": 0,
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "677c221c00000000",
+      "component": "openai",
+      "language": "python",
+      "openai.base_url": "https://api.openai.com/v1/",
+      "openai.organization.name": "datadog-4",
+      "openai.request.client": "OpenAI",
+      "openai.request.endpoint": "/v1/completions",
+      "openai.request.method": "POST",
+      "openai.request.model": "ada",
+      "openai.request.n": "None",
+      "openai.request.prompt.0": "Hello world",
+      "openai.request.stream": "True",
+      "openai.response.choices.0.finish_reason": "length",
+      "openai.response.choices.0.text": "! ... A page layouts page drawer? ... Interesting. The \"Tools\" is",
+      "openai.response.model": "ada",
+      "openai.user.api_key": "sk-...key>",
+      "runtime-id": "24f8e851c87e4f758c73d6acd0aaf82b"
+    },
+    "metrics": {
+      "_dd.measured": 1,
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "openai.organization.ratelimit.requests.limit": 3000,
+      "openai.organization.ratelimit.requests.remaining": 2999,
+      "openai.organization.ratelimit.tokens.limit": 250000,
+      "openai.organization.ratelimit.tokens.remaining": 249979,
+      "openai.request.prompt_tokens_estimated": 1,
+      "openai.response.completion_tokens_estimated": 1,
+      "openai.response.usage.completion_tokens": 16,
+      "openai.response.usage.prompt_tokens": 2,
+      "openai.response.usage.total_tokens": 18,
+      "process_id": 47101
+    },
+    "duration": 37957000,
+    "start": 1736188444222291000
+  }]]
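The prompt_tokens_estimated and completion_tokens_estimated metrics above follow directly from the mocked encoder in test_completion_stream_est_tokens: every encode call returns a two-element list, so the "Hello world" prompt is counted as exactly 2 tokens. A standalone sketch of that behavior (the names here are illustrative, not ddtrace internals):

from unittest import mock

# Stand-in for the patched encoding_for_model(...) return value: any string
# "encodes" to two token ids, mirroring the test's side_effect.
encoding = mock.MagicMock()
encoding.encode.side_effect = lambda text: [1, 2]

# Matches "openai.response.usage.prompt_tokens": 2 in the snapshot above.
assert len(encoding.encode("Hello world")) == 2

The completion count (16 tokens, for a total of 18) is likewise flagged as estimated in the metrics, since this test runs without real tiktoken counting.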
