@@ -921,10 +921,19 @@ def test_span_finish_on_stream_error(openai, openai_vcr, snapshot_tracer):
                )


-@pytest.mark.snapshot(
-    token="tests.contrib.openai.test_openai.test_completion_stream",
-    ignores=["metrics.openai.response.usage.completion_tokens", "metrics.openai.response.usage.total_tokens"],
-)
+@pytest.mark.snapshot
+@pytest.mark.skipif(TIKTOKEN_AVAILABLE, reason="This test estimates token counts")
+def test_completion_stream_est_tokens(openai, openai_vcr, mock_metrics, snapshot_tracer):
+    with openai_vcr.use_cassette("completion_streamed.yaml"):
+        with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding:
+            mock_encoding.return_value.encode.side_effect = lambda x: [1, 2]
+            client = openai.OpenAI()
+            resp = client.completions.create(model="ada", prompt="Hello world", stream=True, n=None)
+            _ = [c for c in resp]
+
+
+@pytest.mark.skipif(not TIKTOKEN_AVAILABLE, reason="This test computes token counts using tiktoken")
+@pytest.mark.snapshot(token="tests.contrib.openai.test_openai.test_completion_stream")
def test_completion_stream(openai, openai_vcr, mock_metrics, snapshot_tracer):
    with openai_vcr.use_cassette("completion_streamed.yaml"):
        with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding:
@@ -934,10 +943,8 @@ def test_completion_stream(openai, openai_vcr, mock_metrics, snapshot_tracer):
            _ = [c for c in resp]


-@pytest.mark.snapshot(
-    token="tests.contrib.openai.test_openai.test_completion_stream",
-    ignores=["metrics.openai.response.usage.completion_tokens", "metrics.openai.response.usage.total_tokens"],
-)
+@pytest.mark.skipif(not TIKTOKEN_AVAILABLE, reason="This test computes token counts using tiktoken")
+@pytest.mark.snapshot(token="tests.contrib.openai.test_openai.test_completion_stream")
async def test_completion_async_stream(openai, openai_vcr, mock_metrics, snapshot_tracer):
    with openai_vcr.use_cassette("completion_streamed.yaml"):
        with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding:
@@ -948,13 +955,10 @@ async def test_completion_async_stream(openai, openai_vcr, mock_metrics, snapshot_tracer):


@pytest.mark.skipif(
-    parse_version(openai_module.version.VERSION) < (1, 6, 0),
+    parse_version(openai_module.version.VERSION) < (1, 6, 0) and not TIKTOKEN_AVAILABLE,
    reason="Streamed response context managers are only available v1.6.0+",
)
-@pytest.mark.snapshot(
-    token="tests.contrib.openai.test_openai.test_completion_stream",
-    ignores=["metrics.openai.response.usage.completion_tokens", "metrics.openai.response.usage.total_tokens"],
-)
+@pytest.mark.snapshot(token="tests.contrib.openai.test_openai.test_completion_stream")
def test_completion_stream_context_manager(openai, openai_vcr, mock_metrics, snapshot_tracer):
    with openai_vcr.use_cassette("completion_streamed.yaml"):
        with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding:
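
For reference, the TIKTOKEN_AVAILABLE flag that these skipif markers gate on is presumably a module-level import guard; a minimal sketch, assuming a plain try/except import check (the flag name comes from the markers above, but its exact definition and location in the test suite are assumptions):

# Hypothetical sketch of the TIKTOKEN_AVAILABLE guard assumed by the
# skipif markers above; the real definition lives elsewhere in the suite.
try:
    import tiktoken  # noqa: F401

    TIKTOKEN_AVAILABLE = True
except ImportError:
    TIKTOKEN_AVAILABLE = False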