@@ -20,7 +20,7 @@
 )
 
 from letta.errors import BedrockError, BedrockPermissionError
-from letta.helpers.datetime_helpers import get_utc_time
+from letta.helpers.datetime_helpers import get_utc_time_int, timestamp_to_datetime
 from letta.llm_api.aws_bedrock import get_bedrock_client
 from letta.llm_api.helpers import add_inner_thoughts_to_functions
 from letta.local_llm.constants import INNER_THOUGHTS_KWARG, INNER_THOUGHTS_KWARG_DESCRIPTION
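
Note for reviewers: the diff imports `get_utc_time_int` and `timestamp_to_datetime` but their bodies are outside this change. A minimal sketch of what they presumably do, assuming they are thin wrappers over the stdlib (the actual helpers in `letta.helpers.datetime_helpers` may differ):

```python
from datetime import datetime, timezone

def get_utc_time_int() -> int:
    # Current UTC time as whole epoch seconds, matching the integer
    # `created` field in the OpenAI-style chat completion schema.
    return int(datetime.now(timezone.utc).timestamp())

def timestamp_to_datetime(ts: int) -> datetime:
    # Inverse direction: epoch seconds back to an aware UTC datetime.
    return datetime.fromtimestamp(ts, tz=timezone.utc)
```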
@@ -396,7 +396,7 @@ def convert_anthropic_response_to_chatcompletion(
     return ChatCompletionResponse(
         id=response.id,
         choices=[choice],
-        created=get_utc_time(),
+        created=get_utc_time_int(),
         model=response.model,
         usage=UsageStatistics(
             prompt_tokens=prompt_tokens,
@@ -451,7 +451,7 @@ def convert_anthropic_stream_event_to_chatcompletion(
             'logprobs': None
         }
     ],
-    'created': datetime.datetime(2025, 1, 24, 0, 18, 55, tzinfo=TzInfo(UTC)),
+    'created': 1713216662,
     'model': 'gpt-4o-mini-2024-07-18',
     'system_fingerprint': 'fp_bd83329f63',
     'object': 'chat.completion.chunk'
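
The docstring example above now shows a plain epoch integer instead of a datetime repr, which matches what OpenAI's API actually returns for `created`. Decoding the new sample value shows the conversion is lossless at whole-second resolution:

```python
from datetime import datetime, timezone

created = 1713216662  # the sample value from the docstring above
dt = datetime.fromtimestamp(created, tz=timezone.utc)
print(dt)  # 2024-04-15 21:31:02+00:00

# Round trip back to epoch seconds is exact for whole-second datetimes.
assert int(dt.timestamp()) == created
```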
@@ -613,7 +613,7 @@ def convert_anthropic_stream_event_to_chatcompletion(
     return ChatCompletionChunkResponse(
         id=message_id,
         choices=[choice],
-        created=get_utc_time(),
+        created=get_utc_time_int(),
        model=model,
        output_tokens=completion_chunk_tokens,
    )
@@ -920,7 +920,7 @@ def anthropic_chat_completions_process_stream(
     chat_completion_response = ChatCompletionResponse(
         id=dummy_message.id if create_message_id else TEMP_STREAM_RESPONSE_ID,
         choices=[],
-        created=dummy_message.created_at,
+        created=int(dummy_message.created_at.timestamp()),
         model=chat_completion_request.model,
         usage=UsageStatistics(
             prompt_tokens=prompt_tokens,
@@ -954,7 +954,11 @@ def anthropic_chat_completions_process_stream(
             message_type = stream_interface.process_chunk(
                 chat_completion_chunk,
                 message_id=chat_completion_response.id if create_message_id else chat_completion_chunk.id,
-                message_date=chat_completion_response.created if create_message_datetime else chat_completion_chunk.created,
+                message_date=(
+                    timestamp_to_datetime(chat_completion_response.created)
+                    if create_message_datetime
+                    else timestamp_to_datetime(chat_completion_chunk.created)
+                ),
                 # if extended_thinking is on, then reasoning_content will be flowing as chunks
                 # TODO handle emitting redacted reasoning content (e.g. as concat?)
                 expect_reasoning_content=extended_thinking,
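
The two hunks above cover both directions in the streaming path: the placeholder response derives its integer `created` from a datetime, while `process_chunk` evidently still expects a datetime for `message_date`, so the integer is converted back at the call site. A small self-contained illustration, reusing the assumed helper sketched earlier:

```python
from datetime import datetime, timezone

def timestamp_to_datetime(ts: int) -> datetime:
    # Assumed behavior of letta.helpers.datetime_helpers.timestamp_to_datetime,
    # as sketched above; the diff does not show its body.
    return datetime.fromtimestamp(ts, tz=timezone.utc)

# datetime -> int, as done for the placeholder response's created_at:
created_at = datetime.now(timezone.utc)
created = int(created_at.timestamp())

# int -> datetime, as done for message_date before process_chunk is called:
message_date = timestamp_to_datetime(created)
assert abs((message_date - created_at).total_seconds()) < 1.0
```

Since both branches of the conditional now call `timestamp_to_datetime`, the conversion could arguably be hoisted to wrap the whole conditional; the explicit form simply mirrors the original branch structure.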