
Commit a910009

resolve PaddlePaddle#4086 conflict
1 parent 1e88754 commit a910009

File tree

5 files changed: +25 -24 lines

  fastdeploy/entrypoints/engine_client.py
  fastdeploy/entrypoints/openai/protocol.py
  fastdeploy/input/ernie4_5_vl_processor/ernie4_5_vl_processor.py
  fastdeploy/input/qwen_vl_processor/qwen_vl_processor.py
  tests/input/test_qwen_vl_processor.py

fastdeploy/entrypoints/engine_client.py

Lines changed: 7 additions & 2 deletions

@@ -256,8 +256,13 @@ def vaild_parameters(self, data):
                 raise ValueError(f"max_tokens can be defined [1, {self.max_model_len}).")
 
         if data.get("reasoning_max_tokens") is not None:
-            if data["reasoning_max_tokens"] > data["max_tokens"] or data["reasoning_max_tokens"] < 0:
-                raise ValueError("reasoning_max_tokens must be between max_tokens and 0")
+            if data["reasoning_max_tokens"] < 1:
+                raise ValueError("reasoning_max_tokens must be greater than 1")
+            if data["reasoning_max_tokens"] > data["max_tokens"]:
+                data["reasoning_max_tokens"] = data["max_tokens"]
+                api_server_logger.warning(
+                    f"req_id: {data['request_id']}, reasoning_max_tokens exceeds max_tokens, the value of reasoning_max_tokens will be adjusted to match that of max_tokens"
+                )
 
         if data.get("top_p") is not None:
            if data["top_p"] > 1 or data["top_p"] < 0:

fastdeploy/entrypoints/openai/protocol.py

Lines changed: 4 additions & 0 deletions

@@ -594,6 +594,7 @@ class ChatCompletionRequest(BaseModel):
     prompt_token_ids: Optional[List[int]] = None
     max_streaming_response_tokens: Optional[int] = None
     disable_chat_template: Optional[bool] = False
+    completion_token_ids: Optional[List[int]] = None
     # doc: end-chat-completion-extra-params
 
     def to_dict_for_infer(self, request_id=None):
@@ -619,6 +620,9 @@ def to_dict_for_infer(self, request_id=None):
            ), "The parameter `raw_request` is not supported now, please use completion api instead."
            for key, value in self.metadata.items():
                req_dict[key] = value
+           from fastdeploy.utils import api_server_logger
+
+           api_server_logger.warning("The parameter metadata is obsolete.")
        for key, value in self.dict().items():
            if value is not None:
                req_dict[key] = value
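The practical effect is that continuation tokens now travel as a dedicated top-level extra parameter rather than inside metadata, which to_dict_for_infer now flags as obsolete. An illustrative pair of request bodies (values are made up; only the key names come from the diff):

# Old style: still copied into req_dict, but now logs "The parameter metadata is obsolete."
old_request = {
    "messages": [{"role": "user", "content": "continue"}],
    "metadata": {"generated_token_ids": [101, 102, 103]},
}

# New style: pass previously generated tokens via the new extra parameter.
new_request = {
    "messages": [{"role": "user", "content": "continue"}],
    "completion_token_ids": [101, 102, 103],
}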

fastdeploy/input/ernie4_5_vl_processor/ernie4_5_vl_processor.py

Lines changed: 6 additions & 8 deletions

@@ -241,10 +241,8 @@ def process_request_dict(self, request, max_model_len=None):
        else:
            raise ValueError(f"Request must contain 'prompt', or 'messages': {request}")
 
-       metadata = request.get("metadata")
-       # If metadata contains previously generated tokens, append them to the end of input_ids
-       if metadata and metadata.get("generated_token_ids"):
-           self.append_generated_tokens(outputs, metadata["generated_token_ids"])
+       if request.get("completion_token_ids"):
+           self.append_completion_tokens(outputs, request["completion_token_ids"])
        outputs = self.pack_outputs(outputs)
        request["prompt_token_ids"] = outputs["input_ids"].tolist()
        request["prompt_token_ids_len"] = len(request["prompt_token_ids"])
@@ -263,11 +261,11 @@ def process_request_dict(self, request, max_model_len=None):
 
        return request
 
-   def append_generated_tokens(self, multimodal_inputs, generated_token_ids):
-       "append already generated tokens"
+   def append_completion_tokens(self, multimodal_inputs, completion_token_ids):
+       "append already completion tokens"
 
-       num_tokens = len(generated_token_ids)
-       multimodal_inputs["input_ids"].extend(generated_token_ids)
+       num_tokens = len(completion_token_ids)
+       multimodal_inputs["input_ids"].extend(completion_token_ids)
        multimodal_inputs["token_type_ids"].extend([IDS_TYPE_FLAG["text"]] * num_tokens)
 
        start = multimodal_inputs["cur_position"]
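For context, the append step grows input_ids, token_type_ids and the running position cursor in lockstep. A simplified sketch, assuming flat 1-D position ids and a placeholder text type flag (the real processor uses IDS_TYPE_FLAG and its own position layout, which the hunk above truncates):

def append_completion_tokens_sketch(multimodal_inputs, completion_token_ids, text_flag=0):
    # Simplified illustration only; not the actual ernie4_5_vl_processor method.
    num_tokens = len(completion_token_ids)
    multimodal_inputs["input_ids"].extend(completion_token_ids)
    multimodal_inputs["token_type_ids"].extend([text_flag] * num_tokens)
    start = multimodal_inputs["cur_position"]
    multimodal_inputs["position_ids"].extend(range(start, start + num_tokens))  # assumed 1-D positions
    multimodal_inputs["cur_position"] = start + num_tokens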

fastdeploy/input/qwen_vl_processor/qwen_vl_processor.py

Lines changed: 6 additions & 10 deletions

@@ -245,15 +245,11 @@ def process_request_dict(self, request, max_model_len=None):
        else:
            raise ValueError(f"Request must contain 'prompt', or 'messages': {request}")
 
-       metadata = request.get("metadata")
        # Handle continuation of previous generation by appending existing tokens
-       if metadata and metadata.get("generated_token_ids"):
-           self.append_generated_tokens(outputs, metadata["generated_token_ids"])
+       if request.get("completion_token_ids"):
+           self.append_completion_tokens(outputs, request["completion_token_ids"])
 
        enable_thinking = False
-       if metadata:
-           enable_thinking = metadata.get("enable_thinking", False)
-
        if request.get("chat_template_kwargs"):
            chat_template_kwargs = request.get("chat_template_kwargs")
            enable_thinking = chat_template_kwargs.get("enable_thinking", False)
@@ -278,16 +274,16 @@ def process_request_dict(self, request, max_model_len=None):
 
        return request
 
-   def append_generated_tokens(self, outputs, generated_token_ids):
+   def append_completion_tokens(self, outputs, completion_token_ids):
        """
-       Append generated tokens to existing outputs.
+       Append completion tokens to existing outputs.
 
        Args:
            outputs: Current model outputs
-           generated_token_ids: Generated tokens to append
+           completion_token_ids: completion tokens to append
        """
        out = {"input_ids": [], "token_type_ids": [], "position_ids": [], "cur_position": outputs["cur_position"]}
-       self.processor._add_text(generated_token_ids, out)
+       self.processor._add_text(completion_token_ids, out)
 
        outputs["input_ids"] = np.concatenate(
            [outputs["input_ids"], np.array(out["input_ids"], dtype=np.int64)], axis=0

tests/input/test_qwen_vl_processor.py

Lines changed: 2 additions & 4 deletions

@@ -176,12 +176,10 @@ def test_process_request_dict(self):
        3. Video processing produces expected output dimensions
        4. Correct counts for images (1) and videos (1)
        """
-       num_generated_token_ids = 10
+       num_completion_token_ids = 10
        request = {
            "request_id": "12345",
-           "metadata": {
-               "generated_token_ids": [1] * num_generated_token_ids,
-           },
+           "completion_token_ids": [1] * num_completion_token_ids,
            "stop": ["stop", "eof"],
            "messages": [
                {
