Skip to content

Commit 0ee6679

Browse files
Fix linter
Signed-off-by: Jeffrey Wang <jeffreywang@anyscale.com>
1 parent fca6b5f commit 0ee6679

File tree

2 files changed: +12 / -3 lines changed

python/ray/llm/_internal/batch/stages/vllm_engine_stage.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -506,7 +506,9 @@ async def _generate_async(self, request: vLLMEngineRequest) -> Any:
506506
async for request_output in stream:
507507
if request_output.finished:
508508
# Bypass the original full prompt.
509-
request_output.prompt = request.prompt if request.prompt is not None else ""
509+
request_output.prompt = (
510+
request.prompt if request.prompt is not None else ""
511+
)
510512
return request_output
511513

512514
raise RuntimeError(
@@ -635,7 +637,10 @@ def validate_inputs(self, inputs: List[Dict[str, Any]]):
635637
)
636638

637639
original_expected_keys = self.expected_input_keys.copy()
638-
self.expected_input_keys = self.expected_input_keys - {"prompt", "tokenized_prompt"}
640+
self.expected_input_keys = self.expected_input_keys - {
641+
"prompt",
642+
"tokenized_prompt",
643+
}
639644

640645
try:
641646
super().validate_inputs(inputs)

python/ray/llm/tests/batch/gpu/processor/test_vllm_engine_proc.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -203,6 +203,7 @@ def test_generation_model(gpu_type, model_opt_125m, backend):
203203

204204
def test_generation_model_tokenized_prompt(gpu_type, model_opt_125m):
205205
from transformers import AutoTokenizer
206+
206207
tokenizer = AutoTokenizer.from_pretrained(model_opt_125m, trust_remote_code=True)
207208

208209
processor_config = vLLMEngineProcessorConfig(
@@ -293,7 +294,10 @@ def test_generation_model_missing_prompt_and_tokenized_prompt(gpu_type, model_op
293294
ds = ds.materialize()
294295

295296
error_str = str(exc_info.value)
296-
assert "Either 'prompt' (text) or 'tokenized_prompt' (tokens) must be provided" in error_str
297+
assert (
298+
"Either 'prompt' (text) or 'tokenized_prompt' (tokens) must be provided"
299+
in error_str
300+
)
297301

298302

299303
def test_embedding_model(gpu_type, model_smolvlm_256m):

Comments (0)