# Copyright (c) 2025 BAAI. All rights reserved.
# Adapted from https://github.com/vllm-project/vllm/blob/v0.11.0/examples/offline_inference/basic/basic.py
# Below is the original copyright:
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os

from vllm import LLM, SamplingParams

# Allow setting max_model_len beyond the model's default context length.
os.environ["VLLM_ALLOW_LONG_MAX_MODEL_LEN"] = "1"

if __name__ == "__main__":
    # Sample prompts.
    prompts = [
        "Hello, my name is",
    ]
    # Create a sampling params object (greedy decoding, short completions).
    sampling_params = SamplingParams(max_tokens=10, temperature=0.0)

    # Create an LLM. tensor_parallel_size * pipeline_parallel_size = 16 GPUs
    # in total are used to shard the model.
    llm = LLM(
        model="/models/Qwen3.5-397B-A17B",
        tensor_parallel_size=8,
        pipeline_parallel_size=2,
        enforce_eager=False,  # keep CUDA graph capture enabled
    )
    # Generate texts from the prompts. The output is a list of RequestOutput
    # objects that contain the prompt, generated text, and other information.
    outputs = llm.generate(prompts, sampling_params)
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")