6 changes: 5 additions & 1 deletion vllm_ascend/spec_decode/mtp_proposer.py
@@ -27,7 +27,8 @@
 from vllm.v1.utils import CpuGpuBuffer
 from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch
 
-from vllm_ascend.ascend_forward_context import set_ascend_forward_context
+from vllm_ascend.ascend_forward_context import (MoECommType,
+                                                set_ascend_forward_context)
 from vllm_ascend.attention.attention_v1 import AscendAttentionState
 from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
 from vllm_ascend.compilation.acl_graph import (ACLGraphWrapper,
@@ -237,6 +238,9 @@ def dummy_run(self,
         ) = self.runner._sync_metadata_across_dp(num_tokens, with_prefill)
 
         moe_comm_type = self.runner._select_moe_comm_method(num_tokens)
+        # TODO: remove this after moe_comm_type selection logic is finalized
+        moe_comm_type = (MoECommType.ALLTOALL if moe_comm_type
+                         == MoECommType.FUSED_ALLTOALL else moe_comm_type)
 
         if skip_attn:
             attn_metadata = None
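For context, the hunk above only folds the fused path back to the plain all-to-all path while the proposer does dummy runs, so graph capture does not exercise FUSED_ALLTOALL before its selection logic is finalized. A minimal standalone sketch of that mapping, assuming an enum shaped like vllm_ascend's MoECommType (member names and the helper name here are illustrative, not the project's API):

from enum import Enum, auto


class MoECommType(Enum):
    # Illustrative members only; the real enum lives in
    # vllm_ascend.ascend_forward_context.
    ALLGATHER = auto()
    ALLTOALL = auto()
    MC2 = auto()
    FUSED_ALLTOALL = auto()


def comm_type_for_dummy_run(selected: MoECommType) -> MoECommType:
    """Mirror the temporary override in dummy_run: if the runner picked the
    fused all-to-all path, fall back to plain ALLTOALL for the dummy pass."""
    if selected == MoECommType.FUSED_ALLTOALL:
        return MoECommType.ALLTOALL
    return selected


# The override only touches the fused case; every other choice passes through.
assert comm_type_for_dummy_run(MoECommType.FUSED_ALLTOALL) is MoECommType.ALLTOALL
assert comm_type_for_dummy_run(MoECommType.MC2) is MoECommType.MC2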
13 changes: 7 additions & 6 deletions vllm_ascend/worker/model_runner_v1.py
@@ -52,8 +52,8 @@
                                              has_kv_transfer_group)
 from vllm.distributed.kv_transfer.kv_connector.v1 import KVConnectorBase_V1
 from vllm.distributed.parallel_state import (get_dcp_group, get_dp_group,
-                                             get_pcp_group, get_pp_group,
-                                             get_tp_group,
+                                             get_ep_group, get_pcp_group,
+                                             get_pp_group, get_tp_group,
                                              is_global_first_rank)
 from vllm.forward_context import get_forward_context
 from vllm.logger import logger
@@ -2332,10 +2332,11 @@ def _select_moe_comm_method(self,
             moe_comm_type = MoECommType.ALLGATHER
 
         elif soc_version in {AscendDeviceType._910_93}:
-            moe_comm_type = (MoECommType.MC2
-                             if num_tokens <= self.mc2_tokens_capacity else
-                             MoECommType.FUSED_ALLTOALL if quant_type
-                             == "w8a8_dynamic" else MoECommType.ALLTOALL)
+            # TODO: drop the EP-size guard when dispatch_ffn_combine supports larger EP sizes
+            moe_comm_type = (
+                MoECommType.MC2 if num_tokens <= self.mc2_tokens_capacity else
+                MoECommType.FUSED_ALLTOALL if quant_type == "w8a8_dynamic"
+                and get_ep_group().world_size <= 16 else MoECommType.ALLTOALL)
         else:
             raise ValueError(f"Unsupported soc_version: {soc_version}")
 
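The selection on the _910_93 SoC can be read as a small decision tree: MC2 while the token count fits the MC2 capacity, the fused all-to-all (dispatch_ffn_combine) only for w8a8_dynamic quantization on expert-parallel groups of at most 16 ranks, and plain all-to-all otherwise. A hedged sketch of that logic as a pure function; the function signature and parameter names are illustrative, whereas in the runner the values come from self.mc2_tokens_capacity, the model's quant config, and get_ep_group().world_size:

from enum import Enum, auto
from typing import Optional


class MoECommType(Enum):
    # Illustrative stand-in for vllm_ascend's MoECommType.
    ALLTOALL = auto()
    MC2 = auto()
    FUSED_ALLTOALL = auto()


def select_moe_comm_type_910_93(num_tokens: int,
                                mc2_tokens_capacity: int,
                                quant_type: Optional[str],
                                ep_world_size: int) -> MoECommType:
    """Decision tree matching the hunk above for the _910_93 SoC."""
    if num_tokens <= mc2_tokens_capacity:
        return MoECommType.MC2
    # The fused all-to-all path is gated on w8a8_dynamic quantization and,
    # per this change, on EP world sizes of at most 16.
    if quant_type == "w8a8_dynamic" and ep_world_size <= 16:
        return MoECommType.FUSED_ALLTOALL
    return MoECommType.ALLTOALL


# Example: a large batch with w8a8_dynamic but EP=32 now falls back to ALLTOALL.
assert select_moe_comm_type_910_93(8192, 512, "w8a8_dynamic", 32) is MoECommType.ALLTOALL
assert select_moe_comm_type_910_93(8192, 512, "w8a8_dynamic", 16) is MoECommType.FUSED_ALLTOALL
assert select_moe_comm_type_910_93(256, 512, None, 16) is MoECommType.MC2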