From 3c529d6e3d13681a7145293bee19157b3b130607 Mon Sep 17 00:00:00 2001 From: Lu Fang Date: Mon, 29 Sep 2025 20:28:59 -0700 Subject: [PATCH 1/6] MTP fallback to eager for v32 Signed-off-by: Lu Fang --- vllm/config/speculative.py | 8 +++++++- vllm/v1/spec_decode/eagle.py | 5 ++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/vllm/config/speculative.py b/vllm/config/speculative.py index f684e4e4ccd4..d5c6d1d4d866 100644 --- a/vllm/config/speculative.py +++ b/vllm/config/speculative.py @@ -41,7 +41,8 @@ @dataclass class SpeculativeConfig: """Configuration for speculative decoding.""" - + enforce_eager: Optional[bool] = None + """Override the default enforce_eager from model_config""" # General speculative decoding control num_speculative_tokens: SkipValidation[int] = None # type: ignore """The number of speculative tokens, if provided. It will default to the @@ -219,6 +220,11 @@ def __post_init__(self): assert ( self.target_model_config is not None), "target_model_config must be present for mtp" + if self.target_model_config.hf_text_config.model_type \ == "deepseek_v32": + # FIXME(luccafong): cudagraph with v32 MTP is not supported, + # remove this when the issue is fixed. 
+ self.enforce_eager = True # use the draft model from the same model: self.model = self.target_model_config.model # Align the quantization of draft model for cases such as diff --git a/vllm/v1/spec_decode/eagle.py b/vllm/v1/spec_decode/eagle.py index bb11a543fd8b..70ec9863e8ac 100644 --- a/vllm/v1/spec_decode/eagle.py +++ b/vllm/v1/spec_decode/eagle.py @@ -50,6 +50,7 @@ def __init__( ): self.vllm_config = vllm_config self.speculative_config = vllm_config.speculative_config + assert self.speculative_config is not None self.draft_model_config = self.speculative_config.draft_model_config self.method = self.speculative_config.method @@ -78,7 +79,9 @@ def __init__( self.use_cuda_graph = (not current_platform.is_xpu() and self.vllm_config.compilation_config.level == CompilationLevel.PIECEWISE and - not self.vllm_config.model_config.enforce_eager) + not self.vllm_config.model_config.enforce_eager + and not self.speculative_config.enforce_eager + ) self.cudagraph_batch_sizes = list( reversed(self.vllm_config.compilation_config. 
cudagraph_capture_sizes)) if self.use_cuda_graph else [] From f3536c61851808cd32bf6ee4c46f3129382ee1a7 Mon Sep 17 00:00:00 2001 From: Lu Fang Date: Tue, 30 Sep 2025 12:44:09 -0700 Subject: [PATCH 2/6] fix eagle tests Signed-off-by: Lu Fang --- vllm/v1/spec_decode/eagle.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/vllm/v1/spec_decode/eagle.py b/vllm/v1/spec_decode/eagle.py index 70ec9863e8ac..dc6db0138806 100644 --- a/vllm/v1/spec_decode/eagle.py +++ b/vllm/v1/spec_decode/eagle.py @@ -75,13 +75,16 @@ def __init__( vllm_config.model_config) self.attn_metadata_builder: Optional[AttentionMetadataBuilder] = None + self.draft_indexer_metadata_builder: Optional[ + AttentionMetadataBuilder] = None + self.attn_layer_names: list[str] = [] + self.indexer_layer_names: list[str] = [] self.use_cuda_graph = (not current_platform.is_xpu() and self.vllm_config.compilation_config.level == CompilationLevel.PIECEWISE and not self.vllm_config.model_config.enforce_eager - and not self.speculative_config.enforce_eager - ) + and not self.speculative_config.enforce_eager) self.cudagraph_batch_sizes = list( reversed(self.vllm_config.compilation_config. 
cudagraph_capture_sizes)) if self.use_cuda_graph else [] From cc43fce0e31bf81d58c1ec9a337e8eace4aef727 Mon Sep 17 00:00:00 2001 From: Lu Fang Date: Tue, 30 Sep 2025 13:11:50 -0700 Subject: [PATCH 3/6] further eagle test fix Signed-off-by: Lu Fang --- tests/v1/spec_decode/test_eagle.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/v1/spec_decode/test_eagle.py b/tests/v1/spec_decode/test_eagle.py index 690732eb1232..caf667e7fc1b 100644 --- a/tests/v1/spec_decode/test_eagle.py +++ b/tests/v1/spec_decode/test_eagle.py @@ -337,13 +337,19 @@ def test_load_model(mock_get_model, mock_get_layers, mock_get_pp_group, method, "target_attn_1": mock.MagicMock(), "target_attn_2": mock.MagicMock() } + target_indx_layers: dict[str, mock.MagicMock] = {} # Draft model has one extra attention layer compared to target model all_attn_layers = { **target_attn_layers, "draft_extra_attn": mock.MagicMock() } + all_indx_layers: dict[str, mock.MagicMock] = {} + # Make mock_get_layers return different values for each call - mock_get_layers.side_effect = [target_attn_layers, all_attn_layers] + mock_get_layers.side_effect = [ + target_attn_layers, target_indx_layers, all_attn_layers, + all_indx_layers + ] # Setup mock for pp group to return the appropriate value for world size mock_pp_group = mock.MagicMock() From 3676c94e8151a6be2b03c5dfd3bf14d68d1f99d4 Mon Sep 17 00:00:00 2001 From: Lu Fang Date: Tue, 30 Sep 2025 13:24:20 -0700 Subject: [PATCH 4/6] default using piecewise when mtp enabled for indexer Signed-off-by: Lu Fang --- vllm/v1/attention/backends/mla/indexer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/v1/attention/backends/mla/indexer.py b/vllm/v1/attention/backends/mla/indexer.py index 4e6b974ad74d..e87b51b15191 100644 --- a/vllm/v1/attention/backends/mla/indexer.py +++ b/vllm/v1/attention/backends/mla/indexer.py @@ -171,7 +171,7 @@ def get_max_prefill_buffer_size(vllm_config: VllmConfig): class 
DeepseekV32IndexerMetadataBuilder(AttentionMetadataBuilder): cudagraph_support: ClassVar[AttentionCGSupport] = \ - AttentionCGSupport.UNIFORM_BATCH + AttentionCGSupport.UNIFORM_SINGLE_TOKEN_DECODE reorder_batch_threshold: int = 1 From ef7e8d454c873ed2cd8c0bd7c856b5c587c5aeea Mon Sep 17 00:00:00 2001 From: Lu Fang Date: Tue, 30 Sep 2025 15:23:30 -0700 Subject: [PATCH 5/6] fix eagle tree test Signed-off-by: Lu Fang --- tests/v1/spec_decode/test_eagle.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/v1/spec_decode/test_eagle.py b/tests/v1/spec_decode/test_eagle.py index caf667e7fc1b..49311c0005e7 100644 --- a/tests/v1/spec_decode/test_eagle.py +++ b/tests/v1/spec_decode/test_eagle.py @@ -664,6 +664,9 @@ def create_deterministic_logits(token_ids, k: int): # Mock runner for attention metadata building. proposer.runner = mock.MagicMock() proposer.runner.attn_groups.append([mock.MagicMock()]) + proposer.runner.attn_groups[0][0].metadata_builders = [ + attn_metadata_builder + ] proposer.runner.attn_groups[0][0].get_metadata_builder.return_value = \ attn_metadata_builder proposer._get_attention_metadata_builder = mock.MagicMock( From fb807c51f2d0c8fd8f23ca232392950a238d6fac Mon Sep 17 00:00:00 2001 From: Nick Hill Date: Tue, 30 Sep 2025 17:06:14 -0700 Subject: [PATCH 6/6] also fix test_mtp.py Signed-off-by: Nick Hill --- tests/v1/spec_decode/test_mtp.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/v1/spec_decode/test_mtp.py b/tests/v1/spec_decode/test_mtp.py index e4881859ece1..5b9ccfc3f48b 100644 --- a/tests/v1/spec_decode/test_mtp.py +++ b/tests/v1/spec_decode/test_mtp.py @@ -63,7 +63,13 @@ def test_mtp_load_model_unified(mock_get_model, mock_get_layers, target_attn_layers = {"target_attn_1": mock.MagicMock()} all_attn_layers = {**target_attn_layers, "draft_attn_1": mock.MagicMock()} - mock_get_layers.side_effect = [target_attn_layers, all_attn_layers] + target_indexer_layers: dict = {} + all_indexer_layers: dict = {} + + 
mock_get_layers.side_effect = [ + target_attn_layers, target_indexer_layers, all_attn_layers, + all_indexer_layers + ] mock_pp_group = mock.MagicMock() mock_pp_group.world_size = 1