From 44bbd54721d4e1196b9b7da8ae492c44895559e1 Mon Sep 17 00:00:00 2001
From: Eitan
Date: Mon, 15 Sep 2025 13:43:01 +0300
Subject: [PATCH 1/3] Fix incorrect defaults for use_combined_linear and add
 default for use_flash_attention in docstring

---
 monai/networks/nets/diffusion_model_unet.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/monai/networks/nets/diffusion_model_unet.py b/monai/networks/nets/diffusion_model_unet.py
index 5604d9de1e..0b577ada44 100644
--- a/monai/networks/nets/diffusion_model_unet.py
+++ b/monai/networks/nets/diffusion_model_unet.py
@@ -1527,9 +1527,9 @@ class DiffusionModelUNet(nn.Module):
         upcast_attention: if True, upcast attention operations to full precision.
         dropout_cattn: if different from zero, this will be the dropout value for the cross-attention layers.
         include_fc: whether to include the final linear layer. Default to True.
-        use_combined_linear: whether to use a single linear layer for qkv projection, default to True.
+        use_combined_linear: whether to use a single linear layer for qkv projection, default to False.
         use_flash_attention: if True, use Pytorch's inbuilt flash attention for a memory efficient attention mechanism
-            (see https://pytorch.org/docs/2.2/generated/torch.nn.functional.scaled_dot_product_attention.html).
+            (see https://pytorch.org/docs/2.2/generated/torch.nn.functional.scaled_dot_product_attention.html), default to False.
     """

     def __init__(

From f3440aedf1edadab3a992bd6fd3e52742c54185f Mon Sep 17 00:00:00 2001
From: Eitan
Date: Mon, 15 Sep 2025 14:24:54 +0300
Subject: [PATCH 2/3] I, Eitan, hereby add my Signed-off-by to this commit:
 44bbd54721d4e1196b9b7da8ae492c44895559e1

Signed-off-by: Eitan

From 98752259e5c903c712f3fa7cf6ae67107a8fabef Mon Sep 17 00:00:00 2001
From: Eitan
Date: Mon, 15 Sep 2025 14:25:32 +0300
Subject: [PATCH 3/3] I, Eitan, hereby add my Signed-off-by to this commit:
 44bbd54721d4e1196b9b7da8ae492c44895559e1

Signed-off-by: Eitan
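
Note (not part of the patches): a minimal usage sketch showing where the corrected
defaults surface. Only include_fc, use_combined_linear, and use_flash_attention are
covered by this patch; the remaining constructor arguments are assumptions based on
a typical small DiffusionModelUNet configuration and may need adjusting for your
MONAI version.

    import torch
    from monai.networks.nets import DiffusionModelUNet

    # Hypothetical configuration; the three commented flags are shown at the
    # defaults the corrected docstring now states.
    model = DiffusionModelUNet(
        spatial_dims=2,
        in_channels=1,
        out_channels=1,
        channels=(32, 64, 64),
        attention_levels=(False, True, True),
        num_res_blocks=1,
        num_head_channels=64,
        include_fc=True,            # default to True
        use_combined_linear=False,  # default to False (docstring previously said True)
        use_flash_attention=False,  # default to False (now stated explicitly)
    )

    x = torch.randn(1, 1, 64, 64)     # (batch, channels, H, W)
    t = torch.randint(0, 1000, (1,))  # diffusion timesteps
    out = model(x, t)                 # output has the same shape as x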