
Commit 95d5c08
remove dropout, as in the paper they show it is unhelpful (and input jitter noise is unhelpful at scale too)
lucidrains committed Aug 20, 2023
1 parent 7ac3f11 commit 95d5c08
Showing 2 changed files with 5 additions and 10 deletions.
setup.py (2 changes: 1 addition & 1 deletion)
@@ -3,7 +3,7 @@
 setup(
   name = 'st-moe-pytorch',
   packages = find_packages(exclude=[]),
-  version = '0.0.5',
+  version = '0.0.6',
   license='MIT',
   description = 'ST - Mixture of Experts - Pytorch',
   author = 'Phil Wang',
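
With the `dropout` keyword removed from the public constructors, the bump to 0.0.6 is a breaking release for any caller that passed `dropout`; assuming the release was published to PyPI as usual, pinning `st-moe-pytorch==0.0.6` (or later) picks up the dropout-free API.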
st_moe_pytorch/st_moe_pytorch.py (13 changes: 4 additions & 9 deletions)
@@ -94,7 +94,6 @@ def __init__(
         dim,
         hidden_mult = 4,
         mult_bias = True,
-        dropout = 0.,
         prenorm = False
     ):
         super().__init__()
@@ -104,7 +103,6 @@ def __init__(
             RMSNorm(dim) if prenorm else None,
             nn.Linear(dim, dim_hidden * 2),
             GEGLU(dim_hidden, mult_bias = mult_bias),
-            nn.Dropout(dropout),
             nn.Linear(dim_hidden, dim)
         )
 
@@ -126,11 +124,10 @@ def __init__(
         self,
         dim,
         num_experts = 16,
-        hidden_mult = 4,
-        dropout = 0.
+        hidden_mult = 4
     ):
         super().__init__()
-        self.experts = ModuleList([Expert(dim = dim, hidden_mult = hidden_mult, dropout = dropout) for _ in range(num_experts)])
+        self.experts = ModuleList([Expert(dim = dim, hidden_mult = hidden_mult) for _ in range(num_experts)])
 
     def forward(self, x):
         outputs = []
@@ -308,7 +305,6 @@ def __init__(self,
         dim,
         num_experts = 16,
         expert_hidden_mult = 4,
-        dropout = 0.,
         second_policy_train = 'random',
         second_policy_eval = 'random',
         second_threshold_train = 0.2,
@@ -333,7 +329,7 @@ def __init__(self,
         )
 
         self.gate = Top2Gating(dim, num_gates = num_experts, **gating_kwargs)
-        self.experts = default(experts, lambda: Experts(dim, num_experts = num_experts, hidden_mult = expert_hidden_mult, dropout = dropout))
+        self.experts = default(experts, lambda: Experts(dim, num_experts = num_experts, hidden_mult = expert_hidden_mult))
 
         self.loss_coef = loss_coef
         self.router_z_loss_coef = router_z_loss_coef
@@ -370,7 +366,6 @@ def __init__(
         dim,
         num_experts: Tuple[int, int] = (4, 4),
         expert_hidden_mult = 4,
-        dropout = 0.,
         second_policy_train = 'random',
         second_policy_eval = 'random',
         second_threshold_train = 0.2,
@@ -403,7 +398,7 @@ def __init__(
         self.gate_inner = Top2Gating(dim, num_gates = num_experts_inner, outer_expert_dims = (num_experts_outer,), **gating_kwargs)
 
         num_experts_outer, num_experts_inner = num_experts
-        self.experts = ModuleList([Experts(dim, num_experts = num_experts_inner, hidden_mult = expert_hidden_mult, dropout = dropout) for _ in range(num_experts_outer)])
+        self.experts = ModuleList([Experts(dim, num_experts = num_experts_inner, hidden_mult = expert_hidden_mult) for _ in range(num_experts_outer)])
 
         self.loss_coef = loss_coef
         self.router_z_loss_coef = router_z_loss_coef
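
For context, a minimal usage sketch of the API after this commit. The sizes below are illustrative, `MoE` is assumed to be exported at the package top level, and the exact return signature of `forward` varies by version; what the diff itself guarantees is only that the constructors no longer accept a `dropout` keyword.

import torch
from st_moe_pytorch import MoE

# illustrative hyperparameters, not taken from the repository's README
moe = MoE(
    dim = 512,
    num_experts = 16,          # feedforward experts behind the top-2 gate
    expert_hidden_mult = 4     # hidden width multiplier inside each Expert
)

x = torch.randn(2, 1024, 512)  # (batch, sequence, dim)
out = moe(x)                   # routed output (some versions also return auxiliary losses)

# before this commit the constructor also accepted e.g. `dropout = 0.1`;
# passing it now raises a TypeError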
