@@ -445,6 +445,19 @@ class Linear(nn.Linear):
     def __str__(self):
         return f'{Dom.name}.' + super().__str__()

+    @classmethod
+    def from_module(cls, src: nn.Linear) -> Linear:
+        with_bias = src.bias is not None
+        new_lin = Linear(src.in_features, src.out_features, with_bias)
+        new_lin.load_state_dict(src.state_dict())
+        return new_lin
+
+    def export(self) -> nn.Linear:
+        with_bias = self.bias is not None
+        lin = nn.Linear(self.in_features, self.out_features, with_bias)
+        lin.load_state_dict(self.state_dict())
+        return lin
+
     def forward(self, *ts: Union[Tensor, Ele]) -> Union[Tensor, Ele, Tuple[Tensor, ...]]:
         """
         :param ts: either Tensor, Ele, or Ele tensors
@@ -814,6 +827,9 @@ class ReLU(nn.ReLU):
     def __str__(self):
         return f'{Dom.name}.' + super().__str__()

+    def export(self) -> nn.ReLU:
+        return nn.ReLU()
+
     def forward(self, *ts: Union[Tensor, Ele]) -> Union[Tensor, Ele, Tuple[Tensor, ...]]:
         """ According to paper, it approximates E by either of the two cases, whichever has smaller areas.
         Mathematically, it can be proved that the (linear) approximation is optimal in terms of approximated areas.
@@ -933,6 +949,9 @@ class Tanh(nn.Tanh):
     def __str__(self):
         return f'{Dom.name}.' + super().__str__()

+    def export(self) -> nn.Tanh:
+        return nn.Tanh()
+
     def forward(self, *ts: Union[Tensor, Ele]) -> Union[Tensor, Ele, Tuple[Tensor, ...]]:
         """ For both LB' and UB', it chooses the smaller slope between LB-UB and LB'/UB'. Specifically,
         when L > 0, LB' chooses LB-UB, otherwise LB';
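
The added from_module()/export() pair converts between a concrete nn.Linear and its abstract-domain counterpart by copying the state dict, while the activation layers only need a fresh nn.ReLU()/nn.Tanh() on export. A minimal round-trip sketch, assuming the classes from this diff are importable from this module (the import path below is an assumption, not part of the change):

# Sketch only: `diffabs.deeppoly` is an assumed import path for the Linear shown above.
import torch
import torch.nn as nn
from diffabs.deeppoly import Linear

concrete = nn.Linear(4, 3, bias=True)

# Wrap the concrete layer; from_module() copies weights via load_state_dict().
abs_lin = Linear.from_module(concrete)

# export() recovers a plain nn.Linear carrying the same parameters.
restored = abs_lin.export()

x = torch.randn(2, 4)
assert torch.allclose(concrete(x), restored(x))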