Finish up layer conversion interfaces.
fchollet committed Mar 12, 2017
1 parent 21bf90c commit a783cea
Showing 7 changed files with 136 additions and 6 deletions.
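All of the layer-file changes below follow the same pattern: each affected __init__ gains a decorator from keras.legacy.interfaces that rewrites Keras 1 constructor arguments into their Keras 2 equivalents and warns the caller. As a rough, self-contained sketch of that pattern (generate_legacy_sketch and FakeLayer are made-up names for illustration; the real generate_legacy_interface in keras/legacy/interfaces.py is more involved):

import functools
import warnings


def generate_legacy_sketch(conversions, preprocessor=None):
    # Build a decorator that rewrites legacy kwargs before calling __init__.
    def decorator(init):
        @functools.wraps(init)
        def wrapper(self, *args, **kwargs):
            converted = []
            if preprocessor is not None:
                args, kwargs, converted = preprocessor(args, kwargs)
            for old_name, new_name in conversions:
                if old_name in kwargs:
                    kwargs[new_name] = kwargs.pop(old_name)
                    converted.append((old_name, new_name))
            if converted:
                warnings.warn('Update your layer call: ' +
                              '; '.join('`%s` -> `%s`' % pair
                                        for pair in converted))
            return init(self, *args, **kwargs)
        return wrapper
    return decorator


class FakeLayer(object):
    @generate_legacy_sketch(conversions=[('nb_filter', 'filters')])
    def __init__(self, filters=None):
        self.filters = filters


layer = FakeLayer(nb_filter=5)  # warns, then behaves like FakeLayer(filters=5)
assert layer.filters == 5

The diff to keras/legacy/interfaces.py below adds exactly this kind of machinery for ConvLSTM2D and BatchNormalization.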
2 changes: 0 additions & 2 deletions keras/layers/convolutional.py
@@ -19,8 +19,6 @@
 from .pooling import MaxPooling2D
 from .pooling import MaxPooling3D
 
-from ..legacy import interfaces
-
 
 class _Conv(Layer):
     """Abstract nD convolution layer (private, used as implementation base).
2 changes: 2 additions & 0 deletions keras/layers/convolutional_recurrent.py
@@ -11,6 +11,7 @@
 import numpy as np
 from ..engine import InputSpec
 from ..utils import conv_utils
+from ..legacy import interfaces
 
 
 class ConvRecurrent2D(Recurrent):
@@ -270,6 +271,7 @@ class ConvLSTM2D(ConvRecurrent2D):
         cells output
     """
 
+    @interfaces.legacy_convlstm2d_support
     def __init__(self, filters,
                  kernel_size,
                  strides=(1, 1),
3 changes: 3 additions & 0 deletions keras/layers/local.py
@@ -9,6 +9,7 @@
 from ..engine import Layer
 from ..engine import InputSpec
 from ..utils import conv_utils
+from ..legacy import interfaces
 
 
 class LocallyConnected1D(Layer):
@@ -71,6 +72,7 @@ class LocallyConnected1D(Layer):
         `steps` value might have changed due to padding or strides.
     """
 
+    @interfaces.legacy_conv1d_support
     def __init__(self, filters,
                  kernel_size,
                  strides=1,
@@ -266,6 +268,7 @@ class LocallyConnected2D(Layer):
         `rows` and `cols` values might have changed due to padding.
     """
 
+    @interfaces.legacy_conv2d_support
     def __init__(self, filters,
                  kernel_size,
                  strides=(1, 1),
2 changes: 2 additions & 0 deletions keras/layers/normalization.py
@@ -6,6 +6,7 @@
 from .. import regularizers
 from .. import constraints
 from .. import backend as K
+from ..legacy import interfaces
 
 
 class BatchNormalization(Layer):
@@ -51,6 +52,7 @@ class BatchNormalization(Layer):
         - [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
     """
 
+    @interfaces.legacy_batchnorm_support
     def __init__(self,
                  axis=-1,
                  momentum=0.99,
70 changes: 68 additions & 2 deletions keras/legacy/interfaces.py
@@ -262,11 +262,14 @@ def conv2d_args_preprocessor(args, kwargs):
 
 
 def separable_conv2d_args_preprocessor(args, kwargs):
+    converted = []
     if 'init' in kwargs:
         init = kwargs.pop('init')
         kwargs['depthwise_initializer'] = init
         kwargs['pointwise_initializer'] = init
-    return conv2d_args_preprocessor(args, kwargs)
+        converted.append(('init', 'depthwise_initializer/pointwise_initializer'))
+    args, kwargs, _converted = conv2d_args_preprocessor(args, kwargs)
+    return args, kwargs, converted + _converted
 
 legacy_separable_conv2d_support = generate_legacy_interface(
     allowed_positional_args=['filters', 'kernel_size'],
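For illustration (not part of the diff): the new three-value return convention reports each converted legacy argument. Assuming conv2d_args_preprocessor passes unrelated kwargs through untouched, a hypothetical call would behave like this:

_, kwargs, converted = separable_conv2d_args_preprocessor(
    (), {'init': 'uniform'})
# kwargs    -> {'depthwise_initializer': 'uniform',
#               'pointwise_initializer': 'uniform'}
# converted -> [('init', 'depthwise_initializer/pointwise_initializer')]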
@@ -284,12 +287,16 @@ def separable_conv2d_args_preprocessor(args, kwargs):
 
 
 def deconv2d_args_preprocessor(args, kwargs):
+    converted = []
     if len(args) == 5:
         if isinstance(args[4], tuple):
             args = args[:-1]
+            converted.append(('output_shape', None))
     if 'output_shape' in kwargs:
         kwargs.pop('output_shape')
-    return conv2d_args_preprocessor(args, kwargs)
+        converted.append(('output_shape', None))
+    args, kwargs, _converted = conv2d_args_preprocessor(args, kwargs)
+    return args, kwargs, converted + _converted
 
 legacy_deconv2d_support = generate_legacy_interface(
     allowed_positional_args=['filters', 'kernel_size'],
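Same convention for the deconvolution preprocessor: an argument dropped with no replacement is reported with None. A hypothetical call, under the same passthrough assumption as above:

_, kwargs, converted = deconv2d_args_preprocessor(
    (), {'output_shape': (None, 14, 14, 3)})
# kwargs    -> {}
# converted -> [('output_shape', None)], i.e. `output_shape` was dropped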
@@ -360,3 +367,62 @@ def conv3d_args_preprocessor(args, kwargs):
                                         'th': 'channels_first',
                                         'default': None}},
     preprocessor=conv3d_args_preprocessor)
+
+
+def batchnorm_args_preprocessor(args, kwargs):
+    converted = []
+    if len(args) > 1:
+        raise TypeError('The `BatchNormalization` layer '
+                        'does not accept positional arguments. '
+                        'Use keyword arguments instead.')
+    if 'mode' in kwargs:
+        value = kwargs.pop('mode')
+        if value != 0:
+            raise TypeError('The `mode` argument of `BatchNormalization` '
+                            'no longer exists. `mode=1` and `mode=2` '
+                            'are no longer supported.')
+        converted.append(('mode', None))
+    return args, kwargs, converted
+
+
+def convlstm2d_args_preprocessor(args, kwargs):
+    converted = []
+    if 'forget_bias_init' in kwargs:
+        value = kwargs.pop('forget_bias_init')
+        if value == 'one':
+            kwargs['unit_forget_bias'] = True
+            converted.append(('forget_bias_init', 'unit_forget_bias'))
+        else:
+            warnings.warn('The `forget_bias_init` argument '
+                          'has been ignored. Use `unit_forget_bias=True` '
+                          'instead to initialize with ones.')
+    args, kwargs, _converted = conv2d_args_preprocessor(args, kwargs)
+    return args, kwargs, converted + _converted
+
+
+legacy_convlstm2d_support = generate_legacy_interface(
+    allowed_positional_args=['filters', 'kernel_size'],
+    conversions=[('nb_filter', 'filters'),
+                 ('subsample', 'strides'),
+                 ('border_mode', 'padding'),
+                 ('dim_ordering', 'data_format'),
+                 ('init', 'kernel_initializer'),
+                 ('inner_init', 'recurrent_initializer'),
+                 ('W_regularizer', 'kernel_regularizer'),
+                 ('U_regularizer', 'recurrent_regularizer'),
+                 ('b_regularizer', 'bias_regularizer'),
+                 ('inner_activation', 'recurrent_activation'),
+                 ('dropout_W', 'dropout'),
+                 ('dropout_U', 'recurrent_dropout'),
+                 ('bias', 'use_bias')],
+    value_conversions={'dim_ordering': {'tf': 'channels_last',
+                                        'th': 'channels_first',
+                                        'default': None}},
+    preprocessor=convlstm2d_args_preprocessor)
+
+
+legacy_batchnorm_support = generate_legacy_interface(
+    allowed_positional_args=[],
+    conversions=[('beta_init', 'beta_initializer'),
+                 ('gamma_init', 'gamma_initializer')],
+    preprocessor=batchnorm_args_preprocessor)
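With these two interfaces in place, Keras 1 style constructor calls keep working; the tests added below assert exactly these equivalences, for example:

# Keras 1 style, intercepted by the new decorators:
keras.layers.BatchNormalization(mode=0, beta_init='one')
keras.layers.ConvLSTM2D(5, 3, 3, border_mode='valid', forget_bias_init='one')
# Keras 2 equivalents:
keras.layers.BatchNormalization(beta_initializer='ones')
keras.layers.ConvLSTM2D(5, (3, 3), padding='valid', unit_forget_bias=True)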
2 changes: 0 additions & 2 deletions pytest.ini
@@ -3,8 +3,6 @@
 addopts=-v
         -n 2
         --durations=10
-        --cov-report term-missing
-        --cov=keras
 
 # Do not run tests in the build folder
 norecursedirs= build
61 changes: 61 additions & 0 deletions tests/keras/legacy/interface_test.py
@@ -592,5 +592,66 @@ def test_conv3d_legacy_interface():
     assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
 
 
+@keras_test
+def test_convlstm2d_legacy_interface():
+    old_layer = keras.layers.ConvLSTM2D(5, 3, 3, name='conv')
+    new_layer = keras.layers.ConvLSTM2D(5, (3, 3), name='conv')
+    assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
+
+    old_layer = keras.layers.ConvLSTM2D(5, 3, nb_col=3, name='conv')
+    new_layer = keras.layers.ConvLSTM2D(5, (3, 3), name='conv')
+    assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
+
+    old_layer = keras.layers.ConvLSTM2D(5, nb_row=3, nb_col=3, name='conv')
+    new_layer = keras.layers.ConvLSTM2D(5, (3, 3), name='conv')
+    assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
+
+    old_layer = keras.layers.ConvLSTM2D(5, 3, 3,
+                                        init='normal',
+                                        inner_init='uniform',
+                                        forget_bias_init='one',
+                                        inner_activation='relu',
+                                        subsample=(2, 2),
+                                        border_mode='valid',
+                                        dim_ordering='th',
+                                        W_regularizer='l1',
+                                        U_regularizer='l2',
+                                        b_regularizer='l2',
+                                        dropout_W=0.2,
+                                        dropout_U=0.1,
+                                        name='conv')
+    new_layer = keras.layers.ConvLSTM2D(5, (3, 3),
+                                        kernel_initializer='normal',
+                                        recurrent_initializer='uniform',
+                                        unit_forget_bias=True,
+                                        recurrent_activation='relu',
+                                        strides=(2, 2),
+                                        padding='valid',
+                                        kernel_regularizer='l1',
+                                        recurrent_regularizer='l2',
+                                        bias_regularizer='l2',
+                                        data_format='channels_first',
+                                        dropout=0.2,
+                                        recurrent_dropout=0.1,
+                                        name='conv')
+    assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
+
+
+@keras_test
+def test_batchnorm_legacy_interface():
+    old_layer = keras.layers.BatchNormalization(mode=0, name='bn')
+    new_layer = keras.layers.BatchNormalization(name='bn')
+    assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
+
+    old_layer = keras.layers.BatchNormalization(mode=0,
+                                                beta_init='one',
+                                                gamma_init='uniform',
+                                                name='bn')
+    new_layer = keras.layers.BatchNormalization(beta_initializer='ones',
+                                                gamma_initializer='uniform',
+                                                name='bn')
+    assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
+
+
 if __name__ == '__main__':
     pytest.main([__file__])