def test_tiny_separable_conv_valid(self):
    np.random.seed(1988)
    input_dim = 16
    input_shape = (input_dim, input_dim, 3)
    depth_multiplier = 1
    kernel_height = 3
    kernel_width = 3
    num_kernels = 4
    # Define a model
    model = Sequential()
    model.add(SeparableConv2D(filters=num_kernels,
                              kernel_size=(kernel_height, kernel_width),
                              padding='valid', strides=(1, 1),
                              depth_multiplier=depth_multiplier,
                              input_shape=input_shape))
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Test the keras model
    self._test_keras_model(model)
def test_tiny_separable_conv_same_fancy(self):
    np.random.seed(1988)
    input_dim = 16
    input_shape = (input_dim, input_dim, 3)
    depth_multiplier = 1
    kernel_height = 3
    kernel_width = 3
    num_kernels = 4
    # Define a model
    model = Sequential()
    model.add(SeparableConv2D(filters=num_kernels,
                              kernel_size=(kernel_height, kernel_width),
                              padding='same', strides=(2, 2), activation='relu',
                              depth_multiplier=depth_multiplier,
                              input_shape=input_shape))
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Test the keras model
    self._test_keras_model(model)
def test_tiny_separable_conv_valid_depth_multiplier(self):
    np.random.seed(1988)
    input_dim = 16
    input_shape = (input_dim, input_dim, 3)
    depth_multiplier = 5
    kernel_height = 3
    kernel_width = 3
    num_kernels = 40
    # Define a model
    model = Sequential()
    model.add(SeparableConv2D(filters=num_kernels,
                              kernel_size=(kernel_height, kernel_width),
                              padding='valid', strides=(1, 1),
                              depth_multiplier=depth_multiplier,
                              input_shape=input_shape))
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Test the keras model
    self._test_keras_model(model)
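# Quick shape check (not from the original tests): with depth_multiplier=5,
# 3 input channels, a 3x3 kernel and 40 output filters, Keras stores the
# SeparableConv2D weights as [depthwise_kernel, pointwise_kernel, bias].
# This is a hedged sketch of what depth_multiplier does, assuming the
# standard Keras weight layout.
def _separable_conv_weight_shapes_sketch():
    from keras.layers import SeparableConv2D
    from keras.models import Sequential
    model = Sequential()
    model.add(SeparableConv2D(filters=40, kernel_size=(3, 3), padding='valid',
                              strides=(1, 1), depth_multiplier=5,
                              input_shape=(16, 16, 3)))
    depthwise, pointwise, bias = model.get_weights()
    assert depthwise.shape == (3, 3, 3, 5)    # (kh, kw, in_channels, depth_multiplier)
    assert pointwise.shape == (1, 1, 15, 40)  # (1, 1, in_channels * depth_multiplier, filters)
    assert bias.shape == (40,)                # one bias per output filter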
def test_tiny_separable_conv_same_fancy_depth_multiplier(
        self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 16
    input_shape = (input_dim, input_dim, 3)
    depth_multiplier = 2
    kernel_height = 3
    kernel_width = 3
    num_kernels = 40
    # Define a model
    model = Sequential()
    model.add(SeparableConv2D(filters=num_kernels,
                              kernel_size=(kernel_height, kernel_width),
                              padding='same', strides=(2, 2), activation='relu',
                              depth_multiplier=depth_multiplier,
                              input_shape=input_shape))
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Test the keras model
    self._test_keras_model(model, model_precision=model_precision)
def lin_interpolation_2d(inp, dim):
    num_rows, num_cols, num_filters = K.int_shape(inp)[1:]
    conv = SeparableConv2D(num_filters, (num_rows, num_cols), use_bias=False)
    x = conv(inp)
    # Overwrite the weights so the layer computes a fixed linear interpolation
    # over the spatial grid instead of a learned convolution.
    w = conv.get_weights()
    w[0].fill(0)
    w[1].fill(0)
    linspace = linspace_2d(num_rows, num_cols, dim=dim)
    for i in range(num_filters):
        # Depthwise kernel: weight every spatial position by its coordinate.
        w[0][:, :, i, 0] = linspace[:, :]
        # Pointwise kernel: identity mapping, one output per input channel.
        w[1][0, 0, i, i] = 1.
    conv.set_weights(w)
    conv.trainable = False
    # The full-size 'valid' kernel leaves 1x1 spatial output; drop those
    # singleton dimensions and append a trailing axis, giving one
    # interpolated coordinate per filter.
    x = Lambda(lambda t: K.squeeze(t, axis=1))(x)
    x = Lambda(lambda t: K.squeeze(t, axis=1))(x)
    x = Lambda(lambda t: K.expand_dims(t, axis=-1))(x)
    return x
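# The helper linspace_2d() used above is not included in this snippet. A
# minimal, hypothetical stand-in (an assumption, not the original
# implementation) that returns a (num_rows, num_cols) grid with values evenly
# spaced in [0, 1] along the axis selected by `dim`, which is enough to make
# lin_interpolation_2d() runnable for experimentation:
def linspace_2d(num_rows, num_cols, dim=0):
    import numpy as np
    if dim == 0:
        col = np.linspace(0.0, 1.0, num_rows).reshape(num_rows, 1)
        return np.tile(col, (1, num_cols))  # varies along rows
    row = np.linspace(0.0, 1.0, num_cols).reshape(1, num_cols)
    return np.tile(row, (num_rows, 1))      # varies along columns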
def _separable_conv_block(ip, filters, kernel_size=(3, 3), strides=(1, 1),
                          weight_decay=5e-5, id=None):
    '''Adds 2 blocks of [relu-separable conv-batchnorm]

    # Arguments:
        ip: input tensor
        filters: number of output filters per layer
        kernel_size: kernel size of separable convolutions
        strides: strided convolution for downsampling
        weight_decay: l2 regularization weight
        id: string id

    # Returns:
        a Keras tensor
    '''
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

    with K.name_scope('separable_conv_block_%s' % id):
        x = Activation('relu')(ip)
        x = SeparableConv2D(filters, kernel_size, strides=strides,
                            name='separable_conv_1_%s' % id, padding='same',
                            use_bias=False, kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
                               epsilon=_BN_EPSILON,
                               name='separable_conv_1_bn_%s' % id)(x)
        x = Activation('relu')(x)
        x = SeparableConv2D(filters, kernel_size,
                            name='separable_conv_2_%s' % id, padding='same',
                            use_bias=False, kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
                               epsilon=_BN_EPSILON,
                               name='separable_conv_2_bn_%s' % id)(x)
    return x
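# Illustrative usage only (not from the original source); it assumes the
# module-level constants referenced above (_BN_DECAY, _BN_EPSILON) and the
# l2 regularizer import are in scope. Shapes are arbitrary.
def _separable_conv_block_usage_sketch():
    from keras.layers import Input
    from keras.models import Model
    ip = Input(shape=(32, 32, 16))
    x = _separable_conv_block(ip, filters=32, strides=(2, 2), id='reduce_1')
    model = Model(ip, x)
    model.summary()  # two separable conv + BN pairs; the strided conv halves the spatial dims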
def test_tiny_xception(self, model_precision=_MLMODEL_FULL_PRECISION):
    img_input = Input(shape=(32, 32, 3))
    x = Conv2D(2, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(4, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(8, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(8, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(8, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool')(x)
    x = add([x, residual])

    residual = Conv2D(16, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    model = Model(inputs=[img_input], outputs=[residual])
    self._test_keras_model(model, delta=1e-2, model_precision=model_precision)
def separable_act_conv_bn(x, filters, size, strides=(1, 1), padding='same',
                          name=None):
    if name is not None:
        conv_name = name + '_conv'
        act_name = name + '_act'
    else:
        conv_name = None
        act_name = None

    x = Activation('relu', name=act_name)(x)
    x = SeparableConv2D(filters, size, strides=strides, padding=padding,
                        use_bias=False, name=conv_name)(x)
    x = BatchNormalization(axis=-1, scale=False, name=name)(x)
    return x
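# Illustrative usage only (not from the original source): stacking two of the
# pre-activation separable conv blocks defined above. Assumes the same Keras
# layer imports as above; input shape is arbitrary.
def _separable_act_conv_bn_usage_sketch():
    from keras.layers import Input
    from keras.models import Model
    inp = Input(shape=(64, 64, 32))
    x = separable_act_conv_bn(inp, filters=64, size=(3, 3), name='sep1')
    x = separable_act_conv_bn(x, filters=64, size=(3, 3), name='sep2')
    return Model(inp, x)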