# Assumes the Keras 1.x API: AtrousConvolution1D, Convolution1D and the
# functional merge() were removed in Keras 2.
from keras.layers import AtrousConvolution1D, Convolution1D, merge


def wavenetBlock(n_atrous_filters, atrous_filter_size, atrous_rate,
                 n_conv_filters, conv_filter_size):
    # n_conv_filters and conv_filter_size are accepted but not used below.
    def f(input_):
        residual = input_
        # Gated activation unit: elementwise tanh(conv(x)) * sigmoid(conv(x))
        tanh_out = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                       atrous_rate=atrous_rate,
                                       border_mode='same',
                                       activation='tanh')(input_)
        sigmoid_out = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                          atrous_rate=atrous_rate,
                                          border_mode='same',
                                          activation='sigmoid')(input_)
        merged = merge([tanh_out, sigmoid_out], mode='mul')
        # 1x1 convolution forms the skip connection; the residual path adds
        # it back onto the block input.
        skip_out = Convolution1D(1, 1, activation='relu', border_mode='same')(merged)
        out = merge([skip_out, residual], mode='sum')
        return out, skip_out
    return f
Python relu() example source code
Source file: simple-generative-model-regressor.py, project: keras-wavenet, author: usernaamee
Source file: simple-generative-model-regressor.py, project: keras-wavenet, author: usernaamee
# Keras 1.x API; `relu` below is keras.activations.relu.
from keras.models import Model
from keras.layers import Input, Dense, Flatten, Lambda, Convolution2D, merge
from keras.activations import relu


def get_basic_generative_model(input_size):
    input = Input(shape=(1, input_size, 1))
    # Stack of WaveNet blocks with increasing dilation rates; each block
    # returns (residual output, skip connection).
    l1a, l1b = wavenetBlock(10, 5, 2, 1, 3)(input)
    l2a, l2b = wavenetBlock(1, 2, 4, 1, 3)(l1a)
    l3a, l3b = wavenetBlock(1, 2, 8, 1, 3)(l2a)
    l4a, l4b = wavenetBlock(1, 2, 16, 1, 3)(l3a)
    l5a, l5b = wavenetBlock(1, 2, 32, 1, 3)(l4a)
    # Sum the skip connections, then post-process with ReLU and 1x1 convolutions.
    l6 = merge([l1b, l2b, l3b, l4b, l5b], mode='sum')
    l7 = Lambda(relu)(l6)
    l8 = Convolution2D(1, 1, 1, activation='relu')(l7)
    l9 = Convolution2D(1, 1, 1)(l8)
    l10 = Flatten()(l9)
    l11 = Dense(1, activation='tanh')(l10)
    model = Model(input=input, output=l11)
    model.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy'])
    model.summary()
    return model
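A minimal driver sketch for the function above (hypothetical, not from the original repository): it frames a toy signal into windows matching the declared Input(shape=(1, input_size, 1)) and regresses the next sample. Window length, signal, and epoch count are illustrative, and Keras 1.x conventions (nb_epoch) are assumed.

import numpy as np

input_size = 256  # illustrative window length
model = get_basic_generative_model(input_size)

signal = np.sin(np.linspace(0, 100 * np.pi, 10000)).astype('float32')
X = np.array([signal[i:i + input_size]
              for i in range(len(signal) - input_size)])
X = X.reshape(-1, 1, input_size, 1)          # matches Input(shape=(1, input_size, 1))
y = signal[input_size:].reshape(-1, 1)       # next-sample regression target
model.fit(X, y, nb_epoch=1, batch_size=64)   # Keras 1.x uses nb_epoch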
import theano
import theano.tensor as T


def test_relu():
    '''
    Relu implementation doesn't depend on the value being
    a theano variable. Testing ints, floats and theano tensors.
    '''
    from keras.activations import relu as r
    # Plain Python scalars
    assert r(5) == 5
    assert r(-5) == 0
    assert r(-0.1) == 0
    assert r(0.1) == 0.1

    # Theano tensor
    x = T.vector()
    exp = r(x)
    f = theano.function([x], exp)

    test_values = get_standard_values()
    result = f(test_values)
    list_assert_equal(result, test_values)  # because no negatives in test values
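get_standard_values() and list_assert_equal() are helpers defined elsewhere in the original test module; a plausible sketch of them (a hypothetical reconstruction, not the exact originals) is:

def get_standard_values():
    # Non-negative floats, so relu should return them unchanged.
    return [0.0, 0.1, 0.5, 0.9, 1.0]


def list_assert_equal(a, b, round_to=7):
    # Pairwise equality up to rounding.
    for i, j in zip(a, b):
        assert round(float(i), round_to) == round(float(j), round_to)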
from numpy.testing import assert_allclose
from keras import activations
from keras import backend as K


def test_relu():
    '''
    Relu implementation doesn't depend on the value being
    a theano variable. Testing ints, floats and theano tensors.
    '''
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.relu(x)])

    test_values = get_standard_values()
    result = f([test_values])[0]

    # because no negatives in test values
    assert_allclose(result, test_values, rtol=1e-05)
def temporal_convs_linear(n_nodes, conv_len, n_classes, n_feat, max_len,
                          causal=False, loss='categorical_crossentropy',
                          optimizer='adam', return_param_str=False):
    """ Used in paper:
        Segmental Spatiotemporal CNNs for Fine-grained Action Segmentation
        Lea et al. ECCV 2016

        Note: Spatial dropout was not used in the original paper.
        It tends to improve performance a little.
    """
    inputs = Input(shape=(max_len, n_feat))
    model = inputs

    # Pad beginning of sequence to prevent usage of future data
    if causal: model = ZeroPadding1D((conv_len//2, 0))(model)
    model = Convolution1D(n_nodes, conv_len, input_dim=n_feat, input_length=max_len, border_mode='same', activation='relu')(model)
    if causal: model = Cropping1D((0, conv_len//2))(model)

    model = SpatialDropout1D(0.3)(model)
    model = TimeDistributed(Dense(n_classes, activation="softmax"))(model)

    model = Model(input=inputs, output=model)
    model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal")

    if return_param_str:
        param_str = "tConv_C{}".format(conv_len)
        if causal:
            param_str += "_causal"
        return model, param_str
    else:
        return model
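A hypothetical call, assuming per-frame feature vectors for an action-segmentation task and the surrounding Keras 1.x imports from the same module (the numbers are illustrative):

# 64-dim frame features, 10 action classes, sequences of 500 frames.
model = temporal_convs_linear(n_nodes=64, conv_len=25, n_classes=10,
                              n_feat=64, max_len=500, causal=True)
# Training would use one-hot labels of shape (n_samples, max_len, n_classes)
# and per-frame sample weights (sample_weight_mode="temporal") to mask padding.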
def ED_TCN(n_nodes, conv_len, n_classes, n_feat, max_len,
           loss='categorical_crossentropy', causal=False,
           optimizer="rmsprop", activation='norm_relu',
           return_param_str=False):
    n_layers = len(n_nodes)

    inputs = Input(shape=(max_len, n_feat))
    model = inputs

    # ---- Encoder ----
    for i in range(n_layers):
        # Pad beginning of sequence to prevent usage of future data
        if causal: model = ZeroPadding1D((conv_len//2, 0))(model)
        model = Convolution1D(n_nodes[i], conv_len, border_mode='same')(model)
        if causal: model = Cropping1D((0, conv_len//2))(model)

        model = SpatialDropout1D(0.3)(model)

        if activation == 'norm_relu':
            model = Activation('relu')(model)
            model = Lambda(channel_normalization, name="encoder_norm_{}".format(i))(model)
        elif activation == 'wavenet':
            model = WaveNet_activation(model)
        else:
            model = Activation(activation)(model)

        model = MaxPooling1D(2)(model)

    # ---- Decoder ----
    for i in range(n_layers):
        model = UpSampling1D(2)(model)
        if causal: model = ZeroPadding1D((conv_len//2, 0))(model)
        model = Convolution1D(n_nodes[-i-1], conv_len, border_mode='same')(model)
        if causal: model = Cropping1D((0, conv_len//2))(model)

        model = SpatialDropout1D(0.3)(model)

        if activation == 'norm_relu':
            model = Activation('relu')(model)
            model = Lambda(channel_normalization, name="decoder_norm_{}".format(i))(model)
        elif activation == 'wavenet':
            model = WaveNet_activation(model)
        else:
            model = Activation(activation)(model)

    # Output FC layer
    model = TimeDistributed(Dense(n_classes, activation="softmax"))(model)

    model = Model(input=inputs, output=model)
    model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal", metrics=['accuracy'])

    if return_param_str:
        param_str = "ED-TCN_C{}_L{}".format(conv_len, n_layers)
        if causal:
            param_str += "_causal"
        return model, param_str
    else:
        return model
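A hypothetical call with illustrative layer widths. channel_normalization (and WaveNet_activation, if activation='wavenet') come from the same repository's utility module, and max_len should be divisible by 2**len(n_nodes) because each encoder layer pools by 2 and each decoder layer upsamples by 2:

model, param_str = ED_TCN(n_nodes=[64, 96], conv_len=25, n_classes=10,
                          n_feat=128, max_len=1024, causal=False,
                          activation='norm_relu', return_param_str=True)
# param_str -> "ED-TCN_C25_L2"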
def ED_TCN_atrous(n_nodes, conv_len, n_classes, n_feat, max_len,
                  loss='categorical_crossentropy', causal=False,
                  optimizer="rmsprop", activation='norm_relu',
                  return_param_str=False):
    n_layers = len(n_nodes)

    inputs = Input(shape=(None, n_feat))
    model = inputs

    # ---- Encoder ----
    for i in range(n_layers):
        # Pad beginning of sequence to prevent usage of future data
        if causal: model = ZeroPadding1D((conv_len//2, 0))(model)
        model = AtrousConvolution1D(n_nodes[i], conv_len, atrous_rate=i+1, border_mode='same')(model)
        if causal: model = Cropping1D((0, conv_len//2))(model)

        model = SpatialDropout1D(0.3)(model)

        if activation == 'norm_relu':
            model = Activation('relu')(model)
            model = Lambda(channel_normalization, name="encoder_norm_{}".format(i))(model)
        elif activation == 'wavenet':
            model = WaveNet_activation(model)
        else:
            model = Activation(activation)(model)

    # ---- Decoder ----
    for i in range(n_layers):
        if causal: model = ZeroPadding1D((conv_len//2, 0))(model)
        model = AtrousConvolution1D(n_nodes[-i-1], conv_len, atrous_rate=n_layers-i, border_mode='same')(model)
        if causal: model = Cropping1D((0, conv_len//2))(model)

        model = SpatialDropout1D(0.3)(model)

        if activation == 'norm_relu':
            model = Activation('relu')(model)
            model = Lambda(channel_normalization, name="decoder_norm_{}".format(i))(model)
        elif activation == 'wavenet':
            model = WaveNet_activation(model)
        else:
            model = Activation(activation)(model)

    # Output FC layer
    model = TimeDistributed(Dense(n_classes, activation="softmax"))(model)

    model = Model(input=inputs, output=model)
    model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal", metrics=['accuracy'])

    if return_param_str:
        param_str = "ED-TCNa_C{}_L{}".format(conv_len, n_layers)
        if causal:
            param_str += "_causal"
        return model, param_str
    else:
        return model
def TimeDelayNeuralNetwork(n_nodes, conv_len, n_classes, n_feat, max_len,
                           loss='categorical_crossentropy', causal=False,
                           optimizer="rmsprop", activation='sigmoid',
                           return_param_str=False):
    # Time-delay neural network
    n_layers = len(n_nodes)

    inputs = Input(shape=(max_len, n_feat))
    model = inputs

    # Mask input is created here but not wired into the model in this snippet.
    inputs_mask = Input(shape=(max_len, 1))
    model_masks = [inputs_mask]

    # ---- Encoder ----
    for i in range(n_layers):
        # Pad beginning of sequence to prevent usage of future data
        if causal: model = ZeroPadding1D((conv_len//2, 0))(model)
        model = AtrousConvolution1D(n_nodes[i], conv_len, atrous_rate=i+1, border_mode='same')(model)
        # model = SpatialDropout1D(0.3)(model)
        if causal: model = Cropping1D((0, conv_len//2))(model)

        if activation == 'norm_relu':
            model = Activation('relu')(model)
            model = Lambda(channel_normalization, name="encoder_norm_{}".format(i))(model)
        elif activation == 'wavenet':
            model = WaveNet_activation(model)
        else:
            model = Activation(activation)(model)

    # Output FC layer
    model = TimeDistributed(Dense(n_classes, activation="softmax"))(model)

    model = Model(input=inputs, output=model)
    model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal", metrics=['accuracy'])

    if return_param_str:
        param_str = "TDN_C{}".format(conv_len)
        if causal:
            param_str += "_causal"
        return model, param_str
    else:
        return model