def mean_acc(y_true, y_pred):
s = K.shape(y_true)
# reshape so that the height and width dims are merged into a single axis
y_true_reshaped = K.reshape(y_true, tf.stack([-1, s[1] * s[2], s[-1]]))
y_pred_reshaped = K.reshape(y_pred, tf.stack([-1, s[1] * s[2], s[-1]]))
# correctly classified
clf_pred = K.one_hot(K.argmax(y_pred_reshaped), s[-1])  # predicted classes as a one-hot tensor
equal_entries = K.cast(K.equal(clf_pred,y_true_reshaped), dtype='float32') * y_true_reshaped
correct_pixels_per_class = K.sum(equal_entries, axis=1)
n_pixels_per_class = K.sum(y_true_reshaped,axis=1)
acc = correct_pixels_per_class / n_pixels_per_class
acc_mask = tf.is_finite(acc)  # drop classes absent from y_true (0/0 gives nan)
acc_masked = tf.boolean_mask(acc,acc_mask)
return K.mean(acc_masked)
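A minimal usage sketch for the metric above, assuming Keras 2 on the TensorFlow 1.x backend, one-hot segmentation labels of shape (batch, height, width, classes), and that the file defining mean_acc already imports tensorflow as tf and keras.backend as K; the toy model and data below are hypothetical.

import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D

# Toy per-pixel classifier: 3 classes on 16x16 single-channel images.
model = Sequential([Conv2D(3, (1, 1), activation='softmax', input_shape=(16, 16, 1))])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=[mean_acc])

x = np.random.rand(8, 16, 16, 1)
y = np.eye(3)[np.random.randint(0, 3, size=(8, 16, 16))]  # one-hot labels, (8, 16, 16, 3)
model.fit(x, y, epochs=1, verbose=0)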
Python K.reshape() usage examples
def step(self, input_t, states):
reader_states = states[:2]
flattened_mem_tm1, flattened_shared_mem_tm1 = states[2:4]
writer_h_tm1, writer_c_tm1 = states[4:]
input_mem_shape = K.shape(flattened_mem_tm1)
mem_shape = (input_mem_shape[0], input_mem_shape[1] // self.output_dim, self.output_dim)  # integer division keeps the memory-slot dimension an int
mem_tm1 = K.reshape(flattened_mem_tm1, mem_shape)
shared_mem_tm1 = K.reshape(flattened_shared_mem_tm1, mem_shape)
reader_constants = self.reader.get_constants(input_t)
reader_states += reader_constants
o_t, [_, reader_c_t] = self.reader.step(input_t, reader_states)
z_t, m_rt = self.summarize_memory(o_t, mem_tm1)
shared_z_t, shared_m_rt = self.summarize_memory(o_t, shared_mem_tm1)
c_t = self.compose_memory_and_output([o_t, m_rt, shared_m_rt])
# Collecting the necessary variables to directly call writer's step function.
writer_constants = self.writer.get_constants(c_t) # returns dropouts for W and U (all 1s, see init)
writer_states = [writer_h_tm1, writer_c_tm1] + writer_constants
# Making a call to writer's step function, Equation 5
h_t, [_, writer_c_t] = self.writer.step(c_t, writer_states) # h_t, writer_c_t: (batch_size, output_dim)
mem_t = self.update_memory(z_t, h_t, mem_tm1)
shared_mem_t = self.update_memory(shared_z_t, h_t, shared_mem_tm1)
return h_t, [o_t, reader_c_t, K.batch_flatten(mem_t), K.batch_flatten(shared_mem_t), h_t, writer_c_t]
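A standalone sketch (not part of the class above) of the batch_flatten / reshape round trip applied to mem_tm1, with hypothetical sizes: batch 2, 4 memory slots, output_dim 3.

import numpy as np
from keras import backend as K

mem = K.constant(np.random.rand(2, 4, 3))         # (batch, memory_slots, output_dim)
flat = K.batch_flatten(mem)                       # (batch, memory_slots * output_dim)
s = K.shape(flat)
restored = K.reshape(flat, (s[0], s[1] // 3, 3))  # integer division recovers the slot axis
print(K.eval(K.shape(restored)))                  # [2 4 3]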
def step_with_training(self, training=None):
def step(inputs, states):
input_shape = K.int_shape(inputs)
y_tm1 = self.layer.preprocess_input(
K.expand_dims(states[0], axis=1),
training
)
y_tm1 = K.reshape(y_tm1, (-1, input_shape[-1]))
inputs_sum = tf.reduce_sum(inputs)
def inputs_f(): return inputs
def output_f(): return y_tm1
current_inputs = tf.case(
[(tf.equal(inputs_sum, 0.0), output_f)],
default=inputs_f
)
return self.layer.step(
current_inputs,
states
)
return step
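A minimal, standalone demonstration of the tf.case pattern used in step above: fall back to the previous output when the current input is all zeros (TensorFlow 1.x session style; the values are hypothetical).

import tensorflow as tf

inputs = tf.zeros((2, 3))
y_tm1 = tf.ones((2, 3))
chosen = tf.case([(tf.equal(tf.reduce_sum(inputs), 0.0), lambda: y_tm1)],
                 default=lambda: inputs)
with tf.Session() as sess:
    print(sess.run(chosen))  # all ones: the zero input was replaced by y_tm1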
def call(self, inputs, mask=None, initial_state=None, training=None):
inputs_shape = K.shape(inputs)
zeros = tf.zeros(
shape=[
inputs_shape[0],
inputs_shape[1] - 1,
self.layer.units
]
)
outputs = self.layer.call(
inputs=inputs,
mask=mask,
initial_state=initial_state,
training=training
)
outputs = K.reshape(
tf.slice(outputs, [0, inputs_shape[1] - 1, 0], [-1, 1, -1]),
shape=(inputs_shape[0], 1, self.layer.units)
)
outputs = K.concatenate([outputs, zeros], axis=1)
if 0 < self.layer.dropout + self.layer.recurrent_dropout:
outputs._uses_learning_phase = True
return outputs
def call(self, inputs, mask=None):
input_shape = K.int_shape(inputs)
outputs = self.layer.call(inputs)
outputs = K.permute_dimensions(
outputs,
self.permute_pattern + [len(input_shape) - 1]
)
outputs_shape = self.compute_output_shape(input_shape)
outputs = K.reshape(
outputs,
(-1, outputs_shape[1], outputs_shape[2])
)
mask_tensor = self.compute_mask(
inputs,
mask
)
mask_tensor = K.cast(mask_tensor, K.floatx())
mask_tensor = K.expand_dims(mask_tensor)
mask_output = K.repeat_elements(
mask_tensor,
outputs_shape[2],
2
)
return outputs * mask_output
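A standalone shape check for the mask broadcasting used above, with hypothetical sizes (batch 2, 3 timesteps, 4 output features).

import numpy as np
from keras import backend as K

mask = K.constant(np.array([[1, 1, 0], [1, 0, 0]], dtype='float32'))  # (2, 3)
mask = K.expand_dims(mask)            # (2, 3, 1)
mask = K.repeat_elements(mask, 4, 2)  # (2, 3, 4), ready to multiply with the outputs
print(K.int_shape(mask))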
def label_test_file(self):
outfile = open("pred_vld.txt","w")
prep_alfa = lambda X: pad_sequences(sequences=self.indexer.texts_to_sequences(X),
maxlen=self.SentMaxLen)
vld = json.loads(open('validation.json', 'r').read())
for prem, hypo, label in zip(vld[0], vld[1], vld[2]):
prem_pad, hypo_pad = prep_alfa([prem]), prep_alfa([hypo])
ans = np.reshape(self.model.predict(x=[prem_pad, hypo_pad], batch_size = 1), -1) # PREDICTION
if np.argmax(ans) != label:
outfile.write(prem + "\n" + hypo + "\n")
outfile.write("Truth: " + self.rLabels[label] + "\n")
outfile.write('Contradiction \t{:.1f}%\n'.format(float(ans[0]) * 100) +
'Neutral \t\t{:.1f}%\n'.format(float(ans[1]) * 100) +
'Entailment \t{:.1f}%\n'.format(float(ans[2]) * 100))
outfile.write("-"*15 + "\n")
outfile.close()
Source file: rnnlayer.py (project: recurrent-attention-for-QA-SQUAD-based-on-keras, author: wentaozhu)
def get_constants(self, inputs, training=None):
constants = []
'''if 0 < self.dropout_U < 1:
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
constants.append(B_U)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
if 0 < self.dropout_W < 1:
input_shape = K.int_shape(x)
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, int(input_dim)))
B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
constants.append(B_W)
else:'''
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
return constants
Source file: rnnlayer.py (project: recurrent-attention-for-QA-SQUAD-based-on-keras, author: wentaozhu)
def get_constants(self, inputs, training=None):
constants = []
'''if 0 < self.dropout_U < 1:
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
constants.append(B_U)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
if 0 < self.dropout_W < 1:
input_shape = K.int_shape(x)
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, int(input_dim)))
B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
constants.append(B_W)
else:'''
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
return constants
Source file: rnnlayer.py (project: recurrent-attention-for-QA-SQUAD-based-on-keras, author: wentaozhu)
def get_constants(self, inputs, training=None):
constants = []
'''if 0 < self.dropout_U < 1:
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
constants.append(B_U)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
if 0 < self.dropout_W < 1:
input_shape = K.int_shape(x)
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, int(input_dim)))
B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
constants.append(B_W)
else:'''
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
return constants
def _full_matching(self, h1, h2, w):
"""Full matching operation.
# Arguments
h1: (batch_size, h1_timesteps, embedding_size)
h2: (batch_size, h2_timesteps, embedding_size)
w: weights of one direction, (mp_dim, embedding_size)
# Output shape
(batch_size, h1_timesteps, mp_dim)
"""
# h2 forward last step hidden vector, (batch_size, embedding_size)
h2_last_state = h2[:, -1, :]
# h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)
h1 = self._time_distributed_multiply(h1, w)
# h2_last_state * weights, (batch_size, mp_dim, embedding_size)
h2 = self._time_distributed_multiply(h2_last_state, w)
# reshape to (batch_size, 1, mp_dim, embedding_size)
h2 = K.expand_dims(h2, axis=1)
# matching vector, (batch_size, h1_timesteps, mp_dim)
matching = self._cosine_similarity(h1, h2)
return matching
def _max_pooling_matching(self, h1, h2, w):
"""Max pooling matching operation.
# Arguments
h1: (batch_size, h1_timesteps, embedding_size)
h2: (batch_size, h2_timesteps, embedding_size)
w: weights of one direction, (mp_dim, embedding_size)
# Output shape
(batch_size, h1_timesteps, mp_dim)
"""
# h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)
h1 = self._time_distributed_multiply(h1, w)
# h2 * weights, (batch_size, h2_timesteps, mp_dim, embedding_size)
h2 = self._time_distributed_multiply(h2, w)
# reshape h1 to (batch_size, h1_timesteps, 1, mp_dim, embedding_size)
h1 = K.expand_dims(h1, axis=2)
# reshape h2 to (batch_size, 1, h2_timesteps, mp_dim, embedding_size)
h2 = K.expand_dims(h2, axis=1)
# cosine similarity, (batch_size, h1_timesteps, h2_timesteps, mp_dim)
cos = self._cosine_similarity(h1, h2)
# (batch_size, h1_timesteps, mp_dim)
matching = K.max(cos, axis=2)
return matching
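The two matching operations above depend on _time_distributed_multiply and _cosine_similarity, which are not shown on this page. The standalone sketch below reproduces only the broadcasting behaviour of the max-pooling case, with an explicit cosine similarity over the last axis and hypothetical sizes (mp_dim 4, embedding_size 3).

import numpy as np
from keras import backend as K

h1 = K.constant(np.random.rand(2, 5, 1, 4, 3))  # (batch, h1_timesteps, 1, mp_dim, embedding)
h2 = K.constant(np.random.rand(2, 1, 6, 4, 3))  # (batch, 1, h2_timesteps, mp_dim, embedding)
dot = K.sum(h1 * h2, axis=-1)
norms = K.sqrt(K.sum(K.square(h1), axis=-1)) * K.sqrt(K.sum(K.square(h2), axis=-1))
cos = dot / K.maximum(norms, K.epsilon())       # (batch, h1_timesteps, h2_timesteps, mp_dim)
matching = K.max(cos, axis=2)                   # (batch, h1_timesteps, mp_dim)
print(K.int_shape(matching))                    # (2, 5, 4)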
Source file: customlayers.py (project: deep-mil-for-whole-mammogram-classification, author: wentaozhu)
def call(self, x, mask=None):
import theano.tensor as T
newx = T.sort(x)
#response = K.reverse(newx, axes=1)
#response = K.sum(x> 0.5, axis=1) / self.k
return newx
#response = K.reshape(newx,[-1,1])
#return K.concatenate([1-response, response], axis=self.label)
#response = K.reshape(x[:,self.axis], (-1,1))
#return K.concatenate([1-response, response], axis=self.axis)
#e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
#s = K.sum(e, axis=self.axis, keepdims=True)
#return e / s
Source file: customlayers.py (project: deep-mil-for-whole-mammogram-classification, author: wentaozhu)
def call(self, x, mask=None):
newx = K.sort(x)
#response = K.reverse(newx, axes=1)
#response = K.sum(x> 0.5, axis=1) / self.k
return K.concatenate([newx[:,:self.softmink], newx[:,newx.shape[1]-self.softmaxk:]], axis=-1)
#response = K.reshape(newx,[-1,1])
#return K.concatenate([1-response, response], axis=self.label)
#response = K.reshape(x[:,self.axis], (-1,1))
#return K.concatenate([1-response, response], axis=self.axis)
#e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
#s = K.sum(e, axis=self.axis, keepdims=True)
#return e / s
Source file: customlayers.py (project: deep-mil-for-whole-mammogram-classification, author: wentaozhu)
def call(self, x, mask=None):
import theano.tensor as T
newx = T.sort(x)
#response = K.reverse(newx, axes=1)
#response = K.sum(x> 0.5, axis=1) / self.k
return newx
#response = K.reshape(newx,[-1,1])
#return K.concatenate([1-response, response], axis=self.label)
#response = K.reshape(x[:,self.axis], (-1,1))
#return K.concatenate([1-response, response], axis=self.axis)
#e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
#s = K.sum(e, axis=self.axis, keepdims=True)
#return e / s
Source file: customlayers.py (project: deep-mil-for-whole-mammogram-classification, author: wentaozhu)
def call(self, x, mask=None):
newx = K.sort(x)
#response = K.reverse(newx, axes=1)
#response = K.sum(x> 0.5, axis=1) / self.k
return K.concatenate([newx[:,:self.softmink], newx[:,newx.shape[1]-self.softmaxk:]], axis=-1)
#response = K.reshape(newx,[-1,1])
#return K.concatenate([1-response, response], axis=self.label)
#response = K.reshape(x[:,self.axis], (-1,1))
#return K.concatenate([1-response, response], axis=self.axis)
#e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
#s = K.sum(e, axis=self.axis, keepdims=True)
#return e / s
def to_configs(states, verbose=True, **kwargs):
base = setting['base']
width = states.shape[1] // base
height = states.shape[1] // base
load(width,height)
def build():
P = len(setting['panels'])
states = Input(shape=(height*base,width*base))
error = build_error(states, height, width, base)
matches = 1 - K.clip(K.sign(error - threshold),0,1)
# a, h, w, panel
matches = K.reshape(matches, [K.shape(states)[0], height * width, -1])
# a, pos, panel
matches = K.permute_dimensions(matches, [0,2,1])
# a, panel, pos
config = matches * K.arange(height*width,dtype='float')
config = K.sum(config, axis=-1)
return Model(states, wrap(states, config))
model = build()
return model.predict(states, **kwargs)
def tensor_swirl(image, center=None, strength=1, radius=100, rotation=0, cval=0.0, **kwargs):
# **kwargs is for unsupported options (ignored)
cval = tf.fill(K.shape(image)[0:1], cval)
shape = K.int_shape(image)[1:3]
if center is None:
center = np.array(shape) / 2
ys = np.expand_dims(np.repeat(np.arange(shape[0]), shape[1]),-1)
xs = np.expand_dims(np.tile (np.arange(shape[1]), shape[0]),-1)
map_xs, map_ys = swirl_mapping(xs, ys, center, rotation, strength, radius)
mapping = np.zeros((*shape, *shape))
for map_x, map_y, x, y in zip(map_xs, map_ys, xs, ys):
results = tensor_linear_interpolation(image, map_x, map_y, cval)
for _y, _x, w in results:
# mapping[int(y),int(x),int(_y),int(_x),] = w
mapping[int(_y),int(_x),int(y),int(x),] = w
results = tf.tensordot(image, K.variable(mapping), [[1,2],[0,1]])
# results = K.reshape(results, K.shape(image))
return results
def generate_gpu(configs,**kwargs):
configs = np.array(configs)
import math
size = int(math.sqrt(len(configs[0])))
base = panels.shape[1]
dim = base*size
def build():
P = 2
configs = Input(shape=(size*size,))
_configs = 1 - K.round((configs/2)+0.5) # from -1/1 to 1/0
configs_one_hot = K.one_hot(K.cast(_configs,'int32'), P)
configs_one_hot = K.reshape(configs_one_hot, [-1,P])
_panels = K.variable(panels)
_panels = K.reshape(_panels, [P, base*base])
states = tf.matmul(configs_one_hot, _panels)
states = K.reshape(states, [-1, size, size, base, base])
states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
states = K.reshape(states, [-1, size*base, size*base, 1])
states = K.spatial_2d_padding(states, padding=((pad,pad),(pad,pad)))
states = K.squeeze(states, -1)
return Model(configs, wrap(configs, states))
return preprocess(batch_swirl(build().predict(configs,**kwargs)))
def to_configs(states, verbose=True, **kwargs):
base = panels.shape[1]
dim = states.shape[1] - pad*2
size = dim // base
def build():
states = Input(shape=(dim+2*pad,dim+2*pad))
s = tensor_swirl(states, radius=dim+2*pad * relative_swirl_radius, **unswirl_args)
error = build_errors(s,base,pad,dim,size)
matches = 1 - K.clip(K.sign(error - threshold),0,1)
# a, h, w, panel
matches = K.reshape(matches, [K.shape(states)[0], size * size, -1])
# a, pos, panel
config = matches * K.arange(2,dtype='float')
config = K.sum(config, axis=-1)
# this is 0,1 configs; for compatibility, we need -1 and 1
config = - (config - 0.5)*2
return Model(states, wrap(states, K.round(config)))
return build().predict(states, **kwargs)
def generate_cpu(configs, **kwargs):
import math
size = int(math.sqrt(len(configs[0])))
base = panels.shape[1]
dim = base*size
def generate(config):
figure = np.zeros((dim,dim))
for pos,value in enumerate(config):
x = pos % size
y = pos // size
if value > 0:
figure[y*base:(y+1)*base,
x*base:(x+1)*base] = panels[0]
else:
figure[y*base:(y+1)*base,
x*base:(x+1)*base] = panels[1]
return preprocess(figure)
return np.array([ generate(c) for c in configs ]).reshape((-1,dim,dim))
def generate_gpu(configs, **kwargs):
import math
size = int(math.sqrt(len(configs[0])))
base = panels.shape[1]
dim = base*size
def build():
P = 2
configs = Input(shape=(size*size,))
_configs = 1 - K.round((configs/2)+0.5) # from -1/1 to 1/0
configs_one_hot = K.one_hot(K.cast(_configs,'int32'), P)
configs_one_hot = K.reshape(configs_one_hot, [-1,P])
_panels = K.variable(panels)
_panels = K.reshape(_panels, [P, base*base])
states = tf.matmul(configs_one_hot, _panels)
states = K.reshape(states, [-1, size, size, base, base])
states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
states = K.reshape(states, [-1, size*base, size*base])
return Model(configs, wrap(configs, states))
return build().predict(np.array(configs),**kwargs)
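A self-contained sketch of the one-hot / matmul / permute tiling trick used in generate_gpu above, with tiny hypothetical 2x2 panels and a single 2x2 board (the real code takes panels, size and base from module-level state).

import numpy as np
import tensorflow as tf
from keras import backend as K

base, size, P = 2, 2, 2
panels = np.array([[[1., 1.], [1., 1.]],   # panel drawn for a +1 cell
                   [[0., 0.], [0., 0.]]])  # panel drawn for a -1 cell
configs = np.array([[1, -1, -1, 1]], dtype='float32')  # one 2x2 board, row-major
_configs = 1 - np.round(configs / 2 + 0.5)              # map -1/1 to 1/0
one_hot = K.one_hot(K.cast(K.constant(_configs), 'int32'), P)
one_hot = K.reshape(one_hot, [-1, P])                   # (cells, P)
flat_panels = K.reshape(K.constant(panels), [P, base * base])
states = tf.matmul(one_hot, flat_panels)                # pick a panel per cell
states = K.reshape(states, [-1, size, size, base, base])
states = K.permute_dimensions(states, [0, 1, 3, 2, 4])  # interleave panel rows into board rows
states = K.reshape(states, [-1, size * base, size * base])
print(K.eval(states)[0])                                # the rendered 4x4 board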
def call(self, x, mask=None):
stride = self.subsample_length
output_length, feature_dim, nb_filter = self.W_shape
xs = []
for i in range(output_length):
slice_length = slice(i * stride, i * stride + self.filter_length)
xs.append(K.reshape(x[:, slice_length, :], (1, -1, feature_dim)))
x_aggregate = K.concatenate(xs, axis=0)
# (output_length, batch_size, nb_filter)
output = K.batch_dot(x_aggregate, self.W)
output = K.permute_dimensions(output, (1, 0, 2))
if self.bias:
output += K.reshape(self.b, (1, output_length, nb_filter))
output = self.activation(output)
return output
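A quick, standalone shape check for the K.batch_dot call above, with hypothetical sizes (output_length 6, batch 2, feature_dim 8, nb_filter 16).

import numpy as np
from keras import backend as K

x_aggregate = K.constant(np.random.rand(6, 2, 8))  # (output_length, batch_size, feature_dim)
W = K.constant(np.random.rand(6, 8, 16))           # (output_length, feature_dim, nb_filter)
out = K.batch_dot(x_aggregate, W)                  # contracts feature_dim per output position
print(K.int_shape(out))                            # (6, 2, 16)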
Source file: itosfm.py (project: State-Frequency-Memory-stock-prediction, author: z331565360)
def get_initial_states(self, x):
init_state_h = K.zeros_like(x)
init_state_h = K.sum(init_state_h, axis = 1)
reducer_s = K.zeros((self.input_dim, self.hidden_dim))
reducer_f = K.zeros((self.hidden_dim, self.freq_dim))
reducer_p = K.zeros((self.hidden_dim, self.output_dim))
init_state_h = K.dot(init_state_h, reducer_s)
init_state_p = K.dot(init_state_h, reducer_p)
init_state = K.zeros_like(init_state_h)
init_freq = K.dot(init_state_h, reducer_f)
init_state = K.reshape(init_state, (-1, self.hidden_dim, 1))
init_freq = K.reshape(init_freq, (-1, 1, self.freq_dim))
init_state_S_re = init_state * init_freq
init_state_S_im = init_state * init_freq
init_state_time = K.cast_to_floatx(0.)
initial_states = [init_state_p, init_state_h, init_state_S_re, init_state_S_im, init_state_time]
return initial_states
Source file: itosfm.py (project: State-Frequency-Memory-stock-prediction, author: z331565360)
def get_initial_states(self, x):
init_state_h = K.zeros_like(x)
init_state_h = K.sum(init_state_h, axis = 1)
reducer_s = K.zeros((self.input_dim, self.hidden_dim))
reducer_f = K.zeros((self.hidden_dim, self.freq_dim))
reducer_p = K.zeros((self.hidden_dim, self.output_dim))
init_state_h = K.dot(init_state_h, reducer_s)
init_state_p = K.dot(init_state_h, reducer_p)
init_state = K.zeros_like(init_state_h)
init_freq = K.dot(init_state_h, reducer_f)
init_state = K.reshape(init_state, (-1, self.hidden_dim, 1))
init_freq = K.reshape(init_freq, (-1, 1, self.freq_dim))
init_state_S_re = init_state * init_freq
init_state_S_im = init_state * init_freq
init_state_time = K.cast_to_floatx(0.)
initial_states = [init_state_p, init_state_h, init_state_S_re, init_state_S_im, init_state_time]
return initial_states
def setup_output(self, x):
"""
Setup output tensor
"""
x_max = K.max(x, axis=1)
x_max = K.flatten(x_max)
z = K.dot(x_max, self.w_proj_to_z) #+ self.b_proj_to_z
hidden = K.dot(z, self.weights[0]) + self.biases[0]
hidden = K.reshape(hidden, shape=(self.input_channels,
self.hidden_dim))
output = K.dot(hidden, self.weights[1]) + self.biases[1]
self.output = K.reshape(output, (self.num_filters, self.input_channels,
*self.output_shape))
return self.output
def call(self, x, mask=None):
# x should be an output and a target
assert len(x) == 2
losses = _per_sample_loss(self.loss, mask, x)
if self.fast:
grads = K.sqrt(sum([
K.sum(K.square(g), axis=1)
for g in K.gradients(losses, self.parameter_list)
]))
else:
nb_samples = K.shape(losses)[0]
grads = K.map_fn(
lambda i: self._grad_norm(losses[i]),
K.arange(0, nb_samples),
dtype=K.floatx()
)
return K.reshape(grads, (-1, 1))
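A simplified, standalone sketch of the "fast" branch above: the L2 norm of the gradients of a scalar loss with respect to a small set of toy parameters. The per-sample bookkeeping of the original is omitted, and all names and values here are hypothetical.

from keras import backend as K

w = K.variable([[1.0, 2.0], [3.0, 4.0]])
b = K.variable([0.5, -0.5])
x = K.constant([[1.0, 1.0]])
loss = K.sum(K.square(K.dot(x, w) + b))
grads = K.gradients(loss, [w, b])
grad_norm = K.sqrt(sum(K.sum(K.square(g)) for g in grads))
print(K.eval(grad_norm))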
def call(self, x, mask=None):
conv_out = K.conv2d(x, self.W, strides=self.strides,
padding=self.padding,
data_format=self.data_format,
filter_shape=self.kernel_shape)
if self.data_format == 'channels_first':
# Complex-cell filter operation
conv_out1 = K.sqrt(K.square(conv_out[:, :self.filters_complex, :, :]) + K.square(conv_out[:, self.filters_complex:2*self.filters_complex, :, :]) + K.epsilon())
# Simple-cell filter operation
conv_out2 = K.concatenate([conv_out1, conv_out[:, 2*self.filters_complex:, :, :]], axis=1)
elif self.data_format == 'channels_last':
# Complex-cell filter operation
conv_out1 = K.sqrt(K.square(conv_out[:, :, :, :self.filters_complex]) + K.square(conv_out[:, :, :, self.filters_complex:2*self.filters_complex]) + K.epsilon())
# Simple-cell filter operation
conv_out2 = K.concatenate([conv_out1, conv_out[:, :, :, 2*self.filters_complex:]], axis=3)
if self.bias:
if self.data_format == 'channels_first':
conv_out2 += K.reshape(self.b, (1, self.filters_complex + self.filters_simple, 1, 1))
elif self.data_format == 'channels_last':
conv_out2 += K.reshape(self.b, (1, 1, 1, self.filters_complex + self.filters_simple))
return self.activation(conv_out2)
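A standalone shape sketch of the complex-cell (quadrature-pair) magnitude computed above, in channels_last layout with hypothetical filter counts (4 complex, 3 simple).

import numpy as np
from keras import backend as K

fc = 4                                                      # filters_complex
conv_out = K.constant(np.random.rand(2, 8, 8, 2 * fc + 3))  # (batch, h, w, 2*fc + filters_simple)
complex_out = K.sqrt(K.square(conv_out[..., :fc]) +
                     K.square(conv_out[..., fc:2 * fc]) + K.epsilon())
combined = K.concatenate([complex_out, conv_out[..., 2 * fc:]], axis=-1)
print(K.int_shape(complex_out), K.int_shape(combined))      # (2, 8, 8, 4) (2, 8, 8, 7)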