def get_constants(self, inputs, training=None):
    constants = self.recurrent_layer.get_constants(
        inputs=inputs,
        training=training
    )
    if 0 < self.dense_dropout < 1:
        # Build a (batch, units) tensor of ones and drop it once, so the same
        # dropout mask is reused at every timestep of the recurrence.
        ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.recurrent_layer.units))

        def dropped_inputs():
            return K.dropout(ones, self.dense_dropout)

        out_dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training)]
        constants.append(out_dp_mask)
    else:
        constants.append([K.cast_to_floatx(1.)])
    return constants
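This mask-of-ones pattern recurs throughout the snippets on this page: reshape one timestep of the input into a (batch, 1) column of ones, tile it to the mask's width, drop it once, and pass it through constants so every timestep sees the same mask. A minimal standalone sketch of the trick, assuming an older Keras 2.x with a TensorFlow 1.x backend (the shapes and the 0.3 rate are arbitrary):

import numpy as np
from keras import backend as K

x = K.variable(np.zeros((2, 10, 8)))                # (batch, timesteps, features)
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))  # (batch, 1)
ones = K.tile(ones, (1, 8))                         # (batch, features)
# One fixed mask per sample; entries are 0 or 1/(1 - 0.3) because K.dropout rescales.
mask = K.in_train_phase(K.dropout(ones, 0.3), ones, training=True)
print(K.eval(mask))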
def call(self, x, mask=None):
    mean = super(IntraAttention, self).call(x, mask)
    # x: (batch_size, input_length, input_dim)
    # mean: (batch_size, input_dim)
    # Averaging ones_like(x) over the batch and feature axes yields a
    # (1, input_length) tensor of ones, used below to tile the mean over time.
    ones = K.expand_dims(K.mean(K.ones_like(x), axis=(0, 2)), dim=0)  # (1, input_length)
    # (batch_size, input_length, input_dim)
    tiled_mean = K.permute_dimensions(K.dot(K.expand_dims(mean), ones), (0, 2, 1))
    if mask is not None:
        if K.ndim(mask) > K.ndim(x):
            # Assuming this is because of the bug in Bidirectional. Temporary fix follows.
            # TODO: Fix Bidirectional.
            mask = K.any(mask, axis=(-2, -1))
        if K.ndim(mask) < K.ndim(x):
            mask = K.expand_dims(mask)
        x = switch(mask, x, K.zeros_like(x))
    # (batch_size, input_length, proj_dim)
    projected_combination = K.tanh(K.dot(x, self.vector_projector) +
                                   K.dot(tiled_mean, self.mean_projector))
    scores = K.dot(projected_combination, self.scorer)  # (batch_size, input_length)
    weights = K.softmax(scores)  # (batch_size, input_length)
    attended_x = K.sum(K.expand_dims(weights) * x, axis=1)  # (batch_size, input_dim)
    return attended_x
Source file: rnnlayer.py (project: recurrent-attention-for-QA-SQUAD-based-on-keras, author: wentaozhu)
def get_constants(self, inputs, training=None):
    constants = []
    '''if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.units))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:'''
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Source file: QnA.py (project: recurrent-attention-for-QA-SQUAD-based-on-keras, author: wentaozhu)
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
def _ternarize(W, H=1):
    '''The weights' ternarization function.

    # References:
    - [Recurrent Neural Networks with Limited Numerical Precision](http://arxiv.org/abs/1608.06902)
    - [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)
    '''
    W /= H
    ones = K.ones_like(W)
    zeros = K.zeros_like(W)
    Wt = switch(W > 0.5, ones, switch(W <= -0.5, -ones, zeros))
    Wt *= H
    return Wt
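For reference, here is a small NumPy mirror of the same thresholding rule (an illustrative sketch, not part of the project): values above H/2 map to +H, values at or below -H/2 map to -H, and everything in between maps to zero.

import numpy as np

def ternarize_np(W, H=1.0):
    # NumPy equivalent of _ternarize above, for eyeballing the thresholds.
    W = W / H
    return np.where(W > 0.5, 1.0, np.where(W <= -0.5, -1.0, 0.0)) * H

print(ternarize_np(np.array([-1.2, -0.5, -0.3, 0.4, 0.6])))
# [-1. -1.  0.  0.  1.]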
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
def contingency_table(y, z):
    """Compute contingency table."""
    y = K.round(y)
    z = K.round(z)

    def count_matches(a, b):
        tmp = K.concatenate([a, b])
        return K.sum(K.cast(K.all(tmp, -1), K.floatx()))

    ones = K.ones_like(y)
    zeros = K.zeros_like(y)
    y_ones = K.equal(y, ones)
    y_zeros = K.equal(y, zeros)
    z_ones = K.equal(z, ones)
    z_zeros = K.equal(z, zeros)
    tp = count_matches(y_ones, z_ones)
    tn = count_matches(y_zeros, z_zeros)
    fp = count_matches(y_zeros, z_ones)
    fn = count_matches(y_ones, z_zeros)
    return (tp, tn, fp, fn)
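The four counts are enough to derive the usual binary-classification metrics. A hedged sketch built on the function above, assuming y and z are 0/1 tensors of the same shape:

from keras import backend as K

def precision_recall(y, z):
    # Derive precision and recall from the contingency table above;
    # K.epsilon() guards against division by zero.
    tp, tn, fp, fn = contingency_table(y, z)
    precision = tp / (tp + fp + K.epsilon())
    recall = tp / (tp + fn + K.epsilon())
    return precision, recall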
Source file: recurrent_convolutional.py (project: keras-prednet, author: kunimasa-kawasaki)
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * self.output_dim, 1)
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * input_dim, 1)
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.0))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.0))
    return constants
def yoloconfidloss(y_true, y_pred, t):
    # lamda_confid_obj / lamda_confid_noobj are weighting constants defined
    # elsewhere in the source file; tf.select is the pre-TF-1.0 name of tf.where.
    real_y_true = tf.select(t, y_true, K.zeros_like(y_true))
    pobj = K.sigmoid(y_pred)
    lo = K.square(real_y_true - pobj)
    value_if_true = lamda_confid_obj * lo
    value_if_false = lamda_confid_noobj * lo
    loss1 = tf.select(t, value_if_true, value_if_false)
    loss = K.mean(loss1)
    # Average predicted objectness over cells without an object...
    noobj = tf.select(t, K.zeros_like(y_pred), pobj)
    noobjcount = tf.select(t, K.zeros_like(y_pred), K.ones_like(y_pred))
    ave_anyobj = K.sum(noobj) / K.sum(noobjcount)
    # ave_anyobj = K.mean(pobj)
    # ...and over cells that do contain one.
    obj = tf.select(t, pobj, K.zeros_like(y_pred))
    objcount = tf.select(t, K.ones_like(y_pred), K.zeros_like(y_pred))
    # ave_obj = K.mean(K.sum(obj, axis=1) / (K.sum(objcount, axis=1) + 0.000001))  # prevent div 0
    ave_obj = K.sum(obj) / (K.sum(objcount) + 0.000001)  # prevent div 0
    return loss, ave_anyobj, ave_obj
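tf.select was removed in TensorFlow 1.0; on any recent version the same conditional masking is spelled tf.where. A one-line sketch, assuming t is a boolean tensor with the same shape as y_pred:

real_y_true = tf.where(t, y_true, tf.zeros_like(y_true))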
Source file: losses.py (project: Kaggle-Carvana-Image-Masking-Challenge, author: petrosgk)
def weighted_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # To keep the output the same size, the kernel size must be an odd number.
    if K.int_shape(y_pred)[1] == 128:
        kernel_size = 11
    elif K.int_shape(y_pred)[1] == 256:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 512:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 1024:
        kernel_size = 41
    else:
        raise ValueError('Unexpected image size')
    averaged_mask = K.pool2d(
        y_true, pool_size=(kernel_size, kernel_size), strides=(1, 1),
        padding='same', pool_mode='avg')
    # Pixels whose local average lies strictly between 0.005 and 0.995 sit on a
    # mask border; give them extra weight.
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * K.cast(K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)  # renormalize so the total weight is unchanged
    loss = 1 - weighted_dice_coeff(y_true, y_pred, weight)
    return loss
Source file: losses.py (project: Kaggle-Carvana-Image-Masking-Challenge, author: petrosgk)
def weighted_bce_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # To keep the output the same size, the kernel size must be an odd number.
    if K.int_shape(y_pred)[1] == 128:
        kernel_size = 11
    elif K.int_shape(y_pred)[1] == 256:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 512:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 1024:
        kernel_size = 41
    else:
        raise ValueError('Unexpected image size')
    averaged_mask = K.pool2d(
        y_true, pool_size=(kernel_size, kernel_size), strides=(1, 1),
        padding='same', pool_mode='avg')
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * K.cast(K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)
    loss = weighted_bce_loss(y_true, y_pred, weight) + (1 - weighted_dice_coeff(y_true, y_pred, weight))
    return loss
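Both weighted losses above delegate to weighted_dice_coeff (and weighted_bce_loss), which live elsewhere in losses.py and are not shown on this page. A plausible reconstruction of the Dice coefficient in the usual weighted soft-Dice form; the squared weighting is an assumption, not the verified original:

from keras import backend as K

def weighted_dice_coeff(y_true, y_pred, weight):
    # Hypothetical reconstruction of the helper used above.
    smooth = 1.
    w = weight * weight  # assumed: border weights enter squared
    intersection = y_true * y_pred
    return (2. * K.sum(w * intersection) + smooth) / \
           (K.sum(w * y_true) + K.sum(w * y_pred) + smooth)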
Source file: layer_normalization_RNN.py (project: New_Layers-Keras-Tensorflow, author: WeidiXie)
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * self.output_dim, 1)
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * input_dim, 1)
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(2)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(2)])
    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(2)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(2)])
    return constants
def call(self, inputs, mask=None):
    assert(isinstance(inputs, list) and len(inputs) == 5)
    uQ, WQ_u, WQ_v, v, VQ_r = inputs
    uQ_mask = mask[0] if mask is not None else None
    ones = K.ones_like(K.sum(uQ, axis=1, keepdims=True))  # (B, 1, 2H)
    s_hat = K.dot(uQ, WQ_u)
    # The ones tensor broadcasts the batch-independent bias term WQ_v . VQ_r
    # across the batch and time dimensions.
    s_hat += K.dot(ones, K.dot(WQ_v, VQ_r))
    s_hat = K.tanh(s_hat)
    s = K.dot(s_hat, v)
    s = K.batch_flatten(s)
    a = softmax(s, mask=uQ_mask, axis=1)
    rQ = K.batch_dot(uQ, a, axes=[1, 1])
    return rQ
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.hidden_recurrent_dim))
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        # Note: the recurrent mask is tiled to input_dim here, where most
        # variants on this page use the number of recurrent units.
        ones = K.tile(ones, (1, self.input_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
def discriminator_loss(y_true, y_pred):
    BATCH_SIZE = 10
    # The batch is presumably real images followed by generated ones:
    # the first BATCH_SIZE samples get label 1, the rest label 0. y_true is ignored.
    labels = K.concatenate([K.ones_like(K.flatten(y_pred[:BATCH_SIZE, :, :, :])),
                            K.zeros_like(K.flatten(y_pred[:BATCH_SIZE, :, :, :]))])
    return K.mean(K.binary_crossentropy(K.flatten(y_pred), labels), axis=-1)

def discriminator_on_generator_loss(y_true, y_pred):
    BATCH_SIZE = 10
    # The generator is rewarded when the discriminator outputs all ones.
    return K.mean(K.binary_crossentropy(K.flatten(y_pred), K.ones_like(K.flatten(y_pred))), axis=-1)
def complementary_mask(x):
    return K.ones_like(x) - x
def fill_background_mask(x):
    tensor, mask = x
    rep = K.int_shape(tensor)[4] - 1
    full_mask = K.ones_like(mask) - mask
    # Note: this return value is discarded in the original code; it was
    # presumably meant to be assigned, e.g. to broadcast the mask across channels.
    K.repeat_elements(mask, rep, axis=4)
    return tensor + full_mask
def dice_whole_mod(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1, 2, 3, 4, 0:

        Dice = 2 * TP / (T + P)

    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    # mask = K.expand_dims(K.sum(y_true, axis=4), axis=4)
    # cmp_mask = K.concatenate([K.ones_like(mask) - mask, K.zeros_like(mask), K.zeros_like(mask)], axis=4)
    # y_pred = y_pred + cmp_mask
    y_true = y_true[:, :, :, :, :3]
    # Turn soft predictions into a hard one-hot decision per voxel.
    y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))
    mask_true = K.sum(y_true, axis=4)
    mask_pred = K.sum(y_pred_decision, axis=4) * K.sum(y_true, axis=4)
    y_sum = K.sum(mask_true * mask_pred)
    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
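As a quick sanity check, feeding the metric a one-hot ground truth as its own prediction should give a Dice score of roughly 1. An illustrative sketch; shapes follow the (batch, x, y, z, classes) layout the function assumes, with TF 1.x-style graph evaluation:

import numpy as np
import tensorflow as tf
from keras import backend as K

y = np.zeros((1, 4, 4, 4, 5), dtype='float32')
y[..., 0] = 1.0                        # every voxel labelled class 0
yt = K.constant(y)
print(K.eval(dice_whole_mod(yt, yt)))  # ~1.0 for a perfect prediction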