def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
    '''Given the observations x, adds the start boundary energy b_start (resp. the
    end boundary energy b_end) to the start (resp. end) elements and multiplies by
    the mask.'''
if mask is None:
if b_start is not None:
x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
if b_end is not None:
x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
else:
mask = K.cast(mask, K.floatx())
mask = K.expand_dims(mask, 2)
x *= mask
if b_start is not None:
mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
x = x + start_mask * b_start
if b_end is not None:
mask_l = K.concatenate([mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
x = x + end_mask * b_end
return x
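A minimal usage sketch (assuming a Keras 2 backend; shapes and values are illustrative): with a mask, b_start is added at the first unmasked timestep and b_end at the last one.
import numpy as np
from keras import backend as K

x = K.constant(np.zeros((2, 4, 3)))               # (batch, timesteps, num_tags)
b_start = K.constant(np.full((3,), 1.0))          # start boundary energy per tag
b_end = K.constant(np.full((3,), -1.0))           # end boundary energy per tag
mask = K.constant(np.array([[1., 1., 1., 1.],
                            [1., 1., 0., 0.]]))   # second sequence is shorter
out = add_boundary_energy(x, b_start, b_end, mask)
print(K.eval(out)[1])                             # +1.0 row at t=0, -1.0 row at t=1, zeros after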
def call(self, x, mask=None):
# x: (batch_size, input_length, input_dim)
if mask is None:
return K.mean(x, axis=1) # (batch_size, input_dim)
else:
# This is to remove padding from the computational graph.
if K.ndim(mask) > K.ndim(x):
# This is due to the bug in Bidirectional that is passing the input mask
# instead of computing output mask.
# TODO: Fix the implementation of Bidirectional.
mask = K.any(mask, axis=(-2, -1))
if K.ndim(mask) < K.ndim(x):
mask = K.expand_dims(mask)
masked_input = switch(mask, x, K.zeros_like(x))
weights = K.cast(mask / (K.sum(mask) + K.epsilon()), 'float32')
return K.sum(masked_input * weights, axis=1) # (batch_size, input_dim)
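For reference, a standalone per-sequence masked mean can be written directly with backend ops; this is only an illustrative sketch (the switch helper used above is assumed to be the library's own backend function), not a drop-in replacement for the layer method.
import numpy as np
from keras import backend as K

x = K.constant(np.random.rand(2, 5, 3))              # (batch_size, input_length, input_dim)
mask = K.constant(np.array([[1., 1., 1., 0., 0.],
                            [1., 1., 0., 0., 0.]]))  # (batch_size, input_length)
mask_e = K.expand_dims(mask)                         # (batch_size, input_length, 1)
mean = K.sum(x * mask_e, axis=1) / (K.sum(mask_e, axis=1) + K.epsilon())
print(K.eval(mean).shape)                            # (2, 3)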
def _ternarize(W, H=1):
'''The weights' ternarization function,
# References:
- [Recurrent Neural Networks with Limited Numerical Precision](http://arxiv.org/abs/1608.06902)
- [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)
'''
W /= H
ones = K.ones_like(W)
zeros = K.zeros_like(W)
Wt = switch(W > 0.5, ones, switch(W <= -0.5, -ones, zeros))
Wt *= H
return Wt
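The switch helper used above is assumed to be the project's elementwise backend equivalent of K.switch; the thresholding rule itself can be checked directly on a Keras 2 backend (values are illustrative).
import numpy as np
from keras import backend as K

W = K.constant(np.array([-0.9, -0.3, 0.0, 0.4, 0.8]))
Wt = K.switch(K.greater(W, 0.5), K.ones_like(W),
              K.switch(K.less_equal(W, -0.5), -K.ones_like(W), K.zeros_like(W)))
print(K.eval(Wt))   # [-1.  0.  0.  0.  1.]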
Source: itosfm.py (project: State-Frequency-Memory-stock-prediction, author: z331565360)
def get_initial_states(self, x):
    # x: (batch, timesteps, input_dim); all-zero initial states are built from a
    # zeros_like of the input so that they carry the right batch dimension
    init_state_h = K.zeros_like(x)
    init_state_h = K.sum(init_state_h, axis=1)             # (batch, input_dim)
    reducer_s = K.zeros((self.input_dim, self.hidden_dim))
    reducer_f = K.zeros((self.hidden_dim, self.freq_dim))
    reducer_p = K.zeros((self.hidden_dim, self.output_dim))
    init_state_h = K.dot(init_state_h, reducer_s)          # (batch, hidden_dim)
    init_state_p = K.dot(init_state_h, reducer_p)          # (batch, output_dim)
    init_state = K.zeros_like(init_state_h)
    init_freq = K.dot(init_state_h, reducer_f)             # (batch, freq_dim)
    init_state = K.reshape(init_state, (-1, self.hidden_dim, 1))
    init_freq = K.reshape(init_freq, (-1, 1, self.freq_dim))
    init_state_S_re = init_state * init_freq               # (batch, hidden_dim, freq_dim)
    init_state_S_im = init_state * init_freq
    init_state_time = K.cast_to_floatx(0.)
    initial_states = [init_state_p, init_state_h, init_state_S_re, init_state_S_im, init_state_time]
    return initial_states
Source: itosfm.py (project: State-Frequency-Memory-stock-prediction, author: z331565360)
def get_initial_states(self, x):
init_state_h = K.zeros_like(x)
init_state_h = K.sum(init_state_h, axis = 1)
reducer_s = K.zeros((self.input_dim, self.hidden_dim))
reducer_f = K.zeros((self.hidden_dim, self.freq_dim))
reducer_p = K.zeros((self.hidden_dim, self.output_dim))
init_state_h = K.dot(init_state_h, reducer_s)
init_state_p = K.dot(init_state_h, reducer_p)
init_state = K.zeros_like(init_state_h)
init_freq = K.dot(init_state_h, reducer_f)
init_state = K.reshape(init_state, (-1, self.hidden_dim, 1))
init_freq = K.reshape(init_freq, (-1, 1, self.freq_dim))
init_state_S_re = init_state * init_freq
init_state_S_im = init_state * init_freq
init_state_time = K.cast_to_floatx(0.)
initial_states = [init_state_p, init_state_h, init_state_S_re, init_state_S_im, init_state_time]
return initial_states
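A shape sketch of the state initialization above, using hypothetical dimensions input_dim=4, hidden_dim=8, freq_dim=5 and a dummy batch of 3 sequences; the constants stand in for the K.zeros reducer matrices, which exist only to carry the batch dimension through K.dot.
import numpy as np
from keras import backend as K

x = K.constant(np.zeros((3, 10, 4)))           # (batch, timesteps, input_dim)
reducer_s = K.constant(np.zeros((4, 8)))       # stands in for K.zeros((input_dim, hidden_dim))
reducer_f = K.constant(np.zeros((8, 5)))       # stands in for K.zeros((hidden_dim, freq_dim))
h = K.dot(K.sum(K.zeros_like(x), axis=1), reducer_s)      # (batch, hidden_dim)
S = (K.reshape(K.zeros_like(h), (-1, 8, 1)) *
     K.reshape(K.dot(h, reducer_f), (-1, 1, 5)))          # (batch, hidden_dim, freq_dim)
print(K.eval(h).shape, K.eval(S).shape)        # (3, 8) (3, 8, 5)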
def _forward(x, reduce_step, initial_states, U, mask=None):
'''Forward recurrence of the linear chain crf.'''
def _forward_step(energy_matrix_t, states):
alpha_tm1 = states[-1]
new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
return new_states[0], new_states
U_shared = K.expand_dims(K.expand_dims(U, 0), 0)
if mask is not None:
mask = K.cast(mask, K.floatx())
mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
U_shared = U_shared * mask_U
inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)
last, values, _ = K.rnn(_forward_step, inputs, initial_states)
return last, values
def _backward(gamma, mask):
'''Backward recurrence of the linear chain crf.'''
gamma = K.cast(gamma, 'int32')
def _backward_step(gamma_t, states):
y_tm1 = K.squeeze(states[0], 0)
y_t = batch_gather(gamma_t, y_tm1)
return y_t, [K.expand_dims(y_t, 0)]
initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)]
_, y_rev, _ = K.rnn(_backward_step,
gamma,
initial_states,
go_backwards=True)
y = K.reverse(y_rev, 1)
if mask is not None:
mask = K.cast(mask, dtype='int32')
# mask output
y *= mask
# set masked values to -1
y += -(1 - mask)
return y
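The reduce_step argument determines which recurrence _forward computes. Below is a hypothetical driver for the log-partition function, sketched from the recurrences above rather than copied from the library: because _forward appends a trailing zero-energy step, every entry of the final alpha equals log Z, so any column can be returned.
def free_energy_sketch(x, U, mask=None):
    # x: unary energies (batch, timesteps, num_tags); U: (num_tags, num_tags) transitions
    initial_states = [x[:, 0, :]]
    last_alpha, _ = _forward(x,
                             lambda B: [K.logsumexp(B, axis=1)],  # marginalize the previous tag
                             initial_states, U, mask)
    return last_alpha[:, 0]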
def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
    '''Given the observations x, adds the start boundary energy b_start (resp. the
    end boundary energy b_end) to the start (resp. end) elements and multiplies by
    the mask.'''
if mask is None:
if b_start is not None:
x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
if b_end is not None:
x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
else:
mask = K.cast(mask, K.floatx())
mask = K.expand_dims(mask, 2)
x *= mask
if b_start is not None:
mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
x = x + start_mask * b_start
if b_end is not None:
mask_l = K.concatenate([mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
x = x + end_mask * b_end
return x
def _forward(x, reduce_step, initial_states, U, mask=None):
'''Forward recurrence of the linear chain crf.'''
def _forward_step(energy_matrix_t, states):
alpha_tm1 = states[-1]
new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
return new_states[0], new_states
U_shared = K.expand_dims(K.expand_dims(U, 0), 0)
if mask is not None:
mask = K.cast(mask, K.floatx())
mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
U_shared = U_shared * mask_U
inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)
last, values, _ = K.rnn(_forward_step, inputs, initial_states)
return last, values
def _backward(gamma, mask):
'''Backward recurrence of the linear chain crf.'''
gamma = K.cast(gamma, 'int32')
def _backward_step(gamma_t, states):
y_tm1 = K.squeeze(states[0], 0)
y_t = KC.batch_gather(gamma_t, y_tm1)
return y_t, [K.expand_dims(y_t, 0)]
initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)]
_, y_rev, _ = K.rnn(_backward_step,
gamma,
initial_states,
go_backwards=True)
y = K.reverse(y_rev, 1)
if mask is not None:
mask = K.cast(mask, dtype='int32')
# mask output
y *= mask
# set masked values to -1
y += -(1 - mask)
return y
def contingency_table(y, z):
"""Compute contingency table."""
y = K.round(y)
z = K.round(z)
def count_matches(a, b):
tmp = K.concatenate([a, b])
return K.sum(K.cast(K.all(tmp, -1), K.floatx()))
ones = K.ones_like(y)
zeros = K.zeros_like(y)
y_ones = K.equal(y, ones)
y_zeros = K.equal(y, zeros)
z_ones = K.equal(z, ones)
z_zeros = K.equal(z, zeros)
tp = count_matches(y_ones, z_ones)
tn = count_matches(y_zeros, z_zeros)
fp = count_matches(y_zeros, z_ones)
fn = count_matches(y_ones, z_zeros)
return (tp, tn, fp, fn)
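Hedged usage example: the four counts can be turned into precision and recall on small constant tensors (values are illustrative; assumes a Keras 2 backend).
import numpy as np
from keras import backend as K

y = K.constant(np.array([[1.], [0.], [1.], [1.]]))       # ground truth
z = K.constant(np.array([[0.9], [0.2], [0.4], [0.8]]))   # predicted probabilities
tp, tn, fp, fn = contingency_table(y, z)
precision = tp / (tp + fp + K.epsilon())
recall = tp / (tp + fn + K.epsilon())
print(K.eval(precision), K.eval(recall))                 # ~1.0 and ~0.667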
def f1_score_keras(y_true, y_pred):
#convert probas to 0,1
y_ppred = K.zeros_like(y_true)
y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)
    # for each class: how many were classified as said class
pred_cnt = K.sum(y_pred_ones, axis=0)
#for each class: how many are true members of said class
gold_cnt = K.sum(y_true, axis=0)
#precision for each class
precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred/pred_cnt)
#recall for each class
recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred/gold_cnt)
#f1 for each class
f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2*(precision*recall)/(precision+recall))
#return average f1 score over all classes
return K.mean(f1_class)
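A plain-NumPy cross-check of the per-class F1 logic above, independent of the Keras backend (values are illustrative).
import numpy as np

y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]])
y_pred = np.array([[0.7, 0.2, 0.1], [0.1, 0.3, 0.6],
                   [0.2, 0.1, 0.7], [0.3, 0.4, 0.3]])
pred_ones = np.eye(3)[y_pred.argmax(axis=-1)]        # one-hot of the predicted class
tp = (y_true * pred_ones).sum(axis=0)
pred_cnt = pred_ones.sum(axis=0)
gold_cnt = y_true.sum(axis=0)
precision = np.divide(tp, pred_cnt, out=np.zeros(3), where=pred_cnt > 0)
recall = np.divide(tp, gold_cnt, out=np.zeros(3), where=gold_cnt > 0)
f1 = np.divide(2 * precision * recall, precision + recall,
               out=np.zeros(3), where=(precision + recall) > 0)
print(f1.mean())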
def f1_score_taskB(y_true, y_pred):
#convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # tensors do not support item assignment; add a one-hot of the argmax instead
    # (assumes the class dimension of y_true is statically known)
    y_pred_ones = y_pred_ones + K.one_hot(K.argmax(y_pred, axis=-1), K.int_shape(y_true)[-1])
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)
    # for each class: how many were classified as said class
pred_cnt = K.sum(y_pred_ones, axis=0)
#for each class: how many are true members of said class
gold_cnt = K.sum(y_true, axis=0)
#precision for each class
precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred/pred_cnt)
#recall for each class
recall = K.switch(K.equal(gold_cnt, 0), 0, y_true_pred/gold_cnt)
#f1 for each class
f1_class = K.switch(K.equal(precision + recall, 0), 0, 2*(precision*recall)/(precision+recall))
    # return f1 score per class
return f1_class
def f1_score_semeval(y_true, y_pred):
# convert probas to 0,1
y_ppred = K.zeros_like(y_true)
y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)
    # for each class: how many were classified as said class
pred_cnt = K.sum(y_pred_ones, axis=0)
# for each class: how many are true members of said class
gold_cnt = K.sum(y_true, axis=0)
# precision for each class
precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred / pred_cnt)
# recall for each class
recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred / gold_cnt)
# f1 for each class
f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))
    # return the average f1 over classes 0 and 2
return (f1_class[0] + f1_class[2])/2.0
def precision_keras(y_true, y_pred):
#convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # tensors do not support item assignment; add a one-hot of the argmax instead
    # (assumes the class dimension of y_true is statically known)
    y_pred_ones = y_pred_ones + K.one_hot(K.argmax(y_pred, axis=-1), K.int_shape(y_true)[-1])
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)
    # for each class: how many were classified as said class
pred_cnt = K.sum(y_pred_ones, axis=0)
#precision for each class
precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred/pred_cnt)
    # return average precision over all classes
return K.mean(precision)
def f1_score_task3(y_true, y_pred):
#convert probas to 0,1
y_ppred = K.zeros_like(y_true)
y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)
    # for each class: how many were classified as said class
pred_cnt = K.sum(y_pred_ones, axis=0)
#for each class: how many are true members of said class
gold_cnt = K.sum(y_true, axis=0)
#precision for each class
precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred/pred_cnt)
#recall for each class
recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred/gold_cnt)
#f1 for each class
f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2*(precision*recall)/(precision+recall))
    # return the f1 score of class 1
return f1_class[1]
def f1_score_taskB(y_true, y_pred):
# convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # tensors do not support item assignment; add a one-hot of the argmax instead
    # (assumes the class dimension of y_true is statically known)
    y_pred_ones = y_pred_ones + K.one_hot(K.argmax(y_pred, axis=-1), K.int_shape(y_true)[-1])
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)
    # for each class: how many were classified as said class
pred_cnt = K.sum(y_pred_ones, axis=0)
# for each class: how many are true members of said class
gold_cnt = K.sum(y_true, axis=0)
# precision for each class
precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)
# recall for each class
recall = K.switch(K.equal(gold_cnt, 0), 0, y_true_pred / gold_cnt)
# f1 for each class
f1_class = K.switch(K.equal(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))
    # return f1 score per class
return f1_class
def precision_keras(y_true, y_pred):
# convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # tensors do not support item assignment; add a one-hot of the argmax instead
    # (assumes the class dimension of y_true is statically known)
    y_pred_ones = y_pred_ones + K.one_hot(K.argmax(y_pred, axis=-1), K.int_shape(y_true)[-1])
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)
    # for each class: how many were classified as said class
pred_cnt = K.sum(y_pred_ones, axis=0)
# precision for each class
precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)
    # return average precision over all classes
return K.mean(precision)
def precision_keras(y_true, y_pred):
#convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # tensors do not support item assignment; add a one-hot of the argmax instead
    # (assumes the class dimension of y_true is statically known)
    y_pred_ones = y_pred_ones + K.one_hot(K.argmax(y_pred, axis=-1), K.int_shape(y_true)[-1])
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)
    # for each class: how many were classified as said class
pred_cnt = K.sum(y_pred_ones, axis=0)
#precision for each class
precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred/pred_cnt)
    # return average precision over all classes
return K.mean(precision)
def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
"""Given the observations x, it adds the start boundary energy b_start (resp.
end boundary energy b_end on the start (resp. end) elements and multiplies
the mask."""
if mask is None:
if b_start is not None:
x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
if b_end is not None:
x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
else:
mask = K.cast(mask, K.floatx())
mask = K.expand_dims(mask, 2)
x *= mask
if b_start is not None:
mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
x = x + start_mask * b_start
if b_end is not None:
mask_l = K.concatenate([mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
x = x + end_mask * b_end
return x
def _forward(x, reduce_step, initial_states, U, mask=None):
"""Forward recurrence of the linear chain crf."""
def _forward_step(energy_matrix_t, states):
alpha_tm1 = states[-1]
new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
return new_states[0], new_states
U_shared = K.expand_dims(K.expand_dims(U, 0), 0)
if mask is not None:
mask = K.cast(mask, K.floatx())
mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
U_shared = U_shared * mask_U
inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)
last, values, _ = K.rnn(_forward_step, inputs, initial_states)
return last, values
def _backward(gamma, mask):
"""Backward recurrence of the linear chain crf."""
gamma = K.cast(gamma, 'int32')
def _backward_step(gamma_t, states):
y_tm1 = K.squeeze(states[0], 0)
y_t = batch_gather(gamma_t, y_tm1)
return y_t, [K.expand_dims(y_t, 0)]
initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)]
_, y_rev, _ = K.rnn(_backward_step,
gamma,
initial_states,
go_backwards=True)
y = K.reverse(y_rev, 1)
if mask is not None:
mask = K.cast(mask, dtype='int32')
# mask output
y *= mask
# set masked values to -1
y += -(1 - mask)
return y
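Taken together, _forward and _backward can drive Viterbi decoding: a max/argmax reduce_step records backpointers (gamma), which _backward then follows in reverse. This is a hypothetical sketch assembled from the recurrences above, not the library's own decoder; note that _forward_step reads the running scores from states[-1], so the backpointer state comes first.
def viterbi_decode_sketch(x, U, mask=None):
    # x: unary energies (batch, timesteps, num_tags); U: (num_tags, num_tags) transitions
    alpha_0 = x[:, 0, :]
    gamma_0 = K.zeros_like(alpha_0)
    initial_states = [gamma_0, alpha_0]
    # reduce_step returns [backpointers, best scores]; the first element is the
    # per-timestep output that K.rnn collects into gamma
    _, gamma = _forward(x,
                        lambda B: [K.cast(K.argmax(B, axis=1), K.floatx()),
                                   K.max(B, axis=1)],
                        initial_states, U, mask)
    return _backward(gamma, mask)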
def yoloconfidloss(y_true, y_pred, t):
real_y_true = tf.select(t, y_true, K.zeros_like(y_true))
pobj = K.sigmoid(y_pred)
lo = K.square(real_y_true-pobj)
value_if_true = lamda_confid_obj*(lo)
value_if_false = lamda_confid_noobj*(lo)
loss1 = tf.select(t, value_if_true, value_if_false)
loss = K.mean(loss1)
#
noobj = tf.select(t, K.zeros_like(y_pred), pobj)
noobjcount = tf.select(t, K.zeros_like(y_pred), K.ones_like(y_pred))
ave_anyobj = K.sum(noobj) / K.sum(noobjcount)
#ave_anyobj = K.mean(pobj)
obj = tf.select(t, pobj, K.zeros_like(y_pred))
objcount = tf.select(t, K.ones_like(y_pred), K.zeros_like(y_pred))
#ave_obj = K.mean( K.sum(obj, axis=1) / (K.sum(objcount, axis=1)+0.000001) ) # prevent div 0
ave_obj = K.sum(obj) / (K.sum(objcount)+0.000001) # prevent div 0
return loss, ave_anyobj, ave_obj
# shape is (gridcells*2,)
def yoloclassloss(y_true, y_pred, t):
lo = K.square(y_true-y_pred)
value_if_true = lamda_class*(lo)
value_if_false = K.zeros_like(y_true)
loss1 = tf.select(t, value_if_true, value_if_false)
# only extract predicted class value at obj location
cat = K.sum(tf.select(t, y_pred, K.zeros_like(y_pred)), axis=1)
# check valid class value
objsum = K.sum(y_true, axis=1)
# if objsum > 0.5 , means it contain some valid obj(may be 1,2.. objs)
isobj = K.greater(objsum, 0.5)
# only extract class value at obj location
valid_cat = tf.select(isobj, cat, K.zeros_like(cat))
# prevent div 0
ave_cat = tf.select(K.greater(K.sum(objsum),0.5), K.sum(valid_cat) / K.sum(objsum) , -1)
return K.mean(loss1), ave_cat
def call(self, X, mask=None):
if mask is not None:
assert K.ndim(mask) == 2, 'Input mask to CRF must have dim 2 if not None'
if self.test_mode == 'viterbi':
test_output = self.viterbi_decoding(X, mask)
else:
test_output = self.get_marginal_prob(X, mask)
self.uses_learning_phase = True
if self.learn_mode == 'join':
train_output = K.zeros_like(K.dot(X, self.kernel))
out = K.in_train_phase(train_output, test_output)
else:
if self.test_mode == 'viterbi':
train_output = self.get_marginal_prob(X, mask)
out = K.in_train_phase(train_output, test_output)
else:
out = test_output
return out
def get_attention_initial_state(self, inputs):
"""Creates initial state for attention mechanism. By default the
attention representation `attention_h` computed by attention_step is
passed as attention state between timesteps.
    Extending attention implementations that require additional states
    must modify or override this method accordingly.
# Arguments
inputs: layer inputs
# Returns
list (length one) of initial state (zeros)
"""
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(inputs) # (samples, timesteps, input_dim)
initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)
initial_state = K.expand_dims(initial_state) # (samples, 1)
initial_state = K.tile(initial_state, [1, self.attention_output_dim]) # (samples, output_dim)
return [initial_state]
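A standalone sketch of the zero-state trick described in the docstring, with a hypothetical attention_output_dim of 7 (Keras 2 backend, illustrative shapes).
import numpy as np
from keras import backend as K

inputs = K.constant(np.random.rand(3, 5, 4))        # (samples, timesteps, input_dim)
state = K.sum(K.zeros_like(inputs), axis=(1, 2))    # (samples,)
state = K.tile(K.expand_dims(state), [1, 7])        # (samples, attention_output_dim)
print(K.eval(state).shape)                          # (3, 7)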
def get_split_averages(input_tensor, input_mask, indices):
# Splits input tensor into three parts based on the indices and
# returns average of values prior to index, values at the index and
# average of values after the index.
# input_tensor: (batch_size, input_length, input_dim)
# input_mask: (batch_size, input_length)
# indices: (batch_size, 1)
# (1, input_length)
length_range = K.expand_dims(K.arange(K.shape(input_tensor)[1]), dim=0)
# (batch_size, input_length)
batched_range = K.repeat_elements(length_range, K.shape(input_tensor)[0], 0)
tiled_indices = K.repeat_elements(indices, K.shape(input_tensor)[1], 1) # (batch_size, input_length)
greater_mask = K.greater(batched_range, tiled_indices) # (batch_size, input_length)
lesser_mask = K.lesser(batched_range, tiled_indices) # (batch_size, input_length)
equal_mask = K.equal(batched_range, tiled_indices) # (batch_size, input_length)
# We also need to mask these masks using the input mask.
# (batch_size, input_length)
if input_mask is not None:
greater_mask = switch(input_mask, greater_mask, K.zeros_like(greater_mask))
lesser_mask = switch(input_mask, lesser_mask, K.zeros_like(lesser_mask))
post_sum = K.sum(switch(K.expand_dims(greater_mask), input_tensor, K.zeros_like(input_tensor)), axis=1) # (batch_size, input_dim)
pre_sum = K.sum(switch(K.expand_dims(lesser_mask), input_tensor, K.zeros_like(input_tensor)), axis=1) # (batch_size, input_dim)
values_at_indices = K.sum(switch(K.expand_dims(equal_mask), input_tensor, K.zeros_like(input_tensor)), axis=1) # (batch_size, input_dim)
post_normalizer = K.expand_dims(K.sum(greater_mask, axis=1) + K.epsilon(), dim=1) # (batch_size, 1)
pre_normalizer = K.expand_dims(K.sum(lesser_mask, axis=1) + K.epsilon(), dim=1) # (batch_size, 1)
return K.cast(pre_sum / pre_normalizer, 'float32'), values_at_indices, K.cast(post_sum / post_normalizer, 'float32')
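A plain-NumPy cross-check of the split-average idea for a single unmasked sequence (illustrative only; the tensor version above additionally handles batching and masking).
import numpy as np

x = np.arange(12, dtype=float).reshape(4, 3)   # (input_length, input_dim)
idx = 2
pre = x[:idx].mean(axis=0)        # average of values before the index
at = x[idx]                       # values at the index
post = x[idx + 1:].mean(axis=0)   # average of values after the index
print(pre, at, post)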
def compute_mask(self, input, mask):
if mask is None or self.return_mode == "last_output":
return None
elif self.return_mode == "all_outputs":
return mask # (batch_size, input_length)
else:
# Return mode is output_and_memory
# Mask memory corresponding to all the inputs that are masked, and do not mask the output
# (batch_size, input_length + 1)
return K.cast(K.concatenate([K.zeros_like(mask[:, :1]), mask]), 'uint8')
def discriminator_loss(y_true, y_pred):
    BATCH_SIZE = 10
    # targets: 1 for the first BATCH_SIZE (real) samples, 0 for the rest (generated)
    half = K.flatten(y_pred[:BATCH_SIZE, :, :, :])
    targets = K.concatenate([K.ones_like(half), K.zeros_like(half)])
    return K.mean(K.binary_crossentropy(K.flatten(y_pred), targets), axis=-1)
def dice_whole_mod(y_true, y_pred):
"""
    Computes the Sorensen-Dice metric, where P comes from classes 1, 2, 3, 4, 0:

        Dice = 2 * TP / (T + P)
Parameters
----------
y_true : keras.placeholder
Placeholder that contains the ground truth labels of the classes
y_pred : keras.placeholder
Placeholder that contains the class prediction
Returns
-------
scalar
Dice metric
"""
# mask = K.expand_dims(K.sum(y_true,axis=4),axis=4)
# cmp_mask = K.concatenate([K.ones_like(mask) - mask,K.zeros_like(mask), K.zeros_like(mask)],axis=4)
# y_pred = y_pred + cmp_mask
y_true = y_true[:,:,:,:,:3]
y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))
mask_true = K.sum(y_true, axis=4)
mask_pred = K.sum(y_pred_decision, axis=4) * K.sum(y_true, axis=4)
y_sum = K.sum(mask_true * mask_pred)
return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
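A plain-NumPy sanity check of the Dice formula above on toy binary masks (illustrative only, not a re-implementation of the Keras metric).
import numpy as np

t = np.array([1, 1, 0, 0, 1])    # ground-truth foreground voxels
p = np.array([1, 0, 0, 1, 1])    # predicted foreground voxels
dice = 2.0 * (t * p).sum() / (t.sum() + p.sum())
print(dice)                      # 2 * 2 / (3 + 3) = 0.666...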