def get_constants(self, inputs, training=None):
    constants = self.recurrent_layer.get_constants(
        inputs=inputs,
        training=training
    )
    if 0 < self.dense_dropout < 1:
        # Build a (batch_size, units) tensor of ones and drop entries at training time,
        # so the same dropout mask is reused at every timestep.
        ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.recurrent_layer.units))

        def dropped_inputs():
            return K.dropout(ones, self.dense_dropout)

        out_dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training)]
        constants.append(out_dp_mask)
    else:
        constants.append([K.cast_to_floatx(1.)])
    return constants
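For reference, a minimal standalone sketch of the tiled dropout-mask idiom used above (an illustration only, not part of the original layer; the helper name is hypothetical and the Keras 2.x backend, imported as K, is assumed):

def make_dropout_mask(inputs, units, rate, training=None):
    # inputs: (batch_size, timesteps, input_dim); only its batch dimension is used.
    ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))  # (batch_size, 1)
    ones = K.tile(ones, (1, units))                          # (batch_size, units)

    def dropped_inputs():
        return K.dropout(ones, rate)

    # Dropped mask at training time, plain ones at inference time; the same mask
    # can then be reused at every timestep of the recurrence.
    return K.in_train_phase(dropped_inputs, ones, training=training)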
Python tile() usage examples (source code)
Example from rnnlayer.py in the project recurrent-attention-for-QA-SQUAD-based-on-keras (author: wentaozhu):
def get_constants(self, inputs, training=None):
    constants = []
    '''if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.units))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:'''
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Example from QnA.py in the project recurrent-attention-for-QA-SQUAD-based-on-keras (author: wentaozhu):
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
def tensor_swirl(image, center=None, strength=1, radius=100, rotation=0, cval=0.0, **kwargs):
    # **kwargs is for unsupported options (ignored)
    cval = tf.fill(K.shape(image)[0:1], cval)
    shape = K.int_shape(image)[1:3]
    if center is None:
        center = np.array(shape) / 2
    ys = np.expand_dims(np.repeat(np.arange(shape[0]), shape[1]), -1)
    xs = np.expand_dims(np.tile(np.arange(shape[1]), shape[0]), -1)
    map_xs, map_ys = swirl_mapping(xs, ys, center, rotation, strength, radius)

    mapping = np.zeros((*shape, *shape))
    for map_x, map_y, x, y in zip(map_xs, map_ys, xs, ys):
        results = tensor_linear_interpolation(image, map_x, map_y, cval)
        for _y, _x, w in results:
            # mapping[int(y),int(x),int(_y),int(_x),] = w
            mapping[int(_y), int(_x), int(y), int(x)] = w

    results = tf.tensordot(image, K.variable(mapping), [[1, 2], [0, 1]])
    # results = K.reshape(results, K.shape(image))
    return results
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
def create_conv_model(self):
    # This is where the neural network model is initialized (Keras 1.x Convolution2D API)
    init = 'glorot_uniform'
    self.state_in = Input(self.state_dim)
    self.l1 = Convolution2D(32, 8, 8, activation='elu', init=init, subsample=(4, 4), border_mode='same')(
        self.state_in)
    self.l2 = Convolution2D(64, 4, 4, activation='elu', init=init, subsample=(2, 2), border_mode='same')(
        self.l1)
    # self.l3 = Convolution2D(64, 3, 3, activation='relu', init=init, subsample=(1, 1), border_mode='same')(
    #     self.l2)
    self.l3 = self.l2
    self.h = Flatten()(self.l3)
    self.hidden = Dense(256, init=init, activation='elu')(self.h)
    self.value = Dense(1, init=init)(self.hidden)
    self.policy = Dense(self.action_dim, init=init, activation='softmax')(self.hidden)
    self.q_values = self.entropy_coef * (Theano.log(self.policy + 1e-18) -
                                         Theano.tile(Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
                                                                axis=[1], keepdims=True), (1, self.action_dim)))
    self.q_values = self.q_values + Theano.tile(self.value, (1, self.action_dim))
    self.model = Model(self.state_in, output=[self.policy, self.value])
def create_fc_model(self):
    # This is where the neural network model is initialized
    init = 'glorot_uniform'
    self.state_in = Input(self.state_dim)
    self.hidden = Dense(256, init=init, activation='elu')(self.state_in)
    self.value = Dense(1)(self.hidden)
    self.policy = Dense(self.action_dim, init=init, activation='softmax')(self.hidden)
    self.q_values = self.entropy_coef * (Theano.log(self.policy + 1e-18) -
                                         Theano.tile(Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
                                                                axis=[1], keepdims=True), (1, self.action_dim)))
    # print (type(Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
    #                        axis=[1], keepdims=True)))
    # print(Theano.function([self.state_in], [Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
    #                                                    axis=[1], keepdims=True)])([np.zeros((32,) + self.state_dim)])[0].shape)
    # 1/0
    self.q_values = self.q_values + Theano.tile(self.value, (1, self.action_dim))
    self.model = Model(self.state_in, output=[self.policy, self.value])
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
def custom_for_keras(self, ALL_word_embeds):
    ## only the top 20 rows from word_vectors are legit!
    def top_accuracy(true_word_indices, image_vectors):
        l2 = lambda x, axis: K.sqrt(K.sum(K.square(x), axis=axis, keepdims=True))
        l2norm = lambda x, axis: x / l2(x, axis)

        l2_words = l2norm(ALL_word_embeds, axis=1)
        l2_images = l2norm(image_vectors, axis=1)

        tiled_words = K.tile(K.expand_dims(l2_words, axis=1), (1, 200, 1))
        tiled_images = K.tile(K.expand_dims(l2_images, axis=1), (1, 20, 1))

        diff = K.squeeze(l2(l2_words - l2_images, axis=2))
        # slice_top3 = lambda x: x[:, 0:3]
        # slice_top1 = lambda x: x[:, 0:1]
        diff_top5 = metrics.top_k_categorical_accuracy(tiled_images, diff)
        return diff_top5
    return top_accuracy
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.0))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.0))
    return constants
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
def get_attention_initial_state(self, inputs):
    """Creates the initial state for the attention mechanism. By default the
    attention representation `attention_h` computed by attention_step is
    passed as the attention state between timesteps.
    Extending attention implementations that require additional states
    must modify or override this method accordingly.
    # Arguments
        inputs: layer inputs
    # Returns
        list (length one) of initial state (zeros)
    """
    # build an all-zero tensor of shape (samples, output_dim)
    initial_state = K.zeros_like(inputs)  # (samples, timesteps, input_dim)
    initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
    initial_state = K.expand_dims(initial_state)  # (samples, 1)
    initial_state = K.tile(initial_state, [1, self.attention_output_dim])  # (samples, output_dim)
    return [initial_state]
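For context (an observation not stated in the original): the roundabout zeros_like/sum/expand_dims/tile chain is used because the batch size is only known symbolically at graph-construction time, so a literal zeros tensor of shape (samples, output_dim) cannot be allocated directly. A minimal sketch of the same trick outside the layer, assuming the Keras backend is imported as K and the helper name is hypothetical:

def batch_sized_zeros(inputs, dim):
    # inputs: (samples, timesteps, input_dim); only its batch dimension matters
    z = K.sum(K.zeros_like(inputs), axis=(1, 2))  # (samples,), all zeros
    return K.tile(K.expand_dims(z), [1, dim])     # (samples, dim), all zeros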
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
Example from layer_normalization_RNN.py in the project New_Layers-Keras-Tensorflow (author: WeidiXie):
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(2)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(2)])
    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(2)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(2)])
    return constants
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.hidden_recurrent_dim))
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.input_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
def optimize_pi(self, batch):
    if not self.built:
        self.build()
    sampled_action_for_M = self.sess.run(self.sampled_action_for_M, {self.states: batch['states']})
    sampled_action = np.transpose(sampled_action_for_M, (1, 0, 2))[:, :, np.newaxis, :]
    # pairwise squared distances between the M sampled actions per state
    pairwise_d = np.sum((np.tile(sampled_action, (self.M_pi, 1)) -
                         np.transpose(np.tile(sampled_action, (self.M_pi, 1)), (0, 2, 1, 3))) ** 2,
                        axis=3).reshape(sampled_action.shape[0], -1)
    # median heuristic for the kernel bandwidth
    d = np.median(pairwise_d, axis=1)
    h = d / (2 * np.log(self.M_pi + 1))
    feed_in = {
        self.states: batch['states'],
        self.actions: batch['actions'],
        self.sampled_action_feeder: sampled_action_for_M,
        self.h: h,
    }
    self.sess.run(self.pi_updater, feed_in)
def update_memory(self, z_t, h_t, mem_tm1):
    '''
    This method takes the attention vector (z_t), writer output (h_t) and previous timestep's memory (mem_tm1)
    and updates the memory. Implements equations 6, 14 or 15.
    '''
    tiled_z_t = K.tile(K.expand_dims(z_t), (self.output_dim))  # (batch_size, input_length, output_dim)
    input_length = K.shape(mem_tm1)[1]
    # (batch_size, input_length, output_dim)
    tiled_h_t = K.permute_dimensions(K.tile(K.expand_dims(h_t), (input_length)), (0, 2, 1))
    # Updating memory. First term in summation corresponds to selective forgetting and the second term to
    # selective addition. Equation 6.
    mem_t = mem_tm1 * (1 - tiled_z_t) + tiled_h_t * tiled_z_t  # (batch_size, input_length, output_dim)
    return mem_t
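In plain terms (a restatement of the code above, not a claim about the referenced paper's equation numbering): for every input position i, mem_t[i] = (1 - z_t[i]) * mem_tm1[i] + z_t[i] * h_t, where z_t[i] is the scalar attention weight for position i. The two K.tile calls only broadcast z_t over output_dim and h_t over input_length so that this elementwise expression is well defined. A NumPy sketch of the equivalent broadcast, with toy shapes assumed for illustration:

import numpy as np

B, L, D = 2, 5, 3                        # batch, input_length, output_dim
z = np.random.rand(B, L)                 # attention weights z_t
h = np.random.rand(B, D)                 # writer output h_t
m = np.random.rand(B, L, D)              # previous memory mem_tm1
m_t = m * (1 - z[..., None]) + h[:, None, :] * z[..., None]   # (B, L, D)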
def get_initial_state(self, inputs):
    dense_initial_state = K.zeros_like(inputs)
    dense_initial_state = K.sum(dense_initial_state, axis=(1, 2))
    dense_initial_state = K.expand_dims(dense_initial_state)
    dense_initial_state = K.tile(dense_initial_state, [1, self.dense_layer.units])
    return [dense_initial_state] + self.recurrent_layer.get_initial_state(inputs)
def compute_mask(self, inputs, mask):
    output_mask = self.layer.compute_mask(
        inputs=inputs,
        mask=mask,
    )
    if self.time_steps is None:
        return output_mask
    else:
        output_mask = K.ones_like(output_mask)
        output_mask = K.any(output_mask, axis=1, keepdims=True)
        return K.tile(output_mask, [1, self.time_steps])
def get_initial_states(self, inputs):
    # build an all-zero tensor of shape (samples, units)
    initial_state = K.zeros_like(inputs)  # (samples, timesteps, input_dim)
    initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
    initial_state = K.expand_dims(initial_state)  # (samples, 1)
    initial_state = K.tile(initial_state, [1, self.units])  # (samples, units)
    initial_states = [initial_state for _ in range(len(self.states))]
    return initial_states
def get_constants(self, inputs, training=None):
    constants = []
    if 0 < self.dropout < 1:
        input_shape = K.int_shape(inputs)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))

        def dropped_inputs():
            return K.dropout(ones, self.dropout)

        dp_mask = K.in_train_phase(dropped_inputs,
                                   ones,
                                   training=training)
        constants.append(dp_mask)
    else:
        constants.append(K.cast_to_floatx(1.))
    if 0 < self.recurrent_dropout < 1:
        ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.units))

        def dropped_inputs():
            return K.dropout(ones, self.recurrent_dropout)

        rec_dp_mask = K.in_train_phase(dropped_inputs,
                                       ones,
                                       training=training)
        constants.append(rec_dp_mask)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
Example from multiplicative_lstm.py in the project Keras-Multiplicative-LSTM (author: titu1994):
def get_constants(self, inputs, training=None):
    constants = []
    if self.implementation != 0 and 0 < self.dropout < 1:
        input_shape = K.int_shape(inputs)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))

        def dropped_inputs():
            return K.dropout(ones, self.dropout)

        dp_mask = [K.in_train_phase(dropped_inputs,
                                    ones,
                                    training=training) for _ in range(5)]
        constants.append(dp_mask)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(5)])
    if 0 < self.recurrent_dropout < 1:
        ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.units))

        def dropped_inputs():
            return K.dropout(ones, self.recurrent_dropout)

        rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(5)]
        constants.append(rec_dp_mask)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(5)])
    return constants
def build_error(s, height, width, base):
    P = len(setting['panels'])
    s = K.reshape(s, [-1, height, base, width, base])
    s = K.permute_dimensions(s, [0, 1, 3, 2, 4])
    s = K.reshape(s, [-1, height, width, 1, base, base])
    s = K.tile(s, [1, 1, 1, P, 1, 1])
    allpanels = K.variable(np.array(setting['panels']))
    allpanels = K.reshape(allpanels, [1, 1, 1, P, base, base])
    allpanels = K.tile(allpanels, [K.shape(s)[0], height, width, 1, 1, 1])

    def hash(x):
        ## 2x2 average hashing
        x = K.reshape(x, [-1, height, width, P, base // 2, 2, base // 2, 2])
        x = K.mean(x, axis=(5, 7))
        return K.round(x)
        ## diff hashing (horizontal diff)
        # x1 = x[:,:,:,:,:,:-1]
        # x2 = x[:,:,:,:,:,1:]
        # d = x1 - x2
        # return K.round(d)
        ## just rounding
        # return K.round(x)
        ## do nothing
        # return x

    # s = hash(s)
    # allpanels = hash(allpanels)
    # error = K.binary_crossentropy(s, allpanels)
    error = K.abs(s - allpanels)
    error = hash(error)
    error = K.mean(error, axis=(4, 5))
    return error
def build_errors(states, base, pad, dim, size):
    # address the numerical viscosity in swirling
    s = K.round(states + viscosity_adjustment)
    s = Reshape((dim + 2 * pad, dim + 2 * pad, 1))(s)
    s = Cropping2D(((pad, pad), (pad, pad)))(s)
    s = K.reshape(s, [-1, size, base, size, base])
    s = K.permute_dimensions(s, [0, 1, 3, 2, 4])
    s = K.reshape(s, [-1, size, size, 1, base, base])
    s = K.tile(s, [1, 1, 1, 2, 1, 1])  # number of panels : 2
    allpanels = K.variable(panels)
    allpanels = K.reshape(allpanels, [1, 1, 1, 2, base, base])
    allpanels = K.tile(allpanels, [K.shape(s)[0], size, size, 1, 1, 1])

    def hash(x):
        ## 3x3 average hashing
        x = K.reshape(x, [-1, size, size, 2, base // 3, 3, base // 3, 3])
        x = K.mean(x, axis=(5, 7))
        return K.round(x)
        ## diff hashing (horizontal diff)
        # x1 = x[:,:,:,:,:,:-1]
        # x2 = x[:,:,:,:,:,1:]
        # d = x1 - x2
        # return K.round(d)
        ## just rounding
        # return K.round(x)
        ## do nothing
        # return x

    # s = hash(s)
    # allpanels = hash(allpanels)
    # error = K.binary_crossentropy(s, allpanels)
    error = K.abs(s - allpanels)
    error = hash(error)
    error = K.mean(error, axis=(4, 5))
    return error
def build_errors(states, base, dim, size):
    s = K.reshape(states, [-1, size, base, size, base])
    s = K.permute_dimensions(s, [0, 1, 3, 2, 4])
    s = K.reshape(s, [-1, size, size, 1, base, base])
    s = K.tile(s, [1, 1, 1, 2, 1, 1])  # number of panels : 2
    allpanels = K.variable(panels)
    allpanels = K.reshape(allpanels, [1, 1, 1, 2, base, base])
    allpanels = K.tile(allpanels, [K.shape(s)[0], size, size, 1, 1, 1])

    def hash(x):
        ## 2x2 average hashing
        # x = K.reshape(x, [-1,size,size,2, base//2, 2, base//2, 2])
        # x = K.mean(x, axis=(5,7))
        # return K.round(x)
        ## diff hashing (horizontal diff)
        # x1 = x[:,:,:,:,:,:-1]
        # x2 = x[:,:,:,:,:,1:]
        # d = x1 - x2
        # return K.round(d)
        ## just rounding
        return K.round(x)
        ## do nothing
        # return x

    # s = hash(s)
    # allpanels = hash(allpanels)
    # error = K.binary_crossentropy(s, allpanels)
    error = K.abs(s - allpanels)
    error = hash(error)
    error = K.mean(error, axis=(4, 5))
    return error
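As a closing illustration (a NumPy-only sketch with assumed toy shapes, not part of the original project), the reshape/permute/tile pattern used in the build_error/build_errors variants splits a (size*base, size*base) board into size x size cells of base x base pixels and repeats each cell along a new panel axis, so every cell can be compared against every candidate panel at once:

import numpy as np

size, base, n_panels = 3, 4, 2
board = np.random.rand(1, size * base, size * base)      # (batch, H, W)
panels = np.random.rand(n_panels, base, base)            # candidate panel glyphs

s = board.reshape(-1, size, base, size, base)
s = s.transpose(0, 1, 3, 2, 4)                           # (batch, size, size, base, base)
s = s.reshape(-1, size, size, 1, base, base)
s = np.tile(s, (1, 1, 1, n_panels, 1, 1))                # repeat each cell per panel

p = np.tile(panels.reshape(1, 1, 1, n_panels, base, base),
            (s.shape[0], size, size, 1, 1, 1))

error = np.abs(s - p).mean(axis=(4, 5))                  # (batch, size, size, n_panels)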