def preprocess_input(self, x):
    """Precompute the four LSTM gate input projections in 'cpu' mode.

    When ``consume_less == 'cpu'`` the input-to-gate projections
    (i, f, c, o) are applied over all timesteps up front via
    ``time_distributed_dense`` and concatenated along the feature
    axis; in any other mode ``x`` is returned untouched and the
    projection happens inside the recurrent step.
    """
    if self.consume_less != 'cpu':
        return x

    # Dropout is applied only when strictly between 0 and 1.
    dropout = self.dropout_W if 0 < self.dropout_W < 1 else 0
    shape = self.input_spec[0].shape
    n_steps, n_features = shape[1], shape[2]

    gates = [
        time_distributed_dense(x, w, b, dropout,
                               n_features, self.output_dim, n_steps)
        for w, b in ((self.W_i, self.b_i), (self.W_f, self.b_f),
                     (self.W_c, self.b_c), (self.W_o, self.b_o))
    ]
    return K.concatenate(gates, axis=2)
# Python examples using time_distributed_dense()
def preprocess_input(self, x):
    """Fold the gate input weights into x ahead of the recurrent loop.

    In 'cpu' consume_less mode each of the i/f/c/o gate projections is
    computed across every timestep with time_distributed_dense, then the
    four results are joined on axis 2.  Otherwise the raw input is
    passed through unchanged.
    """
    if self.consume_less != 'cpu':
        return x

    if 0 < self.dropout_W < 1:
        drop = self.dropout_W
    else:
        drop = 0

    input_shape = self.input_spec[0].shape
    steps = input_shape[1]
    features = input_shape[2]

    weight_bias_pairs = [(self.W_i, self.b_i),
                         (self.W_f, self.b_f),
                         (self.W_c, self.b_c),
                         (self.W_o, self.b_o)]
    projected = []
    for weight, bias in weight_bias_pairs:
        projected.append(
            time_distributed_dense(x, weight, bias, drop,
                                   features, self.output_dim, steps))
    return K.concatenate(projected, axis=2)
def preprocess_input(self, x):
    """Precompute the controller's gate input projections in 'cpu' mode.

    Unlike the stock Keras LSTM, the projection width here is
    ``self.controller_output_dim`` rather than ``self.output_dim``
    (NTM-style controller).  In any other ``consume_less`` mode the
    input is returned unchanged.

    Changes from the original: removed leftover debug ``print``
    scaffolding and the commented-out dead code, and collapsed the four
    near-identical gate calls into a single loop.
    """
    if self.consume_less != 'cpu':
        return x

    # Dropout is applied only when strictly between 0 and 1.
    dropout = self.dropout_W if 0 < self.dropout_W < 1 else 0
    input_shape = self.input_spec[0].shape
    input_dim = input_shape[2]
    timesteps = input_shape[1]

    # One projection per gate (i, f, c, o); output width is the
    # controller's output dim, not the layer's output_dim.
    projected = [
        time_distributed_dense(x, W, b, dropout,
                               input_dim, self.controller_output_dim,
                               timesteps)
        for W, b in ((self.W_i, self.b_i), (self.W_f, self.b_f),
                     (self.W_c, self.b_c), (self.W_o, self.b_o))
    ]
    return K.concatenate(projected, axis=2)
def preprocess_input(self, x):
    """Pre-apply the input projection W across all timesteps in 'cpu' mode.

    Returns x unchanged in other modes, in which case the projection is
    performed per step inside the recurrent loop.
    """
    if self.consume_less == 'cpu':
        shape = self.input_spec[0].shape
        return time_distributed_dense(
            x, self.W, None, self.dropout_W,
            shape[2], self.output_dim, shape[1])
    return x
# override Recurrent's get_initial_states function to load the trainable
# initial hidden state
def preprocess_input(self, x):
    """Precompute the transform (t) and hidden (h) input projections.

    In 'cpu' consume_less mode both projections are computed over every
    timestep with time_distributed_dense and concatenated on the feature
    axis; otherwise x passes through untouched.
    """
    if self.consume_less != 'cpu':
        return x

    shape = self.input_spec[0].shape
    timesteps, input_dim = shape[1], shape[2]
    projections = [
        time_distributed_dense(x, weight, bias, self.dropout_W,
                               input_dim, self.output_dim, timesteps)
        for weight, bias in ((self.W_t, self.b_t), (self.W_h, self.b_h))
    ]
    return K.concatenate(projections, axis=2)
def preprocess_input(self, x):
    """Fold the input weights W into x up front when consume_less is
    'cpu'; in any other mode defer the projection to the step function."""
    if self.consume_less != 'cpu':
        return x

    input_shape = self.input_spec[0].shape
    n_steps = input_shape[1]
    n_in = input_shape[2]
    return time_distributed_dense(x, self.W, None, self.dropout_W,
                                  n_in, self.output_dim, n_steps)
# override Recurrent's get_initial_states function to load the trainable
# initial hidden state