import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope


def multilayer_perceptron(_X, input_size, n_hidden, n_class, forward_only=False):
    """Two-layer MLP: one ReLU hidden layer followed by a linear output layer."""
    with variable_scope.variable_scope("DNN"):
        bias_start = 0.0
        weight_hidden = variable_scope.get_variable("Weight_Hidden", [input_size, n_hidden])
        bias_hidden = variable_scope.get_variable("Bias_Hidden", [n_hidden],
                                                  initializer=init_ops.constant_initializer(bias_start))
        # Hidden layer with ReLU activation
        layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, weight_hidden), bias_hidden))
        # Apply dropout only when training (second argument is keep_prob in TF 1.x)
        if not forward_only:
            layer_1 = tf.nn.dropout(layer_1, 0.5)

        weight_out = variable_scope.get_variable("Weight_Out", [n_hidden, n_class])
        bias_out = variable_scope.get_variable("Bias_Out", [n_class],
                                               initializer=init_ops.constant_initializer(bias_start))
        # Linear output layer; returns unscaled logits
        output = tf.matmul(layer_1, weight_out) + bias_out
        # Optional L2 regularization over all parameters:
        # regularizers = (tf.nn.l2_loss(weight_hidden) + tf.nn.l2_loss(bias_hidden)
        #                 + tf.nn.l2_loss(weight_out) + tf.nn.l2_loss(bias_out))
    return output
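
# A minimal usage sketch (assumption: TensorFlow 1.x graph mode; the placeholder
# shapes, layer sizes, and learning rate below are illustrative, not taken from
# the original post).
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
logits = multilayer_perceptron(x, input_size=784, n_hidden=256, n_class=10)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# Because the weights are created with get_variable inside the "DNN" scope,
# an inference graph can share them by re-entering the scope with reuse=True:
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    eval_logits = multilayer_perceptron(x, 784, 256, 10, forward_only=True)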