def create_base(self, inputs, is_training):
    """Build the CNN graph: 3 conv layers -> flatten -> FC -> dropout -> logits.

    Args:
        inputs: input tensor fed to `slim.conv2d`; kernels are [3, 5], so
            presumably shaped (batch, 3, feature_len, channels) — TODO confirm
            against the caller.
        is_training: bool (or bool tensor); enables dropout only during
            training.

    Returns:
        A tuple `(logits, predicted_classes)` where `logits` has shape
        (batch, num_classes) and `predicted_classes` is the int32 argmax
        class index per example, named 'y'.
    """
    params = self._config.cnn_params
    print("input dimension = {}".format(inputs.get_shape()))

    with tf.name_scope('Model'):
        # ReLU on every conv / fully-connected layer. (Batch norm and a
        # custom weight initializer were previously sketched here as
        # commented-out code and have been removed as dead code.)
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            activation_fn=tf.nn.relu):
            # inputs is 2D with dimension (3 x feature_len)
            net = slim.conv2d(inputs, params['num_filters'][0], [3, 5], scope='conv1')
            net = slim.conv2d(net, params['num_filters'][1], [3, 5], scope='conv2')
            net = slim.conv2d(net, params['num_filters'][2], [3, 5], scope='conv3')
            net = slim.flatten(net, scope='flatten1')
            net = slim.fully_connected(net, params['num_fc_1'], scope='fc1')
            net = slim.dropout(net, self._config.keep_prob,
                               is_training=is_training, scope='dropout1')
            logits = slim.fully_connected(net, self._config.num_classes,
                                          activation_fn=None, scope='fc2')

    with tf.name_scope('output'):
        # `axis=` replaces the deprecated `dimension=` kwarg of tf.argmax;
        # tf.cast(..., tf.int32) replaces the deprecated tf.to_int32 helper.
        predicted_classes = tf.cast(tf.argmax(logits, axis=1), tf.int32, name='y')

    return logits, predicted_classes