def buildEvalGraph(self):
    with tf.variable_scope('eval_variables', reuse=False):
        self.logits = tf.nn.softmax(self.layers[-1].activations, name='logits')
        self.correct_prediction = tf.equal(tf.arg_max(self.logits, 1), tf.arg_max(self.output, 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
Python tf.arg_max() examples, source code (tf.arg_max is the deprecated alias of tf.argmax in TensorFlow 1.x)
def accuracy(labels_placeholder, inference):
    with tf.name_scope('test'):
        correct_prediction = tf.equal(
            tf.arg_max(inference, 1), tf.argmax(labels_placeholder, 1)
        )
        acc = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
        tf.summary.scalar('accuracy', acc)  # tf.scalar_summary was removed in TF 1.0
        return acc
def accuracy(logits, targets_pl, one_hot=False):
    targets = tf.to_int64(targets_pl)
    if one_hot:
        # compare the predicted indices with the label indices; for a correct prediction they match
        correct_prediction = tf.equal(tf.arg_max(logits, 1), tf.arg_max(targets, 1), name='accuracy_equals_oh')
    else:
        # compare the predicted index with the correct label, which is an integer class id here
        correct_prediction = tf.equal(tf.arg_max(logits, 1), targets, name='accuracy_equals')
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float32'), name='accuracy_mean')
    tf.summary.scalar('accuracy_mean', accuracy)
    return accuracy
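A minimal usage sketch for the integer-label path, under TensorFlow 1.x; the placeholder names below are illustrative, not from the original source:

import tensorflow as tf

# Illustrative names: a 10-class model with integer class ids as labels.
logits_op = tf.placeholder(tf.float32, [None, 10])
label_batch = tf.placeholder(tf.int32, [None])
acc_op = accuracy(logits_op, label_batch, one_hot=False)

with tf.Session() as sess:
    acc = sess.run(acc_op, feed_dict={
        logits_op: [[0.1, 0.9] + [0.0] * 8],  # arg_max picks class 1
        label_batch: [1],
    })
    print(acc)  # 1.0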
multilayer_perceptron.py (project: Deep_Learning_In_Action, author: SunnyMarkLiu)
def init_train_test_op(self):
    # loss function
    self.loss_function = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=self.y,
                                                logits=self.read_out_logits))
    # training op
    self.training_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss_function)
    self.predict_op = tf.arg_max(self.read_out_logits, 1)
    # predict
    predict_matches = tf.equal(tf.arg_max(self.y, dimension=1),
                               tf.arg_max(self.read_out_logits, 1))
    # accuracy metric
    self.accuracy = tf.reduce_mean(tf.cast(predict_matches, tf.float32))
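Note that tf.nn.softmax_cross_entropy_with_logits applies the softmax internally, so read_out_logits must be raw, unnormalized scores; feeding already-softmaxed probabilities would apply the softmax twice and flatten the loss.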
network_in_network.py (project: Deep_Learning_In_Action, author: SunnyMarkLiu)
def init_train_test_op(self):
    # individual losses combined into the total loss
    self.loss_function = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=self.y,
                                                logits=self.read_out_logits))
    # training op
    self.training_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss_function)
    self.predict_op = tf.arg_max(self.read_out_logits, 1)
    # predict
    predict_matches = tf.equal(tf.arg_max(self.y, dimension=1),
                               tf.arg_max(self.read_out_logits, 1))
    # accuracy metric
    self.accuracy = tf.reduce_mean(tf.cast(predict_matches, tf.float32))
def calc_reward(outputs):
    # consider the action at the last time step
    outputs = outputs[-1]  # look at ONLY THE END of the sequence
    outputs = tf.reshape(outputs, (batch_size, cell_out_size))

    # get the baseline
    b = tf.stack(baselines)   # tf.pack was renamed tf.stack in TF 1.0
    b = tf.concat([b, b], 2)  # TF 1.0 argument order: values first, then axis
    b = tf.reshape(b, (batch_size, nGlimpses * 2))
    no_grad_b = tf.stop_gradient(b)

    # get the action (classification)
    p_y = tf.nn.softmax(tf.matmul(outputs, Wa_h_a) + Ba_h_a)
    max_p_y = tf.arg_max(p_y, 1)
    correct_y = tf.cast(labels_placeholder, tf.int64)

    # reward for all examples in the batch
    R = tf.cast(tf.equal(max_p_y, correct_y), tf.float32)
    reward = tf.reduce_mean(R)  # mean reward
    R = tf.reshape(R, (batch_size, 1))
    R = tf.tile(R, [1, nGlimpses * 2])

    # get the location
    p_loc = gaussian_pdf(mean_locs, sampled_locs)
    p_loc = tf.tanh(p_loc)
    p_loc_orig = p_loc
    p_loc = tf.reshape(p_loc, (batch_size, nGlimpses * 2))

    # define the cost function
    J = tf.concat([tf.log(p_y + SMALL_NUM) * onehot_labels_placeholder,
                   tf.log(p_loc + SMALL_NUM) * (R - no_grad_b)], 1)
    J = tf.reduce_sum(J, 1)
    J = J - tf.reduce_sum(tf.square(R - b), 1)
    J = tf.reduce_mean(J, 0)
    cost = -J

    # define the optimizer
    optimizer = tf.train.MomentumOptimizer(lr, momentumValue)
    train_op = optimizer.minimize(cost, global_step)
    return cost, reward, max_p_y, correct_y, train_op, b, tf.reduce_mean(b), tf.reduce_mean(R - b), lr
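The second term inside J is the REINFORCE objective for the glimpse locations: the log-probabilities of the sampled locations are weighted by the advantage (R - no_grad_b), and stop_gradient ensures the baseline b is trained only through the separate squared-error term tf.square(R - b).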
def num_correct_prediction(logits, labels):
    """Evaluate the quality of the logits at predicting the label.
    Return:
        the number of correct predictions
    """
    correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
    correct = tf.cast(correct, tf.int32)
    n_correct = tf.reduce_sum(correct)
    return n_correct
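A sketch of an evaluation loop built on this helper, assuming eval_logits and eval_labels come from an input pipeline; num_batches and batch_size are illustrative stand-ins, not names from the original source:

# Hypothetical evaluation loop: sum per-batch correct counts.
n_correct_op = num_correct_prediction(eval_logits, eval_labels)
total_correct = 0
with tf.Session() as sess:
    for _ in range(num_batches):
        total_correct += sess.run(n_correct_op)
print('eval accuracy: %.4f' % (total_correct / float(num_batches * batch_size)))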
#%%
def _setup_net(self):
    with self.cnn_net.variable_scope([self.data_batches]) as variable_scope:
        end_points_collection = self.cnn_net.end_points_collection_name(variable_scope)
        net, _ = self.cnn_net.cnn_layers(self.data_batches, variable_scope, end_points_collection)
        net = slim.fully_connected(net, self.embedding_size, activation_fn=None, scope='fc0')
        net = rnn.rnn_layers(net, tf.arg_max(self.numbers_label_batches, dimension=2), self.embedding_size)
        net = tf.reshape(net, [-1, self.embedding_size])
        self.model_output = slim.fully_connected(net, 11, activation_fn=None, scope='fc4')
def batch_iou_fast(anchors, bboxes):
    """Compute IoU between two batches of boxes. Box format: [y_min, x_min, y_max, x_max].
    Args:
        anchors: tensor of known (static) shape
        bboxes: tensor of dynamic shape
    Return:
        ious: 2-D with shape [num_bboxes, num_anchors]
        indices: index of the best-matching anchor per bbox, shape [num_bboxes]
    """
    num_anchors = anchors.get_shape().as_list()[0]
    tensor_num_bboxes = tf.shape(bboxes)[0]
    # repeat each bbox num_anchors times and tile the anchors so the
    # pairwise IoU can be computed elementwise
    indices = tf.reshape(tf.range(tensor_num_bboxes), shape=[-1, 1])
    indices = tf.reshape(tf.stack([indices] * num_anchors, axis=1), shape=[-1, 1])
    bboxes_m = tf.gather_nd(bboxes, indices)
    anchors_m = tf.tile(anchors, [tensor_num_bboxes, 1])

    # overlap along the x axis (left-right) and the y axis (top-bottom)
    lr = tf.maximum(
        tf.minimum(bboxes_m[:, 3], anchors_m[:, 3]) -
        tf.maximum(bboxes_m[:, 1], anchors_m[:, 1]),
        0
    )
    tb = tf.maximum(
        tf.minimum(bboxes_m[:, 2], anchors_m[:, 2]) -
        tf.maximum(bboxes_m[:, 0], anchors_m[:, 0]),
        0
    )
    intersection = tf.multiply(tb, lr)
    union = tf.subtract(
        tf.multiply(bboxes_m[:, 3] - bboxes_m[:, 1], bboxes_m[:, 2] - bboxes_m[:, 0]) +
        tf.multiply(anchors_m[:, 3] - anchors_m[:, 1], anchors_m[:, 2] - anchors_m[:, 0]),
        intersection
    )
    ious = tf.div(intersection, union)
    ious = tf.reshape(ious, shape=[tensor_num_bboxes, num_anchors])
    indices = tf.arg_max(ious, dimension=1)
    return ious, indices
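A small worked check with made-up boxes: a ground-truth box identical to the first anchor gives IoU 1.0 against it and 0.25 against an anchor four times its area, so arg_max picks anchor 0:

# Illustrative boxes in [y_min, x_min, y_max, x_max] format.
anchors = tf.constant([[0.0, 0.0, 2.0, 2.0],
                       [0.0, 0.0, 4.0, 4.0]])
bboxes = tf.constant([[0.0, 0.0, 2.0, 2.0]])
ious, best = batch_iou_fast(anchors, bboxes)
with tf.Session() as sess:
    print(sess.run(ious))  # [[1.0, 0.25]]
    print(sess.run(best))  # [0] -> anchor 0 is the best match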
def __init__(self, sent_length, class_num,
             embedding_size, initial_embedding_dict,
             l2_lambda, hidden_size):
    self.input_x = tf.placeholder(tf.int32, [None, sent_length], name="input_x")
    self.input_y = tf.placeholder(tf.float32, [None, class_num], name="input_y")
    self.dropout_keep_prob_1 = tf.placeholder(tf.float32, name="dropout_keep_prob_1")
    self.dropout_keep_prob_2 = tf.placeholder(tf.float32, name="dropout_keep_prob_2")

    l2_loss = tf.constant(0.0)

    with tf.name_scope("embedding"):
        self.embedding_dict = tf.Variable(initial_embedding_dict, name="Embedding", dtype=tf.float32)
        self.embedded_chars = tf.nn.embedding_lookup(self.embedding_dict, self.input_x)
        # unstack embedded input into a list of sent_length tensors for static_rnn
        self.unstacked = tf.unstack(self.embedded_chars, sent_length, 1)

    with tf.name_scope("lstm"):
        # create an LSTM network
        lstm_cell = rnn.BasicLSTMCell(hidden_size)
        self.output, self.states = rnn.static_rnn(lstm_cell, self.unstacked, dtype=tf.float32)
        # mean-pool the per-timestep outputs
        self.pooling = tf.reduce_mean(self.output, 0)

    with tf.name_scope("linear"):
        weights = tf.get_variable(
            "W",
            shape=[hidden_size, class_num],
            initializer=tf.contrib.layers.xavier_initializer())
        bias = tf.Variable(tf.constant(0.1, shape=[class_num]), name="b")
        l2_loss += tf.nn.l2_loss(weights)
        l2_loss += tf.nn.l2_loss(bias)
        self.linear_result = tf.nn.xw_plus_b(self.pooling, weights, bias, name="linear")
        self.predictions = tf.arg_max(self.linear_result, 1, name="predictions")

    with tf.name_scope("loss"):
        losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.linear_result, labels=self.input_y)
        self.loss = tf.reduce_mean(losses) + l2_lambda * l2_loss

    with tf.name_scope("accuracy"):
        correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
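A minimal instantiation sketch; the enclosing class name is not shown in the snippet, so LstmClassifier below is a stand-in, and the embedding matrix is random:

import numpy as np

# Hypothetical class name wrapping the __init__ above.
model = LstmClassifier(sent_length=20,
                       class_num=2,
                       embedding_size=50,
                       initial_embedding_dict=np.random.randn(10000, 50).astype('float32'),
                       l2_lambda=1e-4,
                       hidden_size=64)
# model.input_x expects int32 word ids of shape [batch, 20];
# model.loss, model.predictions and model.accuracy are ready to fetch.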
def __init__(self, sent_length, class_num, embedding_size, l2_lambda):
    self.input_x = tf.placeholder(tf.float32, [None, sent_length, embedding_size], name="input_x")
    self.input_y = tf.placeholder(tf.float32, [None, class_num], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    l2_loss = tf.constant(0.0)

    with tf.name_scope("flat"):
        self.flatted = tf.reshape(self.input_x, [-1, sent_length * embedding_size])

    with tf.name_scope("linear"):
        weights = tf.get_variable(
            "W",
            shape=[sent_length * embedding_size, class_num],
            initializer=tf.contrib.layers.xavier_initializer())
        bias = tf.Variable(tf.constant(0.1, shape=[class_num]), name="b")
        l2_loss += tf.nn.l2_loss(weights)
        l2_loss += tf.nn.l2_loss(bias)
        self.linear_result = tf.nn.xw_plus_b(self.flatted, weights, bias, name="linear")
        self.predictions = tf.arg_max(self.linear_result, 1, name="predictions")

    with tf.name_scope("loss"):
        losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.linear_result, labels=self.input_y)
        self.loss = tf.reduce_mean(losses) + l2_lambda * l2_loss

    with tf.name_scope("accuracy"):
        correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
def __init__(self, input, n_in, n_out):
    self.W = tf.Variable(tf.zeros(shape=(n_in, n_out)), name="LR_W")
    self.b = tf.Variable(tf.zeros(shape=(n_out,)), name="LR_b")
    self.linear = tf.add(tf.matmul(input, self.W), self.b)
    self.p_y_given_x = tf.nn.softmax(self.linear)
    self.y_pred = tf.arg_max(self.p_y_given_x, 1)
def errors(self, y):
    return tf.reduce_mean(tf.cast(tf.not_equal(self.y_pred, tf.arg_max(y, 1)), dtype=tf.float32))
def f1_score(logits, targets_pl, one_hot=False):
    # NOTE: this formulation assumes binary classification with labels in {0, 1}
    targets = tf.to_int64(targets_pl)
    y_predicted = tf.arg_max(logits, 1)
    if one_hot:
        y_true = tf.arg_max(targets, 1)
    else:
        y_true = targets  # the labels are already class indices here
    # true positives: the product is 1 only where both prediction and label are 1
    tp = tf.count_nonzero(y_predicted * y_true)
    # true negatives: same idea on the inverted labels
    tn = tf.count_nonzero((y_predicted - 1) * (y_true - 1))
    fp = tf.count_nonzero(y_predicted * (y_true - 1))
    fn = tf.count_nonzero((y_predicted - 1) * y_true)
    # calculate accuracy, precision, recall and F1 score
    accuracy = (tp + tn) / (tp + fp + fn + tn)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1_score = (2 * precision * recall) / (precision + recall)
    tf.summary.scalar('accuracy', accuracy)
    tf.summary.scalar('precision', precision)
    tf.summary.scalar('recall', recall)
    tf.summary.scalar('f1-score', f1_score)
    f1_score = tf.reduce_mean(tf.cast(f1_score, 'float32'), name='f1_score_reduce_mean')
    return f1_score
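As a quick numerical check of these formulas: with tp = 8, fp = 2, fn = 4, precision = 8/10 = 0.8, recall = 8/12 ≈ 0.667, and F1 = 2 * 0.8 * 0.667 / (0.8 + 0.667) ≈ 0.727.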
def get_pre_y(self):
    # for a single sigmoid output one could round instead:
    # pre_y = tf.reshape(tf.round(tf.sigmoid(self._output)), [-1])
    pre_y = tf.arg_max(input=self._output, dimension=1)
    return pre_y
def get_class(self, index):
    label = self.db.test.labels[index:index + 1]
    # note: this adds a new arg_max op to the graph on every call
    return self.sess.run(tf.arg_max(label, 1))
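The per-call graph growth noted above can be avoided by building the op once; a sketch under the same attribute names (the placeholder and op names below are stand-ins, not from the original source):

# Build the arg_max op once against a placeholder, reuse it per lookup.
self._label_pl = tf.placeholder(tf.float32, [None, None])
self._class_op = tf.arg_max(self._label_pl, 1)

def get_class(self, index):
    label = self.db.test.labels[index:index + 1]
    return self.sess.run(self._class_op, feed_dict={self._label_pl: label})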