import numpy as np


def evaluate_one_row(candidates_tensor, true_context, sess, model, test_score, true_response):
    for batch in batch_iter(candidates_tensor, 512):
        candidate_responses = batch[:, 0, :]
        # Tile the single true context so it is paired with every candidate response.
        context_batch = np.repeat(true_context, candidate_responses.shape[0], axis=0)
        scores = sess.run(
            model.f_pos,
            feed_dict={model.context_batch: context_batch,
                       model.response_batch: candidate_responses,
                       model.neg_response_batch: candidate_responses}
        )
        for ind, score in enumerate(scores):
            # NaN never compares equal to anything, including float('NaN'),
            # so the check must use np.isnan/np.isinf rather than ==.
            if np.isinf(score) or np.isnan(score):
                print(ind, score)
                raise ValueError("non-finite score at index %d" % ind)
            # The row counts as correct only if no other candidate scores
            # at least as high as the true response.
            if score >= test_score and not np.array_equal(candidate_responses[ind], true_response):
                return False
    return True
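None of the snippets on this page include batch_iter itself; it is imported from each project's utilities. A minimal sketch consistent with the calls above (a 3-D numpy tensor of (context, response) pairs, a batch size, and an optional shuffle flag) might look like this; it is a hypothetical reconstruction, not the original helper:

def batch_iter(tensor, batch_size, shuffle=False):
    # Hypothetical reconstruction: yield successive row-batches of a 3-D
    # numpy tensor, optionally shuffling row order first.
    indices = np.arange(tensor.shape[0])
    if shuffle:
        np.random.shuffle(indices)
    for start in range(0, tensor.shape[0], batch_size):
        yield tensor[indices[start:start + batch_size]]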
def _train(train_tensor, batch_size, neg_size, model, optimizer, sess):
    avg_loss = 0
    for batch in batch_iter(train_tensor, batch_size, True):
        # Pair every positive batch with neg_size batches of sampled negatives.
        for neg_batch in neg_sampling_iter(train_tensor, batch_size, neg_size):
            loss = sess.run(
                [model.loss, optimizer],
                feed_dict={model.context_batch: batch[:, 0, :],
                           model.response_batch: batch[:, 1, :],
                           model.neg_response_batch: neg_batch[:, 1, :]}
            )
            avg_loss += loss[0]
    # Average over all (example, negative-sample) pairs seen this epoch.
    avg_loss = avg_loss / (train_tensor.shape[0] * neg_size)
    return avg_loss
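neg_sampling_iter is likewise not shown on this page. Judging from its two call sites, (tensor, batch_size, neg_size) during training and (tensor, 256, 1, 42) on the dev set, a plausible sketch draws neg_size random batches, with an optional fourth argument read as an RNG seed:

def neg_sampling_iter(tensor, batch_size, neg_size, seed=None):
    # Hypothetical sketch: each yielded batch supplies the negative
    # responses paired against one positive batch; a fixed seed keeps
    # the dev-set negatives identical across evaluations.
    rng = np.random.RandomState(seed)
    for _ in range(neg_size):
        rows = rng.randint(0, tensor.shape[0], size=batch_size)
        yield tensor[rows]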
def _forward_all(dev_tensor, model, sess):
    avg_dev_loss = 0
    for batch in batch_iter(dev_tensor, 256):
        # One negative batch per positive batch; the fixed seed (42) makes
        # the dev loss comparable across evaluations.
        for neg_batch in neg_sampling_iter(dev_tensor, 256, 1, 42):
            loss = sess.run(
                [model.loss],
                feed_dict={model.context_batch: batch[:, 0, :],
                           model.response_batch: batch[:, 1, :],
                           model.neg_response_batch: neg_batch[:, 1, :]}
            )
            avg_dev_loss += loss[0]
    avg_dev_loss = avg_dev_loss / dev_tensor.shape[0]
    return avg_dev_loss
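_train and _forward_all are meant to be called once per epoch. A minimal driver sketch, assuming model, optimizer, sess, and the tensors are built elsewhere (num_epochs, neg_size, and the batch size of 512 are illustrative values):

for epoch in range(num_epochs):
    train_loss = _train(train_tensor, 512, neg_size, model, optimizer, sess)
    dev_loss = _forward_all(dev_tensor, model, sess)
    print("epoch {}: train loss {:.4f}, dev loss {:.4f}".format(
        epoch, train_loss, dev_loss))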
# The two methods below assume: import numpy as np, import tensorflow as tf,
# from sklearn.metrics import confusion_matrix, plus the project-local
# utils and Options modules.
def test(self, sess, mode, seq1, len1, seq2, len2, labels):
    acc = 0
    loss = 0
    cnt = 0
    for d1, l1, d2, l2, l in utils.batch_iter(seq1, len1, seq2, len2, labels):
        tacc, tloss, tpred = sess.run(
            [self.accuracy, self.tot_loss, self.pred],
            feed_dict={
                self.input_seq1: d1,
                self.input_len1: l1,
                self.input_seq2: d2,
                self.input_len2: l2,
                self.labels: l,
                self.initial_state: np.zeros((
                    Options.batch_size,
                    2 * Options.lstm_dim * Options.lstm_layers
                )),
                # Disable dropout at evaluation time.
                self.lstm_keep_prob: 1.0,
                self.nnet_keep_prob: 1.0
            })
        cnt += 1
        acc += tacc
        loss += tloss
    # Mean over batches (all batches have the same fixed size).
    acc /= cnt
    loss /= cnt
    print("{0} Accuracy = {1}\t{0} Loss = {2}".format(mode, acc, loss))
def exploremodel(self, seq1, len1, seq2, len2, labels):
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "model.ckpt")
        preds = []
        for d1, l1, d2, l2, l in utils.batch_iter(seq1, len1, seq2, len2, labels):
            val1 = sess.run([self.pred], feed_dict={
                self.input_seq1: d1,
                self.input_len1: l1,
                self.input_seq2: d2,
                self.input_len2: l2,
                self.labels: l,
                self.initial_state: np.zeros((
                    Options.batch_size,
                    2 * Options.lstm_dim * Options.lstm_layers
                )),
                self.lstm_keep_prob: 1.0,
                self.nnet_keep_prob: 1.0
            })
            preds.extend(val1[0])
        # Truncate labels to match len(preds): 4900 is presumably the number
        # of examples covered by full-size batches, the ragged tail having
        # been dropped by batch_iter.
        classes = np.argmax(labels[:4900], axis=1)
        cm = confusion_matrix(classes, preds)
        print(cm)
        print(np.mean(np.asarray(classes) == np.asarray(preds)))
        # Row-normalise the confusion matrix to per-class recall.
        for row in cm:
            print(row / np.sum(row))