def main():
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Placeholder that will be fed image data.
x = tf.placeholder(tf.float32, [None, 784])
# Placeholder that will be fed the correct labels.
y_ = tf.placeholder(tf.float32, [None, 10])
# Define weight and bias.
W = weight_variable([784, 10])
b = bias_variable([10])
    # Define the model: softmax regression.
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Define our loss.
    # Note: log(softmax) is numerically unstable; in practice
    # tf.nn.softmax_cross_entropy_with_logits is the preferred formulation.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), axis=[1]))
# Define our optimizer.
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Define accuracy.
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
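A minimal sketch of driving the graph above, assuming the mnist, x, y_, train_step, and accuracy names defined in main():
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                        y_: mnist.test.labels}))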
Python tf.equal() usage examples:
def __init__(self, channels=3, n_class=2, cost="cross_entropy", cost_kwargs={}, **kwargs):
tf.reset_default_graph()
self.n_class = n_class
self.summaries = kwargs.get("summaries", True)
self.x = tf.placeholder("float", shape=[None, None, None, channels])
self.y = tf.placeholder("float", shape=[None, None, None, n_class])
self.keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, **kwargs)
self.cost = self._get_cost(logits, cost, cost_kwargs)
self.gradients_node = tf.gradients(self.cost, self.variables)
self.cross_entropy = tf.reduce_mean(cross_entropy(tf.reshape(self.y, [-1, n_class]),
tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))
self.predicter = pixel_wise_softmax_2(logits)
self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
def step_with_training(self, training=None):
def step(inputs, states):
input_shape = K.int_shape(inputs)
y_tm1 = self.layer.preprocess_input(
K.expand_dims(states[0], axis=1),
training
)
y_tm1 = K.reshape(y_tm1, (-1, input_shape[-1]))
inputs_sum = tf.reduce_sum(inputs)
def inputs_f(): return inputs
def output_f(): return y_tm1
current_inputs = tf.case(
[(tf.equal(inputs_sum, 0.0), output_f)],
default=inputs_f
)
return self.layer.step(
current_inputs,
states
)
return step
def add_evaluation_step(graph, final_tensor_name, ground_truth_tensor_name):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
graph: Container for the existing model's Graph.
final_tensor_name: Name string for the new final node that produces results.
ground_truth_tensor_name: Name string for the node we feed ground truth data
into.
Returns:
  The accuracy evaluation op.
"""
result_tensor = graph.get_tensor_by_name(ensure_name_has_port(
final_tensor_name))
ground_truth_tensor = graph.get_tensor_by_name(ensure_name_has_port(
ground_truth_tensor_name))
correct_prediction = tf.equal(
tf.argmax(result_tensor, 1), tf.argmax(ground_truth_tensor, 1))
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
return evaluation_step
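The ensure_name_has_port helper is not shown in this snippet; presumably it just guarantees the tensor name carries an output port, along these lines:
def ensure_name_has_port(tensor_name):
    """Appends the default output port ':0' when the name has none (sketch)."""
    if ':' not in tensor_name:
        return tensor_name + ':0'
    return tensor_name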
def build_loss(self, inp, output):
y_gt = inp['y_gt']
y_out = output['y_out']
ce = tfplus.nn.CE()({'y_gt': y_gt, 'y_out': y_out})
num_ex_f = tf.to_float(tf.shape(inp['x'])[0])
ce = tf.reduce_sum(ce) / num_ex_f
self.add_loss(ce)
total_loss = self.get_loss()
self.register_var('loss', total_loss)
ans = tf.argmax(y_gt, 1)
correct = tf.equal(ans, tf.argmax(y_out, 1))
top5_acc = tf.reduce_sum(tf.to_float(
tf.nn.in_top_k(y_out, ans, 5))) / num_ex_f
self.register_var('top5_acc', top5_acc)
acc = tf.reduce_sum(tf.to_float(correct)) / num_ex_f
self.register_var('acc', acc)
return total_loss
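For reference: tf.nn.in_top_k(y_out, ans, 5) yields one boolean per example that is True when the true class id in ans ranks among the 5 highest scores in y_out, so summing its float cast and dividing by the batch size gives the top-5 accuracy registered above.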
def build_loss_grad(self, inp, output):
y_gt = inp['y_gt']
y_out = output['y_out']
ce = tfplus.nn.CE()({'y_gt': y_gt, 'y_out': y_out})
num_ex_f = tf.to_float(tf.shape(inp['x'])[0])
ce = tf.reduce_sum(ce) / num_ex_f
self.add_loss(ce)
learn_rate = self.get_option('learn_rate')
total_loss = self.get_loss()
self.register_var('loss', total_loss)
eps = self.get_option('adam_eps')
optimizer = tf.train.AdamOptimizer(learn_rate, epsilon=eps)
    global_step = tf.Variable(0.0, trainable=False)
self.register_var('step', global_step)
train_step = optimizer.minimize(
total_loss, global_step=global_step)
self.register_var('train_step', train_step)
correct = tf.equal(tf.argmax(y_gt, 1), tf.argmax(y_out, 1))
acc = tf.reduce_sum(tf.to_float(correct)) / num_ex_f
self.register_var('acc', acc)
pass
def _build(self):
    # Build the linear layer.
self._lin = photinia.Linear('LINEAR', self._input_size, self._num_classes).build()
    # Placeholders for the inputs and labels.
x = tf.placeholder(dtype=photinia.D_TYPE, shape=[None, self._input_size])
y_ = tf.placeholder(dtype=photinia.D_TYPE, shape=[None, self._num_classes])
    # Set up the layer on the input.
y = self._lin.setup(x)
    # Loss: softmax cross-entropy.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    # Accuracy.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, photinia.D_TYPE))
    # Register the train and predict slots.
self._add_slot(
'train',
outputs=loss,
inputs=(x, y_),
updates=tf.train.GradientDescentOptimizer(0.5).minimize(loss)
)
self._add_slot(
'predict',
outputs=accuracy,
inputs=(x, y_)
)
def categorical_accuracy_with_variable_timestep(y_true, y_pred):
# Actually discarding is not needed if the dummy is an all-zeros array
# (It is indeed encoded in an all-zeros array by
# CaptionPreprocessing.preprocess_batch)
y_true = y_true[:, :-1, :] # Discard the last timestep/word (dummy)
y_pred = y_pred[:, :-1, :] # Discard the last timestep/word (dummy)
# Flatten the timestep dimension
shape = tf.shape(y_true)
y_true = tf.reshape(y_true, [-1, shape[-1]])
y_pred = tf.reshape(y_pred, [-1, shape[-1]])
# Discard rows that are all zeros as they represent dummy or padding words.
is_zero_y_true = tf.equal(y_true, 0)
is_zero_row_y_true = tf.reduce_all(is_zero_y_true, axis=-1)
y_true = tf.boolean_mask(y_true, ~is_zero_row_y_true)
y_pred = tf.boolean_mask(y_pred, ~is_zero_row_y_true)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y_true, axis=1),
tf.argmax(y_pred, axis=1)),
dtype=tf.float32))
return accuracy
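A quick sanity check of the metric: one sequence, three timesteps, two classes, with the last timestep as the all-zeros dummy (illustrative values):
y_true = tf.constant([[[1., 0.], [0., 0.], [0., 0.]]], tf.float32)
y_pred = tf.constant([[[.9, .1], [.2, .8], [.5, .5]]], tf.float32)
acc = categorical_accuracy_with_variable_timestep(y_true, y_pred)
with tf.Session() as sess:
    print(sess.run(acc))  # 1.0: the all-zero padding row is masked out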
# As Keras stores a function's name as its metric's name
def __init__(self, preds, labels, model, num_nodes, pos_weight, norm):
preds_sub = preds
labels_sub = labels
self.cost = norm * tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub, targets=labels_sub, pos_weight=pos_weight))
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) # Adam Optimizer
# Latent loss
self.log_lik = self.cost
self.kl = (0.5 / num_nodes) * tf.reduce_mean(tf.reduce_sum(1 + 2 * model.z_log_std - tf.square(model.z_mean) -
tf.square(tf.exp(model.z_log_std)), 1))
self.cost -= self.kl
self.opt_op = self.optimizer.minimize(self.cost)
self.grads_vars = self.optimizer.compute_gradients(self.cost)
self.correct_prediction = tf.equal(tf.cast(tf.greater_equal(tf.sigmoid(preds_sub), 0.5), tf.int32),
tf.cast(labels_sub, tf.int32))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
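The objective assembled here is the negative ELBO of a variational graph autoencoder: self.kl equals minus the KL divergence between q(z|X) and the unit-Gaussian prior (scaled by 1/num_nodes), so self.cost -= self.kl adds the KL penalty to the weighted reconstruction cross-entropy.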
def generate_mask(img_mask_list, h, w, l):
img_masks, loss_masks = [], []
for i in range(l):
# generate image mask
img_mask = img_mask_list[i]
img_mask = tf.cast(tf.image.decode_png(img_mask), tf.float32)
img_mask = tf.reshape(img_mask, (h, w))
img_masks.append(img_mask)
# generate loss mask
s_total = h * w
s_mask = tf.reduce_sum(img_mask)
def f1(): return img_mask*((s_total-s_mask)/s_mask-1)+1
def f2(): return tf.zeros_like(img_mask)
def f3(): return tf.ones_like(img_mask)
loss_mask = tf.case([(tf.equal(s_mask, 0), f2), \
(tf.less(s_mask, s_total/2), f1)],
default=f3)
loss_masks.append(loss_mask)
return tf.stack(img_masks), tf.stack(loss_masks)
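To make the loss-mask branches concrete (assuming a binary 0/1 mask): for an h*w = 100*100 image with s_mask = 1000 foreground pixels, f1 gives each foreground pixel weight (10000 - 1000)/1000 = 9 and each background pixel weight 1, balancing the classes; an empty mask gets all zeros (f2), and a mostly-foreground mask gets uniform ones (f3).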
def _largest_size_at_most(height, width, largest_side):
"""Computes new shape with the largest side equal to `largest_side`.
Computes new shape with the largest side equal to `largest_side` while
preserving the original aspect ratio.
Args:
height: an int32 scalar tensor indicating the current height.
width: an int32 scalar tensor indicating the current width.
largest_side: A python integer or scalar `Tensor` indicating the size of
the largest side after resize.
Returns:
new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
"""
largest_side = tf.convert_to_tensor(largest_side, dtype=tf.int32)
height = tf.to_float(height)
width = tf.to_float(width)
largest_side = tf.to_float(largest_side)
scale = tf.cond(tf.greater(height, width),
lambda: largest_side / height,
lambda: largest_side / width)
new_height = tf.to_int32(height * scale)
new_width = tf.to_int32(width * scale)
return new_height, new_width
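An illustrative resize built on this helper (a sketch; the list of two scalar tensors is packed into the size argument of tf.image.resize_images):
image = tf.placeholder(tf.uint8, [None, None, 3])
shape = tf.shape(image)
new_height, new_width = _largest_size_at_most(shape[0], shape[1], 512)
resized = tf.image.resize_images(
    tf.expand_dims(tf.to_float(image), 0), [new_height, new_width])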
def __init__(self, embedding_length):
self._calculator_loom = CalculatorLoom(embedding_length)
self._labels_placeholder = tf.placeholder(tf.float32)
self._classifier_weights = tf.Variable(
tf.truncated_normal([embedding_length, 3],
dtype=tf.float32,
stddev=1),
name='classifier_weights')
self._output_weights = tf.matmul(
self._calculator_loom.output(), self._classifier_weights)
self._loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=self._output_weights, labels=self._labels_placeholder))
    self._true_labels = tf.argmax(self._labels_placeholder, axis=1)
    self._prediction = tf.argmax(self._output_weights, axis=1)
self._accuracy = tf.reduce_mean(tf.cast(
tf.equal(self._true_labels, self._prediction),
dtype=tf.float32))
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(
prediction, tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
def train_neural_network(x):
prediction = convolutional_neural_network(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
hm_epochs = 10
with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
for epoch in range(hm_epochs):
epoch_loss = 0
for _ in range(int(mnist.train.num_examples / batch_size)):
epoch_x, epoch_y = mnist.train.next_batch(batch_size)
_, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
epoch_loss += c
print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
Source: dermatologic_preprocessing.py (project isbi2017-part3, author learningtitans):
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
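Typical use, modeled on the Inception preprocessing pipeline; distort_color stands in for any per-case transform, and the snippet assumes control_flow_ops was imported from tensorflow.python.ops:
image = apply_with_random_selector(
    image,
    lambda x, ordering: distort_color(x, ordering),
    num_cases=4)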
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
  The accuracy evaluation op.
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(tf.argmax(result_tensor, 1), \
tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step
def is_same_dynamic_shape(x, y):
"""
Whether `x` and `y` has the same dynamic shape.
:param x: A Tensor.
:param y: A Tensor.
:return: A scalar Tensor of `bool`.
"""
    # TensorFlow has a bug where static shape inference goes wrong in
    # nested tf.cond()'s, so instead of comparing the shapes of x and y
    # directly we compare the concatenations of the two shapes.
return tf.cond(
tf.equal(tf.rank(x), tf.rank(y)),
lambda: tf.reduce_all(tf.equal(
tf.concat([tf.shape(x), tf.shape(y)], 0),
tf.concat([tf.shape(y), tf.shape(x)], 0))),
lambda: tf.convert_to_tensor(False, tf.bool))
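A small check of the helper (sketch):
import numpy as np
a = tf.placeholder(tf.float32, [None, None])
b = tf.placeholder(tf.float32, [None, None])
same = is_same_dynamic_shape(a, b)
with tf.Session() as sess:
    print(sess.run(same, {a: np.zeros((2, 3)), b: np.zeros((2, 3))}))  # True
    print(sess.run(same, {a: np.zeros((2, 3)), b: np.zeros((4, 1))}))  # False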
def cal_loss(self):
one_hot_labels = tf.one_hot(
self.labels, depth=self.conf.class_num,
axis=self.channel_axis, name='labels/one_hot')
losses = tf.losses.softmax_cross_entropy(
one_hot_labels, self.predictions, scope='loss/losses')
self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
self.decoded_preds = tf.argmax(
self.predictions, self.channel_axis, name='accuracy/decode_pred')
correct_prediction = tf.equal(
self.labels, self.decoded_preds,
name='accuracy/correct_pred')
self.accuracy_op = tf.reduce_mean(
tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
name='accuracy/accuracy_op')
# weights = tf.cast(
# tf.greater(self.decoded_preds, 0, name='m_iou/greater'),
# tf.int32, name='m_iou/weights')
    weights = tf.cast(
        tf.less(self.labels, self.conf.channel, name='m_iou/less'),
        tf.int64, name='m_iou/weights')
    # Mask out ignored labels before feeding the metric.
    labels = tf.multiply(self.labels, weights, name='m_iou/mul')
    self.m_iou, self.miou_op = tf.metrics.mean_iou(
        labels, self.decoded_preds, self.conf.class_num,
        weights, name='m_iou/m_ious')
def create(config):
batch_size = config["batch_size"]
x = tf.placeholder(tf.float32, [batch_size, X_DIMS[0], X_DIMS[1], 1], name="x")
y = tf.placeholder(tf.float32, [batch_size, Y_DIMS], name="y")
hidden = hidden_layers(config, x)
output = output_layer(config, hidden)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y), name="loss")
output = tf.nn.softmax(output)
correct_prediction = tf.equal(tf.argmax(output,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
variables = tf.trainable_variables()
optimizer = tf.train.GradientDescentOptimizer(config['learning_rate']).minimize(loss)
set_tensor("x", x)
set_tensor("y", y)
set_tensor("loss", loss)
set_tensor("optimizer", optimizer)
set_tensor("accuracy", accuracy)
def _deepfool2(model, x, epochs, eta, clip_min, clip_max, min_prob):
y0 = tf.stop_gradient(tf.reshape(model(x), [-1])[0])
y0 = tf.to_int32(tf.greater(y0, 0.5))
def _cond(i, z):
xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
y = tf.stop_gradient(tf.reshape(model(xadv), [-1])[0])
y = tf.to_int32(tf.greater(y, 0.5))
return tf.logical_and(tf.less(i, epochs), tf.equal(y0, y))
def _body(i, z):
xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
y = tf.reshape(model(xadv), [-1])[0]
g = tf.gradients(y, xadv)[0]
dx = - y * g / tf.norm(g)
return i+1, z+dx
_, noise = tf.while_loop(_cond, _body, [0, tf.zeros_like(x)],
name='_deepfool2_impl', back_prop=False)
return noise
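_deepfool2 returns only the accumulated perturbation; a sketch of turning it into the adversarial example, mirroring the update inside _cond and _body (model and x assumed given; min_prob is accepted but unused in this binary variant):
eta = 0.01
noise = _deepfool2(model, x, epochs=3, eta=eta,
                   clip_min=0.0, clip_max=1.0, min_prob=0.0)
xadv = tf.clip_by_value(x + noise * (1 + eta), 0.0, 1.0)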
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
  The accuracy evaluation op.
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(tf.argmax(result_tensor, 1), \
tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step
def dice_accuracy(decoded_predictions, annotations, class_nums):
DiceRatio = tf.constant(0,tf.float32)
misclassnum = tf.constant(0,tf.float32)
class_num = tf.constant(class_nums,tf.float32)
sublist = []
for index in range(1,class_nums-2):
current_annotation = tf.cast(tf.equal(tf.ones_like(annotations)*index,\
annotations),tf.float32)
        current_prediction = tf.cast(tf.equal(tf.ones_like(decoded_predictions)*index,\
            decoded_predictions), tf.float32)
        # Overlap == 2 exactly where annotation and prediction both mark this class.
        Overlap = tf.add(current_annotation, current_prediction)
        Common = tf.reduce_sum(tf.cast(tf.equal(tf.ones_like(Overlap)*2, Overlap),\
            tf.float32), [0,1,2,3])
        annotation_num = tf.reduce_sum(current_annotation, [0,1,2,3])
        predict_num = tf.reduce_sum(current_prediction, [0,1,2,3])
all_num = tf.add(annotation_num,predict_num)
Sub_DiceRatio = Common*2/tf.clip_by_value(all_num, 1e-10, 1e+10)
misclassnum = tf.cond(tf.equal(Sub_DiceRatio,0.0), lambda: misclassnum + 1, lambda: misclassnum)
sublist.append(Sub_DiceRatio)
DiceRatio = DiceRatio + Sub_DiceRatio
DiceRatio = DiceRatio/tf.clip_by_value(tf.cast((class_num-misclassnum-3),tf.float32),1e-10,1e+1000)
return DiceRatio, sublist
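Per class, Sub_DiceRatio is the Dice coefficient 2*|A ∩ B| / (|A| + |B|): Common counts the intersection pixels (where Overlap equals 2), all_num sums the annotated and predicted pixel counts, and the clip guards against division by zero when a class is absent.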
def dice_accuracy(decoded_predictions, annotations, class_nums):
DiceRatio = tf.constant(0,tf.float32)
misclassnum = tf.constant(0,tf.float32)
class_num = tf.constant(class_nums,tf.float32)
sublist = []
for index in range(1,class_nums-2):
current_annotation = tf.cast(tf.equal(tf.ones_like(annotations)*index,\
annotations),tf.float32)
        current_prediction = tf.cast(tf.equal(tf.ones_like(decoded_predictions)*index,\
            decoded_predictions), tf.float32)
        Overlap = tf.add(current_annotation, current_prediction)
        Common = tf.reduce_sum(tf.cast(tf.equal(tf.ones_like(Overlap)*2, Overlap),\
            tf.float32), [0,1,2,3])
        annotation_num = tf.reduce_sum(current_annotation, [0,1,2,3])
        predict_num = tf.reduce_sum(current_prediction, [0,1,2,3])
        all_num = tf.add(annotation_num, predict_num)
        Sub_DiceRatio = Common*2/tf.clip_by_value(all_num, 1e-10, 1e+10)
misclassnum = tf.cond(tf.equal(Sub_DiceRatio,0.0), lambda: misclassnum + 1, lambda: misclassnum)
sublist.append(Sub_DiceRatio)
DiceRatio = DiceRatio + Sub_DiceRatio
DiceRatio = DiceRatio/tf.clip_by_value(tf.cast((class_num-misclassnum-3),tf.float32),1e-10,1e+1000)
return DiceRatio, sublist
def cal_loss(self):
expand_annotations = tf.expand_dims(
self.annotations, -1, name='annotations/expand_dims')
one_hot_annotations = tf.squeeze(
expand_annotations, axis=[self.channel_axis],
name='annotations/squeeze')
one_hot_annotations = tf.one_hot(
one_hot_annotations, depth=self.conf.class_num,
axis=self.channel_axis, name='annotations/one_hot')
losses = tf.losses.softmax_cross_entropy(
one_hot_annotations, self.predictions, scope='loss/losses')
self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
self.decoded_predictions = tf.argmax(
self.predictions, self.channel_axis, name='accuracy/decode_pred')
self.dice_accuracy_op, self.sub_dice_list = ops.dice_accuracy(self.decoded_predictions,\
self.annotations,self.conf.class_num)
correct_prediction = tf.equal(
self.annotations, self.decoded_predictions,
name='accuracy/correct_pred')
self.accuracy_op = tf.reduce_mean(
tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
name='accuracy/accuracy_op')
def plot(samples):
width = min(12,int(np.sqrt(len(samples))))
fig = plt.figure(figsize=(width, width))
gs = gridspec.GridSpec(width, width)
gs.update(wspace=0.05, hspace=0.05)
for ind, sample in enumerate(samples):
if ind >= width*width:
break
ax = plt.subplot(gs[ind])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
sample = sample * 0.5 + 0.5
sample = np.transpose(sample, (1, 2, 0))
plt.imshow(sample)
return fig
Source: beam_aligner.py (project almond-nnparser, author Stanford-Mobisocial-IoT-Lab):
def _maybe_tensor_gather_helper(gather_indices, gather_from, batch_size,
range_size, gather_shape):
"""Maybe applies _tensor_gather_helper.
  This applies _tensor_gather_helper when gather_from has at least as many
  dimensions as the length of gather_shape. It is used in conjunction with nest so
that we don't apply _tensor_gather_helper to inapplicable values like scalars.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
or the original tensor if its dimensions are too small.
"""
if gather_from.shape.ndims >= len(gather_shape):
return _tensor_gather_helper(
gather_indices=gather_indices,
gather_from=gather_from,
batch_size=batch_size,
range_size=range_size,
gather_shape=gather_shape)
else:
return gather_from
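The companion _tensor_gather_helper is not shown above; a sketch of it, modeled on the TensorFlow seq2seq beam-search implementation this code mirrors:
def _tensor_gather_helper(gather_indices, gather_from, batch_size,
                          range_size, gather_shape):
    """Gathers range_size-sized blocks per batch entry (sketch)."""
    # Offset each batch's indices into the flattened [batch * range] axis.
    range_ = tf.expand_dims(tf.range(batch_size) * range_size, 1)
    gather_indices = tf.reshape(gather_indices + range_, [-1])
    output = tf.gather(tf.reshape(gather_from, gather_shape), gather_indices)
    final_shape = tf.shape(gather_from)[:1 + len(gather_shape)]
    return tf.reshape(output, final_shape)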