def value_transition(self, curr_state, next_symbols, batch_size):
    first_value_token = self.num_functions + self.num_begin_tokens + self.num_control_tokens
    num_value_tokens = self.output_size - first_value_token
    with tf.name_scope('grammar_transition'):
        adjusted_next_symbols = tf.where(next_symbols >= self.num_control_tokens,
                                         next_symbols + (first_value_token - self.num_control_tokens),
                                         next_symbols)

        assert1 = tf.Assert(
            tf.reduce_all(tf.logical_and(next_symbols < num_value_tokens, next_symbols >= 0)),
            [curr_state, next_symbols])
        with tf.control_dependencies([assert1]):
            transitions = tf.gather(tf.constant(self.transition_matrix), curr_state)
        assert transitions.get_shape()[1:] == (self.output_size,)

        indices = tf.stack((tf.range(0, batch_size), adjusted_next_symbols), axis=1)
        next_state = tf.gather_nd(transitions, indices)

        assert2 = tf.Assert(tf.reduce_all(next_state >= 0),
                            [curr_state, adjusted_next_symbols, next_state])
        with tf.control_dependencies([assert2]):
            return tf.identity(next_state)
def gauss_KL(mu1, logstd1, mu2, logstd2):
    """Returns the KL divergence between two multivariate Gaussians, component-wise.

    It assumes the covariance matrix is diagonal. All inputs have shape (n,a).
    It is not necessary to know the number of actions because reduce_sum will
    sum over this to get the `d` constant offset. The part consisting of the
    trace in the formula is blended with the mean difference squared due to the
    common "denominator" of var2_na. This formula generalizes for an arbitrary
    number of actions. I think mu2 and logstd2 should represent the policy
    before the update.

    Returns the KL divergence for each of the n components in the minibatch;
    a reduce_mean is applied outside this function.
    """
    var1_na = tf.exp(2. * logstd1)
    var2_na = tf.exp(2. * logstd2)

    tmp_matrix = 2. * (logstd2 - logstd1) + (var1_na + tf.square(mu1 - mu2)) / var2_na - 1
    kl_n = tf.reduce_sum(0.5 * tmp_matrix, axis=[1])  # Don't forget the 1/2 !!

    # KL divergence must be non-negative (up to numerical error).
    assert_op = tf.Assert(tf.reduce_all(kl_n >= -0.0000001), [kl_n])
    with tf.control_dependencies([assert_op]):
        kl_n = tf.identity(kl_n)
    return kl_n
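# A minimal sanity-check sketch for gauss_KL (illustrative, not part of the
# original snippet): the KL divergence between a distribution and itself is
# zero, and the tf.Assert above should pass silently.
import numpy as np
import tensorflow as tf

mu = tf.constant(np.random.randn(4, 3), dtype=tf.float32)
logstd = tf.constant(np.random.randn(4, 3), dtype=tf.float32)
kl_same = gauss_KL(mu, logstd, mu, logstd)
with tf.Session() as sess:
    print(sess.run(kl_same))  # zeros, up to float precision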
def get_cubic_root(self):
    # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
    # where x = sqrt(mu).
    # We substitute x, which is sqrt(mu), with x = y + 1.
    # It gives y^3 + py = q
    # where p = (D^2 h_min^2)/(2*C) and q = -p.
    # We use Vieta's substitution to compute the root.
    # There is only one real solution y (which is in [0, 1]).
    # http://mathworld.wolfram.com/VietasSubstitution.html
    # assert_array = \
    #   [tf.Assert(tf.logical_not(tf.is_nan(self._dist_to_opt_avg)), [self._dist_to_opt_avg,]),
    #    tf.Assert(tf.logical_not(tf.is_nan(self._h_min)), [self._h_min,]),
    #    tf.Assert(tf.logical_not(tf.is_nan(self._grad_var)), [self._grad_var,]),
    #    tf.Assert(tf.logical_not(tf.is_inf(self._dist_to_opt_avg)), [self._dist_to_opt_avg,]),
    #    tf.Assert(tf.logical_not(tf.is_inf(self._h_min)), [self._h_min,]),
    #    tf.Assert(tf.logical_not(tf.is_inf(self._grad_var)), [self._grad_var,])]
    # with tf.control_dependencies(assert_array):
    # EPS in the numerator to prevent momentum being exactly one in case of 0 gradient.
    p = (self._dist_to_opt_avg + EPS)**2 * (self._h_min + EPS)**2 / 2 / (self._grad_var + EPS)
    w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0 / 3.0)
    y = w - p / 3.0 / (w + EPS)
    x = y + 1
    return x
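# A quick numerical check of the Vieta's-substitution root used above
# (illustrative sketch with NumPy, not part of the original source): for
# y^3 + p*y = -p, the closed form should drive the residual to ~0 and
# x = y + 1 = sqrt(mu) should land in [0, 1].
import numpy as np

p = 1.0
w3 = (-np.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
w = np.sign(w3) * np.abs(w3)**(1.0 / 3.0)
y = w - p / 3.0 / w
print(y**3 + p * y + p)  # residual, ~0
print(y + 1)             # sqrt(mu), approx 0.318 for p = 1.0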
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    original_shape = tf.shape(image)
    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ['Rank of image must be equal to 3.'])
    cropped_shape = control_flow_ops.with_dependencies(
        [rank_assertion],
        tf.stack([crop_height, crop_width, original_shape[2]]))
    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])
    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

    # Use tf.slice instead of crop_to_bounding_box as it accepts tensors to
    # define the crop size.
    image = control_flow_ops.with_dependencies(
        [size_assertion],
        tf.slice(image, offsets, cropped_shape))
    return tf.reshape(image, cropped_shape)
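# Illustrative usage sketch for _crop (not part of the original source): the
# assertions surface as InvalidArgumentError at run time when their conditions
# fail, e.g. when the requested crop is larger than the image.
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops  # needed by _crop above

image = tf.zeros([8, 8, 3])
ok = _crop(image, offset_height=0, offset_width=0, crop_height=4, crop_width=4)
too_big = _crop(image, 0, 0, 16, 16)
with tf.Session() as sess:
    print(sess.run(ok).shape)  # (4, 4, 3)
    try:
        sess.run(too_big)
    except tf.errors.InvalidArgumentError:
        print('check failed at run time')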
def preturn_network(rewards, discounts, values):
    # The first reward must be zero and the first discount must be one.
    first_reward = tf.Assert(
        tf.reduce_all(tf.equal(rewards[:, 0, :], 0.0)), [rewards[:, 0, :]])
    first_discount = tf.Assert(
        tf.reduce_all(tf.equal(discounts[:, 0, :], 1.0)), [discounts[:, 0, :]])

    with tf.control_dependencies([first_reward, first_discount]):
        with tf.variable_scope('preturn'):
            accum_value_discounts = tf.cumprod(discounts, axis=1, exclusive=False)
            accum_reward_discounts = tf.cumprod(discounts, axis=1, exclusive=True)
            discounted_values = values * accum_value_discounts
            discounted_rewards = rewards * accum_reward_discounts
            cumulative_rewards = tf.cumsum(discounted_rewards, axis=1)
            preturns = cumulative_rewards + discounted_values
            util.activation_summary(preturns)
            return preturns
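# A small numeric sketch (not from the original source) of the exclusive vs.
# inclusive cumprod used above: with discounts [g0, g1, g2], values are scaled
# by the inclusive products [g0, g0*g1, g0*g1*g2], while rewards are scaled by
# the exclusive products [1, g0, g0*g1], matching the usual n-step return.
import tensorflow as tf

discounts = tf.constant([[0.5, 0.9, 0.9]])
inclusive = tf.cumprod(discounts, axis=1, exclusive=False)  # [[0.5, 0.45, 0.405]]
exclusive = tf.cumprod(discounts, axis=1, exclusive=True)   # [[1.0, 0.5, 0.45]]
with tf.Session() as sess:
    print(sess.run([inclusive, exclusive]))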
def decoderFn(num_samples=1):
    class decoder_func(slim.data_decoder.DataDecoder):
        @staticmethod
        def list_items():
            return ['image', 'label']

        @staticmethod
        def decode(data, items):
            image_buffer = _decode_from_string(data)
            # if num_samples == 1:
            #     tf.Assert(tf.shape(image_buffer)[0] == 1, image_buffer)
            #     image_buffer = image_buffer[0]
            # else:
            image_buffer = tf.pack(image_buffer)  # tf.pack is the pre-1.0 name for tf.stack
            return image_buffer
    return decoder_func
def get_function_init_state(self, function_tokens):
    next_state = tf.gather(self.function_states,
                           function_tokens - (self.num_begin_tokens + self.num_control_tokens))
    assert2 = tf.Assert(tf.reduce_all(next_state >= 0), [function_tokens])
    with tf.control_dependencies([assert2]):
        return tf.identity(next_state)
def _tf_nth(fns, n):
    """Runs only the nth element of fns, where n is a scalar integer tensor."""
    cases = [(tf.equal(tf.constant(i, n.dtype), n), fn)
             for i, fn in enumerate(fns)]
    final_pred, final_fn = cases.pop()

    def default():
        # If control falls through to the default branch, assert that n indeed
        # selects the final function; otherwise n is out of range.
        with tf.control_dependencies([
                tf.Assert(final_pred, [n, len(fns)], name='nth_index_error')]):
            return final_fn()

    if len(fns) == 1:
        return default()
    return tf.case(cases, default)
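# Illustrative usage of _tf_nth (a sketch, not from the original source): pick
# one of several parameterless branch functions by a runtime scalar index.
import tensorflow as tf

n = tf.placeholder(tf.int32, [])
out = _tf_nth([lambda: tf.constant(10),
               lambda: tf.constant(20),
               lambda: tf.constant(30)], n)
with tf.Session() as sess:
    print(sess.run(out, {n: 1}))  # 20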
def crop(images, boxes, batch_inds, stride=1, pooled_height=7, pooled_width=7, scope='ROIAlign'):
    """Crops areas of features into a fixed size.

    Params:
    --------
    images: a 4-d Tensor of shape (N, H, W, C)
    boxes: rois in the original image, of shape (N, ..., 4), [x1, y1, x2, y2]
    batch_inds:

    Returns:
    --------
    A Tensor of shape (N, pooled_height, pooled_width, C)
    """
    with tf.name_scope(scope):
        boxes = boxes / (stride + 0.0)
        boxes = tf.reshape(boxes, [-1, 4])

        # Normalize the boxes and swap the x and y dimensions.
        shape = tf.shape(images)
        boxes = tf.reshape(boxes, [-1, 2])  # to (x, y)
        xs = boxes[:, 0]
        ys = boxes[:, 1]
        xs = xs / tf.cast(shape[2], tf.float32)
        ys = ys / tf.cast(shape[1], tf.float32)
        boxes = tf.concat([ys[:, tf.newaxis], xs[:, tf.newaxis]], axis=1)
        boxes = tf.reshape(boxes, [-1, 4])  # to (y1, x1, y2, x2)

        # if batch_inds is False:
        #     num_boxes = tf.shape(boxes)[0]
        #     batch_inds = tf.zeros([num_boxes], dtype=tf.int32, name='batch_inds')
        # batch_inds = boxes[:, 0] * 0
        # batch_inds = tf.cast(batch_inds, tf.int32)

        # assert_op = tf.Assert(tf.greater(tf.shape(images)[0], tf.reduce_max(batch_inds)), [images, batch_inds])
        assert_op = tf.Assert(tf.greater(tf.size(images), 0), [images, batch_inds])
        with tf.control_dependencies([assert_op, images, batch_inds]):
            return tf.image.crop_and_resize(images, boxes, batch_inds,
                                            [pooled_height, pooled_width],
                                            method='bilinear',
                                            name='Crop')
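# Hedged usage sketch for crop (not from the original source): one 2x-strided
# feature map and one ROI given as [x1, y1, x2, y2] in image coordinates.
import tensorflow as tf

features = tf.random_normal([1, 32, 32, 256])   # N, H, W, C
rois = tf.constant([[0.0, 0.0, 63.0, 63.0]])    # one box in image coordinates
batch_inds = tf.constant([0], dtype=tf.int32)   # which image each box comes from
pooled = crop(features, rois, batch_inds, stride=2)
with tf.Session() as sess:
    print(sess.run(pooled).shape)  # (1, 7, 7, 256)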
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes.

    Note that the method doesn't assume we know the input image size but it does
    assume we know the input image rank.

    Args:
      image: an image of shape [height, width, channels].
      offset_height: a scalar tensor indicating the height offset.
      offset_width: a scalar tensor indicating the width offset.
      crop_height: the height of the cropped image.
      crop_width: the width of the cropped image.

    Returns:
      the cropped (and resized) image.

    Raises:
      InvalidArgumentError: if the rank is not 3 or if the image dimensions are
        less than the crop size.
    """
    original_shape = tf.shape(image)
    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ['Rank of image must be equal to 3.'])
    with tf.control_dependencies([rank_assertion]):
        cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])
    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

    # Use tf.slice instead of crop_to_bounding_box as it accepts tensors to
    # define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.slice(image, offsets, cropped_shape)
    return tf.reshape(image, cropped_shape)
def _get_cubic_root(self):
    """Get the cubic root."""
    # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
    # where x = sqrt(mu).
    # We substitute x, which is sqrt(mu), with x = y + 1.
    # It gives y^3 + py = q
    # where p = (D^2 h_min^2)/(2*C) and q = -p.
    # We use Vieta's substitution to compute the root.
    # There is only one real solution y (which is in [0, 1]).
    # http://mathworld.wolfram.com/VietasSubstitution.html
    assert_array = [
        tf.Assert(
            tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
            [self._dist_to_opt_avg, ]),
        tf.Assert(
            tf.logical_not(tf.is_nan(self._h_min)),
            [self._h_min, ]),
        tf.Assert(
            tf.logical_not(tf.is_nan(self._grad_var)),
            [self._grad_var, ]),
        tf.Assert(
            tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
            [self._dist_to_opt_avg, ]),
        tf.Assert(
            tf.logical_not(tf.is_inf(self._h_min)),
            [self._h_min, ]),
        tf.Assert(
            tf.logical_not(tf.is_inf(self._grad_var)),
            [self._grad_var, ])
    ]
    with tf.control_dependencies(assert_array):
        p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
        w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
        w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0 / 3.0)
        y = w - p / 3.0 / w
        x = y + 1
        return x
def correlation_loss(source_samples, target_samples, weight, name='corr_loss'):
    """Adds a similarity loss term, the correlation between two representations.

    Args:
      source_samples: a tensor of shape [num_samples, num_features]
      target_samples: a tensor of shape [num_samples, num_features]
      weight: a scalar weight for the loss.
      name: optional name scope for summary tags.

    Returns:
      a scalar tensor representing the correlation loss value.
    """
    with tf.name_scope(name):
        source_samples -= tf.reduce_mean(source_samples, 0)
        target_samples -= tf.reduce_mean(target_samples, 0)
        source_samples = tf.nn.l2_normalize(source_samples, 1)
        target_samples = tf.nn.l2_normalize(target_samples, 1)
        source_cov = tf.matmul(tf.transpose(source_samples), source_samples)
        target_cov = tf.matmul(tf.transpose(target_samples), target_samples)
        corr_loss = tf.reduce_mean(
            tf.square(source_cov - target_cov)) * weight

    assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss])
    with tf.control_dependencies([assert_op]):
        tag = 'Correlation Loss'
        barrier = tf.no_op(tag)
    return corr_loss
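# Quick sanity sketch (not from the original source): identical source and
# target representations have identical covariances, so the loss is zero.
import tensorflow as tf

x = tf.random_normal([16, 8])
loss = correlation_loss(x, x, weight=1.0)
with tf.Session() as sess:
    print(sess.run(loss))  # 0.0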
def mmd_loss(source_samples, target_samples, weight, name='mmd_loss'):
    """Adds a similarity loss term, the MMD between two representations.

    This Maximum Mean Discrepancy (MMD) loss is calculated with a number of
    different Gaussian kernels.

    Args:
      source_samples: a tensor of shape [num_samples, num_features].
      target_samples: a tensor of shape [num_samples, num_features].
      weight: the weight of the MMD loss.
      name: optional name scope for summary tags.

    Returns:
      a scalar tensor representing the MMD loss value.
    """
    with tf.name_scope(name):
        sigmas = [
            1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100,
            1e3, 1e4, 1e5, 1e6
        ]
        gaussian_kernel = partial(
            util.gaussian_kernel_matrix, sigmas=tf.constant(sigmas))
        loss_value = maximum_mean_discrepancy(
            source_samples, target_samples, kernel=gaussian_kernel)
        loss_value = tf.maximum(1e-4, loss_value) * weight

    assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value])
    with tf.control_dependencies([assert_op]):
        tag = 'MMD Loss'
        barrier = tf.no_op(tag)
    return loss_value
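# The helpers util.gaussian_kernel_matrix and maximum_mean_discrepancy are not
# shown in this snippet. Below is a minimal sketch of what they might look
# like, under the assumption that the kernel is a sum of RBF kernels over the
# listed sigmas and that the biased V-statistic estimator of MMD^2 is used.
import tensorflow as tf

def gaussian_kernel_matrix(x, y, sigmas):
    """Sum of RBF kernels k(x_i, y_j) over several bandwidths (sketch)."""
    beta = 1.0 / (2.0 * tf.expand_dims(sigmas, 1))          # (num_sigmas, 1)
    # Squared pairwise distances between rows of x and rows of y.
    dist = tf.reduce_sum(tf.square(tf.expand_dims(x, 1) - tf.expand_dims(y, 0)), 2)
    s = tf.matmul(beta, tf.reshape(dist, (1, -1)))          # (num_sigmas, nx*ny)
    return tf.reshape(tf.reduce_sum(tf.exp(-s), 0), tf.shape(dist))

def maximum_mean_discrepancy(x, y, kernel):
    """MMD^2(x, y) = E[k(x, x')] + E[k(y, y')] - 2 E[k(x, y)] (sketch)."""
    cost = tf.reduce_mean(kernel(x, x))
    cost += tf.reduce_mean(kernel(y, y))
    cost -= 2.0 * tf.reduce_mean(kernel(x, y))
    return tf.where(cost > 0, cost, 0.0)  # guard against tiny negative values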
def dann_loss(source_samples, target_samples, weight, name='dann_loss'):
    """Adds the domain adversarial (DANN) loss.

    Args:
      source_samples: a tensor of shape [num_samples, num_features].
      target_samples: a tensor of shape [num_samples, num_features].
      weight: the weight of the loss.
      name: optional name scope for summary tags.

    Returns:
      a scalar tensor representing the domain adversarial loss value.
    """
    with tf.variable_scope(name):
        batch_size = tf.shape(source_samples)[0]
        samples = tf.concat(values=[source_samples, target_samples], axis=0)
        samples = flatten(samples)

        domain_selection_mask = tf.concat(
            values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))], axis=0)

        # Gradient reversal layer: identity on the forward pass, negated
        # gradient on the backward pass.
        grl = gradient_reverse(samples)
        grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))

        grl = fc(grl, 100, True, None, activation=relu, name='fc1')
        logits = fc(grl, 1, True, None, activation=None, name='fc2')

        domain_predictions = tf.sigmoid(logits)
        domain_loss = tf.losses.log_loss(
            domain_selection_mask, domain_predictions, weights=weight)

        domain_accuracy = util.accuracy_tf(domain_selection_mask,
                                           tf.round(domain_predictions))

    assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])
    with tf.control_dependencies([assert_op]):
        tag_loss = 'losses/domain_loss'
        barrier = tf.no_op(tag_loss)
    return domain_loss
def difference_loss(private_samples, shared_samples, weight=1.0, name='difference_loss'):
    """Adds the difference loss between the private and shared representations.

    Args:
      private_samples: a tensor of shape [num_samples, num_features].
      shared_samples: a tensor of shape [num_samples, num_features].
      weight: the weight of the incoherence loss.
      name: the name of the tf summary.
    """
    with tf.name_scope(name):
        private_samples -= tf.reduce_mean(private_samples, 0)
        shared_samples -= tf.reduce_mean(shared_samples, 0)

        private_samples = tf.nn.l2_normalize(private_samples, 1)
        shared_samples = tf.nn.l2_normalize(shared_samples, 1)

        correlation_matrix = tf.matmul(
            private_samples, shared_samples, transpose_a=True)

        cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight
        cost = tf.where(cost > 0, cost, 0, name='value')

    assert_op = tf.Assert(tf.is_finite(cost), [cost])
    with tf.control_dependencies([assert_op]):
        barrier = tf.no_op(name)
    return cost
def _crop(self, image, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes.

    Note that the method doesn't assume we know the input image size but it does
    assume we know the input image rank.

    Args:
      image: an image of shape [height, width, channels].
      offset_height: a scalar tensor indicating the height offset.
      offset_width: a scalar tensor indicating the width offset.
      crop_height: the height of the cropped image.
      crop_width: the width of the cropped image.

    Returns:
      the cropped (and resized) image.

    Raises:
      InvalidArgumentError: if the rank is not 3 or if the image dimensions are
        less than the crop size.
    """
    original_shape = tf.shape(image)
    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ['Rank of image must be equal to 3.'])
    with tf.control_dependencies([rank_assertion]):
        cropped_shape = tf.stack(
            [crop_height, crop_width, original_shape[2]])

    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])
    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

    with tf.control_dependencies([size_assertion]):
        image = tf.slice(image, offsets, cropped_shape)
    return tf.reshape(image, cropped_shape)
def crop_to_fixed_size(img_tensor, annotation_tensor, output_shape):
    """Randomly crops an image/annotation pair to a fixed size.

    The output shape must be smaller than the input shape.

    :param img_tensor: [w, h, depth]
    :param annotation_tensor: [w, h, 1]
    :param output_shape:
    :param mask_out_num:
    :return: (output_shape, output_shape, 3), (output_shape, output_shape, 1)
    """
    original_shape = tf.shape(img_tensor)
    crop_width, crop_height = output_shape[0], output_shape[1]
    image_width, image_height = original_shape[0], original_shape[1]
    img_cropped_shape = tf.stack([output_shape[0], output_shape[1], 3])
    annotate_cropped_shape = tf.stack([output_shape[0], output_shape[1], 1])

    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_width),
            tf.greater_equal(original_shape[1], crop_height)),
        ['Crop size greater than the image size.'])

    max_offset_height = tf.reshape(image_height - crop_height + 1, [])
    max_offset_width = tf.reshape(image_width - crop_width + 1, [])
    offset_height = tf.random_uniform(
        [], maxval=max_offset_height, dtype=tf.int32)
    offset_width = tf.random_uniform(
        [], maxval=max_offset_width, dtype=tf.int32)

    offsets = tf.to_int32(tf.stack([offset_width, offset_height, 0]))
    annotation_tensor = tf.to_int32(annotation_tensor)

    # Use tf.slice instead of crop_to_bounding_box as it accepts tensors to
    # define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.slice(img_tensor, offsets, img_cropped_shape)
        annotate = tf.slice(annotation_tensor, offsets, annotate_cropped_shape)
    return tf.reshape(image, img_cropped_shape), tf.reshape(annotate, annotate_cropped_shape)
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes.

    Note that the method doesn't assume we know the input image size but it does
    assume we know the input image rank.

    Args:
      image: an image of shape [height, width, channels].
      offset_height: a scalar tensor indicating the height offset.
      offset_width: a scalar tensor indicating the width offset.
      crop_height: the height of the cropped image.
      crop_width: the width of the cropped image.

    Returns:
      the cropped (and resized) image.

    Raises:
      InvalidArgumentError: if the rank is not 3 or if the image dimensions are
        less than the crop size.
    """
    original_shape = tf.shape(image)
    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.'])
    # tf.pack is the pre-1.0 name for tf.stack.
    cropped_shape = control_flow_ops.with_dependencies(
        [rank_assertion], tf.pack([crop_height, crop_width, original_shape[2]]))

    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])
    offsets = tf.to_int32(tf.pack([offset_height, offset_width, 0]))

    # Use tf.slice instead of crop_to_bounding_box as it accepts tensors to
    # define the crop size.
    image = control_flow_ops.with_dependencies(
        [size_assertion], tf.slice(image, offsets, cropped_shape))
    return tf.reshape(image, cropped_shape)
def loss(self, x, h, q=None):
    """Calculates the estimated loss of the importance-sampling approximation.

    @Param x (NxD): the target word or batch
    @Param h (NxD): usually the output of the neural network
    @Param q (N): the weight of the target
    """
    # K
    weights = self.get_sample_weights()
    if weights is None:
        raise ValueError("sample weights must be set")
    # The weights are divided by below, so assert that none of them are zero.
    # (In the original the assert condition was inverted and the op was never
    # wired into the graph; tf.Assert only runs via control_dependencies.)
    assert_op = tf.Assert(tf.reduce_all(tf.not_equal(weights, 0.0)), [weights])

    # KxD
    samples = self.get_samples()
    if samples is None:
        raise ValueError("samples must be set")

    with tf.control_dependencies([assert_op]):
        # N
        target_scores = tf.reduce_sum(x * h, 1)
        self.target_exp_ = tf.exp(target_scores)
        # N x K
        samples_scores = tf.matmul(h, samples, transpose_b=True)
        exp_weight = tf.exp(samples_scores) / weights
        # N
        self.Z_ = tf.reduce_sum(tf.check_numerics(exp_weight, "each Z "), 1)

        # The loss of each element in the target
        # N
        element_loss = target_scores - tf.log(q) - tf.log(self.Z_)
        loss = tf.reduce_mean(element_loss)
        return -loss
def lambda_preturn_network(preturns, lambdas):
    # The final lambda must be zero.
    final_lambda = tf.Assert(
        tf.reduce_all(tf.equal(lambdas[:, -1, :], 0.0)), [lambdas[:, -1, :]])

    with tf.control_dependencies([final_lambda]):
        with tf.variable_scope('lambda_preturn'):
            accum_lambda = tf.cumprod(lambdas, axis=1, exclusive=True)
            lambda_bar = (1 - lambdas) * accum_lambda  # This should always sum to 1
            lambda_preturn = tf.reduce_sum(
                lambda_bar * preturns, reduction_indices=1)
            util.activation_summary(lambda_preturn)
            return lambda_preturn
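# A small numeric sketch (not from the original source) of why lambda_bar sums
# to 1 when the final lambda is 0: with lambdas [l0, l1, 0], the weights are
# (1-l0), l0*(1-l1), l0*l1, a telescoping product that always totals 1.
import tensorflow as tf

lambdas = tf.constant([[[0.5], [0.8], [0.0]]])  # batch of 1, 3 steps
accum = tf.cumprod(lambdas, axis=1, exclusive=True)
lambda_bar = (1 - lambdas) * accum
with tf.Session() as sess:
    print(sess.run(tf.reduce_sum(lambda_bar, axis=1)))  # [[1.0]]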
def deserialize(self, state):
    # Deserialize the memory matrix from the previous timestep.
    M0 = tf.slice(
        state,
        [0, 0],
        [-1, self.mem_nrows * self.mem_ncols],
    )
    M0 = tf.reshape(M0, [-1, self.mem_nrows, self.mem_ncols])
    state_idx = self.mem_nrows * self.mem_ncols

    # Deserialize the read weights from the previous timestep.
    read_w0s = []
    for i in xrange(self.n_heads):
        # Number of weights == rows of the memory matrix.
        w0 = tf.slice(state, [0, state_idx], [-1, self.mem_nrows])
        read_w0s.append(w0)
        state_idx += self.mem_nrows

    # Do the same for the write heads.
    write_w0s = []
    for _ in xrange(self.n_heads):
        w0 = tf.slice(state, [0, state_idx], [-1, self.mem_nrows])
        write_w0s.append(w0)
        state_idx += self.mem_nrows

    # Check that the whole state vector was consumed. Note that tf.Assert only
    # runs if it is wired into the graph, hence the control_dependencies block
    # (in the original the assert op was created but never used).
    assert_op = tf.Assert(
        tf.equal(state_idx, tf.shape(state)[1]),
        [tf.shape(state)],
    )
    with tf.control_dependencies([assert_op]):
        M0 = tf.identity(M0)
    return M0, write_w0s, read_w0s
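# For clarity, a hypothetical serialize counterpart (not in the original
# source) showing the state layout that deserialize expects: the flattened
# memory matrix, then the read weights, then the write weights, concatenated
# along axis 1.
def serialize(self, M, write_ws, read_ws):
    parts = [tf.reshape(M, [-1, self.mem_nrows * self.mem_ncols])]
    parts.extend(read_ws)    # n_heads tensors of shape (batch, mem_nrows)
    parts.extend(write_ws)   # n_heads tensors of shape (batch, mem_nrows)
    return tf.concat(parts, axis=1)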