def _mode(self):
s = self.df - self.dimension - 1.
s = math_ops.select(
math_ops.less(s, 0.),
constant_op.constant(float("NaN"), dtype=self.dtype, name="nan"),
s)
if self.cholesky_input_output_matrices:
return math_ops.sqrt(s) * self.scale_operator_pd.sqrt_to_dense()
return s * self.scale_operator_pd.to_dense()
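The select call above is a NaN guard: the Wishart mode (df - dimension - 1) * scale is only defined when df > dimension + 1. A minimal NumPy sketch of the same guard, with np.where standing in for math_ops.select (the helper name and sample values are illustrative, not part of the original):
```python
import numpy as np

def wishart_mode_scalar(df, dimension, scale):
    # Mode of a Wishart distribution is (df - dimension - 1) * scale,
    # defined only when df > dimension + 1; otherwise return NaN.
    s = df - dimension - 1.0
    s = np.where(s < 0.0, np.nan, s)
    return s * scale

print(wishart_mode_scalar(5.0, 2.0, np.eye(2)))  # well-defined: 2 * I
print(wishart_mode_scalar(2.0, 2.0, np.eye(2)))  # undefined: NaN matrix
```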
def _prob_with_sf_and_cdf(self, y):
# There are two options that would be equal if we had infinite precision:
# sf(y - 1) - sf(y)
# cdf(y) - cdf(y - 1)
sf_y = self.survival_function(y)
sf_y_minus_1 = self.survival_function(y - 1)
cdf_y = self.cdf(y)
cdf_y_minus_1 = self.cdf(y - 1)
# sf_prob has greater precision iff we're on the right side of the median.
return math_ops.select(
sf_y < cdf_y, # True iff we're on the right side of the median.
sf_y_minus_1 - sf_y,
cdf_y - cdf_y_minus_1)
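The pmf here is taken as either sf(y - 1) - sf(y) or cdf(y) - cdf(y - 1), whichever side of the median retains more floating-point precision. A small sketch of the same idea with NumPy and SciPy, assuming a Poisson base distribution purely for illustration:
```python
import numpy as np
from scipy import stats

def pmf_via_sf_and_cdf(dist, y):
    sf_y, sf_y_minus_1 = dist.sf(y), dist.sf(y - 1)
    cdf_y, cdf_y_minus_1 = dist.cdf(y), dist.cdf(y - 1)
    # Survival-function differences keep more precision in the right tail,
    # CDF differences in the left tail.
    return np.where(sf_y < cdf_y, sf_y_minus_1 - sf_y, cdf_y - cdf_y_minus_1)

d = stats.poisson(mu=100.0)
print(pmf_via_sf_and_cdf(d, np.array([50, 100, 200])))  # close to d.pmf(...)
```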
def _log_cdf(self, y):
lower_cutoff = self._lower_cutoff
upper_cutoff = self._upper_cutoff
# Recall the promise:
# cdf(y) := P[Y <= y]
# = 1, if y >= upper_cutoff,
# = 0, if y < lower_cutoff,
# = P[X <= y], otherwise.
# P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
# between.
j = math_ops.floor(y)
result_so_far = self.distribution.log_cdf(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if lower_cutoff is not None:
neg_inf = -np.inf * array_ops.ones_like(result_so_far)
result_so_far = math_ops.select(j < lower_cutoff, neg_inf, result_so_far)
if upper_cutoff is not None:
result_so_far = math_ops.select(j >= upper_cutoff,
array_ops.zeros_like(result_so_far),
result_so_far)
return result_so_far
def _cdf(self, y):
lower_cutoff = self._lower_cutoff
upper_cutoff = self._upper_cutoff
# Recall the promise:
# cdf(y) := P[Y <= y]
# = 1, if y >= upper_cutoff,
# = 0, if y < lower_cutoff,
# = P[X <= y], otherwise.
# P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
# between.
j = math_ops.floor(y)
# P[X <= j], used when lower_cutoff < X < upper_cutoff.
result_so_far = self.distribution.cdf(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if lower_cutoff is not None:
result_so_far = math_ops.select(j < lower_cutoff,
array_ops.zeros_like(result_so_far),
result_so_far)
if upper_cutoff is not None:
result_so_far = math_ops.select(j >= upper_cutoff,
array_ops.ones_like(result_so_far),
result_so_far)
return result_so_far
def _log_survival_function(self, y):
lower_cutoff = self._lower_cutoff
upper_cutoff = self._upper_cutoff
# Recall the promise:
# survival_function(y) := P[Y > y]
# = 0, if y >= upper_cutoff,
# = 1, if y < lower_cutoff,
# = P[X > y], otherwise.
# P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
# between.
j = math_ops.ceil(y)
# P[X > j], used when lower_cutoff < X < upper_cutoff.
result_so_far = self.distribution.log_survival_function(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if lower_cutoff is not None:
result_so_far = math_ops.select(j < lower_cutoff,
array_ops.zeros_like(result_so_far),
result_so_far)
if upper_cutoff is not None:
neg_inf = -np.inf * array_ops.ones_like(result_so_far)
result_so_far = math_ops.select(j >= upper_cutoff, neg_inf, result_so_far)
return result_so_far
def _survival_function(self, y):
lower_cutoff = self._lower_cutoff
upper_cutoff = self._upper_cutoff
# Recall the promise:
# survival_function(y) := P[Y > y]
# = 0, if y >= upper_cutoff,
# = 1, if y < lower_cutoff,
# = P[X > y], otherwise.
# P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
# between.
j = math_ops.ceil(y)
# P[X > j], used when lower_cutoff < X < upper_cutoff.
result_so_far = self.distribution.survival_function(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if lower_cutoff is not None:
result_so_far = math_ops.select(j < lower_cutoff,
array_ops.ones_like(result_so_far),
result_so_far)
if upper_cutoff is not None:
result_so_far = math_ops.select(j >= upper_cutoff,
array_ops.zeros_like(result_so_far),
result_so_far)
return result_so_far
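All four cutoff functions above share one pattern: evaluate the base distribution at an integer point, then use select to pin values below lower_cutoff and at or above upper_cutoff to their known limits (0 and 1, or their logs). A hedged NumPy sketch of the CDF case, with an arbitrary normal base distribution and made-up cutoffs:
```python
import numpy as np
from scipy import stats

def quantized_cdf(base, y, lower_cutoff=None, upper_cutoff=None):
    # P[Y <= y] for Y = quantized/clipped version of the base distribution.
    j = np.floor(y)
    result = base.cdf(j)
    if lower_cutoff is not None:
        result = np.where(j < lower_cutoff, np.zeros_like(result), result)
    if upper_cutoff is not None:
        result = np.where(j >= upper_cutoff, np.ones_like(result), result)
    return result

base = stats.norm(loc=0.0, scale=2.0)
print(quantized_cdf(base, np.array([-10.0, 0.3, 10.0]),
                    lower_cutoff=-3, upper_cutoff=3))  # ~[0.0, 0.5, 1.0]
```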
def _mode(self):
mode = (self.alpha - 1.) / self.beta
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
return math_ops.select(
self.alpha >= 1.,
mode,
array_ops.fill(self.batch_shape(), nan, name="nan"))
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones((), self.dtype),
self.alpha,
message="mode not defined for components of alpha <= 1"),
], mode)
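The Gamma mode (alpha - 1) / beta exists only for alpha > 1; with allow_nan_stats the undefined entries are replaced by NaN via select, otherwise an assertion fires. A short NumPy sketch of that behavior (function name and sample values are illustrative):
```python
import numpy as np

def gamma_mode(alpha, beta, allow_nan_stats=True):
    mode = (alpha - 1.0) / beta
    if allow_nan_stats:
        return np.where(alpha >= 1.0, mode, np.full_like(mode, np.nan))
    if np.any(alpha <= 1.0):
        raise ValueError("mode not defined for components of alpha <= 1")
    return mode

print(gamma_mode(np.array([0.5, 2.0, 3.0]), np.array([1.0, 1.0, 2.0])))
# -> [nan, 1.0, 1.0]
```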
def _prob(self, x):
broadcasted_x = x * array_ops.ones(self.batch_shape())
return math_ops.select(
math_ops.is_nan(broadcasted_x),
broadcasted_x,
math_ops.select(
math_ops.logical_or(broadcasted_x < self.a,
broadcasted_x > self.b),
array_ops.zeros_like(broadcasted_x),
(1. / self.range()) * array_ops.ones_like(broadcasted_x)))
def _cdf(self, x):
broadcasted_x = x * array_ops.ones(self.batch_shape())
zeros = array_ops.zeros_like(x + self.a + self.b, dtype=self.dtype)
ones = array_ops.ones_like(x + self.a + self.b, dtype=self.dtype)
result_if_not_big = math_ops.select(
x < self.a, zeros, (broadcasted_x - self.a) / self.range())
return math_ops.select(x >= self.b, ones, result_if_not_big)
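The two Uniform methods use nested selects to build the piecewise density and CDF. A minimal NumPy sketch of the same piecewise logic, with hypothetical bounds a and b:
```python
import numpy as np

def uniform_prob_and_cdf(x, a, b):
    x = np.asarray(x, dtype=float)
    rng = b - a
    # Density: NaN passes through, 0 outside [a, b], 1/(b - a) inside.
    prob = np.where(np.isnan(x), x,
                    np.where((x < a) | (x > b), 0.0, 1.0 / rng))
    # CDF: 0 below a, 1 at or above b, linear in between.
    cdf = np.where(x >= b, 1.0, np.where(x < a, 0.0, (x - a) / rng))
    return prob, cdf

print(uniform_prob_and_cdf([-1.0, 0.5, 2.0], a=0.0, b=1.0))
# prob -> [0.0, 1.0, 0.0], cdf -> [0.0, 0.5, 1.0]
```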
def _num_present(losses, weight, per_batch=False):
"""Computes the number of elements in the loss function induced by `weight`.
A given weight tensor induces different numbers of usable elements in the
`losses` tensor. The `weight` tensor is broadcast across `losses` for all
possible dimensions. For example, if `losses` is a tensor of dimension
[4, 5, 6, 3] and weight is a tensor of size [4, 5], then weight is, in effect,
tiled to match the size of `losses`. Following this effective tile, the total
number of present elements is the number of non-zero weights.
Args:
losses: A tensor of size [batch_size, d1, ... dN].
weight: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
per_batch: Whether to return the number of elements per batch or as a sum
total.
Returns:
The number of present (non-zero) elements in the losses tensor. If
`per_batch` is True, the value is returned as a tensor of size
[batch_size]. Otherwise, a single scalar tensor is returned.
"""
# To ensure that dims of [2, 1] get mapped to [2,].
weight = array_ops.squeeze(weight)
# If the weight is a scalar, it's easy to compute:
if weight.get_shape().ndims == 0:
batch_size = array_ops.reshape(array_ops.slice(array_ops.shape(losses),
[0], [1]), [])
num_per_batch = math_ops.div(math_ops.to_float(array_ops.size(losses)),
math_ops.to_float(batch_size))
num_per_batch = math_ops.select(math_ops.equal(weight, 0),
0.0, num_per_batch)
num_per_batch = math_ops.mul(array_ops.ones(
array_ops.reshape(batch_size, [1])), num_per_batch)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
# First, count the number of nonzero weights:
if weight.get_shape().ndims >= 1:
reduction_indices = list(range(1, weight.get_shape().ndims))
num_nonzero_per_batch = math_ops.reduce_sum(
math_ops.to_float(math_ops.not_equal(weight, 0)),
reduction_indices=reduction_indices)
# Next, determine the number of elements that weight would broadcast to:
broadcast_dims = array_ops.slice(array_ops.shape(losses),
[weight.get_shape().ndims], [-1])
num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))
num_per_batch = math_ops.mul(num_nonzero_per_batch, num_to_broadcast)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
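The helper counts usable loss elements in two ways: a scalar weight contributes losses.size / batch_size per batch entry (or 0 if the weight is 0), while a higher-rank weight contributes its per-batch nonzero count times the number of elements it broadcasts over. A NumPy sketch of both branches, reusing the shapes from the docstring example:
```python
import numpy as np

def num_present(losses, weight, per_batch=False):
    weight = np.squeeze(weight)
    if weight.ndim == 0:  # scalar weight
        batch_size = losses.shape[0]
        per = losses.size / float(batch_size) if weight != 0 else 0.0
        num_per_batch = np.full(batch_size, per)
    else:  # per-example weight, broadcast over the trailing dims of `losses`
        nonzero_per_batch = np.count_nonzero(
            weight.reshape(weight.shape[0], -1), axis=1)
        num_to_broadcast = np.prod(losses.shape[weight.ndim:], dtype=float)
        num_per_batch = nonzero_per_batch * num_to_broadcast
    return num_per_batch if per_batch else num_per_batch.sum()

losses = np.ones((4, 5, 6, 3))
print(num_present(losses, np.ones((4, 5))))  # 4 batches * 5 nonzero * 18 = 360.0
print(num_present(losses, 0.0))              # scalar zero weight -> 0.0
```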
def _calculate_acceptance_probabilities(init_probs, target_probs):
  """Calculate the per-class acceptance rates.
  Args:
    init_probs: The class probabilities of the data.
    target_probs: The desired class proportion in minibatches.
  Returns:
    A list of the per-class acceptance probabilities.
  This method is based on solving the following analysis:
  Let F be the probability of a rejection (on any example).
  Let p_i be the proportion of examples in the data in class i (init_probs).
  Let a_i be the rate at which the rejection sampler should *accept* class i.
  Let t_i be the target proportion in the minibatches for class i (target_probs).
    F = sum_i(p_i (1 - a_i))
      = 1 - sum_i(p_i a_i)            using sum_i(p_i) = 1
  An example with class `i` will be accepted if `k` rejections occur, then an
  example with class `i` is seen by the rejector, and it is accepted. This can
  be written as follows:
    t_i = sum_k=0^inf(F^k p_i a_i)
        = p_i a_i / (1 - F)           using the geometric series identity, since 0 <= F < 1
        = p_i a_i / sum_j(p_j a_j)    using F from above
  Note that the following constraints hold:
    0 <= p_i <= 1, sum_i(p_i) = 1
    0 <= a_i <= 1
    0 <= t_i <= 1, sum_i(t_i) = 1
  A solution for a_i in terms of the other variables is the following:
    a_i = (t_i / p_i) / max_i[t_i / p_i]
  """
  # Make a list of t_i / p_i.
  ratio_l = target_probs / init_probs
  # Replace NaNs with 0s.
  ratio_l = math_ops.select(math_ops.is_nan(ratio_l),
                            array_ops.zeros_like(ratio_l),
                            ratio_l)
  # Calculate the list of acceptance probabilities.
  max_ratio = math_ops.reduce_max(ratio_l)
  return ratio_l / max_ratio
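A quick worked example of the closed-form solution a_i = (t_i / p_i) / max_i[t_i / p_i], with made-up class proportions:
```python
import numpy as np

init_probs = np.array([0.7, 0.2, 0.1])       # p_i: class proportions in the data
target_probs = np.array([1/3., 1/3., 1/3.])  # t_i: desired minibatch proportions
ratio_l = target_probs / init_probs          # t_i / p_i
ratio_l = np.where(np.isnan(ratio_l), np.zeros_like(ratio_l), ratio_l)
accept_probs = ratio_l / ratio_l.max()       # a_i = (t_i / p_i) / max_i[t_i / p_i]
print(accept_probs)  # ~[0.143, 0.5, 1.0]: the rarest class is always accepted
```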
def streaming_mean_relative_error(predictions, labels, normalizer, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `streaming_mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A tensor representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels = metric_ops_util.remove_squeezable_dimensions(
predictions, labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions, normalizer = metric_ops_util.remove_squeezable_dimensions(
predictions, normalizer)
predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())
relative_errors = math_ops.select(
math_ops.equal(normalizer, 0.0),
array_ops.zeros_like(labels),
math_ops.div(math_ops.abs(labels - predictions), normalizer))
return streaming_mean(relative_errors, weights, metrics_collections,
updates_collections, name or 'mean_relative_error')
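The heart of the metric is the guarded division: entries whose normalizer is zero contribute a relative error of zero rather than inf or NaN. A non-streaming NumPy sketch of that computation, assuming predictions, labels, and normalizer share a shape:
```python
import numpy as np

def mean_relative_error(predictions, labels, normalizer, weights=None):
    abs_errors = np.abs(labels - predictions)
    # Guarded division: zero-normalizer entries yield a relative error of 0.
    relative_errors = np.divide(abs_errors, normalizer,
                                out=np.zeros_like(abs_errors),
                                where=(normalizer != 0.0))
    if weights is None:
        weights = np.ones_like(relative_errors)
    return (weights * relative_errors).sum() / weights.sum()

labels = np.array([2.0, 4.0, 0.0])
predictions = np.array([1.0, 5.0, 3.0])
print(mean_relative_error(predictions, labels, normalizer=labels))
# relative errors are [0.5, 0.25, 0.0] (last one guarded), mean = 0.25
```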
def undo_make_batch_of_event_sample_matrices(
self, x, sample_shape, name="undo_make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from B_+E_+S_ to S+B+E.
Where:
- `B_ = B if B else [1]`,
- `E_ = E if E else [1]`,
- `S_ = [tf.reduce_prod(S)]`.
This function "reverses" `make_batch_of_event_sample_matrices`.
Args:
x: `Tensor` of shape `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
name: `String`. The name to give this op.
Returns:
x: `Tensor`. Input transposed/reshaped to `S+B+E`.
"""
with self._name_scope(name, values=[x, sample_shape]):
x = ops.convert_to_tensor(x, name="x")
sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape")
x = distribution_util.rotate_transpose(x, shift=1)
if self._is_all_constant_helper(self.batch_ndims, self.event_ndims):
if self._batch_ndims_is_0 or self._event_ndims_is_0:
b = ((min(-2, -1 - self._event_ndims_static),)
if self._batch_ndims_is_0 else ())
e = (-1,) if self._event_ndims_is_0 else ()
x = array_ops.squeeze(x, squeeze_dims=b + e)
_, batch_shape, event_shape = self.get_shape(x)
else:
s = (x.get_shape().as_list() if x.get_shape().is_fully_defined()
else array_ops.shape(x))
batch_shape = array_ops.slice(s, (1,), (self.batch_ndims,))
# Since sample_dims=1 and is left-most, we add 1 to the number of
# batch_ndims to get the event start dim.
event_start = math_ops.select(
self._batch_ndims_is_0, 2, 1 + self.batch_ndims)
event_shape = array_ops.slice(s, (event_start,), (self.event_ndims,))
new_shape = array_ops.concat(0, (sample_shape, batch_shape, event_shape))
x = array_ops.reshape(x, shape=new_shape)
return x
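The method takes a tensor laid out as B_ + E_ + [prod(S)], rotates the flattened sample axis to the front, and unflattens it into sample_shape. A NumPy sketch of just that shape bookkeeping (the helper name and shapes are illustrative, and the scalar-batch/scalar-event squeezing is omitted):
```python
import numpy as np

def undo_batch_of_event_sample_matrices(x, sample_shape, batch_ndims, event_ndims):
    # x has shape B_ + E_ + [prod(S)]; move the flattened sample axis to the front...
    x = np.moveaxis(x, -1, 0)
    batch_shape = x.shape[1:1 + batch_ndims]
    event_shape = x.shape[1 + batch_ndims:1 + batch_ndims + event_ndims]
    # ...then unflatten it into the full sample shape S.
    return x.reshape(tuple(sample_shape) + batch_shape + event_shape)

x = np.zeros((4, 3, 10))  # B=[4], E=[3], prod(S)=10
y = undo_batch_of_event_sample_matrices(x, sample_shape=(2, 5),
                                         batch_ndims=1, event_ndims=1)
print(y.shape)            # (2, 5, 4, 3), i.e. S + B + E
```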
def pick_vector(cond,
true_vector,
false_vector,
name="pick_vector"):
"""Picks possibly different length row `Tensor`s based on condition.
Value `Tensor`s should have exactly one dimension.
If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
`false_vector` is immediately returned. I.e., no graph nodes are created and
no validation happens.
Args:
cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
true_vector: `Tensor` of one dimension. Returned when cond is `True`.
false_vector: `Tensor` of one dimension. Returned when cond is `False`.
name: `String`. The name to give this op.
Example:
  ```python
  pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18))
  # result is tensor: [10, 11].
  pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18))
  # result is tensor: [15, 16, 17].
  ```
  Returns:
    true_or_false_vector: `Tensor`.
  Raises:
    TypeError: if `cond.dtype != tf.bool`
    TypeError: if `cond` is not a constant and
      `true_vector.dtype != false_vector.dtype`.
  """
with ops.op_scope((cond, true_vector, false_vector), name):
cond = ops.convert_to_tensor(cond, name="cond")
if cond.dtype != dtypes.bool:
raise TypeError("%s.dtype=%s which is not %s" %
(cond.name, cond.dtype, dtypes.bool))
cond_value_static = tensor_util.constant_value(cond)
if cond_value_static is not None:
return true_vector if cond_value_static else false_vector
true_vector = ops.convert_to_tensor(true_vector, name="true_vector")
false_vector = ops.convert_to_tensor(false_vector, name="false_vector")
if true_vector.dtype != false_vector.dtype:
raise TypeError(
"%s.dtype=%s does not match %s.dtype=%s"
% (true_vector.name, true_vector.dtype,
false_vector.name, false_vector.dtype))
n = array_ops.shape(true_vector)[0]
return array_ops.slice(array_ops.concat(0, (true_vector, false_vector)),
[math_ops.select(cond, 0, n)],
[math_ops.select(cond, n, -1)])
def _process_scale(self, scale, event_ndims):
"""Helper to __init__ which gets scale in batch-ready form.
This function expands dimensions of `scale` according to the following
table:
                    event_ndims
    scale.ndims     0               1
              0     [1]+S+[1,1]     "silent error"
              1     [ ]+S+[1,1]     "silent error"
              2     [ ]+S+[1,1]     [1]+S+[ ]
              3     [ ]+S+[1,1]     [ ]+S+[ ]
            ...     (same)          (same)
The idea is that we want to convert `scale` into something which can always
work for, say, the left-hand argument of `batch_matmul`.
Args:
scale: `Tensor`.
event_ndims: `Tensor` (0D, `int32`).
Returns:
scale: `Tensor` with dims expanded according to [above] table.
batch_ndims: `Tensor` (0D, `int32`). The ndims of the `batch` portion.
"""
ndims = array_ops.rank(scale)
left = math_ops.select(
math_ops.reduce_any([
math_ops.reduce_all([
math_ops.equal(ndims, 0),
math_ops.equal(event_ndims, 0)
]),
math_ops.reduce_all([
math_ops.equal(ndims, 2),
math_ops.equal(event_ndims, 1)
])]), 1, 0)
right = math_ops.select(math_ops.equal(event_ndims, 0), 2, 0)
pad = array_ops.concat(0, (
array_ops.ones([left], dtype=dtypes.int32),
array_ops.shape(scale),
array_ops.ones([right], dtype=dtypes.int32)))
scale = array_ops.reshape(scale, pad)
batch_ndims = ndims - 2 + right
return scale, batch_ndims
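The left/right values computed above only decide how many size-1 dimensions to prepend and append so that scale always ends up as a (batch of) matrices; the padded rank then determines batch_ndims. A NumPy sketch of that shape logic for the cases in the table, with plain Python/NumPy standing in for the TensorFlow ops:
```python
import numpy as np

def process_scale_shape(scale, event_ndims):
    ndims = scale.ndim
    # Prepend one dim exactly when (ndims, event_ndims) is (0, 0) or (2, 1).
    left = 1 if (ndims, event_ndims) in [(0, 0), (2, 1)] else 0
    # Append two dims whenever the event is scalar (event_ndims == 0).
    right = 2 if event_ndims == 0 else 0
    padded = (1,) * left + scale.shape + (1,) * right
    batch_ndims = ndims - 2 + right
    return np.reshape(scale, padded), batch_ndims

scale = np.array(3.0)  # scalar scale with event_ndims=0: the [1]+S+[1,1] row
s, b = process_scale_shape(scale, event_ndims=0)
print(s.shape, b)      # (1, 1, 1) 0
```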
def _num_present(losses, weights, per_batch=False):
"""Computes the number of elements in the loss function induced by `weights`.
A given weights tensor induces different numbers of usable elements in the
`losses` tensor. The `weights` tensor is broadcast across `losses` for all
possible dimensions. For example, if `losses` is a tensor of dimension
[4, 5, 6, 3] and `weights` is a tensor of size [4, 5], then `weights` is, in
effect, tiled to match the size of `losses`. Following this effective tile,
the total number of present elements is the number of non-zero weights.
Args:
losses: A tensor of size [batch_size, d1, ... dN].
weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
per_batch: Whether to return the number of elements per batch or as a sum
total.
Returns:
The number of present (non-zero) elements in the losses tensor. If
`per_batch` is True, the value is returned as a tensor of size
[batch_size]. Otherwise, a single scalar tensor is returned.
"""
# If weights is a scalar, it's easy to compute:
if weights.get_shape().ndims == 0:
batch_size = array_ops.reshape(array_ops.slice(array_ops.shape(losses),
[0], [1]), [])
num_per_batch = math_ops.div(math_ops.to_float(array_ops.size(losses)),
math_ops.to_float(batch_size))
num_per_batch = math_ops.select(math_ops.equal(weights, 0),
0.0, num_per_batch)
num_per_batch = math_ops.mul(array_ops.ones(
array_ops.reshape(batch_size, [1])), num_per_batch)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
# First, count the number of nonzero weights:
if weights.get_shape().ndims >= 1:
reduction_indices = list(range(1, weights.get_shape().ndims))
num_nonzero_per_batch = math_ops.reduce_sum(
math_ops.to_float(math_ops.not_equal(weights, 0)),
reduction_indices=reduction_indices)
# Next, determine the number of elements that weights would broadcast to:
broadcast_dims = array_ops.slice(array_ops.shape(losses),
[weights.get_shape().ndims], [-1])
num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))
num_per_batch = math_ops.mul(num_nonzero_per_batch, num_to_broadcast)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
def pick_vector(cond,
true_vector,
false_vector,
name="pick_vector"):
"""Picks possibly different length row `Tensor`s based on condition.
Value `Tensor`s should have exactly one dimension.
If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
`false_vector` is immediately returned. I.e., no graph nodes are created and
no validation happens.
Args:
cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
true_vector: `Tensor` of one dimension. Returned when cond is `True`.
false_vector: `Tensor` of one dimension. Returned when cond is `False`.
name: `String`. The name to give this op.
Example:
  ```python
  pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18))
  # result is tensor: [10, 11].
  pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18))
  # result is tensor: [15, 16, 17].
  ```
  Returns:
    true_or_false_vector: `Tensor`.
  Raises:
    TypeError: if `cond.dtype != tf.bool`
    TypeError: if `cond` is not a constant and
      `true_vector.dtype != false_vector.dtype`.
  """
with ops.name_scope(name, values=(cond, true_vector, false_vector)):
cond = ops.convert_to_tensor(cond, name="cond")
if cond.dtype != dtypes.bool:
raise TypeError("%s.dtype=%s which is not %s" %
(cond.name, cond.dtype, dtypes.bool))
cond_value_static = tensor_util.constant_value(cond)
if cond_value_static is not None:
return true_vector if cond_value_static else false_vector
true_vector = ops.convert_to_tensor(true_vector, name="true_vector")
false_vector = ops.convert_to_tensor(false_vector, name="false_vector")
if true_vector.dtype != false_vector.dtype:
raise TypeError(
"%s.dtype=%s does not match %s.dtype=%s"
% (true_vector.name, true_vector.dtype,
false_vector.name, false_vector.dtype))
n = array_ops.shape(true_vector)[0]
return array_ops.slice(array_ops.concat(0, (true_vector, false_vector)),
[math_ops.select(cond, 0, n)],
[math_ops.select(cond, n, -1)])
def _process_scale(self, scale, event_ndims):
"""Helper to __init__ which gets scale in batch-ready form.
This function expands dimensions of `scale` according to the following
table:
                    event_ndims
    scale.ndims     0               1
              0     [1]+S+[1,1]     "silent error"
              1     [ ]+S+[1,1]     "silent error"
              2     [ ]+S+[1,1]     [1]+S+[ ]
              3     [ ]+S+[1,1]     [ ]+S+[ ]
            ...     (same)          (same)
The idea is that we want to convert `scale` into something which can always
work for, say, the left-hand argument of `batch_matmul`.
Args:
scale: `Tensor`.
event_ndims: `Tensor` (0D, `int32`).
Returns:
scale: `Tensor` with dims expanded according to [above] table.
batch_ndims: `Tensor` (0D, `int32`). The ndims of the `batch` portion.
"""
ndims = array_ops.rank(scale)
left = math_ops.select(
math_ops.reduce_any([
math_ops.reduce_all([
math_ops.equal(ndims, 0),
math_ops.equal(event_ndims, 0)
]),
math_ops.reduce_all([
math_ops.equal(ndims, 2),
math_ops.equal(event_ndims, 1)
])]), 1, 0)
right = math_ops.select(math_ops.equal(event_ndims, 0), 2, 0)
pad = array_ops.concat(0, (
array_ops.ones([left], dtype=dtypes.int32),
array_ops.shape(scale),
array_ops.ones([right], dtype=dtypes.int32)))
scale = array_ops.reshape(scale, pad)
batch_ndims = ndims - 2 + right
# For safety, explicitly zero-out the upper triangular part.
scale = array_ops.matrix_band_part(scale, -1, 0)
if self.validate_args:
# matrix_band_part will fail if scale is not at least rank 2.
shape = array_ops.shape(scale)
assert_square = check_ops.assert_equal(
shape[-2], shape[-1],
message="Input must be a (batch of) square matrix.")
# Assuming lower-triangular means we only need to check diag != 0.
diag = array_ops.matrix_diag_part(scale)
is_non_singular = math_ops.logical_not(
math_ops.reduce_any(
math_ops.equal(diag, ops.convert_to_tensor(0, dtype=diag.dtype))))
assert_non_singular = control_flow_ops.Assert(
is_non_singular, ["Singular matrix encountered", diag])
scale = control_flow_ops.with_dependencies(
[assert_square, assert_non_singular], scale)
return scale, batch_ndims