# The snippets below target the TensorFlow 1.x API (tf.summary, reduction_indices,
# keep_dims); common imports assumed throughout:
import numpy as np
import tensorflow as tf

def variable_summaries(var, name, collections=None):
    """Attaches summaries to a Tensor for TensorBoard visualization.

    Args:
    - var: Tensor to log summaries for.
    - name: Variable name.
    - collections: List of collections to save the summaries to.
    """
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean, collections)
        num_params = tf.reduce_prod(tf.shape(var))
        tf.summary.scalar('num_params', num_params, collections)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev, collections)
        tf.summary.scalar('max', tf.reduce_max(var), collections)
        tf.summary.scalar('min', tf.reduce_min(var), collections)
        tf.summary.histogram('histogram', var, collections)
        tf.summary.scalar('sparsity', tf.nn.zero_fraction(var), collections)
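
# Usage sketch (hedged): the variable name, shapes, and the writer below are
# illustrative, not from the original project.
weights = tf.Variable(tf.truncated_normal([784, 256], stddev=0.1), name='weights')
variable_summaries(weights, 'layer1_weights')
merged = tf.summary.merge_all()
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     writer = tf.summary.FileWriter('/tmp/logs', sess.graph)
#     writer.add_summary(sess.run(merged), global_step=0)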
def extract_patches_fn(image: tf.Tensor, patch_shape: list, offsets) -> tf.Tensor:
    """Extracts half-overlapping patches from an image.

    :param image: tf.Tensor of shape [h, w, c]
    :param patch_shape: [h, w] size of the patches
    :param offsets: tuple of two floats between 0 and 1, the fraction of half
        a patch by which the extraction grid is shifted (vertical, horizontal)
    :return: patches [batch_patches, h, w, c]
    """
    with tf.name_scope('patch_extraction'):
        h, w = patch_shape
        c = image.get_shape()[-1]
        offset_h = tf.cast(tf.round(offsets[0] * h // 2), dtype=tf.int32)
        offset_w = tf.cast(tf.round(offsets[1] * w // 2), dtype=tf.int32)
        offset_img = image[offset_h:, offset_w:, :]
        offset_img = offset_img[None, :, :, :]
        patches = tf.extract_image_patches(offset_img, ksizes=[1, h, w, 1], strides=[1, h // 2, w // 2, 1],
                                           rates=[1, 1, 1, 1], padding='VALID')
        patches_shape = tf.shape(patches)
        return tf.reshape(patches, [tf.reduce_prod(patches_shape[0:3]), h, w, int(c)])  # [batch_patches, h, w, c]
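
# A minimal usage sketch, assuming a 3-channel image; shapes are illustrative.
image = tf.placeholder(tf.float32, shape=[256, 256, 3])
patches = extract_patches_fn(image, patch_shape=[32, 32], offsets=(0., 0.))
# With zero offsets the grid starts at the origin and patches overlap by half
# a patch (stride h // 2 = 16), giving `patches` shape [num_patches, 32, 32, 3].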
def _max_pool_grad_grad(dy, x, y, ksize, strides, padding, argmax=None):
    """Gradients of MaxPoolGrad."""
    if argmax is None:
        _, argmax = tf.nn.max_pool_with_argmax(x, ksize, strides, padding)
    grad = dy
    grad_flat = tf.reshape(grad, [-1])
    argmax_flat = tf.reshape(argmax, [-1])
    # `argmax` indexes within each example; add a per-example offset of
    # (batch index) * (elements per example) so it indexes into the flat batch.
    x_shape = tf.cast(tf.shape(x), argmax.dtype)
    batch_dim = tf.reshape(
        tf.range(x_shape[0], dtype=argmax.dtype), [-1, 1, 1, 1])
    nelem = tf.reduce_prod(x_shape[1:])
    batch_dim *= nelem
    # Broadcast the offsets to the output shape before flattening.
    y_zero = tf.zeros_like(y, dtype=argmax.dtype)
    batch_dim += y_zero
    batch_dim = tf.reshape(batch_dim, [-1])
    argmax_flat += batch_dim
    grad_input = tf.gather(grad_flat, argmax_flat)
    grad_input = tf.reshape(grad_input, tf.shape(y))
    return grad_input
def ternary_decoder(encoded_data, scaler, shape):
    """Decodes ternary-encoded signs (four base-4 digits per byte) back to floats."""
    a = tf.cast(encoded_data, tf.int32)
    a_split1 = tf.mod(a, 4)
    a_split2 = tf.to_int32(tf.mod(a / 4, 4))
    a_split3 = tf.to_int32(tf.mod(a / 16, 4))
    a_split4 = tf.to_int32(tf.mod(a / 64, 4))
    a = tf.concat([a_split1, a_split2, a_split3, a_split4], 0)
    real_size = tf.reduce_prod(shape)
    a = tf.to_float(a)
    a = tf.gather(a, tf.range(0, real_size))  # drop padding digits
    a = tf.reshape(a, shape)
    a = tf.subtract(a, 1)                     # digits {0, 1, 2} -> values {-1, 0, 1}
    decoded = a * scaler
    return decoded
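
# Worked example (hedged): the matching encoder is assumed to pack four base-4
# digits {0, 1, 2} per byte, least-significant digit first.
encoded = tf.constant([100], dtype=tf.uint8)  # 100 = 0 + 1*4 + 2*16 + 1*64
decoded = ternary_decoder(encoded, scaler=0.5, shape=[4])
# with tf.Session() as sess:
#     print(sess.run(decoded))  # -> [-0.5, 0.0, 0.5, 0.0]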
def f_inter_box(top_left_a, bot_right_a, top_left_b, bot_right_b):
    """Computes intersection area with boxes.

    Args:
        top_left_a: [B, T, 2] or [B, 2]
        bot_right_a: [B, T, 2] or [B, 2]
        top_left_b: [B, T, 2] or [B, 2]
        bot_right_b: [B, T, 2] or [B, 2]

    Returns:
        area: [B, T]
    """
    top_left_max = tf.maximum(top_left_a, top_left_b)
    bot_right_min = tf.minimum(bot_right_a, bot_right_b)
    ndims = tf.shape(tf.shape(top_left_a))
    # Check if the resulting box is valid.
    overlap = tf.to_float(top_left_max < bot_right_min)
    overlap = tf.reduce_prod(overlap, ndims - 1)
    area = tf.reduce_prod(bot_right_min - top_left_max, ndims - 1)
    area = overlap * tf.abs(area)
    return area
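
# Worked example with hypothetical boxes: A = [0,0]..[4,4] and B = [2,2]..[6,6]
# intersect on [2,2]..[4,4], so the area is 2 * 2 = 4.
tl_a = tf.constant([[0., 0.]]); br_a = tf.constant([[4., 4.]])
tl_b = tf.constant([[2., 2.]]); br_b = tf.constant([[6., 6.]])
inter = f_inter_box(tl_a, br_a, tl_b, br_b)  # -> [4.]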
def f_iou_box_old(top_left_a, bot_right_a, top_left_b, bot_right_b):
    """Computes IoU of boxes.

    Args:
        top_left_a: [B, T, 2] or [B, 2]
        bot_right_a: [B, T, 2] or [B, 2]
        top_left_b: [B, T, 2] or [B, 2]
        bot_right_b: [B, T, 2] or [B, 2]

    Returns:
        iou: [B, T]
    """
    inter_area = f_inter_box(top_left_a, bot_right_a, top_left_b, bot_right_b)
    inter_area = tf.maximum(inter_area, 1e-6)
    ndims = tf.shape(tf.shape(top_left_a))
    # area_a = tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    # area_b = tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    check_a = tf.reduce_prod(tf.to_float(top_left_a < bot_right_a), ndims - 1)
    area_a = check_a * tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    check_b = tf.reduce_prod(tf.to_float(top_left_b < bot_right_b), ndims - 1)
    area_b = check_b * tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    union_area = (area_a + area_b - inter_area + 1e-5)
    union_area = tf.maximum(union_area, 1e-5)
    iou = inter_area / union_area
    iou = tf.maximum(iou, 1e-5)
    iou = tf.minimum(iou, 1.0)
    return iou
def get_filled_box_idx(idx, top_left, bot_right):
    """Fills a box given top left and bottom right coordinates.

    Args:
        idx: [B, T, H, W, 2] or [B, H, W, 2] or [H, W, 2]
        top_left: [B, T, 2] or [B, 2] or [2]
        bot_right: [B, T, 2] or [B, 2] or [2]
    """
    ss = tf.shape(idx)
    ndims = tf.shape(ss)
    batch = tf.slice(ss, [0], ndims - 3)
    coord_shape = tf.concat(0, [batch, tf.constant([1, 1, 2])])
    top_left = tf.reshape(top_left, coord_shape)
    bot_right = tf.reshape(bot_right, coord_shape)
    lower = tf.reduce_prod(tf.to_float(idx >= top_left), ndims - 1)
    upper = tf.reduce_prod(tf.to_float(idx <= bot_right), ndims - 1)
    box = lower * upper
    return box
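
# A hedged sketch of building the `idx` grid for the [H, W, 2] case, assuming
# the last axis holds (y, x) pixel coordinates: rasterize one box into a mask.
H, W = 8, 8
ys, xs = tf.meshgrid(tf.range(H), tf.range(W), indexing='ij')
idx = tf.to_float(tf.stack([ys, xs], axis=-1))  # [H, W, 2]
mask = get_filled_box_idx(idx, tf.constant([2., 2.]), tf.constant([5., 5.]))
# `mask` is [H, W] with ones inside [2, 2]..[5, 5] and zeros elsewhere.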
# Source: tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def prod(x, axis=None, keepdims=False):
    """Multiplies the values in a tensor, alongside the specified axis.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to compute the product.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`,
            the reduced dimension is retained with length 1.

    # Returns
        A tensor with the product of elements of `x`.
    """
    axis = _normalize_axis(axis, ndim(x))
    return tf.reduce_prod(x, reduction_indices=axis, keep_dims=keepdims)
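
# Usage sketch (assumes the backend helpers `_normalize_axis` and `ndim`
# from this file are in scope):
x = tf.constant([[1., 2.], [3., 4.]])
p = prod(x, axis=1)                      # -> [2., 12.]
p_keep = prod(x, axis=1, keepdims=True)  # -> [[2.], [12.]]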
def sparse_filtering_loss(_, y_pred):
    '''Defines the sparse filtering loss function.

    Args:
        y_true (tensor): The ground truth tensor (not used, since this is an
            unsupervised learning algorithm).
        y_pred (tensor): Tensor representing the feature vector at a
            particular layer.

    Returns:
        scalar tensor: The sparse filtering loss.
    '''
    y = tf.reshape(y_pred, tf.stack([-1, tf.reduce_prod(y_pred.shape[1:])]))
    l2_normed = tf.nn.l2_normalize(y, dim=1)
    l1_norm = tf.norm(l2_normed, ord=1, axis=1)
    return tf.reduce_sum(l1_norm)
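
# Usage sketch (hedged): since the loss ignores its first argument, dummy
# targets of any matching batch size work. `model` and `x_train` are hypothetical.
# model.compile(optimizer='adam', loss=sparse_filtering_loss)
# model.fit(x_train, np.zeros((x_train.shape[0], 1)), epochs=10)
features = tf.random_normal([32, 64])
loss = sparse_filtering_loss(None, features)  # scalar sparse filtering loss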
def __init__(self, input_, outdim=2, debug=False):
    assert outdim >= 1
    self._outdim = outdim
    input_shape = tuple(input_.get_shape().as_list())
    to_flatten = input_shape[self._outdim - 1:]
    if any(s is None for s in to_flatten):
        flattened = None
    else:
        flattened = int(np.prod(to_flatten))
    self._output_shape = input_shape[1:self._outdim - 1] + (flattened,)
    if debug:
        util.header('Flatten(new_shape=%s)' % str(self._output_shape))
    pre_shape = tf.shape(input_)[:self._outdim - 1]
    to_flatten = tf.reduce_prod(tf.shape(input_)[self._outdim - 1:])
    self._output = tf.reshape(input_, tf.concat(0, [pre_shape, tf.pack([to_flatten])]))
def _usage_after_read(self, prev_usage, free_gate, read_weights):
    """Calculates the new usage after reading and freeing from memory.

    Args:
        prev_usage: tensor of shape `[batch_size, memory_size]`.
        free_gate: tensor of shape `[batch_size, num_reads]` with entries in the
            range [0, 1] indicating the amount that locations read from can be
            freed.
        read_weights: tensor of shape `[batch_size, num_reads, memory_size]`.

    Returns:
        New usage, a tensor of shape `[batch_size, memory_size]`.
    """
    with tf.name_scope('usage_after_read'):
        free_gate = tf.expand_dims(free_gate, -1)
        free_read_weights = free_gate * read_weights
        phi = tf.reduce_prod(1 - free_read_weights, [1], name='phi')
        return prev_usage * phi
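
# Worked numbers (hypothetical, B=1, R=1, M=3): a slot read with weight 1.0 and
# free_gate 1.0 gets phi = 0 and is fully freed; untouched slots keep phi = 1.
free_gate = tf.constant([[1.0]])                 # [B, R]
read_weights = tf.constant([[[1.0, 0.0, 0.0]]])  # [B, R, M]
phi = tf.reduce_prod(1 - tf.expand_dims(free_gate, -1) * read_weights, [1])
# phi == [[0., 1., 1.]]; new usage = prev_usage * phi frees slot 0.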
def gather_indices_2d(x, block_shape, block_stride):
    """Getting gather indices."""
    # Make an identity-matrix kernel.
    kernel = tf.eye(block_shape[0] * block_shape[1])
    kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1])
    # Make indices [1, h, w, 1] to apply convs.
    x_shape = common_layers.shape_list(x)
    indices = tf.range(x_shape[2] * x_shape[3])
    indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1])
    indices = tf.nn.conv2d(
        tf.cast(indices, tf.float32),
        kernel,
        strides=[1, block_stride[0], block_stride[1], 1],
        padding="VALID")
    # Make indices [num_blocks, dim] to gather.
    dims = common_layers.shape_list(indices)[:3]
    if all([isinstance(dim, int) for dim in dims]):
        num_blocks = functools.reduce(operator.mul, dims, 1)
    else:
        num_blocks = tf.reduce_prod(dims)
    indices = tf.reshape(indices, [num_blocks, -1])
    return tf.cast(indices, tf.int32)
def hypervolume(self, reference):
    """Autoflow method to calculate the hypervolume indicator.

    The hypervolume indicator is the volume of the dominated region.

    :param reference: reference point to use.
        Should be equal to or bigger than the anti-ideal point of the Pareto set.
        For comparing results across runs, the same reference point must be used.
    :return: hypervolume indicator (the higher the better)
    """
    min_pf = tf.reduce_min(self.front, 0, keep_dims=True)
    R = tf.expand_dims(reference, 0)
    pseudo_pf = tf.concat((min_pf, self.front, R), 0)
    D = tf.shape(pseudo_pf)[1]
    N = tf.shape(self.bounds.ub)[0]
    idx = tf.tile(tf.expand_dims(tf.range(D), -1), [1, N])
    ub_idx = tf.reshape(tf.stack([tf.transpose(self.bounds.ub), idx], axis=2), [N * D, 2])
    lb_idx = tf.reshape(tf.stack([tf.transpose(self.bounds.lb), idx], axis=2), [N * D, 2])
    ub = tf.reshape(tf.gather_nd(pseudo_pf, ub_idx), [D, N])
    lb = tf.reshape(tf.gather_nd(pseudo_pf, lb_idx), [D, N])
    hv = tf.reduce_sum(tf.reduce_prod(ub - lb, 0))
    return tf.reduce_prod(R - min_pf) - hv
def get_marginal_likelihood(yt, mean_yt, xt, s, alpha, beta, eta_mu, eta_sigma, eps, sigma_px, epsilon=1e-8):
    yt_expand = tf.expand_dims(yt, 0)
    mean_yt = tf.reshape(mean_yt, [s, FLAGS.batch_size, 784])
    xt = tf.reshape(xt, [1, s, FLAGS.batch_size, FLAGS.hidden_size])
    # p_ygivenx = tf.reduce_prod(tf.pow(mean_yt, yt_expand) * tf.pow(1 - mean_yt, 1 - yt_expand), axis=2)
    v = alpha / (alpha + beta)
    pi = tf.concat(0, [v, [1.0]]) * tf.concat(0, [[1.0], tf.cumprod(1 - v)])
    p_x = gaussian_mixture_pdf(eta_mu, tf.square(eta_sigma) + tf.square(sigma_px), xt, pi)
    log_p_y_s = tf.reduce_sum(yt_expand * tf.log(mean_yt + epsilon)
                              + (1.0 - yt_expand) * tf.log(1.0 - mean_yt + epsilon), 2) \
                + tf.log(p_x) \
                + 0.5 * tf.reduce_sum(tf.square(eps), 2)
    log_p_y_s_max = tf.reduce_max(log_p_y_s, reduction_indices=0)
    log_p_y = tf.log(tf.reduce_mean(tf.exp(log_p_y_s - log_p_y_s_max), 0)) + log_p_y_s_max
    return tf.reduce_mean(log_p_y)
# Taken from: https://github.com/tensorflow/tensorflow/issues/6322
def gauss_prob(mu, logstd, x):
    std = tf.exp(logstd)
    var = tf.square(std)
    # Per-dimension Gaussian density; note the squared residual in the exponent.
    gp = tf.exp(-tf.square(x - mu) / (2 * var)) / ((2 * np.pi) ** .5 * std)
    return tf.reduce_prod(gp, [1])
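
# Sanity check (illustrative values): at x == mu every factor equals the
# Gaussian peak 1 / (sqrt(2*pi) * std); with std = 1 in 2-D the product is 1 / (2*pi).
mu = tf.zeros([1, 2])
logstd = tf.zeros([1, 2])  # std = exp(0) = 1
p = gauss_prob(mu, logstd, tf.zeros([1, 2]))  # -> [1 / (2*pi)] ~= [0.159]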
# Source: utils_combine.py (project: adversarial-deep-structural-networks, author: wentaozhu)
def cnnmodel(X, Y, paras, flag='single'):
    assert(flag == 'single' or flag == 'combine')
    X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
    yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
    yonehot = tf.concat(3, [1 - yreshape, yreshape])
    if flag == 'combine':
        hconv4clip = buildcombmodel(X, paras)
    else:
        hconv4clip = buildmodel(X, paras)
    #hconv4log = -tf.log(hconv4clip)
    #q_train, q_test = crfrnn(hconv4log, paras['wsmooth'], paras['wcontra'], k1, k2, trainiter=5, testiter=10)
    #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
    q_train = -tf.log(hconv4clip)
    trainenergy = tf.reduce_sum((q_train) * yonehot, reduction_indices=3)
    #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
    trainenergy = tf.reduce_mean(trainenergy, [0, 1, 2])
    q_test = hconv4clip
    #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
    q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
    testenergy = tf.reduce_sum(tf.mul(q_test, yonehot), reduction_indices=3)
    #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
    testenergy = tf.reduce_mean(testenergy, [0, 1, 2])
    predarg = tf.argmax(q_test, 3)
    yint64 = tf.to_int64(Y)
    acc = tf.equal(yint64, predarg)
    acc = tf.to_float(acc)
    accuracy = tf.reduce_mean(acc, [0, 1, 2])
    di = dice_tf(tf.reshape(yint64, [-1, ]), tf.reshape(predarg, [-1, ]))
    return trainenergy, accuracy, di, testenergy, q_test
def cnnmodel(X, Y, paras, flag='single'):
    assert(flag == 'single' or flag == 'combine')
    X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
    yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
    yonehot = tf.concat(3, [1 - yreshape, yreshape])
    if flag == 'combine':
        hconv4clip = buildcombmodel(X, paras)
    else:
        hconv4clip = buildmodel(X, paras)
    #hconv4log = -tf.log(hconv4clip)
    #q_train, q_test = crfrnn(hconv4log, paras['wsmooth'], paras['wcontra'], k1, k2, trainiter=5, testiter=10)
    #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
    q_train = -tf.log(hconv4clip)
    trainenergy = tf.reduce_sum((q_train) * yonehot, reduction_indices=3)
    #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
    trainenergy = tf.reduce_mean(trainenergy, [0, 1, 2])
    q_test = hconv4clip
    #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
    q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
    testenergy = tf.reduce_sum(tf.mul(q_test, yonehot), reduction_indices=3)
    #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
    testenergy = tf.reduce_mean(testenergy, [0, 1, 2])
    predarg = tf.argmax(q_test, 3)
    yint64 = tf.to_int64(Y)
    acc = tf.equal(yint64, predarg)
    acc = tf.to_float(acc)
    accuracy = tf.reduce_mean(acc, [0, 1, 2])
    di = dice_tf(tf.reshape(yint64, [-1, ]), tf.reshape(predarg, [-1, ]))
    return trainenergy, accuracy, di, testenergy, predarg
def model(X, Y, k1, k2, paras, flag='single'):
    assert(flag == 'single' or flag == 'combine')
    X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
    yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
    yonehot = tf.concat(3, [1 - yreshape, yreshape])
    if flag == 'combine':
        hconv4clip = buildcombmodel(X, paras, fusion=False)
        #h1, h2, h3, h4 = tf.split(3, 4, hconv4clip)
        q_train, q_test = crfrnn(hconv4clip, paras['wsmooth'], paras['wcontra'], k1, k2,
                                 trainiter=5, testiter=10, wunary=paras['wunary'])
    else:
        hconv4clip = buildmodel(X, paras)
        q_train, q_test = crfrnn(hconv4clip, paras['wsmooth'], paras['wcontra'], k1, k2,
                                 trainiter=5, testiter=10)
    #hconv4log = -tf.log(hconv4clip)
    #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
    #q_train = -tf.log(hconv4clip)
    q_trainclip = tf.clip_by_value(q_train, 1e-6, 1.)
    trainenergy = tf.reduce_sum(-tf.log(q_trainclip) * yonehot, reduction_indices=3)
    #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
    trainenergy = tf.reduce_mean(trainenergy, [0, 1, 2])
    #q_test = hconv4clip
    #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
    q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
    testenergy = tf.reduce_sum(tf.mul(q_test, yonehot), reduction_indices=3)
    #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
    testenergy = tf.reduce_mean(testenergy, [0, 1, 2])
    predarg = tf.argmax(q_test, 3)
    yint64 = tf.to_int64(Y)
    acc = tf.equal(yint64, predarg)
    acc = tf.to_float(acc)
    accuracy = tf.reduce_mean(acc, [0, 1, 2])
    di = dice_tf(tf.reshape(yint64, [-1, ]), tf.reshape(predarg, [-1, ]))
    return trainenergy, accuracy, di, testenergy, predarg
def log_norm(expr_list, name):
    """Computes the mean squared norm of a list of expressions and logs it
    as a scalar summary.

    :param expr_list: (possibly nested) list of Tensors
    :param name: summary tag
    :return: scalar Tensor, the sum of squares over all elements divided by
        the total number of elements
    """
    n_elems = 0
    norm = 0.
    for e in nest.flatten(expr_list):
        n_elems += tf.reduce_prod(tf.shape(e))
        norm += tf.reduce_sum(e ** 2)
    norm /= tf.to_float(n_elems)
    tf.summary.scalar(name, norm)
    return norm
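
# Usage sketch (assumes `nest` is the TF nest utility imported by the original
# module): (6 + 4) squared ones over 10 elements gives 1.0.
params = [tf.ones([2, 3]), tf.ones([4])]
norm = log_norm(params, 'param_norm')  # -> 1.0, also logged as a summary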
def xavier_normal_dist_conv3d(shape):
    return tf.truncated_normal(shape, mean=0,
                               stddev=tf.sqrt(3. / (tf.reduce_prod(shape[:3]) * tf.reduce_sum(shape[3:]))))
def xavier_uniform_dist_conv3d(shape):
    with tf.variable_scope('xavier_glorot_initializer'):
        denominator = tf.cast((tf.reduce_prod(shape[:3]) * tf.reduce_sum(shape[3:])), tf.float32)
        lim = tf.sqrt(6. / denominator)
        return tf.random_uniform(shape, minval=-lim, maxval=lim)
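
# Usage sketch: a 3x3x3 kernel with 16 input and 32 output channels. The
# denominator above is prod(kernel dims) * (in + out channels) = 27 * 48,
# matching the Glorot fan_in + fan_out for conv3d.
w = tf.Variable(xavier_uniform_dist_conv3d([3, 3, 3, 16, 32]), name='conv3d_w')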
def prod(x, axis=None, keepdims=False):
    '''Multiplies the values in a tensor, alongside the specified axis.
    '''
    axis = _normalize_axis(axis, ndim(x))
    return tf.reduce_prod(x, reduction_indices=axis, keep_dims=keepdims)
def get_output_for(self, input, **kwargs):
    # total_entries = tf.reduce_prod(tf.shape(input))
    pre_shape = tf.shape(input)[:self.outdim - 1]
    to_flatten = tf.reduce_prod(tf.shape(input)[self.outdim - 1:])
    return tf.reshape(input, tf.concat(0, [pre_shape, tf.pack([to_flatten])]))
def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
    old_p = old_dist_info_vars["p"]
    new_p = new_dist_info_vars["p"]
    ndims = old_p.get_shape().ndims
    return tf.reduce_prod(x_var * new_p / (old_p + TINY) + (1 - x_var) * (1 - new_p) / (1 - old_p + TINY),
                          reduction_indices=ndims - 1)
def _prob(self, given):
    return tf.reduce_prod(tf.ones_like(given), -1)
def prob(self, given):
    """
    prob(given)

    Compute probability density (mass) function at `given` value.

    :param given: A Tensor. The value at which to evaluate probability
        density (mass) function. Must be able to broadcast to have a shape
        of ``(... + )batch_shape + value_shape``.
    :return: A Tensor of shape ``(... + )batch_shape[:-group_ndims]``.
    """
    given = self._check_input_shape(given)
    p = self._prob(given)
    return tf.reduce_prod(p, tf.range(-self._group_ndims, 0))
def Flatten(layer):
    """
    Handy function for flattening the result of a conv2D or
    maxpool2D to be used for a fully-connected (affine) layer.
    """
    layer_shape = layer.get_shape()
    # num_features = tf.reduce_prod(tf.shape(layer)[1:])
    num_features = layer_shape[1:].num_elements()
    layer_flat = tf.reshape(layer, [-1, num_features])
    return layer_flat, num_features
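
# Usage sketch: flatten a pooled feature map with a static shape for a dense layer.
pool = tf.placeholder(tf.float32, [None, 7, 7, 64])
flat, n = Flatten(pool)  # flat: [None, 3136], n == 7 * 7 * 64 == 3136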
def test_sum_prod_broadcast(self):
    # placeholder
    a = tf.placeholder(tf.float32, shape=[3, 4, 5, 6])
    b = tf.placeholder(tf.float32, shape=[3, 4, 5])
    a_sum = tf.reduce_sum(a, reduction_indices=[0, 3])    # shape (4, 5)
    b_prod = tf.reduce_prod(b, reduction_indices=[0, 1])  # shape (5,)
    f = a_sum + b_prod + b  # (4, 5) + (5,) + (3, 4, 5) -> (3, 4, 5)
    # value
    feed_dict = dict()
    for x in [a, b]:
        feed_dict[x] = np.random.rand(*tf_obj_shape(x))
    # test
    self.run(f, tf_feed_dict=feed_dict)