def resize_axis(tensor, axis, new_size, fill_value=0):
    tensor = tf.convert_to_tensor(tensor)
    shape = tf.unstack(tf.shape(tensor))

    pad_shape = shape[:]
    pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

    shape[axis] = tf.minimum(shape[axis], new_size)
    shape = tf.stack(shape)

    resized = tf.concat([
        tf.slice(tensor, tf.zeros_like(shape), shape),
        tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
    ], axis)

    # Update shape.
    new_shape = tensor.get_shape().as_list()  # A copy is being made.
    new_shape[axis] = new_size
    resized.set_shape(new_shape)
    return resized
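# A minimal usage sketch (my addition, assuming TF 1.x graph mode and the
# usual `import tensorflow as tf`): pad or trim a tensor along one axis.
x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])                                  # shape (2, 3)
padded = resize_axis(x, axis=1, new_size=5, fill_value=-1)    # shape (2, 5)
trimmed = resize_axis(x, axis=1, new_size=2)                  # shape (2, 2)
with tf.Session() as sess:
    print(sess.run(padded))   # each row padded with -1 up to length 5
    print(sess.run(trimmed))  # each row truncated to length 2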
def get_batch_data():
    # Load data
    X, Y = load_data()

    # calc total batch count
    num_batch = len(X) // hp.batch_size

    # Convert to tensor
    X = tf.convert_to_tensor(X, tf.int32)
    Y = tf.convert_to_tensor(Y, tf.float32)

    # Create Queues
    input_queues = tf.train.slice_input_producer([X, Y])

    # create batch queues
    x, y = tf.train.batch(input_queues,
                          num_threads=8,
                          batch_size=hp.batch_size,
                          capacity=hp.batch_size * 64,
                          allow_smaller_final_batch=False)

    return x, y, num_batch  # (N, T), (N, T), ()
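# Hedged sketch (my addition) of how such queue-based batches are typically
# consumed in a TF 1.x input pipeline; it assumes `hp` and `load_data` come
# from the snippet's own project.
x, y, num_batch = get_batch_data()
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    x_val, y_val = sess.run([x, y])   # one (batch_size, T) batch each
    coord.request_stop()
    coord.join(threads)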
def lengths_to_mask(lengths_b, max_length):
    """
    Turns a vector of lengths into a boolean mask

    Args:
        lengths_b: an integer vector of lengths
        max_length: maximum length to fill the mask

    Returns:
        a boolean array of shape (batch_size, max_length)
        row[i] consists of True repeated lengths_b[i] times, followed by False
    """
    lengths_b = tf.convert_to_tensor(lengths_b)
    assert lengths_b.get_shape().ndims == 1
    mask_bt = tf.expand_dims(tf.range(max_length), 0) < tf.expand_dims(lengths_b, 1)
    return mask_bt
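# Usage sketch (my addition): build a (3, 5) boolean mask from per-example
# lengths.
lengths = tf.constant([1, 3, 5])
mask = lengths_to_mask(lengths, max_length=5)
# mask[0] == [True, False, False, False, False]; mask[2] is all True.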
def create_decoder(self, helper, mode):
    attention_fn = AttentionLayerDot(
        params={"num_units": self.attention_dim},
        mode=tf.contrib.learn.ModeKeys.TRAIN)
    attention_values = tf.convert_to_tensor(
        np.random.randn(self.batch_size, self.input_seq_len, 32),
        dtype=tf.float32)
    attention_keys = tf.convert_to_tensor(
        np.random.randn(self.batch_size, self.input_seq_len, 32),
        dtype=tf.float32)
    params = AttentionDecoder.default_params()
    params["max_decode_length"] = self.max_decode_length
    return AttentionDecoder(
        params=params,
        mode=mode,
        vocab_size=self.vocab_size,
        attention_keys=attention_keys,
        attention_values=attention_values,
        attention_values_length=np.arange(self.batch_size) + 1,
        attention_fn=attention_fn)
def setUp(self):
    super(BridgeTest, self).setUp()
    self.batch_size = 4
    self.encoder_cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.GRUCell(4), tf.contrib.rnn.GRUCell(8)])
    self.decoder_cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.LSTMCell(16), tf.contrib.rnn.GRUCell(8)])
    final_encoder_state = nest.map_structure(
        lambda x: tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, x),
            dtype=tf.float32),
        self.encoder_cell.state_size)
    self.encoder_outputs = EncoderOutput(
        outputs=tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
        attention_values=tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
        attention_values_length=np.full([self.batch_size], 10),
        final_state=final_encoder_state)
def _test_with_residuals(self, inputs, **kwargs):
    """Runs the cell in a session"""
    inputs = tf.convert_to_tensor(inputs)
    state = (tf.constant(np.random.randn(1, 2)),
             tf.constant(np.random.randn(1, 2)))

    with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        test_cell = rnn_cell.ExtendedMultiRNNCell(
            [tf.contrib.rnn.GRUCell(2) for _ in range(2)],
            residual_connections=True,
            **kwargs)
        res_test = test_cell(inputs, state, scope="test")

    with self.test_session() as sess:
        sess.run([tf.global_variables_initializer()])
        return sess.run(res_test)
def run_bilateral_slice_apply(self, dev, grid_data, guide_data, input_data,
                              has_offset=False):
    with tf.device(dev):
        grid_tensor = tf.convert_to_tensor(
            grid_data, name='grid', dtype=tf.float32)
        guide_tensor = tf.convert_to_tensor(
            guide_data, name='guide', dtype=tf.float32)
        input_tensor = tf.convert_to_tensor(
            input_data, name='input', dtype=tf.float32)

        output_tensor = ops.bilateral_slice_apply(
            grid_tensor, guide_tensor, input_tensor, has_offset=has_offset)

    with self.test_session() as sess:
        output_data = sess.run(output_tensor)

    return output_data
def get_label_queue(self, batch_size):
    tf_labels = tf.convert_to_tensor(self.attr.values, dtype=tf.uint8)  # 0, 1

    with tf.name_scope('label_queue'):
        uint_label = tf.train.slice_input_producer([tf_labels])[0]
        label = tf.to_float(uint_label)

        # All labels, not just those in causal_model
        dict_data = {sl: tl for sl, tl in
                     zip(self.label_names, tf.split(label, len(self.label_names)))}

        num_preprocess_threads = max(self.num_worker - 3, 1)
        data_batch = tf.train.shuffle_batch(
            dict_data,
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=self.min_queue_examples + 3 * batch_size,
            min_after_dequeue=self.min_queue_examples,
        )
        return data_batch
def l1_regularizer(weight=1.0, scope=None):
    """Define an L1 regularizer.

    Args:
        weight: scale the loss by this factor.
        scope: Optional scope for op_scope.

    Returns:
        a regularizer function.
    """
    def regularizer(tensor):
        with tf.op_scope([tensor], scope, 'L1Regularizer'):
            l1_weight = tf.convert_to_tensor(weight,
                                             dtype=tensor.dtype.base_dtype,
                                             name='weight')
            return tf.mul(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
    return regularizer
def l2_regularizer(weight=1.0, scope=None):
    """Define an L2 regularizer.

    Args:
        weight: scale the loss by this factor.
        scope: Optional scope for op_scope.

    Returns:
        a regularizer function.
    """
    def regularizer(tensor):
        with tf.op_scope([tensor], scope, 'L2Regularizer'):
            l2_weight = tf.convert_to_tensor(weight,
                                             dtype=tensor.dtype.base_dtype,
                                             name='weight')
            return tf.mul(l2_weight, tf.nn.l2_loss(tensor), name='value')
    return regularizer
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
    """Define a combined L1 and L2 regularizer.

    Args:
        weight_l1: scale the L1 loss by this factor.
        weight_l2: scale the L2 loss by this factor.
        scope: Optional scope for op_scope.

    Returns:
        a regularizer function.
    """
    def regularizer(tensor):
        with tf.op_scope([tensor], scope, 'L1L2Regularizer'):
            weight_l1_t = tf.convert_to_tensor(weight_l1,
                                               dtype=tensor.dtype.base_dtype,
                                               name='weight_l1')
            weight_l2_t = tf.convert_to_tensor(weight_l2,
                                               dtype=tensor.dtype.base_dtype,
                                               name='weight_l2')
            reg_l1 = tf.mul(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
                            name='value_l1')
            reg_l2 = tf.mul(weight_l2_t, tf.nn.l2_loss(tensor),
                            name='value_l2')
            return tf.add(reg_l1, reg_l2, name='value')
    return regularizer
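# Usage sketch (my addition): applying one of the regularizer factories above
# to a weight matrix. These helpers target a pre-1.0 TensorFlow release (they
# call tf.op_scope and tf.mul), so the sketch assumes that same API.
weights = tf.get_variable('w', shape=[128, 10])
reg_fn = l1_l2_regularizer(weight_l1=1e-5, weight_l2=1e-4)
reg_loss = reg_fn(weights)   # scalar tensor named 'value', added to the total loss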
def l1_loss(tensor, weight=1.0, scope=None):
    """Define an L1 loss, useful for regularization, e.g. the lasso.

    Args:
        tensor: tensor to regularize.
        weight: scale the loss by this factor.
        scope: Optional scope for op_scope.

    Returns:
        the L1 loss op.
    """
    with tf.op_scope([tensor], scope, 'L1Loss'):
        weight = tf.convert_to_tensor(weight,
                                      dtype=tensor.dtype.base_dtype,
                                      name='loss_weight')
        loss = tf.mul(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
        tf.add_to_collection(LOSSES_COLLECTION, loss)
        return loss
def l2_loss(tensor, weight=1.0, scope=None, normalize=False):
    """Define an L2 loss, useful for regularization, e.g. weight decay.

    Args:
        tensor: tensor to regularize.
        weight: an optional weight to modulate the loss.
        scope: Optional scope for op_scope.
        normalize: if True, ignore `weight` and return
            tf.sqrt(tf.sqrt(l2_loss) / size) instead of the weighted L2 loss.

    Returns:
        the L2 loss op.
    """
    with tf.op_scope([tensor], scope, 'L2Loss'):
        weight = tf.convert_to_tensor(weight,
                                      dtype=tensor.dtype.base_dtype,
                                      name='loss_weight')
        if normalize:
            loss = tf.sqrt(tf.sqrt(tf.nn.l2_loss(tensor)) / tf.to_float(tf.size(tensor)),
                           name='value')
        else:
            loss = tf.mul(weight, tf.nn.l2_loss(tensor), name='value')
        tf.add_to_collection(LOSSES_COLLECTION, loss)
        return loss
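# Sketch (my addition): using l2_loss as weight decay and summing everything
# collected so far. LOSSES_COLLECTION is assumed to be whatever collection key
# this module defines; same pre-1.0 TF caveat as above.
w = tf.get_variable('fc_w', shape=[256, 10])
_ = l2_loss(w, weight=1e-4)
total_loss = tf.add_n(tf.get_collection(LOSSES_COLLECTION))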
def sparse_cross_entropy_loss(logits, labels, weight=1.0, scope=None):
    """Define a cross-entropy loss using sparse_softmax_cross_entropy_with_logits.

    The loss is scaled by the `weight` factor.

    Args:
        logits: [batch_size, num_classes] logits outputs of the network.
        labels: [batch_size,] target labels.
        weight: scale the loss by this factor.
        scope: Optional scope for op_scope.

    Returns:
        A tensor with the softmax_cross_entropy loss.
    """
    with tf.op_scope([logits, labels], scope, 'SparseCrossEntropyLoss'):
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits, labels, name='xentropy')
        weight = tf.convert_to_tensor(weight,
                                      dtype=logits.dtype.base_dtype,
                                      name='loss_weight')
        loss = tf.mul(weight, tf.reduce_mean(cross_entropy), name='value')
        tf.add_to_collection(LOSSES_COLLECTION, loss)
        return loss
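# Sketch (my addition): a weighted sparse cross-entropy loss for a batch of 32
# examples over 10 classes. The positional call into
# sparse_softmax_cross_entropy_with_logits assumes the same pre-1.0 TF release
# as the helper itself.
logits = tf.random_normal([32, 10])
labels = tf.random_uniform([32], maxval=10, dtype=tf.int32)
loss = sparse_cross_entropy_loss(logits, labels, weight=1.0)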
def _largest_size_at_most(height, width, largest_side):
    """Computes new shape with the largest side equal to `largest_side`.

    Computes new shape with the largest side equal to `largest_side` while
    preserving the original aspect ratio.

    Args:
        height: an int32 scalar tensor indicating the current height.
        width: an int32 scalar tensor indicating the current width.
        largest_side: A python integer or scalar `Tensor` indicating the size of
            the largest side after resize.

    Returns:
        new_height: an int32 scalar tensor indicating the new height.
        new_width: an int32 scalar tensor indicating the new width.
    """
    largest_side = tf.convert_to_tensor(largest_side, dtype=tf.int32)

    height = tf.to_float(height)
    width = tf.to_float(width)
    largest_side = tf.to_float(largest_side)

    scale = tf.cond(tf.greater(height, width),
                    lambda: largest_side / height,
                    lambda: largest_side / width)
    new_height = tf.to_int32(height * scale)
    new_width = tf.to_int32(width * scale)
    return new_height, new_width
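# Quick check (my addition, TF 1.x): a 480x640 image whose largest side should
# become 512 while the aspect ratio is preserved.
h, w = tf.constant(480), tf.constant(640)
new_h, new_w = _largest_size_at_most(h, w, largest_side=512)
with tf.Session() as sess:
    print(sess.run([new_h, new_w]))   # [384, 512]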
def imgread(img_path, scale=4):
    img = scipy.misc.imread(img_path)
    img = img / 256.0
    h, w, c = img.shape
    tmp1 = h % scale
    new_h = h + scale - tmp1
    tmp2 = w % scale
    new_w = w + scale - tmp2
    img = np.pad(img, ((0, scale - tmp1), (0, scale - tmp2), (0, 0)), mode='reflect')
    if scale is not None:
        img = np.expand_dims(img, 0)
        img = tf.convert_to_tensor(img)
        lr_w = new_w // scale
        lr_h = new_h // scale
        img = tf.cast(img, tf.float32)
        img_lr = tf.image.resize_images(img, [lr_h, lr_w])
        img_lr = tf.cast(img_lr, tf.float32)
        return img_lr, img
    return img
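# Usage sketch (my addition) for the imgread variant above: it relies on the
# old scipy.misc.imread (scipy < 1.2 with Pillow installed), and the image
# path here is purely hypothetical.
img_lr, img_hr = imgread('some_image.png', scale=4)
with tf.Session() as sess:
    lr_np, hr_np = sess.run([img_lr, img_hr])   # low-res and padded full-res arrays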
def imgread(img_path, scale=4):
    img = scipy.misc.imread(img_path)
    # img = scipy.misc.imresize(img, (128, 128))
    img = img / 256.0
    h, w, c = img.shape
    new_h = pow(2, int(math.log(h, 2)) + 1)
    tmp1 = new_h - h
    new_w = pow(2, int(math.log(w, 2)) + 1)
    tmp2 = new_w - w
    img = np.pad(img, ((0, tmp1), (0, tmp2), (0, 0)), mode='constant')
    if scale is not None:
        img = np.expand_dims(img, 0)
        img = tf.convert_to_tensor(img)
        lr_w = new_w // scale
        lr_h = new_h // scale
        img = tf.cast(img, tf.float32)
        img_lr = tf.image.resize_images(img, [lr_h, lr_w])
        img_lr = tf.cast(img_lr, tf.float32)
        return img_lr, img
    return img
def get_batch_data():
    # Load data
    X, Y = load_data()

    # calc total batch count
    num_batch = len(X) // hp.batch_size

    # Convert to tensor
    X = tf.convert_to_tensor(X, tf.int32)
    Y = tf.convert_to_tensor(Y, tf.int32)

    # Create Queues
    input_queues = tf.train.slice_input_producer([X, Y])

    # create batch queues
    x, y = tf.train.batch(input_queues,
                          num_threads=8,
                          batch_size=hp.batch_size,
                          capacity=hp.batch_size * 64,
                          allow_smaller_final_batch=False)

    return x, y, num_batch  # (N, T), (N, T), ()
def _fwlinear(self, args, output_size, scope=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]
    assert len(args) == 2
    assert args[0].get_shape().as_list()[1] == output_size
    dtype = [a.dtype for a in args][0]

    with vs.variable_scope(scope or "Linear"):
        matrixW = vs.get_variable(
            "MatrixW", dtype=dtype,
            initializer=tf.convert_to_tensor(np.eye(output_size, dtype=np.float32) * .05))
        matrixC = vs.get_variable(
            "MatrixC", [args[1].get_shape().as_list()[1], output_size], dtype=dtype)

        res = tf.matmul(args[0], matrixW) + tf.matmul(args[1], matrixC)
        return res
def _aspect_preserving_resize(image, smallest_side):
    """Resize images preserving the original aspect ratio.

    Args:
        image: A 3-D image `Tensor`.
        smallest_side: A python integer or scalar `Tensor` indicating the size of
            the smallest side after resize.

    Returns:
        resized_image: A 3-D tensor containing the resized image.
    """
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

    shape = tf.shape(image)
    height = shape[0]
    width = shape[1]
    new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
    image = tf.expand_dims(image, 0)
    resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
                                             align_corners=False)
    resized_image = tf.squeeze(resized_image)
    resized_image.set_shape([None, None, 3])
    return resized_image
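# Sketch (my addition); it assumes the companion _smallest_size_at_least
# helper from the same preprocessing module is in scope.
image = tf.random_uniform([480, 640, 3])
resized = _aspect_preserving_resize(image, smallest_side=256)
# The 480-pixel side becomes 256; the 640-pixel side scales to roughly 341.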
def batch_gather(tensor, indices):
    """Gather in batch from a tensor of arbitrary size.

    In pseudocode this module will produce the following:
        output[i] = tf.gather(tensor[i], indices[i])

    Args:
        tensor: Tensor of arbitrary size.
        indices: Vector of indices.

    Returns:
        output: A tensor of gathered values.
    """
    shape = get_shape(tensor)
    flat_first = tf.reshape(tensor, [shape[0] * shape[1]] + shape[2:])
    indices = tf.convert_to_tensor(indices)
    offset_shape = [shape[0]] + [1] * (indices.shape.ndims - 1)
    offset = tf.reshape(tf.range(shape[0]) * shape[1], offset_shape)
    output = tf.gather(flat_first, indices + offset)
    return output
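# Sketch (my addition) of batch_gather; it assumes the project's get_shape
# helper returns the tensor's dimensions as a Python list.
params = tf.reshape(tf.range(12), [3, 4])   # rows: [0..3], [4..7], [8..11]
idx = tf.constant([[0], [3], [2]])          # one column index per row
picked = batch_gather(params, idx)          # -> [[0], [7], [10]]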
def test_objective(self):
    log_qx = stats.norm.logpdf(self._n01_samples).astype(np.float32)
    qx_samples = tf.convert_to_tensor(self._n01_samples)
    log_qx = tf.convert_to_tensor(log_qx)

    def _check_elbo(x_mean, x_std):
        # check their elbo
        def log_joint(observed):
            norm = Normal(mean=x_mean, std=x_std)
            return norm.log_prob(observed['x'])

        lower_bound = elbo(log_joint, observed={},
                           latent={'x': [qx_samples, log_qx]}, axis=0)
        analytic_lower_bound = -_kl_normal_normal(0., 1., x_mean, x_std)
        with self.test_session(use_gpu=True) as sess:
            a = sess.run(lower_bound)
            b = sess.run(analytic_lower_bound)
            # print(a, b)
            self.assertNear(a, b, 1e-2)

    _check_elbo(0., 1.)
    _check_elbo(2., 3.)
def test_objective(self):
    log_qx = stats.norm.logpdf(self._n01_samples).astype(np.float32)
    qx_samples = tf.convert_to_tensor(self._n01_samples)
    log_qx = tf.convert_to_tensor(log_qx)

    def log_joint(observed):
        norm = Normal(std=1.)
        return norm.log_prob(observed['x'])

    lower_bound = klpq(log_joint, observed={},
                       latent={'x': [qx_samples, log_qx]}, axis=0)

    err_msg = "can only be optimized instead of being evaluated"
    with self.assertRaisesRegexp(NotImplementedError, err_msg):
        _ = lower_bound + 1.
    with self.test_session(use_gpu=True) as sess:
        with self.assertRaisesRegexp(NotImplementedError, err_msg):
            sess.run(lower_bound)
def __init__(self, name, distribution, n_samples, observed=None):
    self._name = name
    self._distribution = distribution
    self._n_samples = n_samples
    self._dtype = distribution.dtype
    if observed is not None:
        try:
            observed = tf.convert_to_tensor(observed, dtype=self.dtype)
        except ValueError as e:
            raise ValueError(
                "StochasticTensor('{}') not compatible "
                "with its observed value. Error message: {}".format(
                    self._name, e))
    self._observed = observed
    try:
        self._net = BayesianNet.get_context()
        self._net._add_stochastic_tensor(self)
    except RuntimeError:
        self._net = None
    super(StochasticTensor, self).__init__()
def tensor(self):
    """
    Return corresponding Tensor through sampling, or if observed, return
    the observed value.

    :return: A Tensor.
    """
    if not hasattr(self, '_tensor'):
        if self._observed is not None:
            self._tensor = self._observed
        elif self._name in self._net.observed:
            try:
                self._tensor = tf.convert_to_tensor(
                    self._net.observed[self._name], dtype=self._dtype)
            except ValueError as e:
                raise ValueError(
                    "StochasticTensor('{}') not compatible "
                    "with its observed value. Error message: {}".format(
                        self._name, e))
        else:
            self._tensor = self.sample(self._n_samples)
    return self._tensor
def __init__(self, initial_stepsize, adapt_step_size, gamma, t0, kappa,
             delta):
    with tf.name_scope("StepsizeTuner"):
        self.adapt_step_size = tf.convert_to_tensor(
            adapt_step_size, dtype=tf.bool, name="adapt_step_size")
        self.initial_stepsize = initial_stepsize

        self.gamma = tf.convert_to_tensor(gamma, dtype=tf.float32,
                                          name="gamma")
        self.t0 = tf.convert_to_tensor(t0, dtype=tf.float32, name="t0")
        self.kappa = tf.convert_to_tensor(kappa, dtype=tf.float32,
                                          name="kappa")
        self.delta = tf.convert_to_tensor(delta, dtype=tf.float32,
                                          name="delta")
        self.mu = tf.constant(10 * initial_stepsize, dtype=tf.float32,
                              name="mu")

        self.step = tf.Variable(0.0, dtype=tf.float32,
                                name="step", trainable=False)
        self.log_epsilon_bar = tf.Variable(
            0.0, dtype=tf.float32, name="log_epsilon_bar", trainable=False)
        self.h_bar = tf.Variable(0.0, dtype=tf.float32,
                                 name="h_bar", trainable=False)
def __init__(self, logits, dtype=None, group_ndims=0, **kwargs):
    self._logits = tf.convert_to_tensor(logits)
    param_dtype = assert_same_float_dtype(
        [(self._logits, 'Bernoulli.logits')])

    if dtype is None:
        dtype = tf.int32
    assert_same_float_and_int_dtype([], dtype)

    super(Bernoulli, self).__init__(
        dtype=dtype,
        param_dtype=param_dtype,
        is_continuous=False,
        is_reparameterized=False,
        group_ndims=group_ndims,
        **kwargs)
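# Hedged usage sketch (my addition) for the Bernoulli constructor above; the
# sample() and log_prob() methods are assumed from their use elsewhere in
# these snippets (StochasticTensor.sample, Normal.log_prob).
bern = Bernoulli(logits=tf.zeros([5]), group_ndims=1)
x = bern.sample(10)          # int32 samples, shape (10, 5)
log_p = bern.log_prob(x)     # per-sample log-probs, grouped over the last axis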
def __init__(self, logits, dtype=None, group_ndims=0, **kwargs):
    self._logits = tf.convert_to_tensor(logits)
    param_dtype = assert_same_float_dtype(
        [(self._logits, 'Categorical.logits')])

    if dtype is None:
        dtype = tf.int32
    assert_same_float_and_int_dtype([], dtype)

    self._logits = assert_rank_at_least_one(
        self._logits, 'Categorical.logits')
    self._n_categories = get_shape_at(self._logits, -1)

    super(Categorical, self).__init__(
        dtype=dtype,
        param_dtype=param_dtype,
        is_continuous=False,
        is_reparameterized=False,
        group_ndims=group_ndims,
        **kwargs)
def __init__(self,
             rate,
             dtype=None,
             group_ndims=0,
             check_numerics=False,
             **kwargs):
    self._rate = tf.convert_to_tensor(rate)
    param_dtype = assert_same_float_dtype(
        [(self._rate, 'Poisson.rate')])

    if dtype is None:
        dtype = tf.int32
    assert_same_float_and_int_dtype([], dtype)

    self._check_numerics = check_numerics
    super(Poisson, self).__init__(
        dtype=dtype,
        param_dtype=param_dtype,
        is_continuous=False,
        is_reparameterized=False,
        group_ndims=group_ndims,
        **kwargs)
def __init__(self, logits, dtype=None, group_ndims=0, **kwargs):
    self._logits = tf.convert_to_tensor(logits)
    param_dtype = assert_same_float_dtype(
        [(self._logits, 'OnehotCategorical.logits')])

    if dtype is None:
        dtype = tf.int32
    assert_same_float_and_int_dtype([], dtype)

    self._logits = assert_rank_at_least_one(
        self._logits, 'OnehotCategorical.logits')
    self._n_categories = get_shape_at(self._logits, -1)

    super(OnehotCategorical, self).__init__(
        dtype=dtype,
        param_dtype=param_dtype,
        is_continuous=False,
        is_reparameterized=False,
        group_ndims=group_ndims,
        **kwargs)