def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
    placeholders and the weights of the hidden layer of the Seq2Seq model.
    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        # review input - both original and reversed
        self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # weights of the hidden layer
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend a "GO" token and drop the final
        # token of the encoder input
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")]
                        + self.labels[:-1])
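The placeholders above follow the legacy list-of-placeholders seq2seq API: one (batch_size,) vector of token ids per time step. A minimal feed-dict sketch; the helper name, the `model` handle, and the (seq_length, batch_size) batch layout are illustrative assumptions, not part of the original repo:

def make_feed_dict(model, enc_batch, label_batch):
    # enc_batch, label_batch: integer arrays of shape (seq_length, batch_size);
    # the backward encoder input is simply the review reversed along the time axis.
    feed = {model.enc_inp_fwd[t]: enc_batch[t] for t in range(model.seq_length)}
    feed.update({model.enc_inp_bwd[t]: enc_batch[::-1][t] for t in range(model.seq_length)})
    feed.update({model.labels[t]: label_batch[t] for t in range(model.seq_length)})
    return feed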
def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
    placeholders and the weights of the hidden layer of the Seq2Seq model.
    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        self.enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                        for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # weights of the hidden layer
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend a "GO" token and drop the final
        # token of the encoder input
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")]
                        + self.labels[:-1])
def _meshgrid(self, height, width):
    with tf.variable_scope('_meshgrid'):
        # This should be equivalent to:
        #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
        #                         np.linspace(-1, 1, height))
        #  ones = np.ones(np.prod(x_t.shape))
        #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
        x_t = tf.matmul(tf.ones(shape=tf.pack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                        tf.ones(shape=tf.pack([1, width])))
        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))
        ones = tf.ones_like(x_t_flat)
        grid = tf.concat(0, [x_t_flat, y_t_flat, ones])
        return grid
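The NumPy equivalence stated in the comment above can be checked directly; a standalone sketch (the function name is illustrative):

import numpy as np

def np_meshgrid(height, width):
    x_t, y_t = np.meshgrid(np.linspace(-1.0, 1.0, width),
                           np.linspace(-1.0, 1.0, height))
    ones = np.ones(x_t.size)
    # shape (3, height * width): rows are x, y and the homogeneous 1s
    return np.vstack([x_t.ravel(), y_t.ravel(), ones])

# np_meshgrid(2, 3)[0] -> [-1, 0, 1, -1, 0, 1]; np_meshgrid(2, 3)[1] -> [-1, -1, -1, 1, 1, 1]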
def generate_mask(img_mask_list, h, w, l):
    img_masks, loss_masks = [], []
    for i in range(l):
        # generate image mask
        img_mask = img_mask_list[i]
        img_mask = tf.cast(tf.image.decode_png(img_mask), tf.float32)
        img_mask = tf.reshape(img_mask, (h, w))
        img_masks.append(img_mask)
        # generate loss mask
        s_total = h * w
        s_mask = tf.reduce_sum(img_mask)

        def f1(): return img_mask * ((s_total - s_mask) / s_mask - 1) + 1
        def f2(): return tf.zeros_like(img_mask)
        def f3(): return tf.ones_like(img_mask)

        loss_mask = tf.case([(tf.equal(s_mask, 0), f2),
                             (tf.less(s_mask, s_total / 2), f1)],
                            default=f3)
        loss_masks.append(loss_mask)
    return tf.stack(img_masks), tf.stack(loss_masks)
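Worked example of the f1 weighting above (a NumPy sketch, not from the original repo): foreground pixels get weight (s_total - s_mask) / s_mask while background pixels keep weight 1, which roughly balances the two classes when the mask is sparse.

import numpy as np

img_mask = np.zeros((4, 4), dtype=np.float32)
img_mask[0, 0] = img_mask[1, 1] = 1.0            # 2 foreground pixels
s_total, s_mask = img_mask.size, img_mask.sum()  # 16, 2
loss_mask = img_mask * ((s_total - s_mask) / s_mask - 1.0) + 1.0
assert loss_mask[0, 0] == 7.0 and loss_mask[2, 2] == 1.0  # (16 - 2) / 2 = 7 vs 1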
def create_generator_loss(disc_output, gene_output, features):
    # I.e. did we fool the discriminator?
    cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_output,
                                                            labels=tf.ones_like(disc_output))
    gene_ce_loss = tf.reduce_mean(cross_entropy, name='gene_ce_loss')
    # I.e. does the result look like the feature?
    K = int(gene_output.get_shape()[1]) // int(features.get_shape()[1])
    assert K == 2 or K == 4 or K == 8
    downscaled = _downscale(gene_output, K)
    gene_l1_loss = tf.reduce_mean(tf.abs(downscaled - features), name='gene_l1_loss')
    gene_loss = tf.add((1.0 - FLAGS.gene_l1_factor) * gene_ce_loss,
                       FLAGS.gene_l1_factor * gene_l1_loss, name='gene_loss')
    return gene_loss
def _gan_loss(self, logits_real, logits_fake, feature_real, feature_fake, use_features=False):
    discriminator_loss_real = self._cross_entropy_loss(logits_real, tf.ones_like(logits_real),
                                                       name="disc_real_loss")
    discriminator_loss_fake = self._cross_entropy_loss(logits_fake, tf.zeros_like(logits_fake),
                                                       name="disc_fake_loss")
    self.discriminator_loss = discriminator_loss_fake + discriminator_loss_real

    gen_loss_disc = self._cross_entropy_loss(logits_fake, tf.ones_like(logits_fake), name="gen_disc_loss")
    if use_features:
        gen_loss_features = tf.reduce_mean(tf.nn.l2_loss(feature_real - feature_fake)) / (self.crop_image_size ** 2)
    else:
        gen_loss_features = 0
    self.gen_loss = gen_loss_disc + 0.1 * gen_loss_features

    tf.scalar_summary("Discriminator_loss", self.discriminator_loss)
    tf.scalar_summary("Generator_loss", self.gen_loss)
def sequence_loss(self, y_pred, y_true):
    '''
    Loss function for the seq2seq RNN. Reshape predicted and true (label) tensors, generate dummy weights,
    then use seq2seq.sequence_loss to actually compute the loss function.
    '''
    if self.verbose > 2: print("my_sequence_loss y_pred=%s, y_true=%s" % (y_pred, y_true))
    logits = tf.unpack(y_pred, axis=1)   # list of [-1, num_decoder_symbols] elements
    targets = tf.unpack(y_true, axis=1)  # y_true has shape [-1, self.out_seq_len]; unpack to list of self.out_seq_len [-1] elements
    if self.verbose > 2:
        print("my_sequence_loss logits=%s" % (logits,))
        print("my_sequence_loss targets=%s" % (targets,))
    weights = [tf.ones_like(yp, dtype=tf.float32) for yp in targets]
    if self.verbose > 4: print("my_sequence_loss weights=%s" % (weights,))
    sl = seq2seq.sequence_loss(logits, targets, weights)
    if self.verbose > 2: print("my_sequence_loss return = %s" % sl)
    return sl
def _create(self, d_real, d_fake):
    ops = self.ops
    config = self.config
    gan = self.gan
    generator_target_probability = config.generator_target_probability or 0.8
    label_smooth = config.label_smooth or 0.2
    zeros = tf.zeros_like(d_fake)
    ones = tf.ones_like(d_fake)
    if config.improved:
        g_loss = self.sigmoid_kl_with_logits(d_fake, generator_target_probability)
        d_loss = self.sigmoid_kl_with_logits(d_real, 1. - label_smooth) + \
                 tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=zeros)
    else:
        g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=zeros)
        d_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real, labels=zeros) + \
                 tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=ones)
    return [d_loss, g_loss]
def __init__(self, num_layers, size_layer, dimension_input, len_noise, sequence_size, learning_rate):
    self.noise = tf.placeholder(tf.float32, [None, None, len_noise])
    self.fake_input = tf.placeholder(tf.float32, [None, None, dimension_input])
    self.true_sentence = tf.placeholder(tf.float32, [None, None, dimension_input])
    self.initial_layer = generator_encode(self.noise, num_layers, size_layer, len_noise)
    self.final_outputs = generator_sentence(self.fake_input, self.initial_layer,
                                            num_layers, size_layer, dimension_input)
    fake_logits = discriminator(self.final_outputs, num_layers, size_layer, dimension_input)
    true_logits = discriminator(self.true_sentence, num_layers, size_layer, dimension_input, reuse=True)
    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=true_logits, labels=tf.ones_like(true_logits)))
    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=fake_logits, labels=tf.zeros_like(fake_logits)))
    self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=fake_logits, labels=tf.ones_like(fake_logits)))
    self.d_loss = d_loss_real + d_loss_fake
    d_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='discriminator')
    g_vars = (tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='generator_encode')
              + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='generator_sentence'))
    self.d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(self.d_loss, var_list=d_vars)
    self.g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(self.g_loss, var_list=g_vars)
def _sample(self, n_samples):
    mean, cov_tril = self.mean, self.cov_tril
    if not self.is_reparameterized:
        mean = tf.stop_gradient(mean)
        cov_tril = tf.stop_gradient(cov_tril)

    def tile(t):
        new_shape = tf.concat([[n_samples], tf.ones_like(tf.shape(t))], 0)
        return tf.tile(tf.expand_dims(t, 0), new_shape)

    batch_mean = tile(mean)
    batch_cov = tile(cov_tril)
    # n_dim -> n_dim x 1 for matmul
    batch_mean = tf.expand_dims(batch_mean, -1)
    noise = tf.random_normal(tf.shape(batch_mean), dtype=self.dtype)
    samples = tf.matmul(batch_cov, noise) + batch_mean
    samples = tf.squeeze(samples, -1)
    # Update static shape
    static_n_samples = n_samples if isinstance(n_samples, int) else None
    samples.set_shape(tf.TensorShape([static_n_samples])
                      .concatenate(self.get_batch_shape())
                      .concatenate(self.get_value_shape()))
    return samples
def density_map(tensor, shape):
    """
    Build a histogram-based density map: each output position holds the bin
    count of its input value, passed through the module's normalize() helper.
    """
    height, width, channels = shape
    bins = max(height, width)
    # values = value_map(tensor, shape, keep_dims=True)
    # values = tf.minimum(tf.maximum(tensor, 0.0), 1.0)  # TODO: Get this to work with HDR data
    values = tensor
    # https://stackoverflow.com/a/34143927
    binned_values = tf.cast(tf.reshape(values * (bins - 1), [-1]), tf.int32)
    ones = tf.ones_like(binned_values, dtype=tf.int32)
    counts = tf.unsorted_segment_sum(ones, binned_values, bins)
    out = tf.gather(counts, tf.cast(values[:, :] * (bins - 1), tf.int32))
    return tf.ones(shape) * normalize(tf.cast(out, tf.float32))
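The unsorted_segment_sum call above is just a histogram; restated in NumPy (illustrative sketch only):

import numpy as np

values = np.array([0.0, 0.1, 0.1, 0.9])
bins = 4
binned = (values * (bins - 1)).astype(np.int32)  # [0, 0, 0, 2]
counts = np.bincount(binned, minlength=bins)     # [3, 0, 1, 0]
per_value_density = counts[binned]               # [3, 3, 3, 1], then normalized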
def dice_accuracy(decoded_predictions, annotations, class_nums):
    DiceRatio = tf.constant(0, tf.float32)
    misclassnum = tf.constant(0, tf.float32)
    class_num = tf.constant(class_nums, tf.float32)
    sublist = []
    for index in range(1, class_nums - 2):
        current_annotation = tf.cast(tf.equal(tf.ones_like(annotations) * index,
                                              annotations), tf.float32)
        current_prediction = tf.cast(tf.equal(tf.ones_like(decoded_predictions) * index,
                                              decoded_predictions), tf.float32)
        Overlap = tf.add(current_annotation, current_prediction)
        Common = tf.reduce_sum(tf.cast(tf.equal(tf.ones_like(Overlap) * 2, Overlap),
                                       tf.float32), [0, 1, 2, 3])
        annotation_num = tf.reduce_sum(current_annotation, [0, 1, 2, 3])
        predict_num = tf.reduce_sum(current_prediction, [0, 1, 2, 3])
        all_num = tf.add(annotation_num, predict_num)
        Sub_DiceRatio = Common * 2 / tf.clip_by_value(all_num, 1e-10, 1e+10)
        misclassnum = tf.cond(tf.equal(Sub_DiceRatio, 0.0), lambda: misclassnum + 1, lambda: misclassnum)
        sublist.append(Sub_DiceRatio)
        DiceRatio = DiceRatio + Sub_DiceRatio
    DiceRatio = DiceRatio / tf.clip_by_value(tf.cast((class_num - misclassnum - 3), tf.float32), 1e-10, 1e+1000)
    return DiceRatio, sublist
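Each Sub_DiceRatio above is the Dice coefficient 2*|A ∩ B| / (|A| + |B|) for one class; a quick NumPy check (sketch, not from the original repo):

import numpy as np

annotation = np.array([0, 1, 1, 1, 0], dtype=np.float32)
prediction = np.array([0, 1, 1, 0, 0], dtype=np.float32)
common = np.sum((annotation + prediction) == 2)              # 2 pixels in both masks
dice = 2.0 * common / (annotation.sum() + prediction.sum())  # 2*2 / (3 + 2) = 0.8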
def sequence_loss(self, y_pred, y_true):
    '''
    Loss function for the seq2seq RNN. Reshape predicted and true (label) tensors, generate dummy weights,
    then use seq2seq.sequence_loss to actually compute the loss function.
    '''
    if self.verbose > 2: print("my_sequence_loss y_pred=%s, y_true=%s" % (y_pred, y_true))
    logits = tf.unstack(y_pred, axis=1)   # list of [-1, num_decoder_symbols] elements
    targets = tf.unstack(y_true, axis=1)  # y_true has shape [-1, self.out_seq_len]; unpack to list of self.out_seq_len [-1] elements
    if self.verbose > 2:
        print("my_sequence_loss logits=%s" % (logits,))
        print("my_sequence_loss targets=%s" % (targets,))
    weights = [tf.ones_like(yp, dtype=tf.float32) for yp in targets]
    if self.verbose > 4: print("my_sequence_loss weights=%s" % (weights,))
    sl = seq2seq.sequence_loss(logits, targets, weights)
    if self.verbose > 2: print("my_sequence_loss return = %s" % sl)
    return sl
def size(x, reduce_instance_dims=True, name=None):
    """Computes the total size of instances in a `Tensor` over the whole dataset.

    Args:
      x: A `Tensor`.
      reduce_instance_dims: By default collapses the batch and instance dimensions
        to arrive at a single scalar output. If False, only collapses the batch
        dimension and outputs a vector of the same shape as the input.
      name: (Optional) A name for this operation.

    Returns:
      A `Tensor`. Has the same type as `x`.
    """
    with tf.name_scope(name, 'size'):
        # Note: Calling `sum` defined in this module, not the builtin.
        return sum(tf.ones_like(x), reduce_instance_dims)
def segment_indices(segment_ids, name=None):
    """Returns a `Tensor` of indices within each segment.

    segment_ids should be a sequence of non-decreasing non-negative integers that
    define a set of segments, e.g. [0, 0, 1, 2, 2, 2] defines 3 segments of length
    2, 1 and 3. The return value is a `Tensor` containing the indices within each
    segment.

    Example input: [0, 0, 1, 2, 2, 2]
    Example output: [0, 1, 0, 0, 1, 2]

    Args:
      segment_ids: A 1-d `Tensor` containing a non-decreasing sequence of
        non-negative integers with type `tf.int32` or `tf.int64`.
      name: (Optional) A name for this operation.

    Returns:
      A `Tensor` containing the indices within each segment.
    """
    with tf.name_scope(name, 'segment_indices'):
        segment_lengths = tf.segment_sum(tf.ones_like(segment_ids), segment_ids)
        segment_starts = tf.gather(tf.concat([[0], tf.cumsum(segment_lengths)], 0),
                                   segment_ids)
        return (tf.range(tf.size(segment_ids, out_type=segment_ids.dtype)) -
                segment_starts)
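NumPy restatement of the docstring example (a sketch for intuition):

import numpy as np

segment_ids = np.array([0, 0, 1, 2, 2, 2])
lengths = np.bincount(segment_ids)                           # [2, 1, 3]
starts = np.concatenate([[0], np.cumsum(lengths)])[:-1]      # [0, 2, 3]
indices = np.arange(segment_ids.size) - starts[segment_ids]  # [0, 1, 0, 0, 1, 2]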
def unpool(pool, ind, ksize=[1, 2, 2, 1], scope='unpool'):
    with tf.variable_scope(scope):
        input_shape = pool.get_shape().as_list()
        output_shape = (input_shape[0], input_shape[1] * ksize[1],
                        input_shape[2] * ksize[2], input_shape[3])
        flat_input_size = np.prod(input_shape)
        flat_output_shape = [output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]]
        pool_ = tf.reshape(pool, [flat_input_size])
        batch_range = tf.reshape(tf.range(output_shape[0], dtype=ind.dtype),
                                 shape=[input_shape[0], 1, 1, 1])
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, [flat_input_size, 1])
        ind_ = tf.reshape(ind, [flat_input_size, 1])
        ind_ = tf.concat([b, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=flat_output_shape)
        ret = tf.reshape(ret, output_shape)
        return ret
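Typical pairing for the unpool above (an illustrative sketch, not from the original repo): tf.nn.max_pool_with_argmax returns both the pooled values and the flattened argmax indices that unpool() scatters back into the upsampled map.

features = tf.placeholder(tf.float32, shape=[4, 8, 8, 16])
pooled, argmax = tf.nn.max_pool_with_argmax(
    features, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
upsampled = unpool(pooled, argmax, ksize=[1, 2, 2, 1])  # back to [4, 8, 8, 16]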
def unpool(pool, ind, shape, ksize=[1, 2, 2, 1], scope=None):
    with tf.name_scope(scope):
        input_shape = tf.shape(pool)
        output_shape = [input_shape[0], input_shape[1] * ksize[1],
                        input_shape[2] * ksize[2], input_shape[3]]
        flat_input_size = tf.cumprod(input_shape)[-1]
        flat_output_shape = tf.stack([output_shape[0],
                                      output_shape[1] * output_shape[2] * output_shape[3]])
        pool_ = tf.reshape(pool, tf.stack([flat_input_size]))
        batch_range = tf.reshape(tf.range(tf.cast(output_shape[0], tf.int64), dtype=ind.dtype),
                                 shape=tf.stack([input_shape[0], 1, 1, 1]))
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, tf.stack([flat_input_size, 1]))
        ind_ = tf.reshape(ind, tf.stack([flat_input_size, 1]))
        ind_ = tf.concat([b, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=tf.cast(flat_output_shape, tf.int64))
        ret = tf.reshape(ret, tf.stack(output_shape))
        ret = tf.reshape(ret, shape=shape)
        return ret
def __call__(self, s_embed, s_src_pwr, s_mix_pwr, s_embed_flat=None):
    if s_embed_flat is None:
        s_embed_flat = tf.reshape(
            s_embed,
            [hparams.BATCH_SIZE, -1, hparams.EMBED_SIZE])
    with tf.variable_scope(self.name):
        s_src_assignment = tf.argmax(s_src_pwr, axis=1)
        s_indices = tf.reshape(
            s_src_assignment,
            [hparams.BATCH_SIZE, -1])
        fn_segmean = lambda _: tf.unsorted_segment_sum(
            _[0], _[1], hparams.MAX_N_SIGNAL)
        s_attractors = tf.map_fn(
            fn_segmean, (s_embed_flat, s_indices), hparams.FLOATX)
        s_attractors_wgt = tf.map_fn(
            fn_segmean, (tf.ones_like(s_embed_flat), s_indices),
            hparams.FLOATX)
        s_attractors /= (s_attractors_wgt + 1.)
        if hparams.DEBUG:
            self.debug_fetches = dict()
        # float[B, C, E]
        return s_attractors
Source: tensorflow_backend.py, from the deep-learning-keras-projects project (author: jasmeetsb).
def ones_like(x, dtype=None, name=None):
    """Instantiates an all-ones Keras variable
    of the same shape as another Keras variable or tensor and returns it.

    # Arguments
        x: Keras variable or tensor.
        dtype: String, dtype of returned Keras variable.
             None uses the dtype of x.

    # Returns
        A Keras variable with the shape of x filled with ones.

    # Example
    ```python
        >>> from keras import backend as K
        >>> kvar = K.variable(np.random.random((2,3)))
        >>> kvar_ones = K.ones_like(kvar)
        >>> K.eval(kvar_ones)
        array([[ 1.,  1.,  1.],
               [ 1.,  1.,  1.]], dtype=float32)
    ```
    """
    return tf.ones_like(x, dtype=dtype, name=name)
def ptb_producer(raw_data, batch_size, num_steps, name=None):
    with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
        raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)
        data_len = tf.size(raw_data)
        batch_len = data_len // batch_size
        data = tf.reshape(raw_data[0: batch_size * batch_len],
                          [batch_size, batch_len])
        epoch_size = (batch_len - 1) // num_steps
        epoch_size = tf.identity(epoch_size, name="epoch_size")
        i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
        x = tf.strided_slice(data, [0, i * num_steps],
                             [batch_size, (i + 1) * num_steps],
                             # tf.ones_like([0, i * num_steps]))
                             [1, 1])
        x.set_shape([batch_size, num_steps])
        y = tf.strided_slice(data, [0, i * num_steps + 1],
                             [batch_size, (i + 1) * num_steps + 1],
                             # tf.ones_like([0, i * num_steps]))
                             [1, 1])
        y.set_shape([batch_size, num_steps])
        return x, y
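Toy illustration of the slicing above (a NumPy sketch): y is x shifted one token to the right.

import numpy as np

raw = np.arange(10)
batch_size, num_steps = 2, 2
batch_len = len(raw) // batch_size
data = raw[:batch_size * batch_len].reshape(batch_size, batch_len)  # [[0..4], [5..9]]
i = 0  # first producer step
x = data[:, i * num_steps:(i + 1) * num_steps]           # [[0, 1], [5, 6]]
y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]   # [[1, 2], [6, 7]]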
def build_inputs(self):
    if self.mode == "inference":
        # Inference mode doesn't read from disk, so defer to parent.
        return super(ShowAndTellModel, self).build_inputs()
    else:
        # Replace disk I/O with random Tensors.
        self.images = tf.random_uniform(
            shape=[self.config.batch_size, self.config.image_height,
                   self.config.image_width, 3],
            minval=-1,
            maxval=1)
        self.input_seqs = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.target_seqs = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.input_mask = tf.ones_like(self.input_seqs)
def spatial_transformer(U, theta, out_height, out_width):
    num_batch = tf.shape(U)[0]
    height, width, num_channels = U.get_shape()[1:]
    x_t, y_t = meshgrid(out_height, out_width)
    x_t = tf.expand_dims(x_t, 0)
    y_t = tf.expand_dims(y_t, 0)
    if theta.get_shape()[1] == 3:
        s, t_x, t_y = tf.split(1, 3, theta)
        x_s = tf.reshape(s * tf.tile(x_t, [num_batch, 1]) + t_x, [-1])
        y_s = tf.reshape(s * tf.tile(y_t, [num_batch, 1]) + t_y, [-1])
    else:
        grid = tf.expand_dims(tf.concat(0, [x_t, y_t, tf.ones_like(x_t)]), 0)
        grid = tf.tile(grid, [num_batch, 1, 1])
        grid_t = tf.batch_matmul(tf.reshape(theta, [-1, 2, 3]), grid)
        x_s = tf.reshape(tf.slice(grid_t, [0, 0, 0], [-1, 1, -1]), [-1])
        y_s = tf.reshape(tf.slice(grid_t, [0, 1, 0], [-1, 1, -1]), [-1])
    return transform(U, x_s, y_s, num_batch, out_height, out_width, num_channels)
def _make_actiondist_ops(self, obs_B_Df):
    with tf.variable_scope('flat'):
        flat = nn.FlattenLayer(obs_B_Df)
    with tf.variable_scope('hidden'):
        net = nn.FeedforwardNet(flat.output, flat.output_shape, self.hidden_spec)
    with tf.variable_scope('out'):
        mean_layer = nn.AffineLayer(net.output, net.output_shape, self.action_space.shape,
                                    Winitializer=tf.zeros_initializer, binitializer=None)
        means_B_Da = mean_layer.output

    # logstdev params
    logstdevs_1_Da = tf.get_variable('logstdevs_1_Da', shape=(1, self.action_space.shape[0]),
                                     initializer=tf.constant_initializer(self.init_logstdev))
    stdevs_1_Da = self.min_stdev + tf.exp(logstdevs_1_Da)  # Required for stability of kl computations
    stdevs_B_Da = tf.ones_like(means_B_Da) * stdevs_1_Da

    actiondist_B_Pa = tf.concat(1, [means_B_Da, stdevs_B_Da])
    return actiondist_B_Pa
def test_discriminator_loss_with_placeholder_for_logits(self):
    logits = tf.placeholder(tf.float32, shape=(None, 4))
    logits2 = tf.placeholder(tf.float32, shape=(None, 4))
    real_weights = tf.ones_like(logits, dtype=tf.float32)
    generated_weights = tf.ones_like(logits, dtype=tf.float32)

    loss = self._d_loss_fn(
        logits, logits2, real_weights=real_weights,
        generated_weights=generated_weights)

    with self.test_session() as sess:
        loss = sess.run(loss,
                        feed_dict={
                            logits: [self._discriminator_real_outputs_np],
                            logits2: [self._discriminator_gen_outputs_np],
                        })
        self.assertAlmostEqual(self._expected_d_loss, loss, 5)
def sigmoid_kl_with_logits(logits, targets):
    """Sigmoid cross entropy with smooth labels.

    Args:
        logits: logits
        targets: smooth targets

    Returns:
        cross entropy loss
    """
    assert isinstance(targets, float)
    if targets in [0., 1.]:
        entropy = 0.
    else:
        entropy = - targets * np.log(targets) - \
                  (1. - targets) * np.log(1. - targets)
    return tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(logits) * targets, logits=logits) - entropy
def _sigmoid_kl_with_logits(self, logits, targets):
    """Sigmoid cross entropy with smooth labels.

    Args:
        logits: logits
        targets: smooth targets

    Returns:
        cross entropy loss
    """
    assert isinstance(targets, float)
    if targets in [0., 1.]:
        entropy = 0.
    else:
        entropy = - targets * \
                  np.log(targets) - (1. - targets) * np.log(1. - targets)
    return tf.nn.sigmoid_cross_entropy_with_logits(
        logits=logits, labels=tf.ones_like(logits) * targets) - entropy
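Both versions rely on the identity that soft-label sigmoid cross entropy equals KL(Bernoulli(targets) || Bernoulli(sigmoid(logits))) plus the entropy of the target, so subtracting the entropy leaves the KL term. A quick NumPy check of that identity (a sketch, not from either repo):

import numpy as np

t, logit = 0.8, 1.5
p = 1.0 / (1.0 + np.exp(-logit))                     # sigmoid(logit)
ce = -t * np.log(p) - (1 - t) * np.log(1 - p)        # soft-label cross entropy
h = -t * np.log(t) - (1 - t) * np.log(1 - t)         # entropy of the target
kl = t * np.log(t / p) + (1 - t) * np.log((1 - t) / (1 - p))
assert np.isclose(ce - h, kl)                        # what the functions return, per element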