def test_sequence_feature_not_supported(self):
feature_spec = {
# FixedLenSequenceFeatures
'fixed_seq_bool':
tf.FixedLenSequenceFeature(shape=[10], dtype=tf.bool),
'fixed_seq_bool_allow_missing':
tf.FixedLenSequenceFeature(
shape=[5], dtype=tf.bool, allow_missing=True),
'fixed_seq_int':
tf.FixedLenSequenceFeature(shape=[5], dtype=tf.int64),
'fixed_seq_float':
tf.FixedLenSequenceFeature(shape=[5], dtype=tf.float32),
'fixed_seq_string':
tf.FixedLenSequenceFeature(shape=[5], dtype=tf.string),
}
with self.assertRaisesRegexp(ValueError,
'DatasetSchema does not support '
'FixedLenSequenceFeature yet.'):
sch.from_feature_spec(feature_spec)
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
  Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
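The total-loss pattern above only yields a regularized objective if the weight-decay terms are registered in the same 'losses' collection when the variables are created. A minimal sketch of that convention, in the style of the CIFAR-10 tutorial (the helper name and hyperparameters are illustrative, not taken from the original model):

def _variable_with_weight_decay(name, shape, stddev, wd):
    # Create a weight variable and, if wd is given, register its L2 penalty in
    # the same 'losses' collection that loss() later sums with tf.add_n.
    var = tf.get_variable(
        name, shape,
        initializer=tf.truncated_normal_initializer(stddev=stddev))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var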
def read_whole_features(file_pattern, num_epochs=1):
    '''
    Return a `dict` of tensors keyed by `sp`, `ap`, `f0`, `en`, `speaker`,
    and `filename`.
    '''
files = tf.gfile.Glob(file_pattern)
print('{} files found'.format(len(files)))
filename_queue = tf.train.string_input_producer(files, num_epochs=num_epochs)
reader = tf.WholeFileReader()
key, value = reader.read(filename_queue)
print("Processing {}".format(key), flush=True)
value = tf.decode_raw(value, tf.float32)
value = tf.reshape(value, [-1, FEAT_DIM])
return {
'sp': value[:, :SP_DIM],
'ap': value[:, SP_DIM : 2*SP_DIM],
'f0': value[:, SP_DIM * 2],
'en': value[:, SP_DIM * 2 + 1],
'speaker': tf.cast(value[:, SP_DIM * 2 + 2], tf.int64),
'filename': key,
}
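read_whole_features builds on the queue-based input pipeline (tf.train.string_input_producer plus tf.WholeFileReader), so the returned tensors only produce data once queue runners are started inside a session. A hedged usage sketch; the file pattern is a placeholder:

features = read_whole_features('./dataset/*.bin', num_epochs=1)
with tf.Session() as sess:
    # string_input_producer with num_epochs creates a local variable.
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            out = sess.run(features)
            print(out['filename'], out['sp'].shape)
    except tf.errors.OutOfRangeError:
        pass
    finally:
        coord.request_stop()
        coord.join(threads)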
def mnist_batcher_in_tanh_vector(
batch_size,
capacity=256,
min_after_dequeue=128,
):
(x, y), (_, _) = keras.datasets.mnist.load_data()
x = tf.constant(x)
x = tf.cast(x, tf.float32)
x = keras.layers.Flatten()(x) / 127.5 - 1.
y = tf.cast(y, tf.int64)
return tf.train.shuffle_batch(
[x, y],
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
enqueue_many=True
)
def _validate(self, machine, n=10):
N = n * n
# same row same z
z = tf.random_normal(shape=[n, self.arch['z_dim']])
z = tf.tile(z, [1, n])
z = tf.reshape(z, [N, -1])
z = tf.Variable(z, trainable=False, dtype=tf.float32)
# same column same y
y = tf.range(0, 10, 1, dtype=tf.int64)
y = tf.reshape(y, [-1, 1])
y = tf.tile(y, [n, 1])
Xh = machine.generate(z, y) # 100, 64, 64, 3
# Xh = gray2jet(Xh)
# Xh = make_png_thumbnail(Xh, n)
Xh = make_png_jet_thumbnail(Xh, n)
return Xh
def _validate(self, machine, n=10):
N = n * n
z = np.random.normal(0., 1., size=[n, self.arch['z_dim']])
z = np.concatenate([z] * n, axis=1)
z = np.reshape(z, [N, -1]).astype(np.float32) # consecutive rows
y = np.asarray(
[[5, 0, 0 ],
[9, 0, 0 ],
[12, 0, 0 ],
[17, 0, 0 ],
[19, 0, 0 ],
[161, 0, 0 ],
[170, 0, 0 ],
[170, 16, 0 ],
[161, 9, 4 ],
[19, 24, 50]],
dtype=np.int64)
y = np.concatenate([y] * n, axis=0)
Z = tf.constant(z)
Y = tf.constant(y)
Xh = machine.generate(Z, Y) # 100, 64, 64, 3
Xh = make_png_thumbnail(Xh, n)
return Xh
def _validate(self, machine, n=10):
N = n * n
# same row same z
z = tf.random_normal(shape=[n, self.arch['z_dim']])
z = tf.tile(z, [1, n])
z = tf.reshape(z, [N, -1])
z = tf.Variable(z, trainable=False, dtype=tf.float32)
# same column same y
y = tf.range(0, 10, 1, dtype=tf.int64)
y = tf.reshape(y, [-1,])
y = tf.tile(y, [n,])
Xh = machine.generate(z, y) # 100, 64, 64, 3
Xh = make_png_thumbnail(Xh, n)
return Xh
def global_step(device=''):
"""Returns the global step variable.
Args:
device: Optional device to place the variable. It can be an string or a
function that is called to get the device for the variable.
Returns:
the tensor representing the global step variable.
"""
global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
if global_step_ref:
return global_step_ref[0]
else:
collections = [
VARIABLES_TO_RESTORE,
tf.GraphKeys.VARIABLES,
tf.GraphKeys.GLOBAL_STEP,
]
# Get the device for the variable.
with tf.device(variable_device(device, 'global_step')):
return tf.get_variable('global_step', shape=[], dtype=tf.int64,
initializer=tf.zeros_initializer,
trainable=False, collections=collections)
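The returned step is a scalar tf.int64 variable; it is normally advanced implicitly by handing it to Optimizer.minimize rather than by an explicit assign. A small sketch of that pattern (total_loss is assumed to be defined elsewhere):

step = global_step()
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
    total_loss, global_step=step)  # minimize() increments the step once per run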
def loss(logits, label_batch):
"""
Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits -> logits from inference()
label_batch -> 1D tensor of [batch_size]
Rtns:
total_loss -> float tensor
"""
# Calculate the average cross entropy loss across the batch.
label_batch = tf.cast(label_batch,tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=label_batch, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses',cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def create_optimizers(gene_loss, gene_var_list,
disc_loss, disc_var_list):
    # Note: global_step does not need to be incremented manually; each
    # minimize() call below advances it automatically because it is passed
    # via the `global_step` argument.
global_step = tf.Variable(0, dtype=tf.int64, trainable=False, name='global_step')
learning_rate = tf.placeholder(dtype=tf.float32, name='learning_rate')
gene_opti = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=FLAGS.learning_beta1,
name='gene_optimizer')
disc_opti = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=FLAGS.learning_beta1,
name='disc_optimizer')
gene_minimize = gene_opti.minimize(gene_loss, var_list=gene_var_list, name='gene_loss_minimize', global_step=global_step)
disc_minimize = disc_opti.minimize(disc_loss, var_list=disc_var_list, name='disc_loss_minimize', global_step=global_step)
return (global_step, learning_rate, gene_minimize, disc_minimize)
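A hedged sketch of driving the returned handles in a training loop; gene_loss, gene_vars, disc_loss, disc_vars and num_batches are assumed to exist, and each minimize() call advances the shared global_step by one because it was passed as global_step above:

step, lr, gene_minimize, disc_minimize = create_optimizers(
    gene_loss, gene_vars, disc_loss, disc_vars)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(num_batches):
        feed = {lr: 1e-4}
        sess.run(disc_minimize, feed_dict=feed)  # update discriminator
        sess.run(gene_minimize, feed_dict=feed)  # update generator
    print('global step:', sess.run(step))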
def make_input_schema(mode=tf.contrib.learn.ModeKeys.TRAIN):
"""Input schema definition.
Args:
mode: tf.contrib.learn.ModeKeys specifying if the schema is being used for
train/eval or prediction.
Returns:
A `Schema` object.
"""
result = ({} if mode == tf.contrib.learn.ModeKeys.INFER
else {'clicked': tf.FixedLenFeature(shape=[], dtype=tf.int64)})
for name in INTEGER_COLUMN_NAMES:
result[name] = tf.FixedLenFeature(
shape=[], dtype=tf.int64, default_value=-1)
for name in CATEGORICAL_COLUMN_NAMES:
result[name] = tf.FixedLenFeature(shape=[], dtype=tf.string,
default_value='')
return dataset_schema.from_feature_spec(result)
def example_serving_input_fn(default_batch_size=None):
"""Build the serving inputs.
Args:
default_batch_size (int): Batch size for the tf.placeholder shape
"""
feature_spec = {}
for feat in CONTINUOUS_COLS:
feature_spec[feat] = tf.FixedLenFeature(shape=[], dtype=tf.int64)
for feat, _ in CATEGORICAL_COLS:
feature_spec[feat] = tf.FixedLenFeature(shape=[], dtype=tf.string)
example_bytestring = tf.placeholder(
shape=[default_batch_size],
dtype=tf.string,
)
features = tf.parse_example(example_bytestring, feature_spec)
return features, {'example': example_bytestring}
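The serving input function expects serialized tf.train.Example protos on the placeholder it creates. A hedged sketch of building one matching record ('age' and 'occupation' are placeholders for entries of CONTINUOUS_COLS and CATEGORICAL_COLS):

example = tf.train.Example(features=tf.train.Features(feature={
    'age': tf.train.Feature(int64_list=tf.train.Int64List(value=[42])),
    'occupation': tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[b'engineer'])),
}))
serialized = example.SerializeToString()
# `serialized` can then be fed, inside a batch list, to the 'example'
# placeholder returned by example_serving_input_fn().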
def make_input_schema(mode=tf.contrib.learn.ModeKeys.TRAIN):
"""Input schema definition.
Args:
mode: tf.contrib.learn.ModeKeys specifying if the schema is being used for
train/eval or prediction.
Returns:
A `Schema` object.
"""
result = ({} if mode == tf.contrib.learn.ModeKeys.INFER else {
'score': tf.FixedLenFeature(shape=[], dtype=tf.float32)
})
result.update({
'subreddit': tf.FixedLenFeature(shape=[], dtype=tf.string),
'author': tf.FixedLenFeature(shape=[], dtype=tf.string),
'comment_body': tf.FixedLenFeature(shape=[], dtype=tf.string,
default_value=''),
'comment_parent_body': tf.FixedLenFeature(shape=[], dtype=tf.string,
default_value=''),
'toplevel': tf.FixedLenFeature(shape=[], dtype=tf.int64),
})
return dataset_schema.from_feature_spec(result)
def _loss_shared(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def create_torch_variable(self, value, gpu=False):
"""Convenience method that produces a tensor given the value of the defined type.
Returns: a torch tensor of same type.
"""
if isinstance(value, torch.autograd.Variable):
if gpu:
value = value.cuda()
return value
if not torch.is_tensor(value):
if not isinstance(value, np.ndarray):
value = np.array(value, dtype=self.dtype.as_numpy_dtype)
else:
value = value.astype(self.dtype.as_numpy_dtype)
if value.size == 0:
return value
allowed = [tf.int16, tf.int32, tf.int64, tf.float16, tf.float32, tf.float64, tf.int8]
if self.dtype in allowed:
value = torch.autograd.Variable(torch.from_numpy(value))
else:
value = torch.autograd.Variable(value)
if gpu and isinstance(value, torch.autograd.Variable):
value = value.cuda()
return value
def read(self, shuffle=True, num_epochs=None):
with tf.name_scope('input'):
reader = tf.TFRecordReader()
filename_queue = tf.train.string_input_producer([self.filename], num_epochs=num_epochs)
_, serialized_input = reader.read(filename_queue)
inputs = tf.parse_single_example(serialized_input,
features={
'inputs_seq': tf.FixedLenFeature([self.seq_len * 2 + 3], tf.int64),
'output': tf.FixedLenFeature([1], tf.int64)
})
inputs_seq = inputs['inputs_seq']
output = inputs['output']
min_after_dequeue = 100
if shuffle:
inputs_seqs, outputs = tf.train.shuffle_batch([inputs_seq, output], batch_size=self.batch_size, num_threads=2, capacity=min_after_dequeue + 3 * self.batch_size, min_after_dequeue=min_after_dequeue)
else:
inputs_seqs, outputs = tf.train.batch([inputs_seq, output], batch_size=self.batch_size)
return inputs_seqs, outputs
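A hedged sketch of the writer side that would produce records matching the parse spec above; the file name is a placeholder and seq_len = 5 is assumed for the example call, so each 'inputs_seq' holds seq_len * 2 + 3 = 13 integers:

def write_example(writer, inputs_seq, output):
    # inputs_seq: list of ints of length seq_len * 2 + 3; output: a single int label.
    example = tf.train.Example(features=tf.train.Features(feature={
        'inputs_seq': tf.train.Feature(
            int64_list=tf.train.Int64List(value=inputs_seq)),
        'output': tf.train.Feature(
            int64_list=tf.train.Int64List(value=[output])),
    }))
    writer.write(example.SerializeToString())

with tf.python_io.TFRecordWriter('train.tfrecords') as writer:
    write_example(writer, list(range(13)), 1)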
def _convert_string_dtype(dtype):
if dtype == 'float16':
return tf.float16
    elif dtype == 'float32':
return tf.float32
elif dtype == 'float64':
return tf.float64
elif dtype == 'int16':
return tf.int16
elif dtype == 'int32':
return tf.int32
elif dtype == 'int64':
return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
elif dtype == 'uint16':
return tf.uint16
else:
raise ValueError('Unsupported dtype:', dtype)
def sparse_categorical_crossentropy(output, target, from_logits=False):
'''Categorical crossentropy between an output tensor
and a target tensor, where the target is an integer tensor.
'''
# Note: tf.nn.softmax_cross_entropy_with_logits
# expects logits, Keras expects probabilities.
if not from_logits:
epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)
output = tf.clip_by_value(output, epsilon, 1 - epsilon)
output = tf.log(output)
output_shape = output.get_shape()
    res = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=tf.reshape(output, [-1, int(output_shape[-1])]),
        labels=cast(flatten(target), 'int64'))
if len(output_shape) == 3:
# if our output includes timesteps we need to reshape
return tf.reshape(res, [-1, int(output_shape[-2])])
else:
return res
def in_top_k(predictions, targets, k):
'''Returns whether the `targets` are in the top `k` `predictions`
# Arguments
        predictions: A tensor of shape (batch_size, classes) and type float32.
targets: A tensor of shape batch_size and type int32 or int64.
k: An int, number of top elements to consider.
# Returns
A tensor of shape batch_size and type bool. output_i is True if
targets_i is within top-k values of predictions_i
'''
return tf.nn.in_top_k(predictions, targets, k)
# CONVOLUTIONS
def data_augmentation(img, gt_bboxes, gt_cats, seg, config):
params = config['train_augmentation']
img = apply_with_random_selector(
img,
lambda x, ordering: photometric_distortions(x, ordering, params),
num_cases=4)
if seg is not None:
img = tf.concat([img, tf.cast(seg, tf.float32)], axis=-1)
img, gt_bboxes, gt_cats = scale_distortions(img, gt_bboxes, gt_cats,
params)
img, gt_bboxes = mirror_distortions(img, gt_bboxes, params)
# XXX reference implementation also randomizes interpolation method
img_size = config['image_size']
img_out = tf.image.resize_images(img[..., :3], [img_size, img_size])
gt_bboxes, gt_cats = filter_small_gt(gt_bboxes, gt_cats, 2/config['image_size'])
if seg is not None:
seg_shape = config['fm_sizes'][0]
seg = tf.expand_dims(tf.expand_dims(img[..., 3], 0), -1)
seg = tf.squeeze(tf.image.resize_nearest_neighbor(seg, [seg_shape, seg_shape]))
seg = tf.cast(tf.round(seg), tf.int64)
return img_out, gt_bboxes, gt_cats, seg
def assert_same_float_and_int_dtype(tensors_with_name, dtype=None):
"""
    Check that the types of all tensors in `tensors_with_name` are the same
    and are of a floating (or integer) type.
:param tensors_with_name: A list of (tensor, tensor_name).
:param dtype: Expected type. If `None`, depend on the type of tensors.
:return: The type of `tensors`.
"""
available_types = [tf.float16, tf.float32, tf.float64,
tf.int16, tf.int32, tf.int64]
if dtype is None:
return assert_same_specific_dtype(tensors_with_name, available_types)
elif dtype in available_types:
return assert_same_dtype(tensors_with_name, dtype)
else:
raise TypeError("The argument 'dtype' must be in %s" % available_types)
def loss(self, logits, labels):
"""Add L2Loss to all the trainable variables.
Args:
logits: Logits from get().
labels: Labels from train_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
with tf.variable_scope('loss'):
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(
cross_entropy, name='cross_entropy')
tf.add_to_collection(LOSSES, cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')
return error
def cal_loss(self):
one_hot_labels = tf.one_hot(
self.labels, depth=self.conf.class_num,
axis=self.channel_axis, name='labels/one_hot')
losses = tf.losses.softmax_cross_entropy(
one_hot_labels, self.predictions, scope='loss/losses')
self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
self.decoded_preds = tf.argmax(
self.predictions, self.channel_axis, name='accuracy/decode_pred')
correct_prediction = tf.equal(
self.labels, self.decoded_preds,
name='accuracy/correct_pred')
self.accuracy_op = tf.reduce_mean(
tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
name='accuracy/accuracy_op')
# weights = tf.cast(
# tf.greater(self.decoded_preds, 0, name='m_iou/greater'),
# tf.int32, name='m_iou/weights')
        weights = tf.cast(
            tf.less(self.labels, self.conf.channel, name='m_iou/less'),
            tf.int64, name='m_iou/weights')
        labels = tf.multiply(self.labels, weights, name='m_iou/mul')
        self.m_iou, self.miou_op = tf.metrics.mean_iou(
            labels, self.decoded_preds, self.conf.class_num,
            weights, name='m_iou/m_ious')
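tf.metrics.mean_iou keeps its confusion-matrix totals in local variables, so reading self.m_iou is only meaningful after initializing local variables and running self.miou_op to accumulate counts. A hedged usage sketch; sess, model and num_eval_batches are assumed names:

sess.run(tf.group(tf.global_variables_initializer(),
                  tf.local_variables_initializer()))
for _ in range(num_eval_batches):
    sess.run(model.miou_op)        # accumulate confusion-matrix counts
print('mean IoU:', sess.run(model.m_iou))  # read the aggregated metric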