def create_torch_variable(self, value, gpu=False):
    """Convenience method that produces a torch Variable for a value of the port's defined type.

    Returns: a torch.autograd.Variable wrapping a tensor of the matching torch dtype.
    """
    if isinstance(value, torch.autograd.Variable):
        if gpu:
            value = value.cuda()
        return value
    if not torch.is_tensor(value):
        # Coerce plain Python values (lists, scalars) to a numpy array of the port's dtype.
        if not isinstance(value, np.ndarray):
            value = np.array(value, dtype=self.dtype.as_numpy_dtype)
        else:
            value = value.astype(self.dtype.as_numpy_dtype)
        if value.size == 0:
            return value
        # Only dtypes supported by torch.from_numpy are wrapped directly.
        allowed = [tf.int16, tf.int32, tf.int64, tf.float16, tf.float32, tf.float64, tf.int8]
        if self.dtype in allowed:
            value = torch.autograd.Variable(torch.from_numpy(value))
    else:
        # `value` is already a torch tensor; just wrap it.
        value = torch.autograd.Variable(value)
    if gpu and isinstance(value, torch.autograd.Variable):
        value = value.cuda()
    return value
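A minimal usage sketch, assuming `port` is an object exposing this method together with a TensorFlow `dtype` attribute such as tf.int32 (the name `port` and the dtype are illustrative, not taken from the snippet above):

# Hypothetical: `port.dtype` is tf.int32, so the list is coerced to an int32 numpy array.
indices = port.create_torch_variable([[1, 2], [3, 4]], gpu=False)
print(type(indices))  # torch.autograd.Variable
print(indices.data)   # 2x2 torch.IntTensor produced via torch.from_numpy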
def _convert_string_dtype(dtype):
    """Map a string dtype name to the corresponding TensorFlow dtype."""
    if dtype == 'float16':
        return tf.float16
    elif dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int8':
        return tf.int8
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
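A quick sanity check of the mapping, assuming TensorFlow 1.x is imported as `tf`:

assert _convert_string_dtype('float32') == tf.float32
assert _convert_string_dtype('uint8') == tf.uint8
try:
    _convert_string_dtype('complex64')  # not covered by the chain above
except ValueError as e:
    print(e)  # ('Unsupported dtype:', 'complex64')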
def mu_law(x, mu=255, int8=False):
"""A TF implementation of Mu-Law encoding.
Args:
x: The audio samples to encode.
mu: The Mu to use in our Mu-Law.
int8: Use int8 encoding.
Returns:
out: The Mu-Law encoded int8 data.
"""
out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)
out = tf.floor(out * 128)
if int8:
out = tf.cast(out, tf.int8)
return out
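For completeness, a hedged sketch of the inverse transform, mirroring the encoder's divide-by-128 scaling; it assumes `tf` and `np` are imported as in the snippets above and is illustrative only, not the project's own decoder:

def inv_mu_law_sketch(x, mu=255):
    """Approximate inverse of `mu_law`: map encoded values back to [-1, 1)."""
    x = tf.cast(x, tf.float32) / 128.0
    # Invert sign(x) * log(1 + mu * |x|) / log(1 + mu).
    out = tf.sign(x) * (tf.exp(tf.abs(x) * np.log(1.0 + mu)) - 1.0) / mu
    return out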
def unwrap_output_sparse(self, final_state, include_stop_tokens=True):
"""
Retreive the beam search output from the final state.
Returns a sparse tensor with underlying dimensions of [batch_size, max_len]
"""
output_dense = final_state[0]
mask = tf.not_equal(output_dense, self.stop_token)
if include_stop_tokens:
output_dense = tf.concat(1, [output_dense[:, 1:],
tf.ones_like(output_dense[:, 0:1]) *
self.stop_token])
mask = tf.concat(1, [mask[:, 1:], tf.cast(tf.ones_like(mask[:, 0:1],
dtype=tf.int8),
tf.bool)])
return sparse_boolean_mask(output_dense, mask)
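The helper `sparse_boolean_mask` is defined elsewhere in that project; the function below is one plausible, purely illustrative way to build a SparseTensor from a dense tensor and a boolean mask (the project's own helper may differ, e.g. by left-aligning rows):

def sparse_boolean_mask_sketch(dense, mask):
    """Keep only the entries of `dense` where `mask` is True, as a SparseTensor."""
    indices = tf.where(mask)                    # int64 coordinates of True entries
    values = tf.gather_nd(dense, indices)       # the corresponding values
    shape = tf.cast(tf.shape(dense), tf.int64)  # dense shape of the result
    return tf.SparseTensor(indices, values, shape)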
def test_embedding_int8(self):
weights = np.array([[1, 2], [3, 4]], dtype='float32')
embedding = tdl.Embedding(2, 2, initializer=weights)
with self.test_session() as sess:
embeddings = [embedding(tf.constant([x], dtype=tf.int8))
for x in [0, 1, 7, -5]]
sess.run(tf.global_variables_initializer())
self.assertAllEqual([[[1, 2]], [[3, 4]], [[3, 4]], [[3, 4]]],
sess.run(embeddings))
def mu_law_encode(audio, quantization_channels=256):
"""Quantizes waveform amplitudes."""
with tf.name_scope('encode'):
mu = quantization_channels - 1
out = tf.sign(audio) * tf.log(1 + mu * tf.abs(audio)) / np.log(1 + mu)
out = tf.cast(tf.floor(out * 128), tf.int8)
return out
# tensorflow/magenta/blob/master/magenta/models/nsynth/utils.py#L79
def read(filename_queue):
class CAM17Record(object):
pass
result = CAM17Record()
result.height = IMAGE_SIZE
result.width = IMAGE_SIZE
result.depth = CHANNELS
reader = tf.TFRecordReader()
result.key, value = reader.read(filename_queue)
feature_map = {
'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
default_value=-1)
}
features = tf.parse_single_example(value, feature_map)
result.label = tf.cast(features['image/class/label'], dtype=tf.int8)
image_buffer = features['image/encoded']
image = tf.image.decode_jpeg(image_buffer, channels=CHANNELS)
depth_major = tf.reshape(image,
[result.width, result.height, result.depth])
result.uint8image = tf.transpose(depth_major, [1, 0, 2])
return result
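A hedged usage sketch for wiring `read` into a TF 1.x queue-based input pipeline; the TFRecord filename and the batch parameters below are placeholders, not values from the project:

# Placeholder file list; in practice this comes from the dataset on disk.
filename_queue = tf.train.string_input_producer(['train-00000-of-00001.tfrecord'])
record = read(filename_queue)
images, labels = tf.train.shuffle_batch(
    [tf.cast(record.uint8image, tf.float32), record.label],
    batch_size=32, capacity=2000, min_after_dequeue=1000)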
def test_input_int8(self):
self._assert_dtype(
np.int8, tf.int8, np.matrix([[1, 2], [3, 4]], dtype=np.int8))
def runSum():
    a = tf.constant(12, dtype=tf.int8)
    b = tf.constant(10, dtype=tf.int8)
    sv = tf.train.Supervisor(logdir="./test1")
    with sv.managed_session() as sess:
        for i in range(10):
            if sv.should_stop():
                return
            print(sess.run([a, b]))
def testDNN():
# copied from quick start sample code https://www.tensorflow.org/get_started/tflearn
# Load datasets.
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename='training.csv',
target_dtype=np.int8,
features_dtype=np.int8)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename='testset.csv',
target_dtype=np.int8,
features_dtype=np.int8)
feature_columns = [tf.contrib.layers.real_valued_column("", dtype=tf.int8, dimension=1000)]
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10],
n_classes=2,
model_dir="/tmp/spammodel3")
# Define the training inputs
def get_train_inputs():
x = tf.constant(training_set.data)
y = tf.constant(training_set.target)
return x, y
# Fit model.
classifier.fit(input_fn=get_train_inputs, steps=2000)
# Define the test inputs
def get_test_inputs():
x = tf.constant(test_set.data)
y = tf.constant(test_set.target)
return x, y
# Evaluate accuracy.
score = classifier.evaluate(input_fn=get_test_inputs,
steps=1)
print("\nTest Accuracy: {0:f}\n".format(score["accuracy"]))
for key in score:
print(key, score[key])
# Test Accuracy: 0.981333
# accuracy/baseline_label_mean 0.0233333
# loss 0.0698425
# auc 0.892803
# global_step 4000
# accuracy/threshold_0.500000_mean 0.981333
# recall/positive_threshold_0.500000_mean 0.257143
# labels/prediction_mean 0.0196873
# accuracy 0.981333
# precision/positive_threshold_0.500000_mean 0.818182
# labels/actual_label_mean 0.0233333