def adjust_hue(image, delta, name=None):
  with ops.op_scope([image], name, 'adjust_hue') as name:
    # Remember the original dtype so we can convert back if needed.
    orig_dtype = image.dtype
    flt_image = tf.image.convert_image_dtype(image, tf.float32)

    hsv = gen_image_ops.rgb_to_hsv(flt_image)

    hue = tf.slice(hsv, [0, 0, 0, 0], [-1, -1, -1, 1])
    saturation = tf.slice(hsv, [0, 0, 0, 1], [-1, -1, -1, 1])
    value = tf.slice(hsv, [0, 0, 0, 2], [-1, -1, -1, 1])

    # Hue is stored in [0, 1], so adding 1 (a full turn) before the mod
    # guarantees the result is a positive floating point number even though
    # delta is in [-0.5, 0.5].
    hue = math_ops.mod(hue + (delta + 1.), 1.)

    hsv_altered = tf.concat(3, [hue, saturation, value])
    rgb_altered = gen_image_ops.hsv_to_rgb(hsv_altered)

    return tf.image.convert_image_dtype(rgb_altered, orig_dtype)
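To make the wrap-around concrete, here is a minimal sketch of the same mod arithmetic using NumPy in place of the TensorFlow ops (the hue values and delta below are made up for illustration): since hue is stored in [0, 1], adding delta + 1. before taking mod 1 keeps the result in [0, 1) even when delta is negative.

import numpy as np

hue = np.array([0.05, 0.50, 0.95], dtype=np.float32)  # hue channel values in [0, 1]
delta = -0.1                                           # delta lies in [-0.5, 0.5]

# Adding 1 keeps the intermediate value positive, so the wrapped hue
# stays in [0, 1) instead of going negative.
wrapped = np.mod(hue + (delta + 1.0), 1.0)
print(wrapped)  # -> approximately [0.95 0.4 0.85]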
Python mod() usage examples (source code)
def _shard_indices(self, keys):
  if self._key_dtype == dtypes.string:
    indices = string_ops.string_to_hash_bucket_fast(keys, self._num_shards)
  else:
    indices = math_ops.mod(keys, self._num_shards)
  return math_ops.cast(indices, dtypes.int32)
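A rough sketch of what the integer branch computes, with NumPy standing in for the TensorFlow ops and made-up keys: each key lands on shard key % num_shards.

import numpy as np

keys = np.array([3, 17, 42, 8], dtype=np.int64)  # illustrative integer keys
num_shards = 4

# Mirrors math_ops.mod(keys, self._num_shards) followed by the int32 cast.
shard_ids = np.mod(keys, num_shards).astype(np.int32)
print(shard_ids)  # -> [3 1 2 0]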
def insert_transformed_feature(self, columns_to_tensors):
  """Handles sparse column to id conversion."""
  sparse_id_values = math_ops.mod(columns_to_tensors[self.name].values,
                                  self.bucket_size,
                                  name="mod")
  columns_to_tensors[self] = ops.SparseTensor(
      columns_to_tensors[self.name].indices, sparse_id_values,
      columns_to_tensors[self.name].shape)
def _shard_indices(self, keys):
  key_shape = keys.get_shape()
  if key_shape.ndims > 1:
    # If keys are a matrix (i.e. a single key is a vector), we use the first
    # element of each key vector to determine the shard.
    keys = array_ops.slice(keys, [0, 0], [key_shape[0].value, 1])
    keys = array_ops.reshape(keys, [-1])
  indices = math_ops.mod(math_ops.abs(keys), self._num_shards)
  return math_ops.cast(indices, dtypes.int32)
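For the matrix-of-keys case, a small NumPy sketch (with invented key vectors) of the slice / reshape / abs / mod chain above:

import numpy as np

keys = np.array([[-7, 10], [4, 2], [15, 3]], dtype=np.int64)  # each row is one key vector
num_shards = 4

# Only the first element of each key vector determines the shard; abs()
# guarantees a non-negative index regardless of the sign of the key.
first_elements = keys[:, 0]
shard_ids = np.mod(np.abs(first_elements), num_shards).astype(np.int32)
print(shard_ids)  # -> [3 0 3]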
def insert_transformed_feature(self, columns_to_tensors):
  """Handles sparse column to id conversion."""
  input_tensor = self._get_input_sparse_tensor(columns_to_tensors)
  sparse_id_values = math_ops.mod(input_tensor.values, self.bucket_size,
                                  name="mod")
  columns_to_tensors[self] = sparse_tensor_py.SparseTensor(
      input_tensor.indices, sparse_id_values, input_tensor.shape)
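In both insert_transformed_feature variants, the mod folds already-hashed feature values into bucket_size ids while leaving the sparse indices and shape untouched. A hedged NumPy sketch with made-up hash values:

import numpy as np

hashed_values = np.array([981, 12, 457, 2049], dtype=np.int64)  # illustrative hash values
bucket_size = 100

# Only the values of the SparseTensor are remapped into [0, bucket_size).
sparse_id_values = np.mod(hashed_values, bucket_size)
print(sparse_id_values)  # -> [81 12 57 49]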
def __mod__(self, other):
  return mod(self, other)
def testFloat(self):
  x = [0.5, 0.7, 0.3]
  for dtype in [np.float32, np.double]:
    # Test scalar and vector versions.
    for denom in [x[0], [x[0]] * 3]:
      x_np = np.array(x, dtype=dtype)
      with self.test_session():
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf = math_ops.mod(x_tf, denom)
        y_tf_np = y_tf.eval()
        y_np = np.fmod(x_np, denom)
        self.assertAllClose(y_tf_np, y_np, atol=1e-2)
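Note that the test keeps both operands positive, where the truncated remainder (np.fmod) and the floored remainder (np.mod) agree; with a negative operand the two conventions diverge, as this small illustrative check shows:

import numpy as np

# Positive operands: both conventions give the same remainder.
print(np.fmod(0.7, 0.5), np.mod(0.7, 0.5))    # -> roughly 0.2 0.2

# Negative operand: the results differ in sign and value.
print(np.fmod(-0.7, 0.5), np.mod(-0.7, 0.5))  # -> roughly -0.2 0.3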
def testFixed(self):
  x = [5, 10, 23]
  for dtype in [np.int32, np.int64]:
    # Test scalar and vector versions.
    for denom in [x[0], x]:
      x_np = np.array(x, dtype=dtype)
      with self.test_session():
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf = math_ops.mod(x_tf, denom)
        y_tf_np = y_tf.eval()
        y_np = np.mod(x_np, denom)
        self.assertAllClose(y_tf_np, y_np)
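For the integer test, a quick NumPy sketch of the two denominator forms the loop iterates over (the scalar x[0] and the full vector x):

import numpy as np

x = np.array([5, 10, 23], dtype=np.int32)

print(np.mod(x, 5))  # scalar denominator, broadcast -> [0 0 3]
print(np.mod(x, x))  # vector denominator, element-wise -> [0 0 0]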
From sharded_mutable_dense_hashtable.py, project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi
def _shard_indices(self, keys):
  key_shape = keys.get_shape()
  if key_shape.ndims > 1:
    # If keys are a matrix (i.e. a single key is a vector), we use the first
    # element of each key vector to determine the shard.
    keys = array_ops.slice(keys, [0, 0], [key_shape[0].value, 1])
    keys = array_ops.reshape(keys, [-1])
  indices = math_ops.mod(math_ops.abs(keys), self._num_shards)
  return math_ops.cast(indices, dtypes.int32)
From feature_column.py, project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi
def insert_transformed_feature(self, columns_to_tensors):
  """Handles sparse column to id conversion."""
  input_tensor = self._get_input_sparse_tensor(columns_to_tensors)
  sparse_id_values = math_ops.mod(input_tensor.values, self.bucket_size,
                                  name="mod")
  columns_to_tensors[self] = sparse_tensor_py.SparseTensor(
      input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
From core_test.py, project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi
def setUp(self):
  super(CoreBinaryOpsTest, self).setUp()

  self.x_probs_broadcast_tensor = array_ops.reshape(
      self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

  self.channel_probs_broadcast_tensor = array_ops.reshape(
      self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

  # == and != are not element-wise for tf.Tensor, so they shouldn't be
  # elementwise for LabeledTensor, either.
  self.ops = [
      ('add', operator.add, math_ops.add, core.add),
      ('sub', operator.sub, math_ops.subtract, core.sub),
      ('mul', operator.mul, math_ops.multiply, core.mul),
      ('div', operator.truediv, math_ops.div, core.div),
      ('mod', operator.mod, math_ops.mod, core.mod),
      ('pow', operator.pow, math_ops.pow, core.pow_function),
      ('equal', None, math_ops.equal, core.equal),
      ('less', operator.lt, math_ops.less, core.less),
      ('less_equal', operator.le, math_ops.less_equal, core.less_equal),
      ('not_equal', None, math_ops.not_equal, core.not_equal),
      ('greater', operator.gt, math_ops.greater, core.greater),
      ('greater_equal', operator.ge, math_ops.greater_equal,
       core.greater_equal),
  ]
  self.test_lt_1 = self.x_probs_lt
  self.test_lt_2 = self.channel_probs_lt
  self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
  self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
  self.broadcast_axes = [self.a0, self.a1, self.a3]
From core.py, project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi
def __mod__(self, other):
  return mod(self, other)
From core.py, project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi
def __rmod__(self, other):
  return mod(other, self)
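These __mod__ / __rmod__ overloads simply forward Python's % operator to a module-level mod helper, so a wrapped value can sit on either side of %. A hedged sketch of the dispatch pattern, with an invented Wrapped class and a placeholder mod helper rather than the library's actual types:

class Wrapped(object):
  # Illustrative stand-in for a tensor-like wrapper (not the library's API).

  def __init__(self, value):
    self.value = value

  def __mod__(self, other):
    # self % other -> mod(self, other)
    return mod(self, other)

  def __rmod__(self, other):
    # other % self -> mod(other, self); called when `other` is a plain number
    return mod(other, self)


def mod(a, b):
  # Placeholder for the module-level mod helper the snippets delegate to.
  lhs = a.value if isinstance(a, Wrapped) else a
  rhs = b.value if isinstance(b, Wrapped) else b
  return Wrapped(lhs % rhs)


print((Wrapped(7) % 3).value)  # -> 1
print((7 % Wrapped(3)).value)  # -> 1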