def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.' % (self.mode))
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
self.lr_epsilon = self.min_lr * 1e-4
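For illustration only, a minimal NumPy sketch (values hypothetical) of how the monitor_op built above decides whether a new monitored value counts as an improvement over best:

import numpy as np

epsilon = 1e-4
# 'min' / 'auto'-on-loss branch from _reset above.
monitor_op = lambda a, b: np.less(a, b - epsilon)

best = np.Inf
for current in [0.52, 0.48, 0.4799, 0.30]:
    if monitor_op(current, best):  # improved by more than epsilon
        best = current
print(best)  # 0.3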
def _padding_mask(sequence_lengths, padded_length):
"""Creates a mask used for calculating losses with padded input.
Args:
sequence_lengths: A `Tensor` of shape `[batch_size]` containing the unpadded
length of each sequence.
padded_length: A scalar `Tensor` indicating the length of the sequences
after padding.
Returns:
A boolean `Tensor` M of shape `[batch_size, padded_length]` where
`M[i, j] == True` when `sequence_lengths[i] > j`.
"""
range_tensor = math_ops.range(padded_length)
return math_ops.less(array_ops.expand_dims(range_tensor, 0),
array_ops.expand_dims(sequence_lengths, 1))
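The broadcasting trick is easy to verify in plain NumPy; this sketch is not part of the original module and uses made-up lengths:

import numpy as np

sequence_lengths = np.array([2, 4, 1])   # unpadded lengths, batch_size = 3
padded_length = 4
positions = np.arange(padded_length)     # shape [padded_length]
# [1, padded_length] < [batch_size, 1] broadcasts to [batch_size, padded_length].
mask = np.less(positions[np.newaxis, :], sequence_lengths[:, np.newaxis])
print(mask.astype(int))
# [[1 1 0 0]
#  [1 1 1 1]
#  [1 0 0 0]]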
def padding_mask(sequence_lengths, padded_length):
"""Creates a mask used for calculating losses with padded input.
Args:
sequence_lengths: A `Tensor` of shape `[batch_size]` containing the unpadded
length of each sequence.
padded_length: A scalar `Tensor` indicating the length of the sequences
after padding.
Returns:
A boolean `Tensor` M of shape `[batch_size, padded_length]` where
`M[i, j] == True` when `sequence_lengths[i] > j`.
"""
range_tensor = math_ops.range(padded_length)
return math_ops.less(array_ops.expand_dims(range_tensor, 0),
array_ops.expand_dims(sequence_lengths, 1))
def random_flip_left_right(image, bboxes, seed=None):
"""Random flip left-right of an image and its bounding boxes.
"""
def flip_bboxes(bboxes):
"""Flip bounding boxes coordinates.
"""
bboxes = tf.stack([bboxes[:, 0], 1 - bboxes[:, 3],
bboxes[:, 2], 1 - bboxes[:, 1]], axis=-1)
return bboxes
# Random flip. Tensorflow implementation.
with tf.name_scope('random_flip_left_right'):
image = ops.convert_to_tensor(image, name='image')
_Check3DImage(image, require_static=False)
uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
mirror_cond = math_ops.less(uniform_random, .5)
# Flip image.
result = control_flow_ops.cond(mirror_cond,
lambda: array_ops.reverse_v2(image, [1]),
lambda: image)
# Flip bboxes.
bboxes = control_flow_ops.cond(mirror_cond,
lambda: flip_bboxes(bboxes),
lambda: bboxes)
return fix_image_flip_shape(image, result), bboxes
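A small NumPy sketch of the arithmetic in flip_bboxes, assuming boxes are normalized [ymin, xmin, ymax, xmax] rows (an assumption about the layout used above): a horizontal flip leaves the y coordinates alone and mirrors the x range.

import numpy as np

boxes = np.array([[0.1, 0.2, 0.5, 0.6]])  # [ymin, xmin, ymax, xmax], normalized
flipped = np.stack([boxes[:, 0], 1 - boxes[:, 3],
                    boxes[:, 2], 1 - boxes[:, 1]], axis=-1)
print(flipped)  # [[0.1 0.4 0.5 0.8]] -- y unchanged, x range mirrored about 0.5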
distribution_util_test.py — project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi
def testCorrectlyPicksVector(self):
with self.test_session():
x = np.arange(10, 12)
y = np.arange(15, 18)
self.assertAllEqual(x,
distribution_util.pick_vector(
math_ops.less(0, 5), x, y).eval())
self.assertAllEqual(y,
distribution_util.pick_vector(
math_ops.less(5, 0), x, y).eval())
self.assertAllEqual(x,
distribution_util.pick_vector(
constant_op.constant(True), x, y)) # No eval.
self.assertAllEqual(y,
distribution_util.pick_vector(
constant_op.constant(False), x, y)) # No eval.
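For reference, a plain-Python stand-in (hypothetical, not the library implementation) for the behaviour the test asserts: pick_vector(cond, x, y) returns x when the condition holds and y otherwise.

import numpy as np

def pick_vector_sketch(cond, x, y):
    # Eager/NumPy-only stand-in for distribution_util.pick_vector.
    return x if cond else y

x = np.arange(10, 12)
y = np.arange(15, 18)
print(pick_vector_sketch(0 < 5, x, y))  # [10 11]
print(pick_vector_sketch(5 < 0, x, y))  # [15 16 17]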
def sample(self):
u = tf.random_uniform(tf.shape(self.ps))
return tf.to_float(math_ops.less(u, self.ps))
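The sampler above draws one uniform value per element and compares it against the success probability; a NumPy sketch with hypothetical ps:

import numpy as np

rng = np.random.default_rng(0)
ps = np.array([0.1, 0.5, 0.9])              # per-element Bernoulli probabilities
u = rng.uniform(size=ps.shape)
sample = np.less(u, ps).astype(np.float32)  # 1.0 with probability ps[i]
print(sample)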
def random_flip_left_right(image, seed=None):
uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
mirror = math_ops.less(tf.pack(
[1.0, 1.0, uniform_random, 1.0]), 0.5)
return tf.reverse(image, mirror)
def random_flip_up_down(image, seed=None):
uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
mirror = math_ops.less(tf.pack(
[1.0, uniform_random, 1.0, 1.0]), 0.5)
return tf.reverse(image, mirror)
def sample(self):
u = tf.random_uniform(tf.shape(self.ps))
return tf.to_float(math_ops.less(u, self.ps))
def random_flip_left_right(image, seed=None):
uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
mirror = math_ops.less(tf.pack([1.0, 1.0, uniform_random, 1.0]), 0.5)
return tf.reverse(image, mirror)
def random_flip_up_down(image, seed=None):
uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
mirror = math_ops.less(tf.pack([1.0, uniform_random, 1.0, 1.0]), 0.5)
return tf.reverse(image, mirror)
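In this older TensorFlow API, tf.reverse took one boolean flag per dimension, so only the axis paired with uniform_random can flip, and only when the draw falls below 0.5. A NumPy sketch of those semantics, assuming a 4-D [batch, height, width, channels] layout:

import numpy as np

rng = np.random.default_rng(0)
image = np.arange(2 * 3 * 4 * 1).reshape(2, 3, 4, 1)    # hypothetical NHWC batch
uniform_random = rng.uniform()
mirror = np.less([1.0, 1.0, uniform_random, 1.0], 0.5)  # only the width axis may flip
axes = tuple(int(a) for a in np.nonzero(mirror)[0])
flipped = np.flip(image, axis=axes) if axes else image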
def less(x, y):
"""Element-wise truth value of (x < y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less(x, y)
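A quick usage check through the public tf.math.less op that backs this backend wrapper (written for eager TF 2.x, unlike the graph-mode snippets elsewhere on this page):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant(2.0)                 # scalars broadcast element-wise
print(tf.math.less(x, y).numpy())    # [ True False False]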
def __init__(self,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
period=1):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.period = period
self.epochs_since_last_save = 0
if mode not in ['auto', 'min', 'max']:
logging.warning('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.' % (mode))
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
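A hedged usage sketch of the callback whose constructor is shown above, via the public tf.keras API; the tiny model, random data, and file name are all placeholders:

import numpy as np
from tensorflow import keras

model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(1)])
model.compile(optimizer='sgd', loss='mse')
x, y = np.random.rand(64, 4), np.random.rand(64, 1)

checkpoint = keras.callbacks.ModelCheckpoint(
    'best_model.h5', monitor='val_loss', save_best_only=True, mode='min')
model.fit(x, y, validation_split=0.25, epochs=5,
          callbacks=[checkpoint], verbose=0)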
def __init__(self,
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto'):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.min_delta = min_delta
self.wait = 0
self.stopped_epoch = 0
if mode not in ['auto', 'min', 'max']:
logging.warning('EarlyStopping mode %s is unknown, '
'fallback to auto mode.' % (mode))
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
else:
self.monitor_op = np.less
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
# Allow instances to be re-used
self.wait = 0
self.stopped_epoch = 0
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
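A matching usage sketch for EarlyStopping (again through the public tf.keras API, with a placeholder model and data):

import numpy as np
from tensorflow import keras

model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(1)])
model.compile(optimizer='sgd', loss='mse')
x, y = np.random.rand(64, 4), np.random.rand(64, 1)

early_stop = keras.callbacks.EarlyStopping(
    monitor='val_loss', min_delta=1e-3, patience=3, mode='min')
model.fit(x, y, validation_split=0.25, epochs=50,
          callbacks=[early_stop], verbose=0)
print('stopped at epoch:', early_stop.stopped_epoch)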
def random_flip_left_right(image, bboxes, seed=None):
"""Random flip left-right of an image and its bounding boxes.
"""
def flip_bboxes(bboxes):
"""Flip bounding boxes coordinates.
"""
bboxes = tf.stack([bboxes[:, 0], 1 - bboxes[:, 3],
bboxes[:, 2], 1 - bboxes[:, 1]], axis=-1)
return bboxes
# Random flip. Tensorflow implementation.
with tf.name_scope('random_flip_left_right'):
image = ops.convert_to_tensor(image, name='image')
_Check3DImage(image, require_static=False)
uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
mirror_cond = math_ops.less(uniform_random, .5)
# Debugging info:
# mirror_cond = tf.Print(mirror_cond, [mirror_cond], 'flipped image')
# Flip image.
result = control_flow_ops.cond(mirror_cond,
lambda: array_ops.reverse_v2(image, [1]),
lambda: image)
# Flip bboxes.
bboxes = control_flow_ops.cond(mirror_cond,
lambda: flip_bboxes(bboxes),
lambda: bboxes)
return fix_image_flip_shape(image, result), bboxes
def _mode(self):
s = self.df - self.dimension - 1.
s = math_ops.select(
math_ops.less(s, 0.),
constant_op.constant(float("NaN"), dtype=self.dtype, name="nan"),
s)
if self.cholesky_input_output_matrices:
return math_ops.sqrt(s) * self.scale_operator_pd.sqrt_to_dense()
return s * self.scale_operator_pd.to_dense()
def _sample_n(self, n, seed=None):
new_shape = array_ops.concat(0, ([n], self.batch_shape()))
uniform = random_ops.random_uniform(
new_shape, seed=seed, dtype=self.p.dtype)
sample = math_ops.less(uniform, self.p)
return math_ops.cast(sample, self.dtype)
def __lt__(self, other):
return less(self, other)
def _mode(self):
s = self.df - self.dimension - 1.
s = math_ops.select(
math_ops.less(s, 0.),
constant_op.constant(float("NaN"), dtype=self.dtype, name="nan"),
s)
if self.cholesky_input_output_matrices:
return math_ops.sqrt(s) * self.scale_operator_pd.sqrt_to_dense()
return s * self.scale_operator_pd.to_dense()
def _flip_vector_to_matrix_dynamic(vec, batch_shape):
"""flip_vector_to_matrix with dynamic shapes."""
# Shapes associated with batch_shape
batch_rank = array_ops.size(batch_shape)
# Shapes associated with vec.
vec = ops.convert_to_tensor(vec, name="vec")
vec_shape = array_ops.shape(vec)
vec_rank = array_ops.rank(vec)
vec_batch_rank = vec_rank - 1
m = vec_batch_rank - batch_rank
# vec_shape_left = [M1,...,Mm] or [].
vec_shape_left = array_ops.slice(vec_shape, [0], [m])
# If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
# If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
k = array_ops.gather(vec_shape, vec_rank - 1)
new_shape = array_ops.concat(0, (batch_shape, [k], condensed_shape))
def _flip_front_dims_to_back():
# Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
perm = array_ops.concat(
0, (math_ops.range(m, vec_rank), math_ops.range(0, m)))
return array_ops.transpose(vec, perm=perm)
x_flipped = control_flow_ops.cond(
math_ops.less(0, m),
_flip_front_dims_to_back,
lambda: array_ops.expand_dims(vec, -1))
return array_ops.reshape(x_flipped, new_shape)
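A NumPy sketch of the reshuffle this helper performs, with hypothetical shapes: the m extra leading dimensions of vec are moved behind the batch dimensions and condensed into a single trailing column dimension.

import numpy as np

# vec has shape [M1, M2] + batch_shape + [k] = [2, 3] + [4] + [5], so m = 2.
vec = np.zeros((2, 3, 4, 5))
batch_shape, k = (4,), 5
# Permute [M1, M2, N1, k] -> [N1, k, M1, M2], then condense [M1, M2] -> [M1 * M2].
flipped = np.transpose(vec, axes=(2, 3, 0, 1)).reshape(batch_shape + (k, 2 * 3))
print(flipped.shape)  # (4, 5, 6)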
def sample(self):
u = tf.random_uniform(tf.shape(self.ps))
return tf.to_float(math_ops.less(u, self.ps))
core_test.py — project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi
def setUp(self):
super(CoreBinaryOpsTest, self).setUp()
self.x_probs_broadcast_tensor = array_ops.reshape(
self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])
self.channel_probs_broadcast_tensor = array_ops.reshape(
self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])
# == and != are not element-wise for tf.Tensor, so they shouldn't be
# element-wise for LabeledTensor, either.
self.ops = [
('add', operator.add, math_ops.add, core.add),
('sub', operator.sub, math_ops.subtract, core.sub),
('mul', operator.mul, math_ops.multiply, core.mul),
('div', operator.truediv, math_ops.div, core.div),
('mod', operator.mod, math_ops.mod, core.mod),
('pow', operator.pow, math_ops.pow, core.pow_function),
('equal', None, math_ops.equal, core.equal),
('less', operator.lt, math_ops.less, core.less),
('less_equal', operator.le, math_ops.less_equal, core.less_equal),
('not_equal', None, math_ops.not_equal, core.not_equal),
('greater', operator.gt, math_ops.greater, core.greater),
('greater_equal', operator.ge, math_ops.greater_equal,
core.greater_equal),
]
self.test_lt_1 = self.x_probs_lt
self.test_lt_2 = self.channel_probs_lt
self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
self.broadcast_axes = [self.a0, self.a1, self.a3]
core_test.py — project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi
def test_reflexive(self):
labeled_tensor = self.x_probs_lt + 1 # all elements must be >0 for division
for op_name, infix_op, _, lt_op in self.ops:
if infix_op is not None:
expected_lt = lt_op(2, labeled_tensor)
actual_lt = infix_op(2, labeled_tensor)
# Python uses greater for the reflexive version of less (and vice versa)
if 'less' in op_name:
op_name = op_name.replace('less', 'greater')
elif 'greater' in op_name:
op_name = op_name.replace('greater', 'less')
self.assertIn(op_name, actual_lt.name)
self.assertLabeledTensorsEqual(expected_lt, actual_lt)
core.py — project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi
def __lt__(self, other):
return less(self, other)
wishart.py — project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi
def _mode(self):
s = self.df - self.dimension - 1.
s = array_ops.where(
math_ops.less(s, 0.),
constant_op.constant(float("NaN"), dtype=self.dtype, name="nan"),
s)
if self.cholesky_input_output_matrices:
return math_ops.sqrt(s) * self.scale_operator_pd.sqrt_to_dense()
return s * self.scale_operator_pd.to_dense()
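The arithmetic above is the textbook Wishart mode, (df - dimension - 1) * scale, with NaN when df < dimension + 1; a NumPy sketch with hypothetical parameters:

import numpy as np

df, dimension = 7.0, 3                 # degrees of freedom, matrix dimension
scale = np.diag([2.0, 1.0, 0.5])       # hypothetical scale matrix
s = df - dimension - 1.0
mode = np.where(s < 0.0, np.nan, s) * scale   # here (7 - 3 - 1) * scale = 3 * scale
print(mode)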
operator_pd.py — project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi
def _flip_vector_to_matrix_dynamic(vec, batch_shape):
"""flip_vector_to_matrix with dynamic shapes."""
# Shapes associated with batch_shape
batch_rank = array_ops.size(batch_shape)
# Shapes associated with vec.
vec = ops.convert_to_tensor(vec, name="vec")
vec_shape = array_ops.shape(vec)
vec_rank = array_ops.rank(vec)
vec_batch_rank = vec_rank - 1
m = vec_batch_rank - batch_rank
# vec_shape_left = [M1,...,Mm] or [].
vec_shape_left = array_ops.strided_slice(vec_shape, [0], [m])
# If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
# If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
k = array_ops.gather(vec_shape, vec_rank - 1)
new_shape = array_ops.concat((batch_shape, [k], condensed_shape), 0)
def _flip_front_dims_to_back():
# Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
perm = array_ops.concat((math_ops.range(m, vec_rank), math_ops.range(0, m)),
0)
return array_ops.transpose(vec, perm=perm)
x_flipped = control_flow_ops.cond(
math_ops.less(0, m),
_flip_front_dims_to_back,
lambda: array_ops.expand_dims(vec, -1))
return array_ops.reshape(x_flipped, new_shape)
student_t.py — project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi
def _cdf(self, x):
# Take Abs(scale) to make subsequent where work correctly.
y = (x - self.loc) / math_ops.abs(self.scale)
x_t = self.df / (y**2. + self.df)
neg_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
return array_ops.where(math_ops.less(y, 0.), neg_cdf, 1. - neg_cdf)
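As a sanity check (a sketch, not part of the library), the same regularized-incomplete-beta formula can be compared against scipy.stats.t.cdf:

import numpy as np
from scipy import special, stats

df, loc, scale = 5.0, 1.0, 2.0
x = np.array([-3.0, 0.0, 1.0, 4.0])

y = (x - loc) / np.abs(scale)
x_t = df / (y ** 2. + df)
neg_cdf = 0.5 * special.betainc(0.5 * df, 0.5, x_t)
cdf = np.where(y < 0., neg_cdf, 1. - neg_cdf)
print(np.allclose(cdf, stats.t.cdf(x, df, loc=loc, scale=scale)))  # True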