def test_one_of(self):
    block = tdb.OneOf(lambda x: x > 0,
                      {True: tdb.Scalar(),
                       False: tdb.Scalar() >> tdb.Function(tf.negative)})
    self.assertBuildsConst(3., block, 3)
    self.assertBuildsConst(3., block, -3)
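tdb.OneOf dispatches on the key function's result: positive inputs pass through Scalar() unchanged, while non-positive ones are routed through tf.negative, so the block as a whole computes an absolute value. A minimal pure-Python sketch of that dispatch (the one_of helper is hypothetical, not part of Fold):

def one_of(key_fn, case_fns):
    # Route each input to the case selected by key_fn, as tdb.OneOf does.
    def block(x):
        return case_fns[key_fn(x)](x)
    return block

abs_block = one_of(lambda x: x > 0,
                   {True: float, False: lambda x: float(-x)})
assert abs_block(3) == 3.0
assert abs_block(-3) == 3.0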
def test_one_of_mixed_input_type(self):
    block = (tdb.Identity(), tdb.Scalar('int32')) >> tdb.OneOf(
        key_fn=tdb.GetItem(0),
        case_blocks=(tdb.Function(tf.square), tdb.Function(tf.negative)),
        pre_block=tdb.GetItem(1))
    self.assertBuilds(4, block, (0, 2))
    self.assertBuilds(-2, block, (1, 2))
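Here the key and the payload travel together: GetItem(0) selects the case, while pre_block=GetItem(1) extracts the value the chosen case actually receives. A plain-Python sketch of the same routing, under that reading of the test (one_of_keyed is a hypothetical helper):

def one_of_keyed(key_fn, case_fns, pre_block):
    # Select a case from element 0, then apply it to element 1.
    def block(x):
        return case_fns[key_fn(x)](pre_block(x))
    return block

block = one_of_keyed(lambda t: t[0],
                     (lambda v: v * v, lambda v: -v),
                     lambda t: t[1])
assert block((0, 2)) == 4    # case 0 squares
assert block((1, 2)) == -2   # case 1 negates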
def test_optional_default_none_type_inference(self):
    child = tdb.Scalar() >> tdb.Function(tf.negative)
    block = tdb.Optional(child)
    self.assertEqual(child.output_type, None)
    child.set_output_type([])
    self.assertEqual(block.output_type, tdt.TensorType([]))
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_xent"):
        epsilon = 10e-6
        float_labels = tf.cast(labels, tf.float32)
        # Per-class binary cross-entropy: tf.negative flips the sign of the
        # log-likelihood; sum over classes, then average over the batch.
        cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
            1 - float_labels) * tf.log(1 - predictions + epsilon)
        cross_entropy_loss = tf.negative(cross_entropy_loss)
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
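For reference, the same computation in plain NumPy (a sketch for sanity-checking, not part of the original code; note that the literal 10e-6 is 1e-5, not 1e-6):

import numpy as np

def xent_loss(predictions, labels, epsilon=10e-6):
    # Negated per-class log-likelihood, summed over classes,
    # averaged over the batch.
    labels = labels.astype(np.float32)
    per_class = -(labels * np.log(predictions + epsilon) +
                  (1 - labels) * np.log(1 - predictions + epsilon))
    return np.mean(np.sum(per_class, axis=1))

preds = np.array([[0.9, 0.1], [0.2, 0.8]])
labels = np.array([[1, 0], [0, 1]])
print(xent_loss(preds, labels))  # small loss for confident, correct predictions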
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_softmax"):
        epsilon = 10e-8
        float_labels = tf.cast(labels, tf.float32)
        # l1 normalization (labels are no less than 0)
        label_rowsum = tf.maximum(
            tf.reduce_sum(float_labels, 1, keep_dims=True),
            epsilon)
        norm_float_labels = tf.div(float_labels, label_rowsum)
        softmax_outputs = tf.nn.softmax(predictions)
        softmax_loss = tf.negative(tf.reduce_sum(
            tf.multiply(norm_float_labels, tf.log(softmax_outputs)), 1))
        return tf.reduce_mean(softmax_loss)
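The softmax variant first turns the non-negative label rows into distributions via l1 normalization, then takes the cross-entropy against the softmax of the predictions; tf.negative again supplies the minus sign. An equivalent NumPy sketch (a hypothetical helper, for checking only):

import numpy as np

def softmax_loss(predictions, labels, epsilon=10e-8):
    labels = labels.astype(np.float32)
    norm_labels = labels / np.maximum(labels.sum(axis=1, keepdims=True), epsilon)
    shifted = predictions - predictions.max(axis=1, keepdims=True)  # stable softmax
    softmax = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    return np.mean(-np.sum(norm_labels * np.log(softmax), axis=1))

print(softmax_loss(np.array([[2.0, 0.0], [0.0, 2.0]]),
                   np.array([[1, 0], [0, 1]])))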
def __neg__(self):
    return tf.negative(self)
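Defining __neg__ is what lets plain `-x` on a tensor-like object build a tf.negative op. The same pattern in miniature, with a hypothetical NumPy-backed wrapper:

import numpy as np

class Box:
    # Hypothetical wrapper: unary minus delegates to the library's negation.
    def __init__(self, value):
        self.value = np.asarray(value)

    def __neg__(self):
        return Box(np.negative(self.value))

print((-Box([1.0, -2.0])).value)  # [-1.  2.]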
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_xent"):
        epsilon = 10e-6
        alpha = FLAGS.alpha
        float_labels = tf.cast(labels, tf.float32)
        # Class-balanced cross-entropy: alpha weights the positive-label
        # term, (1 - alpha) the negative-label term.
        cross_entropy_loss = 2 * (alpha * float_labels * tf.log(predictions + epsilon) +
                                  (1 - alpha) * (1 - float_labels) * tf.log(1 - predictions + epsilon))
        cross_entropy_loss = tf.negative(cross_entropy_loss)
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
def Neg_FwGrad(op, dx, _op_table=None, _grad_table=None):
    if dx is None:
        return None
    return tf.negative(dx)
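This is the forward-mode AD rule for negation: for y = -x, an input perturbation dx maps to -dx, and a None tangent (no perturbation flowing in) propagates as None. A NumPy sketch of applying the rule:

import numpy as np

def neg_fw_grad(dx):
    # Forward-mode rule for y = -x: dy = -dx.
    return None if dx is None else np.negative(dx)

x = np.array([1.0, -2.0])
dx = np.ones_like(x)                    # tangent vector
y, dy = np.negative(x), neg_fw_grad(dx)
print(y, dy)                            # [-1.  2.] [-1. -1.]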
def test_basic(self):
    with tf.Graph().as_default(), self.test_session() as sess:
        rnd = np.random.RandomState(0)
        x = self.get_random_tensor([18, 12], rnd=rnd)
        y = tf.negative(x)
        self.assert_bw_fw(sess, x, y, rnd=rnd)
def calculate_loss(self, predictions, labels, epsilon, wgts, **unused_params):
    with tf.name_scope("loss_xent"):
        float_labels = tf.cast(labels, tf.float32)
        cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
            1 - float_labels) * tf.log(1 - predictions + epsilon)
        # Same cross-entropy as above, scaled by per-example weights.
        cross_entropy_loss = tf.negative(cross_entropy_loss * wgts)
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
def calculate_loss(self, predictions, labels, epsilon, **unused_params):
    with tf.name_scope("loss_xent"):
        float_labels = tf.cast(labels, tf.float32)
        cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
            1 - float_labels) * tf.log(1 - predictions + epsilon)
        cross_entropy_loss = tf.negative(cross_entropy_loss)
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
def _window(region, start_index, end_index):
    """
    Returns the list of words from `region` between `start_index` and
    `end_index`, inclusive. Indices outside the region are clamped to its
    bounds, so windows that overrun an edge simply come back shorter; no
    padding is performed.
    """
    last_index = len(region) - 1
    selected_tokens = region[max(start_index, 0):min(end_index, last_index) + 1]
    return selected_tokens
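A quick usage sketch, assuming the clamping semantics described above:

region = ["the", "quick", "brown", "fox", "jumps"]
print(_window(region, -2, 1))   # ['the', 'quick']  (left edge clamped)
print(_window(region, 3, 10))   # ['fox', 'jumps']  (right edge clamped)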
def testCplxNegGPU(self):
    shapes = [(5, 4, 3), (5, 4), (5,), (1,)]
    for sh in shapes:
        x = ((np.random.randn(*sh) +
              1j * np.random.randn(*sh)).astype(np.complex64))
        self._compareGpu(x, np.negative, tf.negative)
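tf.negative covers complex dtypes as well, negating the real and imaginary parts together; np.negative, the reference the test compares against, behaves the same way:

import numpy as np

x = np.array([1 + 2j, -3 - 4j], dtype=np.complex64)
print(np.negative(x))  # [-1.-2.j  3.+4.j]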