Python negative() example source code
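tf.negative(x) computes the element-wise numerical negative, y = -x. A minimal sketch in the TensorFlow 1.x style used by the snippets below:

import tensorflow as tf

x = tf.constant([1.0, -2.0, 3.0])
y = tf.negative(x)  # equivalent to -x, element-wise

with tf.Session() as sess:
    print(sess.run(y))  # [-1.  2. -3.]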

losses.py (project: youtube-8m, author: wangheda)
def calculate_loss_distill_boost(self, predictions, labels_distill, labels, **unused_params):
    with tf.name_scope("loss_distill_boost"):
      print("loss_distill_boost")
      epsilon = 10e-6
      float_labels = tf.cast(labels, tf.float32)
      batch_size = tf.shape(float_labels)[0]
      float_labels_distill = tf.cast(labels_distill, tf.float32)
      error = tf.negative(float_labels * tf.log(float_labels_distill + epsilon) + (
          1 - float_labels) * tf.log(1 - float_labels_distill + epsilon))
      error = tf.reduce_sum(error,axis=1,keep_dims=True)
      alpha = error / tf.reduce_sum(error) * tf.cast(batch_size,dtype=tf.float32)
      alpha = tf.clip_by_value(alpha, 0.5, 5)
      alpha = alpha / tf.reduce_sum(alpha) * tf.cast(batch_size,dtype=tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss * alpha)

      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
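The boosting weight alpha is proportional to each example's distillation error, clipped to [0.5, 5] and renormalized so the weights average to 1 over the batch. A minimal numpy sketch of that normalization, with hypothetical error values:

import numpy as np

error = np.array([[0.1], [0.4], [2.0], [0.5]])  # hypothetical per-example errors
alpha = error / error.sum() * len(error)        # mean-1 weights, proportional to error
alpha = np.clip(alpha, 0.5, 5)                  # bound any one example's influence
alpha = alpha / alpha.sum() * len(alpha)        # renormalize back to mean 1
print(alpha.ravel())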
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss_distill_relabel(self, predictions, labels_distill, labels, **unused_params):
    with tf.name_scope("loss_distill_relabel"):
      print("loss_distill_relabel")
      epsilon = 10e-6
      float_labels = tf.cast(labels, tf.float32)
      sum_labels = tf.cast(tf.reduce_sum(float_labels),dtype=tf.int32)
      pos_distill, _ = tf.nn.top_k(tf.reshape(labels_distill,[-1]), k=sum_labels)
      labels_true = tf.ones(tf.shape(labels))
      labels_false = tf.zeros(tf.shape(labels))
      labels_add = tf.where(tf.greater_equal(labels_distill, pos_distill[-1]), labels_true, labels_false)
      print(labels_add.get_shape().as_list())
      float_labels = float_labels+labels_add*(1.0-float_labels)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)

      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
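The relabeling step promotes the top-k distillation scores to positives, where k is the total number of ground-truth positives in the batch. A small numpy sketch of the thresholding, with hypothetical scores:

import numpy as np

labels = np.array([[1, 0, 0], [0, 1, 1]], dtype=np.float32)
distill = np.array([[0.9, 0.7, 0.1], [0.2, 0.8, 0.6]], dtype=np.float32)
k = int(labels.sum())                              # number of ground-truth positives
threshold = np.sort(distill.ravel())[-k]           # k-th largest distillation score
labels_add = np.where(distill >= threshold, 1.0, 0.0)
new_labels = labels + labels_add * (1.0 - labels)  # position (0, 1) gets promoted
print(new_labels)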
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_xent"):
      epsilon = 10e-6
      vocab_size = predictions.get_shape().as_list()[1]
      float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      neg_labels = 1 - float_labels
      predictions_pos = predictions*float_labels+10*neg_labels
      predictions_minpos = tf.reduce_min(predictions_pos,axis=1,keep_dims=True)
      predictions_neg = predictions*neg_labels-10*float_labels
      predictions_maxneg = tf.reduce_max(predictions_neg,axis=1,keep_dims=True)
      mask_1 = tf.cast(tf.greater_equal(predictions_neg, predictions_minpos),dtype=tf.float32)
      mask_2 = tf.cast(tf.less_equal(predictions_pos, predictions_maxneg),dtype=tf.float32)
      cross_entropy_loss = cross_entropy_loss*(mask_1+mask_2)*10 + cross_entropy_loss
      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
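Here mask_1 flags negatives that score above the weakest positive, and mask_2 flags positives that score below the strongest negative; cross-entropy at those margin violations is scaled by 11x (the 10x extra term plus the base loss). A numpy sketch of the masks, with hypothetical scores:

import numpy as np

labels = np.array([[1, 0, 1, 0]], dtype=np.float32)
preds = np.array([[0.9, 0.6, 0.3, 0.1]], dtype=np.float32)
neg = 1 - labels
pred_pos = preds * labels + 10 * neg            # positives, with negatives pushed to 10
pred_neg = preds * neg - 10 * labels            # negatives, with positives pushed to -10
min_pos = pred_pos.min(axis=1, keepdims=True)   # weakest positive: 0.3
max_neg = pred_neg.max(axis=1, keepdims=True)   # strongest negative: 0.6
mask_1 = (pred_neg >= min_pos).astype(np.float32)  # offending negatives
mask_2 = (pred_pos <= max_neg).astype(np.float32)  # offending positives
print(mask_1 + mask_2)  # [[0. 1. 1. 0.]]: the ranking violation is flagged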
losses_embedding.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
        bound = FLAGS.softmax_bound
        vocab_size_1 = bound
        with tf.name_scope("loss_softmax"):
            epsilon = 10e-8
            float_labels = tf.cast(labels, tf.float32)
            labels_1 = float_labels[:,:vocab_size_1]
            predictions_1 = predictions[:,:vocab_size_1]
            cross_entropy_loss = CrossEntropyLoss().calculate_loss(predictions_1,labels_1)
            labels_2 = float_labels[:,vocab_size_1:]
            predictions_2 = predictions[:,vocab_size_1:]
            # l1 normalization (labels are no less than 0)
            label_rowsum = tf.maximum(
                tf.reduce_sum(labels_2, 1, keep_dims=True),
                epsilon)
            label_append = 1.0-tf.reduce_max(labels_2, 1, keep_dims=True)
            norm_float_labels = tf.concat((tf.div(labels_2, label_rowsum),label_append),axis=1)
            predictions_append = 1.0-tf.reduce_sum(predictions_2, 1, keep_dims=True)
            softmax_outputs = tf.concat((predictions_2,predictions_append),axis=1)
            softmax_loss = norm_float_labels * tf.log(softmax_outputs + epsilon) + (
                                                                                       1 - norm_float_labels) * tf.log(1 - softmax_outputs + epsilon)
            softmax_loss = tf.negative(tf.reduce_sum(softmax_loss, 1))
        return tf.reduce_mean(softmax_loss) + cross_entropy_loss
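The tail of the vocabulary (beyond softmax_bound) is treated as a pseudo-softmax: labels are l1-normalized and an extra "none of the above" bucket absorbs the leftover mass, so label and prediction rows both sum to roughly 1. A numpy sketch of the appended bucket, with hypothetical values:

import numpy as np

labels_2 = np.array([[1.0, 0.0, 1.0]])
preds_2 = np.array([[0.5, 0.1, 0.3]])
row = np.maximum(labels_2.sum(1, keepdims=True), 1e-8)
label_append = 1.0 - labels_2.max(1, keepdims=True)   # 0 if any positive exists
norm_labels = np.concatenate([labels_2 / row, label_append], axis=1)
pred_append = 1.0 - preds_2.sum(1, keepdims=True)     # leftover probability mass
outputs = np.concatenate([preds_2, pred_append], axis=1)
print(norm_labels, outputs)  # both rows now sum to ~1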
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, weights=None, **unused_params):
    with tf.name_scope("loss_xent"):
      epsilon = 10e-6
      if FLAGS.label_smoothing:
        float_labels = smoothing(labels)
      else:
        float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      if weights is not None:
        print(cross_entropy_loss, weights)
        weighted_loss = tf.einsum("ij,i->ij", cross_entropy_loss, weights)
        print "create weighted_loss", weighted_loss
        return tf.reduce_mean(tf.reduce_sum(weighted_loss, 1))
      else:
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
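The einsum "ij,i->ij" simply scales row i of the loss matrix by weights[i]; it is the same as broadcasting the weight vector over the class axis. A minimal sketch:

import numpy as np

loss = np.arange(6, dtype=np.float32).reshape(2, 3)   # [batch, classes]
weights = np.array([1.0, 2.0], dtype=np.float32)      # per-example weights
weighted = np.einsum("ij,i->ij", loss, weights)
assert np.allclose(weighted, loss * weights[:, None])  # identical to broadcasting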
matchnn.py (project: MatchingNetwork, author: cnichkawde)
def call(self,inputs):
        """
        inputs is an array containing the support-set embeddings, with the target embedding as the second-to-last element in the array and the true class of the target embedding as the last element.
        """ 
        similarities = []

        targetembedding = inputs[-2]
        numsupportset = len(inputs)-2
        for ii in range(numsupportset):
            supportembedding = inputs[ii]
            dd = tf.negative(tf.sqrt(tf.reduce_sum(tf.square(supportembedding-targetembedding),1,keep_dims=True)))

            similarities.append(dd)

        similarities = tf.concat(axis=1,values=similarities)
        softmax_similarities = tf.nn.softmax(similarities)
        preds = tf.squeeze(tf.matmul(tf.expand_dims(softmax_similarities,1),inputs[-1]))

        preds.set_shape((inputs[0].shape[0],self.nway))

        return preds
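This is the Matching Networks attention kernel: negative Euclidean distance as similarity, a softmax over the support set, then an attention-weighted sum of the support labels. A compact numpy sketch of the same computation, with hypothetical embeddings:

import numpy as np

support = np.random.randn(5, 64)               # 5 support-set embeddings
target = np.random.randn(64)                   # target embedding
support_labels = np.eye(5)                     # one-hot support classes
sims = -np.sqrt(((support - target) ** 2).sum(axis=1))  # negative L2 distance
attn = np.exp(sims) / np.exp(sims).sum()                # softmax attention
preds = attn @ support_labels                           # weighted label sum
print(preds.sum())  # ~1.0: a distribution over the support classes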
plan_test.py (project: fold, author: tensorflow)
def test_run_key_fn(self):
    p = plan.InferPlan()
    p.compiler = block_compiler.Compiler.create(
        blocks.Scalar() >> blocks.Function(tf.negative))
    p.logdir = self.get_temp_dir()
    p.examples = xrange(5)
    p.outputs = p.compiler.output_tensors
    results = []
    p.results_fn = results.append
    p.key_fn = str
    p.batch_size = 3
    p.chunk_size = 2
    with self.test_session() as sess:
      p.run(session=sess)
    self.assertEqual(1, len(results))
    self.assertEqual(
        [('0', (-0,)), ('1', (-1,)), ('2', (-2,)), ('3', (-3,)), ('4', (-4,))],
        list(results[0]))
blocks_test.py (project: fold, author: tensorflow)
def test_function_otype_inference_tensor_to_tensor(self):
    infer = tdb._infer_tf_output_type_from_input_type

    self.assertEqual(tdt.TensorType([]),
                     infer(tf.negative, tdt.TensorType([])))
    self.assertEqual(tdt.TensorType([2, 3]),
                     infer(tf.negative, tdt.TensorType([2, 3])))

    self.assertEqual(tdt.TensorType([], 'int32'),
                     infer(tf.negative, tdt.TensorType([], 'int32')))
    self.assertEqual(tdt.TensorType([2, 3], 'int32'),
                     infer(tf.negative, tdt.TensorType([2, 3], 'int32')))

    f = lambda x: tf.cast(x, 'int32')
    self.assertEqual(tdt.TensorType([], 'int32'),
                     infer(f, tdt.TensorType([], 'float32')))
    self.assertEqual(tdt.TensorType([2, 3], 'int32'),
                     infer(f, tdt.TensorType([2, 3], 'float64')))
test_utils.py (project: zhusuan, author: thu-ml)
def testGetBackwardOpsSplit(self):
        # a -> b -> c
        #       \-> d
        a = tf.placeholder(tf.float32)
        b = tf.exp(a)
        c = tf.log(b)
        d = tf.negative(b)
        self.assertEqual(get_backward_ops([d]), [a.op, b.op, d.op])
        self.assertEqual(get_backward_ops([c]), [a.op, b.op, c.op])
        self.assertEqual(
            get_backward_ops([c, d]), [a.op, b.op, c.op, d.op])
        self.assertEqual(get_backward_ops([b, d]), [a.op, b.op, d.op])
        self.assertEqual(get_backward_ops([a, d]), [a.op, b.op, d.op])

        self.assertEqual(
            get_backward_ops([c, d], treat_as_inputs=[b]), [c.op, d.op])
        self.assertEqual(
            get_backward_ops([c], treat_as_inputs=[d]), [a.op, b.op, c.op])
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss_negative(self, predictions_pos, predictions_neg, labels, **unused_params):
    with tf.name_scope("loss_negative"):
      epsilon = 10e-6
      float_labels = tf.cast(labels, tf.float32)
      weight_pos = np.loadtxt(FLAGS.autoencoder_dir+"labels_uni.out")
      weight_pos = tf.reshape(tf.cast(weight_pos,dtype=tf.float32),[1,-1])
      weight_pos = tf.log(tf.reduce_max(weight_pos)/weight_pos)+1
      cross_entropy_loss_1 = float_labels * tf.log(predictions_pos + epsilon)*weight_pos + (
          1 - float_labels) * tf.log(1 - predictions_pos + epsilon)
      cross_entropy_loss_2 = (1-float_labels) * tf.log(predictions_neg + epsilon) + \
                             float_labels * tf.log(1 - predictions_neg + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss_1+cross_entropy_loss_2)
      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
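weight_pos is an inverse-frequency class weight: rarer labels (smaller counts in labels_uni.out) receive larger weights via log(max_count / count) + 1, so the most frequent label gets weight 1. A numpy sketch with hypothetical counts:

import numpy as np

counts = np.array([1000.0, 100.0, 10.0])        # hypothetical label frequencies
weight_pos = np.log(counts.max() / counts) + 1  # natural log, as in tf.log
print(weight_pos)  # [1.0, ~3.3, ~5.6]: rare classes are weighted up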
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss_max(self, predictions, predictions_experts, labels, **unused_params):
    with tf.name_scope("loss_max"):
      epsilon = 10e-6
      shape = predictions_experts.get_shape().as_list()
      float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
                        1 - float_labels) * tf.log(1 - predictions + epsilon)
      float_experts = tf.tile(tf.reshape(float_labels,[-1,shape[1],1]),[1,1,shape[2]])
      cross_entropy_experts = float_experts * tf.log(predictions_experts + epsilon) + (
                     1 - float_experts) * tf.log(1 - predictions_experts + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      cross_entropy_experts = tf.negative(tf.reduce_mean(cross_entropy_experts,axis=2))
      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1)) + tf.reduce_mean(tf.reduce_sum(cross_entropy_experts, 1))
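The expert loss tiles the labels across the expert axis, so every expert is trained against the same targets, then averages the experts' cross-entropy before summing over classes. A numpy sketch of the tiling, with hypothetical shapes:

import numpy as np

float_labels = np.array([[1.0, 0.0]])            # [batch, classes]
num_experts = 3                                  # hypothetical shape[2]
float_experts = np.tile(float_labels[:, :, None], (1, 1, num_experts))
print(float_experts.shape)  # (1, 2, 3): labels broadcast to every expert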
losses_embedding.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
        with tf.name_scope("loss_xent"):
            epsilon = 10e-6
            origin_labels = tf.cast(labels, tf.float32)
            vocab_size = origin_labels.get_shape().as_list()[1]
            float_labels = tf.tile(tf.reshape(origin_labels,[-1, 1, vocab_size]),[1,FLAGS.top_k,1])
            float_labels = tf.reshape(float_labels,[-1,vocab_size])
            cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
                1 - float_labels) * tf.log(1 - predictions + epsilon)
            cross_entropy_loss = tf.negative(cross_entropy_loss)
            num_labels = tf.minimum(tf.reduce_sum(origin_labels,axis=1),tf.constant(FLAGS.top_k,dtype=tf.float32))
            mask = tf.reshape(tf.sequence_mask(num_labels,tf.constant(FLAGS.top_k,dtype=tf.float32),dtype=tf.float32),[-1])
            cross_entropy_loss = tf.reduce_sum(tf.reduce_sum(cross_entropy_loss, 1)*mask)/(tf.reduce_sum(mask)+epsilon)

            return cross_entropy_loss
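Each video's labels are tiled top_k times, and tf.sequence_mask keeps only as many tiled rows as the video has positive labels (capped at top_k); the rest are zeroed out of the loss. A numpy sketch of the mask, with hypothetical label counts and top_k = 3:

import numpy as np

num_labels = np.array([1, 3, 2])   # positives per video, capped at top_k
top_k = 3
mask = (np.arange(top_k)[None, :] < num_labels[:, None]).astype(np.float32)
print(mask.reshape(-1))  # [1. 0. 0. 1. 1. 1. 1. 1. 0.], as tf.sequence_mask gives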
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
    false_positive_punishment = FLAGS.false_positive_punishment
    false_negative_punishment = FLAGS.false_negative_punishment
    with tf.name_scope("loss_xent_recall"):
      epsilon = 10e-6
      if FLAGS.label_smoothing:
        float_labels = smoothing(labels)
      else:
        float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = false_negative_punishment * float_labels * tf.log(predictions + epsilon) \
          + false_positive_punishment * ( 1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
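false_negative_punishment scales the positive-label term and false_positive_punishment scales the negative-label term, trading recall against precision. A tiny numeric sketch with hypothetical punishment weights of 2.0 and 1.0:

import numpy as np

fn_pun, fp_pun = 2.0, 1.0          # hypothetical punishment weights
y, p, eps = 1.0, 0.1, 1e-6         # a badly scored positive example
loss = -(fn_pun * y * np.log(p + eps) + fp_pun * (1 - y) * np.log(1 - p + eps))
print(loss)  # ~4.6, double the unweighted cross-entropy of ~2.3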
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_softmax"):
      epsilon = 10e-8
      float_labels = tf.cast(labels, tf.float32)
      # l1 normalization (labels are no less than 0)
      label_rowsum = tf.maximum(
          tf.reduce_sum(float_labels, 1, keep_dims=True),
          epsilon)
      norm_float_labels = tf.div(float_labels, label_rowsum)
      softmax_outputs = tf.nn.softmax(predictions)
      softmax_loss = tf.negative(tf.reduce_sum(
          tf.multiply(norm_float_labels, tf.log(softmax_outputs)), 1))
    return tf.reduce_mean(softmax_loss)
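For multi-label targets, the labels are first l1-normalized into a distribution and the loss is the cross-entropy between that distribution and the softmax outputs. A numpy sketch:

import numpy as np

labels = np.array([[1.0, 0.0, 1.0]])
logits = np.array([[2.0, 0.0, 1.0]])
norm_labels = labels / np.maximum(labels.sum(1, keepdims=True), 1e-8)
softmax = np.exp(logits) / np.exp(logits).sum(1, keepdims=True)
loss = -(norm_labels * np.log(softmax)).sum(1)
print(loss.mean())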
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, topk=20, **unused_params):
    with tf.name_scope("loss_xent_batch"):
      batch_agreement = FLAGS.batch_agreement
      epsilon = 10e-6
      float_batch_size = float(FLAGS.batch_size)

      topk_predictions, _ = tf.nn.top_k(predictions, k=topk)
      min_topk_predictions = tf.reduce_min(topk_predictions, axis=1, keep_dims=True)
      topk_mask = tf.cast(predictions >= min_topk_predictions, dtype=tf.float32)

      float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)

      # minimum positive predictions in topk
      positive_predictions = (predictions * float_labels * topk_mask) + 1.0 - (float_labels * topk_mask)
      min_pp = tf.reduce_min(positive_predictions)

      # maximum negative predictions
      negative_predictions = predictions * (1.0 - float_labels)
      max_np = tf.reduce_max(negative_predictions)

      # 1s that fall under top-k
      false_negatives = tf.cast(predictions < min_topk_predictions, tf.float32) * float_labels
      # 0s that grow over 1s in top-k
      false_positives = tf.cast(predictions > min_pp, tf.float32) * (1.0 - float_labels) * topk_mask

      weight = (false_negatives + false_positives) * batch_agreement + 1.0
      weight = tf.stop_gradient(weight)
      print(weight)
      return tf.reduce_mean(tf.reduce_sum(weight * cross_entropy_loss, 1))
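The weight upweights two kinds of disagreement between the label set and the top-k predictions: positives that fall outside the top k (false negatives) and negatives inside the top k that outscore the weakest top-k positive (false positives). A numpy sketch of the top-k mask, with hypothetical scores and k = 2:

import numpy as np

preds = np.array([[0.9, 0.5, 0.4, 0.1]])
k = 2
min_topk = np.sort(preds, axis=1)[:, -k][:, None]   # k-th largest per row: 0.5
topk_mask = (preds >= min_topk).astype(np.float32)
print(topk_mask)  # [[1. 1. 0. 0.]]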
matchnn.py (project: MatchingNetwork, author: cnichkawde)
def _merge_function(self,inputs):
        return tf.negative(tf.abs(inputs[0]-inputs[1]))
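Here tf.negative turns the element-wise L1 distance between two embeddings into a similarity (larger means closer), suitable as a Keras merge function. A minimal TF 1.x sketch:

import tensorflow as tf

a = tf.constant([1.0, 2.0, 3.0])
b = tf.constant([1.5, 2.0, 1.0])
similarity = tf.negative(tf.abs(a - b))  # 0 when identical, more negative when far apart

with tf.Session() as sess:
    print(sess.run(similarity))  # [-0.5  0. -2.]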
losses.py (project: yt8m, author: forwchen)
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_xent"):
      epsilon = 10e-6
      float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
losses.py (project: yt8m, author: forwchen)
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_softmax"):
      epsilon = 10e-8
      float_labels = tf.cast(labels, tf.float32)
      # l1 normalization (labels are no less than 0)
      label_rowsum = tf.maximum(
          tf.reduce_sum(float_labels, 1, keep_dims=True),
          epsilon)
      norm_float_labels = tf.div(float_labels, label_rowsum)
      softmax_outputs = tf.nn.softmax(predictions)
      softmax_loss = tf.negative(tf.reduce_sum(
          tf.multiply(norm_float_labels, tf.log(softmax_outputs)), 1))
    return tf.reduce_mean(softmax_loss)
plan_test.py (project: fold, author: tensorflow)
def test_run_no_key_fn(self):
    p = plan.InferPlan()
    p.compiler = block_compiler.Compiler.create(
        blocks.Scalar() >> blocks.Function(tf.negative))
    p.logdir = self.get_temp_dir()
    p.examples = xrange(5)
    p.outputs = p.compiler.output_tensors
    results = []
    p.results_fn = results.append
    p.batch_size = 3
    p.chunk_size = 2
    with self.test_session() as sess:
      p.run(session=sess)
    self.assertEqual(1, len(results))
    self.assertEqual([(0,), (-1,), (-2,), (-3,), (-4,)], list(results[0]))
blocks_test.py (project: fold, author: tensorflow)
def test_composition_void(self):
    c = tdb.Composition()
    with c.scope():
      a = tdb.Scalar().reads(c.input)
      b = tdb.Function(tf.negative).reads(a)
      tdm.Metric('foo').reads(b)
      c.output.reads(a)
    self.assertBuilds((42., {'foo': [-42.]}), c, 42, max_depth=2)
blocks_test.py (project: fold, author: tensorflow)
def test_composition_forward_type_inference(self):
    b = tdb._pipe([tdb.Identity(), tdb.Identity(),
                   tdb.Map(tdb.Function(tf.negative))])
    six.assertRaisesRegex(
        self, TypeError, 'bad input type PyObjectType',
        b.input.set_input_type, tdt.PyObjectType())
blocks_test.py (project: fold, author: tensorflow)
def test_forward_declaration_orphaned_nested(self):
    fwd1 = tdb.ForwardDeclaration(tdt.VoidType(), tdt.TensorType([]))
    fwd2 = tdb.ForwardDeclaration(tdt.SequenceType(tdt.TensorType([])),
                                  tdt.TensorType([]))
    b = tdb.Map(tdb.Scalar()) >> fwd2() >> tdb.Function(tf.negative)
    fwd2.resolve_to(tdb.Fold(tdb.Function(tf.add), fwd1()))
    fwd1.resolve_to(tdb.FromTensor(tf.ones([])))
    self.assertBuilds(-8., b, [3, 4], max_depth=3)
blocks_test.py (project: fold, author: tensorflow)
def test_map_tuple(self):
    block = (tdb.Scalar(), tdb.Scalar()) >> tdb.Map(tdb.Function(tf.negative))
    self.assertBuilds([-3., -4.], block, (3, 4))
blocks_test.py (project: fold, author: tensorflow)
def test_all_of_1(self):
    self.assertBuilds((3.,), scalar_all_of(tf.negative), -3)
blocks_test.py (project: fold, author: tensorflow)
def test_all_of_3(self):
    block = scalar_all_of(tf.identity, tf.abs, tf.negative)
    self.assertBuilds((3., 3., -3.), block, 3)
    self.assertBuilds((-3., 3., 3.), block, -3)
blocks_test.py (project: fold, author: tensorflow)
def test_all_of_different_shapes(self):
    block = scalar_all_of(tf.negative, functools.partial(tf.expand_dims, dim=1))
    self.assertBuilds((-3., [3.]), block, 3)
blocks_test.py (project: fold, author: tensorflow)
def test_tuple_of_seq(self):
    block = tdb.AllOf(
        tdb.Map(tdb.Scalar() >> tdb.Function(tf.negative)),
        tdb.Map(tdb.Scalar() >> tdb.Function(tf.identity)))
    self.assertBuilds(([], []), block, [], max_depth=0)
    self.assertBuilds(([-1., -2.], [1., 2.]), block, [1, 2])
blocks_test.py (project: fold, author: tensorflow)
def test_input_transform(self):
    block = tdb.Map(tdb.InputTransform(lambda x: 1 + ord(x) - ord('a')) >>
                    tdb.Scalar('int32') >> tdb.Function(tf.negative))
    self.assertBuilds([-1, -2, -3, -4], block, 'abcd')

