Python logical_and() examples from open-source projects
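tf.logical_and computes the element-wise logical AND of two boolean tensors, with the usual broadcasting rules. The snippets below, collected from open-source projects, use it to build metric masks, validity tests, assertion predicates, and loop conditions. As a minimal standalone sketch (TF 1.x graph mode; the tensor values are illustrative and not taken from any project below):

import tensorflow as tf

a = tf.constant([True, True, False, False])
b = tf.constant([True, False, True, False])
both = tf.logical_and(a, b)  # element-wise AND

with tf.Session() as sess:
    print(sess.run(both))  # [ True False False False]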

infer_util.py (project: magenta, author: tensorflow)
def _frame_metrics(frame_labels, frame_predictions):
  """Calculate frame-based metrics."""
  frame_labels_bool = tf.cast(frame_labels, tf.bool)
  frame_predictions_bool = tf.cast(frame_predictions, tf.bool)

  frame_true_positives = tf.reduce_sum(tf.to_float(tf.logical_and(
      tf.equal(frame_labels_bool, True),
      tf.equal(frame_predictions_bool, True))))
  frame_false_positives = tf.reduce_sum(tf.to_float(tf.logical_and(
      tf.equal(frame_labels_bool, False),
      tf.equal(frame_predictions_bool, True))))
  frame_false_negatives = tf.reduce_sum(tf.to_float(tf.logical_and(
      tf.equal(frame_labels_bool, True),
      tf.equal(frame_predictions_bool, False))))
  # Accuracy is the mean (not the sum) of per-frame correctness.
  frame_accuracy = tf.reduce_mean(tf.to_float(
      tf.equal(frame_labels_bool, frame_predictions_bool)))

  frame_precision = tf.where(
      tf.greater(frame_true_positives + frame_false_positives, 0),
      tf.div(frame_true_positives,
             frame_true_positives + frame_false_positives),
      0)
  frame_recall = tf.where(
      tf.greater(frame_true_positives + frame_false_negatives, 0),
      tf.div(frame_true_positives,
             frame_true_positives + frame_false_negatives),
      0)
  frame_f1_score = f1_score(frame_precision, frame_recall)
  frame_accuracy_without_true_negatives = accuracy_without_true_negatives(
      frame_true_positives, frame_false_positives, frame_false_negatives)

  return {
      'true_positives': frame_true_positives,
      'false_positives': frame_false_positives,
      'false_negatives': frame_false_negatives,
      'accuracy': frame_accuracy,
      'accuracy_without_true_negatives': frame_accuracy_without_true_negatives,
      'precision': frame_precision,
      'recall': frame_recall,
      'f1_score': frame_f1_score,
  }
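Note the pattern above: boolean label/prediction masks are combined with tf.logical_and to count true and false positives, and tf.where guards the precision/recall divisions against a zero denominator. The same guard in isolation (a sketch with a hypothetical helper name; numerator and denominator are assumed to be float tensors):

import tensorflow as tf

def safe_divide(numerator, denominator):
    # Return 0 where the denominator is 0, mirroring the tf.where guard above.
    return tf.where(tf.greater(denominator, 0),
                    tf.div(numerator, denominator),
                    tf.zeros_like(denominator))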
utils.py (project: zhusuan, author: thu-ml)
def __and__(self, other):
        return tf.logical_and(self, other)
utils.py (project: zhusuan, author: thu-ml)
def __rand__(self, other):
        return tf.logical_and(other, self)
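These two methods live on zhusuan's tensor wrapper class, so Python's & operator dispatches to tf.logical_and from either side of the expression. A self-contained illustration of the same idea (BoolTensorWrapper is a hypothetical stand-in for the real wrapper class):

import tensorflow as tf

class BoolTensorWrapper(object):
    """Hypothetical stand-in for a tensor wrapper class."""
    def __init__(self, tensor):
        self.tensor = tensor

    def __and__(self, other):
        return tf.logical_and(self.tensor, getattr(other, 'tensor', other))

    def __rand__(self, other):
        return tf.logical_and(other, self.tensor)

mask = BoolTensorWrapper(tf.constant([True, False]))
combined = mask & tf.constant([True, True])  # builds a tf.logical_and op via __and__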
renderer.py (project: tf.rasterizer, author: vahidk)
def barycentric(verts, p):
    # Edge vectors of the triangle, and the vector from vertex 0 to the point.
    ab = verts[2] - verts[0]
    ac = verts[1] - verts[0]
    pa = verts[0] - p
    u = utils.tri_cross(
        [ab[0], ac[0], pa[:, 0]],
        [ab[1], ac[1], pa[:, 1]])
    v = [u[0] / u[2], u[1] / u[2]]
    bc = [1. - v[0] - v[1], v[1], v[0]]
    # A point is valid when the triangle is not degenerate (|u[2]| >= 1)
    # and all three barycentric coordinates are non-negative (point inside).
    valid = tf.logical_and(
        tf.abs(u[2]) >= 1.0,
        tf.reduce_all(tf.stack(bc, axis=1) >= 0, axis=1))
    return bc, valid
models.py (project: TF-FaceDetection, author: mariolew)
def fcn_12_detect(threshold, dropout=False, activation=tf.nn.relu):

    imgs = tf.placeholder(tf.float32, [None, 12, 12, 3])
    labels = tf.placeholder(tf.float32, [None, 1])
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    with tf.variable_scope('net_12'):
        conv1,_ = utils.conv2d(x=imgs, n_output=16, k_w=3, k_h=3, d_w=1, d_h=1, name="conv1")
        conv1 = activation(conv1)
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME", name="pool1")
        ip1,W1 = utils.conv2d(x=pool1, n_output=16, k_w=6, k_h=6, d_w=1, d_h=1, padding="VALID", name="ip1")
        ip1 = activation(ip1)
        if dropout:
            ip1 = tf.nn.dropout(ip1, keep_prob)
        ip2,W2 = utils.conv2d(x=ip1, n_output=1, k_w=1, k_h=1, d_w=1, d_h=1, name="ip2")

        pred = tf.nn.sigmoid(utils.flatten(ip2))
        target = utils.flatten(labels)

        regularizer = 8e-3 * (tf.nn.l2_loss(W1)+100*tf.nn.l2_loss(W2))

        loss = tf.reduce_mean(tf.div(tf.add(
            -tf.reduce_sum(target * tf.log(pred + 1e-9), 1),
            -tf.reduce_sum((1 - target) * tf.log(1 - pred + 1e-9), 1)), 2)) + regularizer
        cost = tf.reduce_mean(loss)

        thresholding_12 = tf.cast(tf.greater(pred, threshold), "float")
        recall_12 = tf.reduce_sum(tf.cast(tf.logical_and(
            tf.equal(thresholding_12, tf.constant([1.0])),
            tf.equal(target, tf.constant([1.0]))), "float")) / tf.reduce_sum(target)

        correct_prediction = tf.equal(tf.cast(tf.greater(pred, threshold), tf.int32), tf.cast(target, tf.int32))
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return {'imgs': imgs, 'labels': labels, 'keep_prob': keep_prob,
            'cost': cost, 'pred': pred, 'accuracy': acc, 'features': ip1,
            'recall': recall_12, 'thresholding': thresholding_12}
models.py (project: TF-FaceDetection, author: mariolew)
def fcn_24_detect(threshold, dropout=False, activation=tf.nn.relu):

    imgs = tf.placeholder(tf.float32, [None, 24, 24, 3])
    labels = tf.placeholder(tf.float32, [None, 1])
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    net_12 = fcn_12_detect(0.16, activation=activation)
    with tf.variable_scope('net_24'):
        conv1, _ = utils.conv2d(x=imgs, n_output=64, k_w=5, k_h=5, d_w=1, d_h=1, name="conv1")
        conv1 = activation(conv1)
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME", name="pool1")
        ip1, W1 = utils.conv2d(x=pool1, n_output=128, k_w=12, k_h=12, d_w=1, d_h=1, padding="VALID", name="ip1")
        ip1 = activation(ip1)
        net_12_ip1 = net_12['features']
        concat = tf.concat([ip1, net_12_ip1], 3)  # concatenate along the channel axis
        if dropout:
            concat = tf.nn.dropout(concat, keep_prob)
        ip2, W2 = utils.conv2d(x=concat, n_output=1, k_w=1, k_h=1, d_w=1, d_h=1, name="ip2")

        pred = tf.nn.sigmoid(utils.flatten(ip2))
        target = utils.flatten(labels)

        regularizer = 8e-3 * (tf.nn.l2_loss(W1)+100*tf.nn.l2_loss(W2))

        loss = tf.reduce_mean(tf.div(tf.add(
            -tf.reduce_sum(target * tf.log(pred + 1e-9), 1),
            -tf.reduce_sum((1 - target) * tf.log(1 - pred + 1e-9), 1)), 2)) + regularizer
        cost = tf.reduce_mean(loss)

        thresholding_24 = tf.cast(tf.greater(pred, threshold), "float")
        recall_24 = tf.reduce_sum(tf.cast(tf.logical_and(
            tf.equal(thresholding_24, tf.constant([1.0])),
            tf.equal(target, tf.constant([1.0]))), "float")) / tf.reduce_sum(target)

        correct_prediction = tf.equal(tf.cast(tf.greater(pred, threshold), tf.int32), tf.cast(target, tf.int32))
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return {'net_12': net_12, 'imgs': imgs, 'labels': labels,
            'imgs_12': net_12['imgs'], 'labels_12': net_12['labels'],
            'keep_prob': keep_prob, 'keep_prob_12': net_12['keep_prob'],
            'cost': cost, 'pred': pred, 'accuracy': acc, 'features': concat,
            'recall': recall_24, 'thresholding': thresholding_24}
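In both detectors above, recall is computed by AND-ing two float-equality masks. The same computation reads more directly on boolean masks, as in this sketch (illustrative values; tf.maximum guards against an all-negative batch):

import tensorflow as tf

pred = tf.constant([0.9, 0.2, 0.7, 0.4])
target = tf.constant([1.0, 0.0, 0.0, 1.0])
threshold = 0.5

pred_pos = tf.greater(pred, threshold)                      # [T, F, T, F]
true_pos = tf.logical_and(pred_pos, tf.equal(target, 1.0))  # [T, F, F, F]
recall = (tf.reduce_sum(tf.to_float(true_pos))
          / tf.maximum(tf.reduce_sum(target), 1.0))         # 1 / 2 = 0.5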
trainer_nn.py (project: deep_learning, author: wecliqued)
def _calculate_f1_score(self):
        '''
        The F1 score is used instead of accuracy when the classes are strongly
        imbalanced.
        :return: the F1 score
        '''
        with tf.name_scope('stats'):
            prediction = self.prediction
            y = self.y
            with tf.name_scope('true_positive'):
                tp = tf.reduce_sum(tf.to_int32(tf.logical_and(tf.equal(prediction, y), tf.equal(prediction, 1))))
            with tf.name_scope('true_negative'):
                tn = tf.reduce_sum(tf.to_int32(tf.logical_and(tf.equal(prediction, y), tf.equal(prediction, 0))))
            with tf.name_scope('false_positive'):
                fp = tf.reduce_sum(tf.to_int32(tf.logical_and(tf.not_equal(prediction, y), tf.equal(prediction, 1))))
            with tf.name_scope('false_negative'):
                fn = tf.reduce_sum(tf.to_int32(tf.logical_and(tf.not_equal(prediction, y), tf.equal(prediction, 0))))

            with tf.name_scope('precision'):
                self.precision = tp / (tp + fp)
            with tf.name_scope('recall'):
                self.recall = tp / (tp + fn)
            with tf.name_scope('accuracy'):
                self.accuracy = (tp+tn) / (tp+tn+fp+fn)

            with tf.name_scope('f1_score'):
                self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)
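Note that the precision, recall, and F1 divisions above have no guard, so a batch with no positive predictions (or no positives at all) yields NaN. A common fix is a small epsilon in each denominator, sketched here under the assumption that tp, fp, and fn are scalar float tensors:

import tensorflow as tf

def f1_with_guard(tp, fp, fn, eps=1e-9):
    # eps keeps each denominator strictly positive.
    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    return 2 * precision * recall / (precision + recall + eps)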
hmc.py (project: GPflow, author: GPflow)
def _leapfrog_step(xs, ps, epsilon, max_iterations, logprob_grads_fn):
    def update_xs(ps_values):
        return _map(lambda x, p: x.assign_add(epsilon * p), xs, ps_values)

    def whether_proceed(grads):
        finits = _map(lambda grad: tf.reduce_all(tf.is_finite(grad)), grads)
        return tf.reduce_all(finits)

    def cond(i, proceed, _ps, _xs):
        return tf.logical_and(proceed, i < max_iterations)

    def body(i, _proceed, ps, _xs):
        xs_new = update_xs(ps)
        with tf.control_dependencies(xs_new):
            _, grads = logprob_grads_fn()
            proceed = whether_proceed(grads)
            def ps_step():
                with tf.control_dependencies(grads):
                    return _update_ps(ps, grads, epsilon)
            def ps_no_step():
                with tf.control_dependencies(grads):
                    return ps

            ps_new = tf.cond(proceed, ps_step, ps_no_step, strict=True)
            return i + 1, proceed, ps_new, xs_new

    result = _while_loop(cond, body, [0, True, ps, xs])

    _i, proceed_out, ps_out, xs_out = result
    deps = _flat([proceed_out], ps_out, xs_out)
    with tf.control_dependencies(deps):
        logprob_out, grads_out = logprob_grads_fn()
        return proceed_out, xs_out, ps_out, logprob_out, grads_out
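Here tf.logical_and combines the iteration bound with a numerical-health check inside a tf.while_loop condition. A minimal self-contained sketch of the same pattern:

import tensorflow as tf

def cond(i, x):
    # Proceed only while both predicates hold, as in _leapfrog_step's cond.
    return tf.logical_and(i < 10, tf.reduce_all(tf.is_finite(x)))

def body(i, x):
    return i + 1, x * 2.0

i_out, x_out = tf.while_loop(cond, body, [tf.constant(0), tf.constant(1.0)])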
vgg_preprocessing.py (project: Classification_Nets, author: BobLiu20)
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
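Here tf.logical_and folds the two dimension checks into a single predicate for tf.Assert. A hypothetical use of the function above (the shapes and offsets are illustrative):

import tensorflow as tf

image = tf.random_uniform([224, 224, 3])  # any [height, width, channels] tensor
patch = _crop(image, offset_height=10, offset_width=20,
              crop_height=100, crop_width=80)  # -> a [100, 80, 3] tensor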
preprocessor.py (project: tefla, author: openAGI)
def _crop(self, image, offset_height, offset_width, crop_height, crop_width):
        """Crops the given image using the provided offsets and sizes.
        Note that the method doesn't assume we know the input image size but it does
        assume we know the input image rank.

        Args:
          image: an image of shape [height, width, channels].
          offset_height: a scalar tensor indicating the height offset.
          offset_width: a scalar tensor indicating the width offset.
          crop_height: the height of the cropped image.
          crop_width: the width of the cropped image.

        Returns:
          the cropped (and resized) image.

        Raises:
          InvalidArgumentError: if the rank is not 3 or if the image dimensions are
            less than the crop size.
        """
        original_shape = tf.shape(image)

        rank_assertion = tf.Assert(
            tf.equal(tf.rank(image), 3),
            ['Rank of image must be equal to 3.'])
        with tf.control_dependencies([rank_assertion]):
            cropped_shape = tf.stack(
                [crop_height, crop_width, original_shape[2]])

        size_assertion = tf.Assert(
            tf.logical_and(
                tf.greater_equal(original_shape[0], crop_height),
                tf.greater_equal(original_shape[1], crop_width)),
            ['Crop size greater than the image size.'])

        offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
        with tf.control_dependencies([size_assertion]):
            image = tf.slice(image, offsets, cropped_shape)
        return tf.reshape(image, cropped_shape)
augmentation.py (project: fcn, author: ilovin)
def crop_to_fixed_size(img_tensor,annotation_tensor,output_shape):
    """
    the output_shape must be smaller than the input_shape
    :param img_tensor: [w,h,depth]
    :param annotation_tensor: [w,h,1]
    :param output_shape:
    :param mask_out_num:
    :return: (output_shape,output_shape,3) (output_shape,output_shape,1)
    """
    original_shape = tf.shape(img_tensor)
    crop_width, crop_height = output_shape[0],output_shape[1]
    image_width, image_height = original_shape[0],original_shape[1]
    img_cropped_shape = tf.stack([output_shape[0], output_shape[1], 3])
    annotate_cropped_shape = tf.stack([output_shape[0], output_shape[1], 1])

    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_width),
            tf.greater_equal(original_shape[1], crop_height)),
        ['Crop size greater than the image size.'])
    max_offset_height = tf.reshape(image_height - crop_height + 1, [])
    max_offset_width = tf.reshape(image_width - crop_width + 1, [])

    offset_height = tf.random_uniform(
        [], maxval=max_offset_height, dtype=tf.int32)
    offset_width = tf.random_uniform(
        [], maxval=max_offset_width, dtype=tf.int32)
    offsets = tf.to_int32(tf.stack([offset_width, offset_height, 0]))

    annotation_tensor = tf.to_int32(annotation_tensor)
    # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
    # define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.slice(img_tensor, offsets, img_cropped_shape)
        annotate = tf.slice(annotation_tensor,offsets,annotate_cropped_shape)
    return tf.reshape(image, img_cropped_shape),tf.reshape(annotate,annotate_cropped_shape)
augmentation.py (project: fcn, author: ilovin)
def crop_or_resize_to_fixed_size_and_rotate_output(img_tensor,
                                        annotation_tensor,
                                        output_shape,
                                        mask_out_num=None):
    """Returns tensor of a size (output_shape, output_shape, depth) and (output_shape, output_shape, 1).
    The function returns tensor that is of a size (output_shape, output_shape, depth)
    which is randomly cropped and rotate

    Parameters
    ----------
    img_tensor : Tensor of size (width, height, depth)
        Tensor with image
    annotation_tensor : Tensor of size (width, height, 1)
        Tensor with respective annotation
    output_shape : Tensor or list [int, int]
        Tensor of list representing desired output shape
    mask_out_number : int
        Number representing the mask out value.

    Returns
    -------
    cropped_padded_img : Tensor of size (output_shape[0], output_shape[1], 3).
        Image Tensor that was randomly scaled
    cropped_padded_annotation : Tensor of size (output_shape[0], output_shape[1], 1)
        Respective annotation Tensor that was randomly scaled with the same parameters
    """
    input_shape = tf.shape(img_tensor)[0:2]
    image_width, image_height = input_shape[0],input_shape[1]
    crop_width, crop_height = output_shape[0],output_shape[1]

    cropped_padded_img, cropped_padded_annotation = control_flow_ops.cond(
        tf.logical_and(
            tf.greater_equal(image_height, crop_height),
            tf.greater_equal(image_width, crop_width)),
        fn1=lambda:crop_to_fixed_size(img_tensor,annotation_tensor,output_shape),
        fn2=lambda:resize_to_fixed_size(img_tensor,annotation_tensor,output_shape,mask_out_num=mask_out_num))
    return cropped_padded_img, cropped_padded_annotation
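The logical_and of two scalar comparisons yields exactly the scalar boolean predicate that tf.cond requires for its branch dispatch. A minimal sketch of that dispatch (values are illustrative):

import tensorflow as tf

h, w = tf.constant(300), tf.constant(200)
crop_h, crop_w = 256, 256

fits = tf.logical_and(h >= crop_h, w >= crop_w)
branch = tf.cond(fits,
                 lambda: tf.constant('crop'),
                 lambda: tf.constant('resize'))  # -> 'resize' here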
vgg_preprocessing.py (project: fcn, author: ilovin)
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
core_test.py (project: lsdc, author: febert)
def setUp(self):
    super(LogicalBinaryOpsTest, self).setUp()

    self.ops = [
        ('logical_and', operator.and_, tf.logical_and, core.logical_and),
        ('logical_or', operator.or_, tf.logical_or, core.logical_or),
        ('logical_xor', operator.xor, tf.logical_xor, core.logical_xor),
    ]
    self.test_lt_1 = self.original_lt < 10
    self.test_lt_2 = self.original_lt < 5
    self.test_lt_1_broadcast = self.test_lt_1.tensor
    self.test_lt_2_broadcast = self.test_lt_2.tensor
    self.broadcast_axes = self.test_lt_1.axes
vgg_preprocessing.py (project: segmentation-models, author: desimone)
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

      Note that the method doesn't assume we know the input image size but it does
      assume we know the input image rank.

      Args:
        image: an image of shape [height, width, channels].
        offset_height: a scalar tensor indicating the height offset.
        offset_width: a scalar tensor indicating the width offset.
        crop_height: the height of the cropped image.
        crop_width: the width of the cropped image.

      Returns:
        the cropped (and resized) image.

      Raises:
        InvalidArgumentError: if the rank is not 3 or if the image dimensions are
          less than the crop size.
      """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion], tf.pack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.pack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion], tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
cnn_preprocessing.py (project: convolutional-vqa, author: paarthneekhara)
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
AttentiveACTCell.py (project: act-rte-inference, author: DeNeutoy)
def __call__(self, inputs, state, timestep = 0, scope=None):

        with vs.variable_scope(scope or type(self).__name__):

            # define within cell constants/ counters used to control while loop for ACTStep
            prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
            prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
            counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
            acc_outputs = tf.zeros_like(state, tf.float32, name="output_accumulator")
            acc_states = tf.zeros_like(state, tf.float32, name="state_accumulator")
            batch_mask = tf.constant(True, tf.bool,[self.batch_size])

            # The while loop stops when this predicate is FALSE,
            # i.e. when (probability < 1-eps AND counter < N) is false
            # for every element of the batch.


            pred = lambda batch_mask,prob_compare,prob,\
                          counter,state,inputs,acc_output,acc_state:\
                tf.reduce_any(
                    tf.logical_and(
                        tf.less(prob_compare,self.one_minus_eps),
                        tf.less(counter,self.N)))
                # only stop once every example in the batch has passed either threshold

            # Do while loop iterations until predicate above is false.
            _,_,remainders,iterations,_,_,output,next_state = \
                control_flow_ops.while_loop(pred,self.ACTStep,
                [batch_mask,prob_compare,prob,
                 counter,state,inputs, acc_outputs, acc_states])

        # accumulate remainder and N values
        self.ACT_remainder.append(tf.reduce_mean(1 - remainders))
        self.ACT_iterations.append(tf.reduce_mean(iterations))

        return output, next_state
ACTAttentionModel.py (project: act-rte-inference, author: DeNeutoy)
def do_act_steps(self, premise, hypothesis):


        self.rep_size = premise.get_shape()[-1].value

        self.one_minus_eps = tf.constant(1.0 - self.config.eps, tf.float32,[self.batch_size])
        self.N = tf.constant(self.config.max_computation, tf.float32,[self.batch_size])


        prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
        prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
        counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
        initial_state = tf.zeros([self.batch_size, 2*self.rep_size], tf.float32, name="state")
        acc_states = tf.zeros([self.batch_size,2*self.rep_size], tf.float32, name="state_accumulator")
        batch_mask = tf.constant(True, tf.bool,[self.batch_size])

        # The while loop stops when this predicate is FALSE,
        # i.e. when (probability < 1-eps AND counter < N) is false
        # for every element of the batch.

        pred = lambda batch_mask,prob_compare,prob,\
                      counter,state,premise, hypothesis ,acc_state:\
            tf.reduce_any(
                tf.logical_and(
                    tf.less(prob_compare,self.one_minus_eps),
                    tf.less(counter,self.N)))
                # only stop once every example in the batch has passed either threshold

        # Do while loop iterations until the predicate above is false.
        _,_,remainders,iterations,_,_,_,state = \
            tf.while_loop(pred,self.inference_step,
            [batch_mask,prob_compare,prob,
             counter,initial_state, premise, hypothesis, acc_states])

        return state, remainders, iterations
ACTDAModel.py (project: act-rte-inference, author: DeNeutoy)
def do_inference_steps(self, initial_state, premise, hypothesis):


        self.one_minus_eps = tf.constant(1.0 - self.config.eps, tf.float32,[self.batch_size])
        self.N = tf.constant(self.config.max_computation, tf.float32,[self.batch_size])


        prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
        prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
        counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
        acc_states = tf.zeros_like(initial_state, tf.float32, name="state_accumulator")
        batch_mask = tf.constant(True, tf.bool,[self.batch_size])

        # The while loop stops when this predicate is FALSE,
        # i.e. when (probability < 1-eps AND counter < N) is false
        # for every element of the batch.

        pred = lambda batch_mask,prob_compare,prob,\
                      counter,state,premise, hypothesis ,acc_state:\
            tf.reduce_any(
                tf.logical_and(
                    tf.less(prob_compare,self.one_minus_eps),
                    tf.less(counter,self.N)))
                # only stop once every example in the batch has passed either threshold

        # Do while loop iterations until the predicate above is false.
        _,_,remainders,iterations,_,_,_,state = \
            tf.while_loop(pred,self.inference_step,
            [batch_mask,prob_compare,prob,
             counter,initial_state,premise, hypothesis, acc_states])

        return state, remainders, iterations
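All three ACT snippets share the same halting predicate: an element-wise logical_and of the two per-example conditions, reduced with tf.reduce_any so the loop keeps running while any batch element is unfinished. The predicate in isolation (illustrative values):

import tensorflow as tf

prob = tf.constant([0.2, 0.99, 0.5])
counter = tf.constant([3.0, 2.0, 7.0])
one_minus_eps, max_steps = 0.99, 5.0

still_running = tf.logical_and(prob < one_minus_eps, counter < max_steps)
keep_going = tf.reduce_any(still_running)  # True: the first element still runs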

