Python code examples of image()

preprocessor_test.py — project: tensorflow, author: luyishisi
def testRandomBlackPatches(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_black_patches, {
        'size_to_image_ratio': 0.5
    }))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    blacked_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                  preprocessing_options)
    blacked_images = blacked_tensor_dict[fields.InputDataFields.image]
    images_shape = tf.shape(images)
    blacked_images_shape = tf.shape(blacked_images)

    with self.test_session() as sess:
      (images_shape_, blacked_images_shape_) = sess.run(
          [images_shape, blacked_images_shape])
      self.assertAllEqual(images_shape_, blacked_images_shape_)
network_vgg16.py — project: HandDetection, author: YunqiuXu
def _crop_pool_layer(self, bottom, rois, name):
    with tf.variable_scope(name) as scope:
      batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
      # Get the normalized coordinates of bounding boxes
      bottom_shape = tf.shape(bottom)
      height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])
      width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])
      x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
      y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
      x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
      y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
      # Gradients would not flow back to rois anyway; stop_gradient just saves computation
      bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
      pre_pool_size = cfg.POOLING_SIZE * 2
      crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops")

    return slim.max_pool2d(crops, [2, 2], padding='SAME')
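
For reference, a minimal standalone sketch of the crop step used above. Everything here (the feature-map shape, boxes, and crop size) is assumed for illustration; the point is that tf.image.crop_and_resize expects boxes as normalized [y1, x1, y2, x2], which is why _crop_pool_layer concatenates in that order.

# Standalone sketch (assumed shapes/values) of tf.image.crop_and_resize:
# boxes are normalized [y1, x1, y2, x2]; box_ind maps each box to a batch image.
import tensorflow as tf

feature_map = tf.random_uniform([1, 32, 32, 256])        # [batch, H, W, C]
boxes = tf.constant([[0.0, 0.0, 0.5, 0.5],               # top-left quarter
                     [0.25, 0.25, 1.0, 1.0]])            # lower-right region
box_ind = tf.constant([0, 0], dtype=tf.int32)            # both boxes come from image 0
crops = tf.image.crop_and_resize(feature_map, boxes, box_ind, crop_size=[14, 14])

with tf.Session() as sess:
    print(sess.run(tf.shape(crops)))                      # [2, 14, 14, 256]
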
network.py — project: tf-faster-rcnn, author: endernewton
def _crop_pool_layer(self, bottom, rois, name):
    with tf.variable_scope(name) as scope:
      batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
      # Get the normalized coordinates of bounding boxes
      bottom_shape = tf.shape(bottom)
      height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])
      width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])
      x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
      y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
      x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
      y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
      # Gradients would not flow back to rois anyway; stop_gradient just saves computation
      bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
      pre_pool_size = cfg.POOLING_SIZE * 2
      crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops")

    return slim.max_pool2d(crops, [2, 2], padding='SAME')
tf_utils.py — project: thesis_scripts, author: PhilippKopp
def subtract_mean_multi(image_tensors, mean_image_path, channels=NUM_CHANNELS, image_size=512):

    mean_image = tf.convert_to_tensor(mean_image_path, dtype=tf.string)
    mean_file_contents = tf.read_file(mean_image)
    mean_uint8 = tf.image.decode_png(mean_file_contents, channels=channels)
    mean_uint8.set_shape([image_size, image_size, channels])


    images_mean_free = []
    for image_tensor in image_tensors:
        image_tensor.set_shape([image_size, image_size, channels])
        image = tf.cast(image_tensor, tf.float32)

        #subtract mean image
        image_mean_free = tf.subtract(image, tf.cast(mean_uint8, tf.float32))
        images_mean_free.append(image_mean_free)

    return images_mean_free
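
A hypothetical usage sketch for the helper above; the file names and NUM_CHANNELS value below are placeholders, not taken from the original project.

# Hypothetical usage of subtract_mean_multi (paths and NUM_CHANNELS are placeholders).
import tensorflow as tf

NUM_CHANNELS = 3
image_paths = ['img_a.png', 'img_b.png']
decoded = [tf.image.decode_png(tf.read_file(p), channels=NUM_CHANNELS)
           for p in image_paths]
mean_free = subtract_mean_multi(decoded, 'mean_image.png',
                                channels=NUM_CHANNELS, image_size=512)
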
tf_utils.py — project: thesis_scripts, author: PhilippKopp
def single_input_image(image_str, mean_image_path, png_with_alpha=False, image_size=512):

    mean_image_str = tf.convert_to_tensor(mean_image_path, dtype=tf.string)

    file_contents = tf.read_file(image_str)
    if png_with_alpha:
        uint8image = tf.image.decode_png(file_contents, channels=4)
        uint8image.set_shape([image_size, image_size, 4])
    else:
        uint8image = tf.image.decode_image(file_contents, channels=NUM_CHANNELS)
        uint8image.set_shape([image_size, image_size, NUM_CHANNELS])
    image = tf.cast(uint8image, tf.float32)

    #subtract mean image
    mean_file_contents = tf.read_file(mean_image_str)
    if png_with_alpha:
        mean_uint8 = tf.image.decode_png(mean_file_contents, channels=4)
        mean_uint8.set_shape([image_size, image_size, 4])
    else:
        mean_uint8 = tf.image.decode_image(mean_file_contents, channels=NUM_CHANNELS)
        mean_uint8.set_shape([image_size, image_size, NUM_CHANNELS])
    image_mean_free = tf.subtract(image, tf.cast(mean_uint8, tf.float32))

    return image_mean_free
network.py — project: nexar-2, author: lbin
def _crop_pool_layer(self, bottom, rois, name):
    with tf.variable_scope(name) as scope:
      batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
      # Get the normalized coordinates of bounding boxes
      bottom_shape = tf.shape(bottom)
      height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])
      width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])
      x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
      y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
      x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
      y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
      # Gradients would not flow back to rois anyway; stop_gradient just saves computation
      bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
      pre_pool_size = cfg.POOLING_SIZE * 2
      crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops")

    return slim.max_pool2d(crops, [2, 2], padding='SAME')
image.py — project: polyaxon, author: polyaxon
def random_crop(images, height, width):
    """Randomly crops an image/images to a given size.

    Args:
        images: 4-D Tensor of shape `[batch, height, width, channels]` or
            3-D Tensor of shape `[height, width, channels]`.
        height: `int`. The height to crop to.
        width: `int`. The width to crop to.

    Returns:
        If `images` was 4-D, a 4-D float Tensor of shape
        `[batch, new_height, new_width, channels]`.
        If `images` was 3-D, a 3-D float Tensor of shape
        `[new_height, new_width, channels]`.
    """
    images_shape = get_shape(images)
    if len(images_shape) not in (3, 4):
        raise ValueError("'images' must have either 3 or 4 dimensions, "
                         "received `{}`.".format(images_shape))

    if len(images_shape) == 4:
        return tf.map_fn(lambda img: tf.random_crop(img, [height, width, images_shape[-1]]), images)

    return tf.random_crop(images, [height, width, images_shape[-1]])
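
For reference, a minimal sketch (sizes assumed) of the tf.random_crop call this wrapper delegates to: it slices a fixed-size window at a uniformly random offset.

# Minimal sketch (assumed sizes) of the underlying tf.random_crop behaviour.
import tensorflow as tf

image = tf.random_uniform([224, 224, 3])
patch = tf.random_crop(image, size=[64, 64, 3])   # random 64x64 crop, channels kept

with tf.Session() as sess:
    print(sess.run(tf.shape(patch)))               # [64, 64, 3]
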
image.py — project: polyaxon, author: polyaxon
def adjust_gamma(image, gamma=1, gain=1):
    """Performs Gamma Correction on the input image.
    Also known as Power Law Transform. This function transforms the
    input image pixelwise according to the equation Out = In**gamma
    after scaling each pixel to the range 0 to 1.
    (A mirror to tf.image adjust_gamma)

    Args:
        image : A Tensor.
        gamma : A scalar. Non-negative real number.
        gain  : A scalar. The constant multiplier.

    Returns:
        A Tensor. Gamma corrected output image.

    Notes:
        For gamma greater than 1, the histogram shifts towards the left and
        the output image will be darker than the input image.
        For gamma less than 1, the histogram shifts towards the right and
        the output image will be brighter than the input image.

    References:
        [1] http://en.wikipedia.org/wiki/Gamma_correction
    """
    return tf.image.adjust_gamma(image, gamma, gain)
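
A short usage sketch (input values assumed) for the note above: with the image scaled to [0, 1], gamma greater than 1 darkens and gamma less than 1 brightens.

# Usage sketch (assumed input) for tf.image.adjust_gamma on a [0, 1] image.
import tensorflow as tf

image = tf.random_uniform([4, 4, 3], minval=0.0, maxval=1.0)
darker = tf.image.adjust_gamma(image, gamma=2.0, gain=1.0)     # gamma > 1 darkens
brighter = tf.image.adjust_gamma(image, gamma=0.5, gain=1.0)   # gamma < 1 brightens
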
preprocessor_test.py — project: tensorflow, author: luyishisi
def testNormalizeImage(self):
    preprocess_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 256,
        'target_minval': -1,
        'target_maxval': 1
    })]
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    images_expected = self.expectedImagesAfterNormalization()

    with self.test_session() as sess:
      (images_, images_expected_) = sess.run(
          [images, images_expected])
      images_shape_ = images_.shape
      images_expected_shape_ = images_expected_.shape
      expected_shape = [1, 4, 4, 3]
      self.assertAllEqual(images_expected_shape_, images_shape_)
      self.assertAllEqual(images_shape_, expected_shape)
      self.assertAllClose(images_, images_expected_)
preprocessor_test.py — project: tensorflow, author: luyishisi
def testRandomPixelValueScale(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_min = tf.to_float(images) * 0.9 / 255.0
    images_max = tf.to_float(images) * 1.1 / 255.0
    images = tensor_dict[fields.InputDataFields.image]
    values_greater = tf.greater_equal(images, images_min)
    values_less = tf.less_equal(images, images_max)
    values_true = tf.fill([1, 4, 4, 3], True)
    with self.test_session() as sess:
      (values_greater_, values_less_, values_true_) = sess.run(
          [values_greater, values_less, values_true])
      self.assertAllClose(values_greater_, values_true_)
      self.assertAllClose(values_less_, values_true_)
preprocessor_test.py — project: tensorflow, author: luyishisi
def testRandomImageScale(self):
    preprocess_options = [(preprocessor.random_image_scale, {})]
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images_scaled = tensor_dict[fields.InputDataFields.image]
    images_original_shape = tf.shape(images_original)
    images_scaled_shape = tf.shape(images_scaled)
    with self.test_session() as sess:
      (images_original_shape_, images_scaled_shape_) = sess.run(
          [images_original_shape, images_scaled_shape])
      self.assertTrue(
          images_original_shape_[1] * 0.5 <= images_scaled_shape_[1])
      self.assertTrue(
          images_original_shape_[1] * 2.0 >= images_scaled_shape_[1])
      self.assertTrue(
          images_original_shape_[2] * 0.5 <= images_scaled_shape_[2])
      self.assertTrue(
          images_original_shape_[2] * 2.0 >= images_scaled_shape_[2])
preprocessor_test.py — project: tensorflow, author: luyishisi
def testRandomAdjustBrightness(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_adjust_brightness, {}))
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_bright = tensor_dict[fields.InputDataFields.image]
    image_original_shape = tf.shape(images_original)
    image_bright_shape = tf.shape(images_bright)
    with self.test_session() as sess:
      (image_original_shape_, image_bright_shape_) = sess.run(
          [image_original_shape, image_bright_shape])
      self.assertAllEqual(image_original_shape_, image_bright_shape_)
preprocessor_test.py — project: tensorflow, author: luyishisi
def testRandomAdjustContrast(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_adjust_contrast, {}))
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_contrast = tensor_dict[fields.InputDataFields.image]
    image_original_shape = tf.shape(images_original)
    image_contrast_shape = tf.shape(images_contrast)
    with self.test_session() as sess:
      (image_original_shape_, image_contrast_shape_) = sess.run(
          [image_original_shape, image_contrast_shape])
      self.assertAllEqual(image_original_shape_, image_contrast_shape_)
preprocessor_test.py — project: tensorflow, author: luyishisi
def testRandomAdjustHue(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_adjust_hue, {}))
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_hue = tensor_dict[fields.InputDataFields.image]
    image_original_shape = tf.shape(images_original)
    image_hue_shape = tf.shape(images_hue)
    with self.test_session() as sess:
      (image_original_shape_, image_hue_shape_) = sess.run(
          [image_original_shape, image_hue_shape])
      self.assertAllEqual(image_original_shape_, image_hue_shape_)
preprocessor_test.py — project: tensorflow, author: luyishisi
def testRandomDistortColor(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_distort_color, {}))
    images_original = self.createTestImages()
    images_original_shape = tf.shape(images_original)
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_distorted_color = tensor_dict[fields.InputDataFields.image]
    images_distorted_color_shape = tf.shape(images_distorted_color)
    with self.test_session() as sess:
      (images_original_shape_, images_distorted_color_shape_) = sess.run(
          [images_original_shape, images_distorted_color_shape])
      self.assertAllEqual(images_original_shape_, images_distorted_color_shape_)
preprocessor_test.py — project: tensorflow, author: luyishisi
def testRandomResizeMethod(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_resize_method, {
        'target_size': (75, 150)
    }))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    resized_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                  preprocessing_options)
    resized_images = resized_tensor_dict[fields.InputDataFields.image]
    resized_images_shape = tf.shape(resized_images)
    expected_images_shape = tf.constant([1, 75, 150, 3], dtype=tf.int32)

    with self.test_session() as sess:
      (expected_images_shape_, resized_images_shape_) = sess.run(
          [expected_images_shape, resized_images_shape])
      self.assertAllEqual(expected_images_shape_,
                          resized_images_shape_)
preprocessor_test.py — project: tensorflow, author: luyishisi
def testResizeToRangeWithDynamicSpatialShape(self):
    """Tests image resizing, checking output sizes."""
    in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
    min_dim = 50
    max_dim = 100
    expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]

    for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
      in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
      out_image = preprocessor.resize_to_range(
          in_image, min_dimension=min_dim, max_dimension=max_dim)
      out_image_shape = tf.shape(out_image)
      with self.test_session() as sess:
        out_image_shape = sess.run(out_image_shape,
                                   feed_dict={in_image:
                                              np.random.randn(*in_shape)})
        self.assertAllEqual(out_image_shape, expected_shape)
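
The expected shapes in this test follow a simple rule. The helper below is not part of the test file; it is written here only to reproduce that arithmetic: scale so the short side reaches min_dimension, unless the long side would then exceed max_dimension, in which case scale the long side to max_dimension.

# Illustrative helper (not from the test file) reproducing the expected-shape arithmetic.
def expected_resize_to_range(height, width, min_dim, max_dim):
    scale = min_dim / float(min(height, width))        # grow the short side to min_dim
    if round(max(height, width) * scale) > max_dim:
        scale = max_dim / float(max(height, width))    # cap the long side at max_dim
    return int(round(height * scale)), int(round(width * scale))

print(expected_resize_to_range(60, 40, 50, 100))   # (75, 50)
print(expected_resize_to_range(15, 30, 50, 100))   # (50, 100)
print(expected_resize_to_range(15, 50, 50, 100))   # (30, 100)
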
preprocessor_test.py — project: tensorflow, author: luyishisi
def testResizeToRangeSameMinMax(self):
    """Tests image resizing, checking output sizes."""
    in_shape_list = [[312, 312, 3], [299, 299, 3]]
    min_dim = 320
    max_dim = 320
    expected_shape_list = [[320, 320, 3], [320, 320, 3]]

    for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
      in_image = tf.random_uniform(in_shape)
      out_image = preprocessor.resize_to_range(
          in_image, min_dimension=min_dim, max_dimension=max_dim)
      out_image_shape = tf.shape(out_image)

      with self.test_session() as sess:
        out_image_shape = sess.run(out_image_shape)
        self.assertAllEqual(out_image_shape, expected_shape)
network_resnet.py — project: HandDetection, author: YunqiuXu
def _add_gt_image(self):
    # add back mean
    image = self._image + cfg.PIXEL_MEANS
    # BGR to RGB (opencv uses BGR)
    resized = tf.image.resize_bilinear(image, tf.to_int32(self._im_info[:2] / self._im_info[2]))
    self._gt_image = tf.reverse(resized, axis=[-1])
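
A tiny sketch (values assumed) of the channel flip performed above: reversing the last axis turns a BGR tensor into RGB.

# Tiny sketch (assumed values) of the BGR -> RGB flip via tf.reverse on the last axis.
import tensorflow as tf

bgr = tf.constant([[[[10., 20., 30.]]]])    # shape [1, 1, 1, 3], channels in B, G, R order
rgb = tf.reverse(bgr, axis=[-1])            # -> [[[[30., 20., 10.]]]]
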
network_resnet.py — project: HandDetection, author: YunqiuXu
def _add_gt_image_summary(self):
    # use a customized visualization function to visualize the boxes
    if self._gt_image is None:
      self._add_gt_image()
    image = tf.py_func(draw_bounding_boxes, 
                      [self._gt_image, self._gt_boxes, self._im_info],
                      tf.float32, name="gt_boxes")

    return tf.summary.image('GROUND_TRUTH', image)
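
A sketch of the same pattern in isolation: wrapping a NumPy-side drawing function with tf.py_func so its output can feed tf.summary.image. The visualizer below is a hypothetical placeholder, not the draw_bounding_boxes function used above.

# Sketch with a hypothetical placeholder visualizer: tf.py_func runs a NumPy
# function inside the graph and its float32 output feeds tf.summary.image.
import numpy as np
import tensorflow as tf

def passthrough_visualizer(images):
    # placeholder for a real drawing routine; just returns the batch as float32
    return images.astype(np.float32)

images = tf.random_uniform([1, 64, 64, 3])
drawn = tf.py_func(passthrough_visualizer, [images], tf.float32, name="viz")
summary = tf.summary.image('EXAMPLE', drawn)
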
network_resnet.py — project: HandDetection, author: YunqiuXu
def _roi_pool_layer(self, bottom, rois, name):
    with tf.variable_scope(name) as scope:
      return tf.image.roi_pooling(bottom, rois,
                                  pooled_height=cfg.POOLING_SIZE,
                                  pooled_width=cfg.POOLING_SIZE,
                                  spatial_scale=1. / 16.)[0]
network_resnet.py — project: HandDetection, author: YunqiuXu
def _crop_pool_layer(self, bottom, rois, name):
    with tf.variable_scope(name) as scope:
      batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
      # Get the normalized coordinates of bounding boxes
      bottom_shape = tf.shape(bottom)
      height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])
      width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])
      x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
      y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
      x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
      y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
      # Gradients would not flow back to rois anyway; stop_gradient just saves computation
      bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
      pre_pool_size = cfg.POOLING_SIZE * 2
      crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops")

    return slim.max_pool2d(crops, [2, 2], padding='SAME')
network_resnet.py — project: HandDetection, author: YunqiuXu
def _build_network(self, is_training=True):
    # select initializers
    if cfg.TRAIN.TRUNCATED:
      initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
      initializer_bbox = tf.truncated_normal_initializer(mean=0.0, stddev=0.001)
    else:
      initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
      initializer_bbox = tf.random_normal_initializer(mean=0.0, stddev=0.001)

    net_conv = self._image_to_head(is_training)
    with tf.variable_scope(self._scope, self._scope):
      # build the anchors for the image
      self._anchor_component()
      # region proposal network
      rois = self._region_proposal(net_conv, is_training, initializer)
      # region of interest pooling
      if cfg.POOLING_MODE == 'crop':
        pool5 = self._crop_pool_layer(net_conv, rois, "pool5")
      else:
        raise NotImplementedError

    fc7 = self._head_to_tail(pool5, is_training)
    with tf.variable_scope(self._scope, self._scope):
      # region classification
      cls_prob, bbox_pred = self._region_classification(fc7, is_training, 
                                                        initializer, initializer_bbox)

    self._score_summaries.update(self._predictions)

    return rois, cls_prob, bbox_pred
network_resnet.py — project: HandDetection, author: YunqiuXu
def test_image(self, sess, image, im_info):
    feed_dict = {self._image: image,
                 self._im_info: im_info}

    cls_score, cls_prob, bbox_pred, rois = sess.run([self._predictions["cls_score"],
                                                     self._predictions['cls_prob'],
                                                     self._predictions['bbox_pred'],
                                                     self._predictions['rois']],
                                                    feed_dict=feed_dict)
    return cls_score, cls_prob, bbox_pred, rois
network_vgg16.py — project: HandDetection, author: YunqiuXu
def _add_gt_image(self):
    # add back mean
    image = self._image + cfg.PIXEL_MEANS
    # BGR to RGB (opencv uses BGR)
    resized = tf.image.resize_bilinear(image, tf.to_int32(self._im_info[:2] / self._im_info[2]))
    self._gt_image = tf.reverse(resized, axis=[-1])
network_vgg16.py — project: HandDetection, author: YunqiuXu
def _add_gt_image_summary(self):
    # use a customized visualization function to visualize the boxes
    if self._gt_image is None:
      self._add_gt_image()
    image = tf.py_func(draw_bounding_boxes, 
                      [self._gt_image, self._gt_boxes, self._im_info],
                      tf.float32, name="gt_boxes")

    return tf.summary.image('GROUND_TRUTH', image)
network_vgg16.py — project: HandDetection, author: YunqiuXu
def _roi_pool_layer(self, bottom, rois, name):
    with tf.variable_scope(name) as scope:
      return tf.image.roi_pooling(bottom, rois,
                                  pooled_height=cfg.POOLING_SIZE,
                                  pooled_width=cfg.POOLING_SIZE,
                                  spatial_scale=1. / 16.)[0]
network_vgg16.py — project: HandDetection, author: YunqiuXu
def test_image(self, sess, image, im_info):
    feed_dict = {self._image: image,
                 self._im_info: im_info}

    cls_score, cls_prob, bbox_pred, rois = sess.run([self._predictions["cls_score"],
                                                     self._predictions['cls_prob'],
                                                     self._predictions['bbox_pred'],
                                                     self._predictions['rois']],
                                                    feed_dict=feed_dict)
    return cls_score, cls_prob, bbox_pred, rois
network.py — project: tf-faster-rcnn, author: endernewton
def _add_gt_image(self):
    # add back mean
    image = self._image + cfg.PIXEL_MEANS
    # BGR to RGB (opencv uses BGR)
    resized = tf.image.resize_bilinear(image, tf.to_int32(self._im_info[:2] / self._im_info[2]))
    self._gt_image = tf.reverse(resized, axis=[-1])
network.py — project: tf-faster-rcnn, author: endernewton
def _add_gt_image_summary(self):
    # use a customized visualization function to visualize the boxes
    if self._gt_image is None:
      self._add_gt_image()
    image = tf.py_func(draw_bounding_boxes, 
                      [self._gt_image, self._gt_boxes, self._im_info],
                      tf.float32, name="gt_boxes")

    return tf.summary.image('GROUND_TRUTH', image)

