Python code examples for image_summary()
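The snippets collected on this page come from projects written against the pre-1.0 TensorFlow API, where image summaries were created with tf.image_summary(tag, tensor, max_images=N). TensorFlow 1.0 renamed the call to tf.summary.image(name, tensor, max_outputs=N); a few of the later snippets already use the newer spelling. A minimal sketch of the two forms (the placeholder shape below is illustrative, not taken from any of the projects listed here):

import tensorflow as tf

# images: a 4-D batch tensor of shape [batch, height, width, channels]
images = tf.placeholder(tf.float32, [None, 32, 32, 3])

# pre-1.0 API, as used in most of the snippets on this page
# summary_op = tf.image_summary('images', images, max_images=3)

# TensorFlow 1.x equivalent
summary_op = tf.summary.image('images', images, max_outputs=3)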

crbm.py (project: CDBN-for-Tensorflow, author: shygiants)
def __image_summary(self, name, image, max_images):
        tf.image_summary('{}/{}'.format(self.name, name), image, max_images=max_images)
cifar10_input.py (project: dlbench, author: hclhkbu)
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 8 
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 16 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 16 * batch_size)

  # Display the training images in the visualizer.
  #tf.image_summary('images', images)

  return images, tf.reshape(label_batch, [batch_size])
trainer.py (project: StackGAN, author: hanzhanggit)
def visualize_one_superimage(self, img_var, images, rows, filename):
        stacked_img = []
        for row in range(rows):
            img = images[row * rows, :, :, :]
            row_img = [img]  # real image
            for col in range(rows):
                row_img.append(img_var[row * rows + col, :, :, :])
            # each row is 1 real image followed by the fake images
            stacked_img.append(tf.concat(1, row_img))
        imgs = tf.expand_dims(tf.concat(0, stacked_img), 0)
        current_img_summary = tf.image_summary(filename, imgs)
        return current_img_summary, imgs
trainer.py (project: StackGAN, author: hanzhanggit)
def visualization(self, n):
        fake_sum_train, superimage_train = \
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test = \
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])
trainer.py (project: StackGAN, author: hanzhanggit)
def epoch_sum_images(self, sess, n):
        images_train, _, embeddings_train, captions_train, _ =\
            self.dataset.train.next_batch(n * n, cfg.TRAIN.NUM_EMBEDDING)
        images_train = self.preprocess(images_train, n)
        embeddings_train = self.preprocess(embeddings_train, n)

        images_test, _, embeddings_test, captions_test, _ = \
            self.dataset.test.next_batch(n * n, 1)
        images_test = self.preprocess(images_test, n)
        embeddings_test = self.preprocess(embeddings_test, n)

        images = np.concatenate([images_train, images_test], axis=0)
        embeddings =\
            np.concatenate([embeddings_train, embeddings_test], axis=0)

        if self.batch_size > 2 * n * n:
            images_pad, _, embeddings_pad, _, _ =\
                self.dataset.test.next_batch(self.batch_size - 2 * n * n, 1)
            images = np.concatenate([images, images_pad], axis=0)
            embeddings = np.concatenate([embeddings, embeddings_pad], axis=0)
        feed_dict = {self.images: images,
                     self.embeddings: embeddings}
        gen_samples, img_summary =\
            sess.run([self.superimages, self.image_summary], feed_dict)

        # save images generated for train and test captions
        scipy.misc.imsave('%s/train.jpg' % (self.log_dir), gen_samples[0])
        scipy.misc.imsave('%s/test.jpg' % (self.log_dir), gen_samples[1])

        # pfi_train = open(self.log_dir + "/train.txt", "w")
        pfi_test = open(self.log_dir + "/test.txt", "w")
        for row in range(n):
            # pfi_train.write('\n***row %d***\n' % row)
            # pfi_train.write(captions_train[row * n])

            pfi_test.write('\n***row %d***\n' % row)
            pfi_test.write(captions_test[row * n])
        # pfi_train.close()
        pfi_test.close()

        return img_summary
trainer.py (project: StackGAN, author: hanzhanggit)
def visualize_one_superimage(self, img_var, images, rows, filename):
        stacked_img = []
        for row in range(rows):
            img = images[row * rows, :, :, :]
            row_img = [img]  # real image
            for col in range(rows):
                row_img.append(img_var[row * rows + col, :, :, :])
            # each row is 1 real image followed by the fake images
            stacked_img.append(tf.concat(1, row_img))
        imgs = tf.expand_dims(tf.concat(0, stacked_img), 0)
        current_img_summary = tf.image_summary(filename, imgs)
        return current_img_summary, imgs
reader.py (project: tensorflow-ram, author: qingzew)
def _generate_image_and_label_batch(self, image, label, min_queue_examples,
                                        shuffle):
        """Construct a queued batch of images and labels.

        Args:
            image: 3-D Tensor of [height, width, 3] of type float32.
            label: 1-D Tensor of type int32.
            min_queue_examples: int32, minimum number of samples to retain
                in the queue that provides batches of examples.
            shuffle: boolean indicating whether to use a shuffling queue.

        Returns:
            images: Images. 4D tensor of [batch_size, height, width, 3] size.
            labels: Labels. 1D tensor of [batch_size] size.
        """
        # Create a queue that shuffles the examples, and then
        # read 'batch_size' images + labels from the example queue.
        if shuffle:
            images, labels = tf.train.shuffle_batch(
                [image, label],
                batch_size = self.batch_size,
                num_threads = self.num_threads,
                capacity = min_queue_examples + 3 * self.batch_size,
                min_after_dequeue = min_queue_examples)
        else:
            images, labels = tf.train.batch(
                [image, label],
                batch_size = self.batch_size,
                num_threads = self.num_threads,
                capacity = min_queue_examples + 3 * self.batch_size)

        # Display the training images in the visualizer.
        tf.image_summary('images', images, max_images = 3)

        return {'images' : images, 'labels' : labels}
inputs.py (project: various_residual_networks, author: yuhui-lin)
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle, smr_name):
    """Construct a queued batch of images and labels.
    Args:
        image: 3-D Tensor of [height, width, 3] of type float32.
        label: 1-D Tensor of type int32.
        min_queue_examples: int32, minimum number of samples to retain
        in the queue that provides batches of examples.
        batch_size: Number of images per batch.
        shuffle: boolean indicating whether to use a shuffling queue.
    Returns:
        images: Images. 4D tensor of [batch_size, height, width, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    # Create a queue that shuffles the examples, and then
    # read 'batch_size' images + labels from the example queue.
    num_preprocess_threads = 16
    if shuffle:
        images, label_batch = tf.train.shuffle_batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        images, label_batch = tf.train.batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size)

    # Display the training images in the visualizer.
    tf.image_summary(smr_name, images, max_images=FLAGS.max_images)

    return images, tf.reshape(label_batch, [batch_size])
train.py (project: TFFRCNN, author: InterVideo)
def build_image_summary(self):
        """
        A simple graph for writing an image summary
        :return:
        """
        log_image_data = tf.placeholder(tf.uint8, [None, None, 3])
        log_image_name = tf.placeholder(tf.string)
        log_image = tf.image_summary(log_image_name, tf.expand_dims(log_image_data, 0), max_images=1)
        # log_image = tf.image_summary(log_image_name, log_image_data, max_images=50)
        return log_image, log_image_data, log_image_name
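A minimal usage sketch for the placeholder-based summary graph above, assuming the pre-1.0 API throughout; the log directory, tag string, and the all-zero frame are stand-ins for illustration, not values from the TFFRCNN trainer:

import numpy as np
import tensorflow as tf

# the same small graph that build_image_summary() returns
log_image_data = tf.placeholder(tf.uint8, [None, None, 3])
log_image_name = tf.placeholder(tf.string)
log_image = tf.image_summary(log_image_name, tf.expand_dims(log_image_data, 0), max_images=1)

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a rendered detection image

with tf.Session() as sess:
    writer = tf.train.SummaryWriter('/tmp/logs')  # tf.summary.FileWriter in 1.x
    summary_str = sess.run(log_image,
                           feed_dict={log_image_data: frame,
                                      log_image_name: 'detections'})
    writer.add_summary(summary_str, 0)
    writer.flush()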
cifar10_tf.py (project: NetworkCompress, author: luzai)
def conv_layer(input, size_in, size_out, name="conv"):
    with tf.name_scope(name) as scope:
        w = tf.Variable(tf.truncated_normal([5, 5, size_in, size_out], stddev=0.1), name="W")
        b = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B")
        conv = tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding="SAME")
        act = tf.nn.relu(conv + b)
        tf.summary.histogram("weights", w)
        tf.summary.histogram("bias", b)
        tf.summary.histogram("activation", act)
        # act_list=tf.split(act,size_out,axis=)
        print(act.get_shape())
        # tf.Print(act,[act],message="!!!!!")
        # tf.Print(act,[act.get_shape()],message="!!!")
        # tf.Print(act,[tf.shape(act)],message="!!!!")

        x_min = tf.reduce_min(w)
        x_max = tf.reduce_max(w)
        weights_0_to_1 = (w - x_min) / (x_max - x_min)
        weights_0_to_255_uint8 = tf.image.convert_image_dtype(weights_0_to_1, dtype=tf.uint8)

        # to tf.image_summary format [batch_size, height, width, channels]
        weights_transposed = tf.transpose(weights_0_to_255_uint8, [3, 0, 1, 2])
        tf.summary.image('activation', weights_transposed)
        return tf.nn.max_pool(act, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")


# Add fully connected layer
model_input.py (project: SLAM, author: sanjeevkumar42)
def read_rgbd_data(self, input_queue):
        # original input size
        width_original = 480
        height_original = 640

        # input size
        width = 224
        height = 224

        value_rgb = tf.read_file(input_queue[0])
        value_depth = tf.read_file(input_queue[1])

        # Decoder
        png_rgb = tf.image.decode_png(value_rgb, channels=3)
        tf.image_summary('image', png_rgb)
        png_depth = tf.image.decode_png(value_depth, channels=1)

        # Reshape
        png_rgb = tf.reshape(png_rgb, [width_original, height_original, 3])
        png_depth = tf.reshape(png_depth, [width_original, height_original, 1])

        # Resize
        png_rgb = tf.image.resize_images(png_rgb, width, height)
        png_depth = tf.image.resize_images(png_depth, width, height)

        # Normalize depth
        png_depth = png_depth * 255.0 / tf.reduce_max(png_depth)

        image = tf.concat(2, (png_rgb, png_depth))

        twist = tf.reshape(input_queue[2], [1, 1, 6])

        return tf.cast(image, tf.float32), tf.cast(twist, tf.float32)
cifar10_input.py (project: SLAM, author: sanjeevkumar42)
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.image_summary('images', images)

  return images, tf.reshape(label_batch, [batch_size])
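All of the _generate_image_and_label_batch variants on this page rely on TensorFlow's queue runners: tf.train.batch and tf.train.shuffle_batch only yield data once the runners have been started in a session. A minimal driving-loop sketch, with dummy image/label tensors standing in for the preprocessed tensors that the real pipelines feed in:

import tensorflow as tf

# stand-ins for the single preprocessed image/label that the real readers produce
image = tf.random_uniform([24, 24, 3], dtype=tf.float32)
label = tf.constant(0, dtype=tf.int32)
images, labels = tf.train.batch([image, label], batch_size=8, num_threads=2, capacity=32)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        batch_images, batch_labels = sess.run([images, labels])
        print(batch_images.shape, batch_labels.shape)  # (8, 24, 24, 3) (8,)
    finally:
        coord.request_stop()
        coord.join(threads)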
summary_writer.py (project: tensorflow_node, author: elggem)
def image_summary(self, tag, image):
        image = image.reshape((1, image.shape[0], image.shape[1], 1)).astype(np.float32)

        image_summary_op = tf.image_summary(tag, image)
        image_summary_str = tf.Session().run(image_summary_op)

        SummaryWriter().writer.add_summary(image_summary_str, 0)
        SummaryWriter().writer.flush()

        rospy.loginfo(tag + " image plotted.")
        pass
prediction_model_stochastic_search.py (project: lsdc, author: febert)
def make_cdna_kerns_summary(cdna_kerns, t, suffix):

    sum = []
    cdna_kerns = tf.split(4, 10, cdna_kerns)
    for i, kern in enumerate(cdna_kerns):
        kern = tf.squeeze(kern)
        kern = tf.expand_dims(kern,-1)
        sum.append(
            tf.image_summary('step' + str(t) +'_filter'+ str(i)+ suffix, kern)
        )

    return  sum
prediction_model_downsized_lesslayer.py (project: lsdc, author: febert)
def make_cdna_kerns_summary(cdna_kerns, t, suffix):

    sum = []
    cdna_kerns = tf.split(4, 10, cdna_kerns)
    for i, kern in enumerate(cdna_kerns):
        kern = tf.squeeze(kern)
        kern = tf.expand_dims(kern,-1)
        sum.append(
            tf.image_summary('step' + str(t) +'_filter'+ str(i)+ suffix, kern)
        )

    return  sum
prediction_hiddenstate.py (project: lsdc, author: febert)
def make_cdna_kerns_summary(cdna_kerns, t, suffix):

    sum = []
    cdna_kerns = tf.split(4, 10, cdna_kerns)
    for i, kern in enumerate(cdna_kerns):
        kern = tf.squeeze(kern)
        kern = tf.expand_dims(kern,-1)
        sum.append(
            tf.image_summary('step' + str(t) +'_filter'+ str(i)+ suffix, kern)
        )

    return  sum
tf_model.py (project: TF-Net, author: Jorba123)
def image_summary(x, tensor_name=None, max_images=3):
    if tensor_name is None:
        tensor_name = x.op.name
    tf.summary.image(tensor_name, x, max_outputs=max_images)
cifar10.py (project: pixel-rnn, author: pby5)
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  images, label_batch = tf.train.shuffle_batch(
      [image, label],
      batch_size=batch_size,
      num_threads=num_preprocess_threads,
      capacity=min_queue_examples + 3 * batch_size,
      min_after_dequeue=min_queue_examples)

  # Display the training images in the visualizer.
  tf.image_summary('images', images)

  return images, tf.reshape(label_batch, [batch_size])
style_transfer.py (project: deep-style-transfer, author: albertlai)
def build_loss(self, session, texture_weight=15, tv=500):
        if self.is_training:
            with tf.name_scope('loss'):     
                self.loss = self.descriptor_loss.build(session, self.generator.image_in, texture_weight)
                if tv > 0:
                    print("tv loss %d" % tv)
                    with tf.name_scope('tv_loss'):
                        batches, h, w, c = self.generator.out.get_shape().as_list()
                        x = self.generator.out[:,1:,:,:]
                        x_1 = self.generator.out[:,:(h-1),:,:]
                        y = self.generator.out[:,:,1:,:]
                        y_1 = self.generator.out[:,:,:w-1,:]
                        x_var = tf.nn.l2_loss(x - x_1)
                        y_var = tf.nn.l2_loss(y - y_1)
                        x_n = batches * (h-1) * w * c
                        y_n = batches * h * (w-1) * c
                        tv_loss = tv * (x_var/x_n + y_var/y_n)
                    self.loss = self.loss + tv_loss

            loss_summary_name = "loss"
            self.summary = tf.scalar_summary(loss_summary_name, self.loss)
            image_summary_name = "out"
            self.image_summary = tf.image_summary(image_summary_name, self.generator.out + utils.MEAN_VALUES, max_images=3)
            input_summary_name = "in"
            self.input_summary = tf.image_summary(input_summary_name, self.image + utils.MEAN_VALUES, max_images=3)

            self.merged = tf.merge_all_summaries()

            self.global_step = tf.Variable(0, name='global_step', trainable=False)

            return self.loss
style_transfer.py (project: deep-style-transfer, author: albertlai)
def run_epoch(self, session, train_op, train_writer, batch_gen=None, num_iterations=NUM_ITERATIONS, output_dir="output", write_image=False):
        epoch_size = num_iterations
        start_time = time.time()
        image_skip = 1 if epoch_size < 5 else epoch_size / 5
        summary_skip = 1 if epoch_size < 25 else epoch_size / 25
        for step in range(epoch_size):
            if self.model_name == MULTISCALE:
                feed = self.add_noise_to_feed({})
            else:
                feed = {}
            batch = batch_gen.get_batch()
            feed[self.image] = batch
            if self.is_training:
                ops = [train_op, self.loss, self.merged, self.image_summary, self.input_summary, self.generator.out, self.global_step]
                _, loss, summary, image_summary, input_summary, last_out, global_step = session.run(ops, feed_dict=feed)
                if write_image and step % image_skip == 0:
                    utils.write_image(os.path.join('%s/images/valid_%d.png' % (output_dir, step)), last_out)
                if train_writer != None:
                    if step % summary_skip == 0:
                        train_writer.add_summary(summary, global_step)
                        train_writer.flush()
                    if step % image_skip == 0:
                        train_writer.add_summary(input_summary)
                        train_writer.flush()
                        train_writer.add_summary(image_summary)
                        train_writer.flush()
            else:
                ops = self.generator.out
                last_out = session.run(ops, feed_dict=feed)
                loss = summary = image_summary = input_summary = global_step = None
        return loss, summary, image_summary, last_out, global_step

