Python image_summary() usage examples
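
tf.image_summary is the original (pre-1.0) TensorFlow op for writing image tensors to TensorBoard: it takes a tag, a 4-D [batch_size, height, width, channels] tensor with 1, 3, or 4 channels, and an optional max_images count. In TensorFlow 1.0 it was renamed tf.summary.image, with max_images becoming max_outputs. The snippets on this page come from open-source projects and all use the 0.x spelling. A minimal, self-contained sketch of both spellings (the placeholder shape is illustrative):

import tensorflow as tf

# TensorFlow 0.x spelling, as used by the snippets below.
images = tf.placeholder(tf.float32, [None, 28, 28, 1], name='images')
summary_op = tf.image_summary('input_images', images, max_images=10)

# TensorFlow >= 1.0 spelling of the same summary:
# summary_op = tf.summary.image('input_images', images, max_outputs=10)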

flownetsymple.py (project: neuro-stereo, author: lugu)
def conv_max_pool_2x2(x, conv_width, conv_height, in_depth, out_depth, name="conv"):

    with tf.name_scope(name) as scope:
        W_conv = weight_variable([conv_width, conv_height, in_depth, out_depth])
        b_conv = bias_variable([out_depth])
        h_conv = tf.nn.relu(conv2d(x, W_conv) + b_conv)
        h_pool = max_pool_2x2(h_conv)

    with tf.name_scope("summaries") as scope:

        # TIP: to display the 32 convolution filters, rearrange the
        # weights to look like 32 images with a transposition.
        a = tf.reshape(W_conv, [conv_width * conv_height * in_depth, out_depth])
        b = tf.transpose(a)
        c = tf.reshape(b, [out_depth, conv_width, conv_height * in_depth, 1])
        conv_image = tf.image_summary(name + " filter", c, out_depth)

        # TIP: by looking at the weights histogram, we can see whether the
        # weights are exploding or vanishing.
        W_conv_hist = tf.histogram_summary(name + " weights", W_conv)
        b_conv_hist = tf.histogram_summary(name + " biases", b_conv)

    return h_pool
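
Note: the reshape/transpose trick above turns the [conv_width, conv_height, in_depth, out_depth] weight tensor into out_depth single-channel images, which is the [batch, height, width, channels] layout tf.image_summary expects. A shape-only sketch of the same rearrangement in NumPy (dimensions are illustrative):

import numpy as np

# Shape-only check of the filter rearrangement above.
conv_width, conv_height, in_depth, out_depth = 5, 5, 1, 32
W = np.zeros((conv_width, conv_height, in_depth, out_depth))
a = W.reshape(conv_width * conv_height * in_depth, out_depth)
b = a.T
c = b.reshape(out_depth, conv_width, conv_height * in_depth, 1)
print(c.shape)  # (32, 5, 5, 1): 32 single-channel 5x5 images, one per filter
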
evaluator.py (project: easy-tensorflow, author: khanhptnk)
def run(self):
    """Run evaluation."""
    # Create the logging directory if it does not exist.
    if not os.path.isdir(self._eval_log_dir):
      os.makedirs(self._eval_log_dir)

    # Compute the loss function and other evaluation metrics.
    self._initialize()

    # Visualize input images in TensorBoard.
    self._summary_ops.append(tf.image_summary("Eval_Image", self._observations, max_images=5))

    # Use `slim.evaluation.evaluation_loop` to evaluate the model periodically.
    slim.evaluation.evaluation_loop(
        master='',
        checkpoint_dir=self._train_log_dir,
        logdir=self._eval_log_dir,
        num_evals=self._config.num_batches,
        eval_op=self._metrics_to_updates.values(),
        summary_op=tf.merge_summary(self._summary_ops),
        eval_interval_secs=self._config.eval_interval_secs)
trainer.py (project: easy-tensorflow, author: khanhptnk)
def run(self):
    """Run training."""
    # Create the logging directory if it does not exist.
    if not os.path.isdir(self._train_log_dir):
      os.makedirs(self._train_log_dir)

    # Load the data and compute the loss function.
    self._initialize()

    # Visualize input images in TensorBoard.
    self._summary_ops.append(tf.image_summary("Image_Train", self._observations, max_images=5))

    # Initialize optimizer.
    optimizer = tf.train.AdadeltaOptimizer(self._config.learning_rate)
    train_op = slim.learning.create_train_op(self._loss, optimizer)

    # Use `slim.learning.train` to manage training.
    slim.learning.train(train_op=train_op,
                        logdir=self._train_log_dir,
                        graph=self._graph,
                        number_of_steps=self._config.train_steps,
                        summary_op=tf.merge_summary(self._summary_ops),
                        save_summaries_secs=self._config.save_summaries_secs,
                        save_interval_secs=self._config.save_interval_secs)
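
Both snippets above collect individual summary ops in a Python list and merge them into a single op for slim to run. A standalone sketch of that pattern (the placeholder and loss are hypothetical stand-ins, not from the source project):

import tensorflow as tf

# Collect-and-merge summary pattern used by the trainer/evaluator above.
observations = tf.placeholder(tf.float32, [None, 64, 64, 3])  # hypothetical input batch
loss = tf.reduce_mean(tf.square(observations))                # hypothetical loss

summary_ops = []
summary_ops.append(tf.image_summary('Image_Train', observations, max_images=5))
summary_ops.append(tf.scalar_summary('loss', loss))
merged = tf.merge_summary(summary_ops)  # one op that serializes every summary in the list
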
trainer.py (project: how_to_convert_text_to_images, author: llSourcell)
def visualization(self, n):
        fake_sum_train, superimage_train =\
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test =\
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])

        hr_fake_sum_train, hr_superimage_train =\
            self.visualize_one_superimage(self.hr_fake_images[:n * n],
                                          self.hr_images[:n * n, :, :, :],
                                          n, "hr_train")
        hr_fake_sum_test, hr_superimage_test =\
            self.visualize_one_superimage(self.hr_fake_images[n * n:2 * n * n],
                                          self.hr_images[n * n:2 * n * n],
                                          n, "hr_test")
        self.hr_superimages =\
            tf.concat(0, [hr_superimage_train, hr_superimage_test])
        self.hr_image_summary =\
            tf.merge_summary([hr_fake_sum_train, hr_fake_sum_test])
tfbasemodel.py (project: Supply-demand-forecasting, author: LevinJ)
def get_input(self):
        # Input data: load the training, validation, and test sets.
        self.mnist = input_data.read_data_sets('data',
                                    one_hot=True,
                                    fake_data=False)
        # Input placeholders
        with tf.name_scope('input'):
            self.x = tf.placeholder(tf.float32, [None, 784], name='x-input')
            self.y_true = tf.placeholder(tf.float32, [None, 10], name='y-input')
        self.keep_prob = tf.placeholder(tf.float32, name='drop_out')
        # The reshape below is only for visualization.
        with tf.name_scope('input_reshape'):
            image_shaped_input = tf.reshape(self.x, [-1, 28, 28, 1])
            tf.image_summary('input', image_shaped_input, 10)

        return
model.py (project: Magic-Pixel, author: zhwhong)
def build_model(self):
        self.inputs = tf.placeholder(tf.float32, [self.batch_size, self.input_size, self.input_size, 3], name='real_images')
        # self.inputs = tf.placeholder(tf.float32, [None, self.input_size, self.input_size, 3], name='real_images')

        try:
            self.up_inputs = tf.image.resize_images(self.inputs, self.image_shape[0], self.image_shape[1], tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        except ValueError:
            # newer versions of tensorflow
            self.up_inputs = tf.image.resize_images(self.inputs, [self.image_shape[0], self.image_shape[1]], tf.image.ResizeMethod.NEAREST_NEIGHBOR)

        self.images = tf.placeholder(tf.float32, [self.batch_size] + self.image_shape, name='real_images')
        # self.images = tf.placeholder(tf.float32, [None] + self.image_shape, name='real_images')
        self.sample_images = tf.placeholder(tf.float32, [self.sample_size] + self.image_shape, name='sample_images')
        # self.sample_images = tf.placeholder(tf.float32, [None] + self.image_shape, name='sample_images')

        self.G = self.generator(self.inputs)
        self.G_sum = tf.image_summary("G", self.G)
        self.g_loss = tf.reduce_mean(tf.square(self.images-self.G))
        self.g_loss_sum = tf.scalar_summary("g_loss", self.g_loss)
        t_vars = tf.trainable_variables()
        self.g_vars = [var for var in t_vars if 'g_' in var.name]
        self.saver = tf.train.Saver()
trainer.py (project: StackGAN, author: hanzhanggit)
def visualization(self, n):
        fake_sum_train, superimage_train =\
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test =\
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])

        hr_fake_sum_train, hr_superimage_train =\
            self.visualize_one_superimage(self.hr_fake_images[:n * n],
                                          self.hr_images[:n * n, :, :, :],
                                          n, "hr_train")
        hr_fake_sum_test, hr_superimage_test =\
            self.visualize_one_superimage(self.hr_fake_images[n * n:2 * n * n],
                                          self.hr_images[n * n:2 * n * n],
                                          n, "hr_test")
        self.hr_superimages =\
            tf.concat(0, [hr_superimage_train, hr_superimage_test])
        self.hr_image_summary =\
            tf.merge_summary([hr_fake_sum_train, hr_fake_sum_test])
data_input.py (project: cnn_picture_gazebo, author: liuyandong1988)
def get_batch(image, label, batch_size, crop_size):
    # Data augmentation: keep the central region, then take a random crop.
    distorted_image = tf.image.central_crop(image, 33. / 37.)
    distorted_image = tf.random_crop(distorted_image, [crop_size, crop_size, 3])
    # Further augmentations, disabled in this version:
    # distorted_image = tf.image.random_flip_up_down(distorted_image)
    # distorted_image = tf.image.random_brightness(distorted_image, max_delta=50)
    # distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)

    # Assemble shuffled batches; `capacity` bounds the queue size and
    # `min_after_dequeue` controls how thoroughly the examples are mixed.
    images, label_batch = tf.train.shuffle_batch([distorted_image, label], batch_size=batch_size,
                                                 num_threads=4, capacity=50000, min_after_dequeue=10000)

    # Visualize in TensorBoard (disabled):
    # tf.image_summary('images', images)
    return images, tf.reshape(label_batch, [batch_size])

nerve_input.py (project: ultrasound-nerve-segmentation-in-tensorflow, author: loliverhennigh)
def nerve_inputs(batch_size):
  """ Construct nerve input net.
  Args:
    batch_size: Number of images per batch.
  Returns:
    images: Images. 4D tensor. Possible of size [batch_size, 84x84x4].
    mask: Images. 4D tensor. Possible of size [batch_size, 84x84x4].
  """

  shape = (420,580)

  tfrecord_filename = glb('../data/tfrecords/*') 
  print(tfrecord_filename)

  filename_queue = tf.train.string_input_producer(tfrecord_filename) 

  image, mask = read_data(filename_queue, shape)

  images, masks = _generate_image_label_batch(image, mask, batch_size)

  # Display on the TensorBoard summary page.
  tf.image_summary('images', images)
  tf.image_summary('mask', masks)

  return images, masks
mnist.py (project: neuro-stereo, author: lugu)
def conv_max_pool_2x2(x, conv_width, conv_height, in_depth, out_depth, name="conv"):

    with tf.name_scope(name) as scope:
        W_conv = weight_variable([conv_width, conv_height, in_depth, out_depth])
        b_conv = bias_variable([out_depth])
        h_conv = tf.nn.relu(conv2d(x, W_conv) + b_conv)
        h_pool = max_pool_2x2(h_conv)

    with tf.name_scope("summaries") as scope:

        # TIP: to display the 32 convolution filters, rearrange the
        # weights to look like 32 images with a transposition.
        a = tf.reshape(W_conv, [conv_width * conv_height * in_depth, out_depth])
        b = tf.transpose(a)
        c = tf.reshape(b, [out_depth, conv_width, conv_height * in_depth, 1])
        conv_image = tf.image_summary(name + " filter", c, out_depth)

        # TIP: by looking at the weights histogram, we can see whether the
        # weights are exploding or vanishing.
        W_conv_hist = tf.histogram_summary(name + " weights", W_conv)
        b_conv_hist = tf.histogram_summary(name + " biases", b_conv)

    return h_pool
cifarnet_preprocessing.py (project: the-neural-perspective, author: GokuMohandas)
def preprocess_for_eval(image, output_height, output_width):
  """Preprocesses the given image for evaluation.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.

  Returns:
    A preprocessed image.
  """
  tf.image_summary('image', tf.expand_dims(image, 0))
  # Transform the image to floats.
  image = tf.to_float(image)

  # Resize and crop if needed.
  resized_image = tf.image.resize_image_with_crop_or_pad(image,
                                                         output_width,
                                                         output_height)
  tf.image_summary('resized_image', tf.expand_dims(resized_image, 0))

  # Subtract off the mean and divide by the standard deviation of the pixels.
  return tf.image.per_image_whitening(resized_image)
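
Note: tf.image.per_image_whitening (zero mean, unit norm per image) was renamed tf.image.per_image_standardization in TensorFlow 1.0. Also, resize_image_with_crop_or_pad takes (image, target_height, target_width); the snippets on this page pass width first, which is harmless only when the target is square. A sketch of the same function in the post-1.0 API (an untested port, not from the source project):

import tensorflow as tf

def preprocess_for_eval_v1(image, output_height, output_width):
    """Post-1.0 port of preprocess_for_eval above (a sketch, not the original)."""
    tf.summary.image('image', tf.expand_dims(image, 0))
    image = tf.to_float(image)
    resized_image = tf.image.resize_image_with_crop_or_pad(
        image, output_height, output_width)  # (image, target_height, target_width)
    tf.summary.image('resized_image', tf.expand_dims(resized_image, 0))
    # per_image_whitening was renamed per_image_standardization in TF 1.0.
    return tf.image.per_image_standardization(resized_image)
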
cifarnet_preprocessing.py (project: fast-neural-style, author: coder-james)
def preprocess_for_eval(image, output_height, output_width):
  """Preprocesses the given image for evaluation.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.

  Returns:
    A preprocessed image.
  """
  tf.image_summary('image', tf.expand_dims(image, 0))
  # Transform the image to floats.
  image = tf.to_float(image)

  # Resize and crop if needed.
  resized_image = tf.image.resize_image_with_crop_or_pad(image,
                                                         output_width,
                                                         output_height)
  tf.image_summary('resized_image', tf.expand_dims(resized_image, 0))

  # Subtract off the mean and divide by the standard deviation of the pixels.
  return tf.image.per_image_whitening(resized_image)
zap50k.py (project: gan-image-similarity, author: marcbelmont)
def zap_data(FLAGS, shuffle):
    files = glob(FLAGS.file_pattern)
    filename_queue = tf.train.string_input_producer(
        files,
        shuffle=shuffle,
        num_epochs=None if shuffle else 1)
    image = read_image(filename_queue, shuffle)

    # Mini batch
    num_preprocess_threads = 1 if FLAGS.debug else 4
    min_queue_examples = 100 if FLAGS.debug else 10000
    if shuffle:
        images = tf.train.shuffle_batch(
            image,
            batch_size=FLAGS.batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * FLAGS.batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        images = tf.train.batch(
            image,
            FLAGS.batch_size,
            allow_smaller_final_batch=True)
    # tf.image_summary('images', images, max_images=8)
    return dict(batch=images, size=len(files))
main.py (project: gan-image-similarity, author: marcbelmont)
def generator(z, latent_c):
    depths = [32, 64, 64, 64, 64, 64, 3]
    sizes = zip(
        np.linspace(4, IMAGE_SIZE['resized'][0], len(depths)).astype(np.int),
        np.linspace(6, IMAGE_SIZE['resized'][1], len(depths)).astype(np.int))
    with slim.arg_scope([slim.conv2d_transpose],
                        normalizer_fn=slim.batch_norm,
                        kernel_size=3):
        with tf.variable_scope("gen"):
            size = sizes.pop(0)
            net = tf.concat(1, [z, latent_c])
            net = slim.fully_connected(net, depths[0] * size[0] * size[1])
            net = tf.reshape(net, [-1, size[0], size[1], depths[0]])
            for depth in depths[1:-1] + [None]:
                net = tf.image.resize_images(
                    net, sizes.pop(0),
                    tf.image.ResizeMethod.NEAREST_NEIGHBOR)
                if depth:
                    net = slim.conv2d_transpose(net, depth)
            net = slim.conv2d_transpose(
                net, depths[-1], activation_fn=tf.nn.tanh, stride=1, normalizer_fn=None)
            tf.image_summary("gen", net, max_images=8)
    return net
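
Two notes on the generator above: it is Python 2 code (zip returns a list, so sizes.pop(0) works), and the summarized tensor comes out of tanh, so it lies in [-1, 1]. tf.image_summary normalizes float input itself: all-positive tensors are rescaled so the maximum maps to 255, while tensors containing negatives are shifted so 0.0 maps to 127. An explicit rescale makes the display predictable; a small sketch (the placeholder is hypothetical):

import tensorflow as tf

# Rescale a tanh output from [-1, 1] to [0, 255] before summarizing it.
net = tf.placeholder(tf.float32, [None, 64, 96, 3])  # hypothetical generator output
tf.image_summary('gen_rescaled', (net + 1.0) * 127.5, max_images=8)
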
cifarnet_preprocessing.py (project: the-neural-perspective, author: johnsonc)
def preprocess_for_eval(image, output_height, output_width):
  """Preprocesses the given image for evaluation.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.

  Returns:
    A preprocessed image.
  """
  tf.image_summary('image', tf.expand_dims(image, 0))
  # Transform the image to floats.
  image = tf.to_float(image)

  # Resize and crop if needed.
  resized_image = tf.image.resize_image_with_crop_or_pad(image,
                                                         output_width,
                                                         output_height)
  tf.image_summary('resized_image', tf.expand_dims(resized_image, 0))

  # Subtract off the mean and divide by the standard deviation of the pixels.
  return tf.image.per_image_whitening(resized_image)
eval.py (project: tfPhotoClassifier, author: daiz713)
def distorted_inputs (tfrecord_file_paths=[]):
    fqueue = tf.train.string_input_producer(tfrecord_file_paths)
    reader = tf.TFRecordReader()
    key, serialized_example = reader.read(fqueue)
    features = tf.parse_single_example(serialized_example, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'image': tf.FixedLenFeature([], tf.string)
    })
    image = tf.image.decode_jpeg(features['image'], channels=size['depth'])
    image = tf.cast(image, tf.float32)
    image.set_shape([size['width'], size['height'], size['depth']])

    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL * min_fraction_of_examples_in_queue)

    images, labels = tf.train.shuffle_batch(
        [tf.image.per_image_whitening(image), tf.cast(features['label'], tf.int32)],
        batch_size=BATCH_SIZE,
        capacity=min_queue_examples + 3 * BATCH_SIZE,
        min_after_dequeue=min_queue_examples
    )

    images = tf.image.resize_images(images, size['input_width'], size['input_height'])
    tf.image_summary('images', images)
    return images, labels
train.py (project: tfPhotoClassifier, author: daiz713)
def distorted_inputs (tfrecord_file_paths=[]):
    fqueue = tf.train.string_input_producer(tfrecord_file_paths)
    reader = tf.TFRecordReader()
    key, serialized_example = reader.read(fqueue)
    features = tf.parse_single_example(serialized_example, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'image': tf.FixedLenFeature([], tf.string)
    })
    image = tf.image.decode_jpeg(features['image'], channels=size['depth'])
    image = tf.cast(image, tf.float32)
    image.set_shape([size['width'], size['height'], size['depth']])

    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue)

    images, labels = tf.train.shuffle_batch(
        [tf.image.per_image_whitening(image), tf.cast(features['label'], tf.int32)],
        batch_size=BATCH_SIZE,
        capacity=min_queue_examples + 3 * BATCH_SIZE,
        min_after_dequeue=min_queue_examples
    )

    images = tf.image.resize_images(images, size['input_width'], size['input_height'])
    tf.image_summary('images', images)
    return images, labels
cifar10_input.py (project: ml, author: hohoins)
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  images, label_batch = tf.train.shuffle_batch(
      [image, label],
      batch_size=batch_size,
      num_threads=num_preprocess_threads,
      capacity=min_queue_examples + 3 * batch_size,
      min_after_dequeue=min_queue_examples)

  # Display the training images in the visualizer.
  # tf.image_summary('images', images)
  tf.summary.image('images', images)

  return images, tf.reshape(label_batch, [batch_size])
cifar10_input.py (project: ml, author: hohoins)
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  images, label_batch = tf.train.shuffle_batch(
      [image, label],
      batch_size=batch_size,
      num_threads=num_preprocess_threads,
      capacity=min_queue_examples + 3 * batch_size,
      min_after_dequeue=min_queue_examples)

  # Display the training images in the visualizer.
  # tf.image_summary('images', images)
  tf.summary.image('images', images)

  return images, tf.reshape(label_batch, [batch_size])
trainer.py (project: how_to_convert_text_to_images, author: llSourcell)
def visualize_one_superimage(self, img_var, images, rows, filename):
        stacked_img = []
        for row in range(rows):
            img = images[row * rows, :, :, :]
            row_img = [img]  # real image
            for col in range(rows):
                row_img.append(img_var[row * rows + col, :, :, :])
            # each row is 1 real image followed by the fake images
            stacked_img.append(tf.concat(1, row_img))
        imgs = tf.expand_dims(tf.concat(0, stacked_img), 0)
        current_img_summary = tf.image_summary(filename, imgs)
        return current_img_summary, imgs
trainer.py (project: how_to_convert_text_to_images, author: llSourcell)
def visualization(self, n):
        fake_sum_train, superimage_train = \
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test = \
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])
trainer.py (project: how_to_convert_text_to_images, author: llSourcell)
def epoch_sum_images(self, sess, n):
        images_train, _, embeddings_train, captions_train, _ =\
            self.dataset.train.next_batch(n * n, cfg.TRAIN.NUM_EMBEDDING)
        images_train = self.preprocess(images_train, n)
        embeddings_train = self.preprocess(embeddings_train, n)

        images_test, _, embeddings_test, captions_test, _ = \
            self.dataset.test.next_batch(n * n, 1)
        images_test = self.preprocess(images_test, n)
        embeddings_test = self.preprocess(embeddings_test, n)

        images = np.concatenate([images_train, images_test], axis=0)
        embeddings =\
            np.concatenate([embeddings_train, embeddings_test], axis=0)

        if self.batch_size > 2 * n * n:
            images_pad, _, embeddings_pad, _, _ =\
                self.dataset.test.next_batch(self.batch_size - 2 * n * n, 1)
            images = np.concatenate([images, images_pad], axis=0)
            embeddings = np.concatenate([embeddings, embeddings_pad], axis=0)
        feed_dict = {self.images: images,
                     self.embeddings: embeddings}
        gen_samples, img_summary =\
            sess.run([self.superimages, self.image_summary], feed_dict)

        # save images generated for train and test captions
        scipy.misc.imsave('%s/train.jpg' % (self.log_dir), gen_samples[0])
        scipy.misc.imsave('%s/test.jpg' % (self.log_dir), gen_samples[1])

        # pfi_train = open(self.log_dir + "/train.txt", "w")
        pfi_test = open(self.log_dir + "/test.txt", "w")
        for row in range(n):
            # pfi_train.write('\n***row %d***\n' % row)
            # pfi_train.write(captions_train[row * n])

            pfi_test.write('\n***row %d***\n' % row)
            pfi_test.write(captions_test[row * n])
        # pfi_train.close()
        pfi_test.close()

        return img_summary
trainer.py (project: how_to_convert_text_to_images, author: llSourcell)
def visualize_one_superimage(self, img_var, images, rows, filename):
        stacked_img = []
        for row in range(rows):
            img = images[row * rows, :, :, :]
            row_img = [img]  # real image
            for col in range(rows):
                row_img.append(img_var[row * rows + col, :, :, :])
            # each row is 1 real image followed by the fake images
            stacked_img.append(tf.concat(1, row_img))
        imgs = tf.expand_dims(tf.concat(0, stacked_img), 0)
        current_img_summary = tf.image_summary(filename, imgs)
        return current_img_summary, imgs
q_network.py (project: agent-trainer, author: lopespm)
def _convolutional_layer(self, input, patch_size, stride, input_channels, output_channels, bias_init_value, scope_name):
        with tf.variable_scope(scope_name) as scope:
            weights = tf.get_variable(name='weights',
                                  shape=[patch_size, patch_size, input_channels, output_channels],
                                  initializer=tf.contrib.layers.xavier_initializer_conv2d())
            biases = tf.Variable(name='biases', initial_value=tf.constant(value=bias_init_value, shape=[output_channels]))
            conv = tf.nn.conv2d(input, weights, [1, stride, stride, 1], padding='SAME')

            linear_rectification_bias = tf.nn.bias_add(conv, biases)
            output = tf.nn.relu(linear_rectification_bias, name=scope.name)

            grid_x = output_channels // 4
            grid_y = 4 * input_channels
            kernels_image_grid = self._create_kernels_image_grid(weights, (grid_x, grid_y))
            tf.image_summary(scope_name + '/features', kernels_image_grid, max_images=1)

            if "_conv1" in scope_name:
                x_min = tf.reduce_min(weights)
                x_max = tf.reduce_max(weights)
                weights_0_to_1 = (weights - x_min) / (x_max - x_min)
                weights_0_to_255_uint8 = tf.image.convert_image_dtype(weights_0_to_1, dtype=tf.uint8)

                # to tf.image_summary format [batch_size, height, width, channels]
                weights_transposed = tf.transpose(weights_0_to_255_uint8, [3, 0, 1, 2])

                tf.image_summary(scope_name + '/features', weights_transposed[:,:,:,0:1], max_images=32)

        return output
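
The `_conv1` branch above shows the generic recipe for summarizing conv kernels: rescale the weights to [0, 1], convert to uint8, then transpose [height, width, in, out] so the output channels become the batch axis. A standalone sketch of that recipe (shapes are illustrative; _create_kernels_image_grid is a project-specific helper not shown here):

import tensorflow as tf

# Summarize the first input channel of each conv kernel as an image.
weights = tf.get_variable('conv1_weights', shape=[8, 8, 4, 32])  # [h, w, in, out]
w_min, w_max = tf.reduce_min(weights), tf.reduce_max(weights)
weights_0_to_1 = (weights - w_min) / (w_max - w_min)             # rescale to [0, 1]
weights_uint8 = tf.image.convert_image_dtype(weights_0_to_1, dtype=tf.uint8)
kernels = tf.transpose(weights_uint8, [3, 0, 1, 2])              # out-channels -> batch axis
tf.image_summary('conv1/kernels', kernels[:, :, :, 0:1], max_images=32)
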
callbacks.py (project: keras, author: GeekLiB)
def _set_model(self, model):
        import tensorflow as tf
        import keras.backend.tensorflow_backend as KTF

        self.model = model
        self.sess = KTF.get_session()
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:

                for weight in layer.weights:
                    tf.histogram_summary(weight.name, weight)

                    if self.write_images:
                        w_img = tf.squeeze(weight)

                        shape = w_img.get_shape()
                        if len(shape) > 1 and shape[0] > shape[1]:
                            w_img = tf.transpose(w_img)

                        if len(shape) == 1:
                            w_img = tf.expand_dims(w_img, 0)

                        w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)

                        tf.image_summary(weight.name, w_img)

                if hasattr(layer, 'output'):
                    tf.histogram_summary('{}_out'.format(layer.name),
                                         layer.output)
        self.merged = tf.merge_all_summaries()
        if self.write_graph:
            if parse_version(tf.__version__) >= parse_version('0.8.0'):
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph_def)
        else:
            self.writer = tf.train.SummaryWriter(self.log_dir)
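
Every deprecated call in this callback has a direct TensorFlow 1.0 equivalent. A sketch of the renamed API (the variable is a placeholder for whatever the callback summarizes):

import tensorflow as tf

# TF >= 1.0 spellings of the deprecated calls used in the callback above.
w = tf.Variable(tf.zeros([10, 10]), name='w')
tf.summary.histogram('w', w)                               # was tf.histogram_summary
tf.summary.image('w_img', tf.reshape(w, [1, 10, 10, 1]))   # was tf.image_summary
merged = tf.summary.merge_all()                            # was tf.merge_all_summaries
writer = tf.summary.FileWriter('/tmp/logs', tf.get_default_graph())  # was tf.train.SummaryWriter
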
cifar10_input.py (project: facial-emotion-detection-dl, author: dllatas)
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.
  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.
  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.image_summary('images', images, max_images=10)

  return images, tf.reshape(label_batch, [batch_size])
cifar10_input.py (project: facial-emotion-detection-dl, author: dllatas)
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.
  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.
  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.image_summary('images/train', images, max_images=10)
  return images, tf.reshape(label_batch, [batch_size])
cifar10_input.py (project: facial-emotion-detection-dl, author: dllatas)
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.
  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.
  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.image_summary('images', images, max_images=10)

  return images, tf.reshape(label_batch, [batch_size])
cifar10_input2.py (project: facial-emotion-detection-dl, author: dllatas)
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  images, label_batch = tf.train.shuffle_batch(
      [image, label],
      batch_size=batch_size,
      num_threads=num_preprocess_threads,
      capacity=min_queue_examples + 3 * batch_size,
      min_after_dequeue=min_queue_examples)

  # Display the training images in the visualizer.
  tf.image_summary('images', images, max_images=10)

  return images, tf.reshape(label_batch, [batch_size])
input.py (project: facial-emotion-detection-dl, author: dllatas)
def generate_train_batch(label, image, batch_size=FLAGS.batch_size):
    num_preprocess_threads = 1
    min_fraction_of_examples_in_queue = 0.5
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue)
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        # capacity=4,
        min_after_dequeue=min_queue_examples
        # min_after_dequeue=1
        )
    tf.image_summary('images', images)
    return images, tf.reshape(label_batch, [batch_size])
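
A pattern that recurs throughout these snippets is the queue sizing for tf.train.shuffle_batch: the shuffle_batch documentation recommends capacity = min_after_dequeue + (num_threads + a small safety margin) * batch_size, which is where the `min_queue_examples + 3 * batch_size` expressions above come from. A worked example with numbers typical of these snippets:

# Queue sizing rule of thumb from the tf.train.shuffle_batch docs:
#   capacity = min_after_dequeue + (num_threads + safety_margin) * batch_size
batch_size = 128
min_after_dequeue = 10000   # examples kept queued so shuffling stays well mixed
capacity = min_after_dequeue + 3 * batch_size
print(capacity)  # 10384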

