Python examples of sparse_tensor_to_dense()

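tf.sparse_tensor_to_dense() converts a tf.SparseTensor into an ordinary dense tensor, filling every unspecified position with default_value (0 unless overridden). Before the project snippets, a minimal sketch, assuming the TF 1.x graph API (TF 2 renames the call to tf.sparse.to_dense):

import tensorflow as tf

# A 2x3 sparse tensor with two stored entries.
sp = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[7, 9], dense_shape=[2, 3])
dense = tf.sparse_tensor_to_dense(sp, default_value=0)

with tf.Session() as sess:
    print(sess.run(dense))
    # [[7 0 0]
    #  [0 0 9]]
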
test_ctc_decoders.py (project: speechless, author: JuliusKunze)
def test(self):
        def decode_greedily(beam_search: bool, merge_repeated: bool):
            aa_ctc_blank_aa_logits = tf.constant(np.array([[[1.0, 0.0]], [[1.0, 0.0]], [[0.0, 1.0]],
                                                           [[1.0, 0.0]], [[1.0, 0.0]]], dtype=np.float32))
            sequence_length = tf.constant(np.array([5], dtype=np.int32))

            (decoded_list,), log_probabilities = \
                tf.nn.ctc_beam_search_decoder(inputs=aa_ctc_blank_aa_logits,
                                              sequence_length=sequence_length,
                                              merge_repeated=merge_repeated,
                                              beam_width=1) \
                    if beam_search else \
                    tf.nn.ctc_greedy_decoder(inputs=aa_ctc_blank_aa_logits,
                                             sequence_length=sequence_length,
                                             merge_repeated=merge_repeated)

            return list(tf.Session().run(tf.sparse_tensor_to_dense(decoded_list)[0]))

        self.assertEqual([0], decode_greedily(beam_search=True, merge_repeated=True))
        self.assertEqual([0, 0], decode_greedily(beam_search=True, merge_repeated=False))
        self.assertEqual([0, 0], decode_greedily(beam_search=False, merge_repeated=True))
        self.assertEqual([0, 0, 0, 0], decode_greedily(beam_search=False, merge_repeated=False))
tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def to_dense(tensor):
    """Converts a sparse tensor into a dense tensor
    and returns it.

    # Arguments
        tensor: A tensor instance (potentially sparse).

    # Returns
        A dense tensor.

    # Examples
    ```python
        >>> from keras import backend as K
        >>> b = K.placeholder((2, 2), sparse=True)
        >>> print(K.is_sparse(b))
        True
        >>> c = K.to_dense(b)
        >>> print(K.is_sparse(c))
        False
"""
if is_sparse(tensor):
    return tf.sparse_tensor_to_dense(tensor)
else:
    return tensor

```

textdecoder.py (project: tefla, author: openAGI)
def tensors_to_item(self, keys_to_tensors):
        tensor = keys_to_tensors[self._tensor_key]
        shape = self._shape
        if self._shape_keys:
            shape_dims = []
            for k in self._shape_keys:
                shape_dim = keys_to_tensors[k]
                if isinstance(shape_dim, tf.SparseTensor):
                    shape_dim = tf.sparse_tensor_to_dense(shape_dim)
                shape_dims.append(shape_dim)
            shape = tf.reshape(tf.stack(shape_dims), [-1])
        if isinstance(tensor, tf.SparseTensor):
            if shape is not None:
                tensor = tf.sparse_reshape(tensor, shape)
            tensor = tf.sparse_tensor_to_dense(
                tensor, self._default_value)
        else:
            if shape is not None:
                tensor = tf.reshape(tensor, shape)

        return tensor
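
tensors_to_item optionally reshapes the SparseTensor before densifying it with self._default_value; the core of that path in isolation, as a hedged TF 1.x sketch:

import tensorflow as tf

# A length-6 sparse vector reshaped to 2x3 before densifying.
sp = tf.SparseTensor(indices=[[0], [5]], values=[1.0, 2.0], dense_shape=[6])
reshaped = tf.sparse_reshape(sp, [2, 3])
dense = tf.sparse_tensor_to_dense(reshaped, default_value=-1.0)

with tf.Session() as sess:
    print(sess.run(dense))
    # [[ 1. -1. -1.]
    #  [-1. -1.  2.]]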
tensorflow_backend.py (project: keras-customized, author: ambrite)
def to_dense(tensor):
    '''Converts a sparse tensor into a dense tensor
    and returns it.

    # Arguments
        tensor: A tensor instance (potentially sparse).

    # Returns
        A dense tensor.

    # Examples
    ```python
        >>> from keras import backend as K
        >>> b = K.placeholder((2, 2), sparse=True)
        >>> print(K.is_sparse(b))
        True
        >>> c = K.to_dense(b)
        >>> print(K.is_sparse(c))
        False
    ```
    '''
    if is_sparse(tensor):
        return tf.sparse_tensor_to_dense(tensor)
    else:
        return tensor

DataHandeling.py (project: DeepCellSeg, author: arbellea)
def _get_image(self):

        im_filename = tf.sparse_tensor_to_dense(tf.string_split(tf.expand_dims(self.raw_queue.dequeue(), 0), ':'), '')
        im_filename.set_shape([1, 2])
        im_raw = tf.read_file(self.base_folder+im_filename[0][0])
        seg_raw = tf.read_file(self.base_folder+im_filename[0][1])

        image = tf.reshape(tf.cast(tf.image.decode_png(im_raw, channels=1, dtype=tf.uint16), tf.float32),
                           self.image_size, name='input_image')
        seg = tf.reshape(tf.cast(tf.image.decode_png(seg_raw, channels=1, dtype=tf.uint8), tf.float32), self.image_size,
                         name='input_seg')
        if self.partial_frame:
            crop_y_start = int(((1-self.partial_frame) * self.image_size[0])/2)
            crop_y_end = int(((1+self.partial_frame) * self.image_size[0])/2)
            crop_x_start = int(((1-self.partial_frame) * self.image_size[1])/2)
            crop_x_end = int(((1+self.partial_frame) * self.image_size[1])/2)
            image = tf.slice(image, [crop_y_start, crop_x_start, 0], [crop_y_end, crop_x_end, -1])
            seg = tf.slice(seg, [crop_y_start, crop_x_start, 0], [crop_y_end, crop_x_end, -1])

        return image, seg, im_filename[0][0], im_filename[0][1]
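
The "image:seg" filename pair above is split with tf.string_split, which returns a SparseTensor, and immediately densified. A minimal sketch of that step (filenames hypothetical):

import tensorflow as tf

pair = tf.constant("frame01.png:seg01.png")
parts = tf.sparse_tensor_to_dense(
    tf.string_split(tf.expand_dims(pair, 0), ':'), '')
parts.set_shape([1, 2])

with tf.Session() as sess:
    print(sess.run(parts))  # [[b'frame01.png' b'seg01.png']]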
tensorflow_backend.py (project: keras, author: NVIDIA)
def to_dense(tensor):
    """Converts a sparse tensor into a dense tensor
    and returns it.

    # Arguments
        tensor: A tensor instance (potentially sparse).

    # Returns
        A dense tensor.

    # Examples
    ```python
        >>> from keras import backend as K
        >>> b = K.placeholder((2, 2), sparse=True)
        >>> print(K.is_sparse(b))
        True
        >>> c = K.to_dense(b)
        >>> print(K.is_sparse(c))
        False
"""
if is_sparse(tensor):
    return tf.sparse_tensor_to_dense(tensor)
else:
    return tensor

```

tensorflow_backend.py (project: keras_superpixel_pooling, author: parag2489)
def to_dense(tensor):
    """Converts a sparse tensor into a dense tensor and returns it.

    # Arguments
        tensor: A tensor instance (potentially sparse).

    # Returns
        A dense tensor.

    # Examples
    ```python
        >>> from keras import backend as K
        >>> b = K.placeholder((2, 2), sparse=True)
        >>> print(K.is_sparse(b))
        True
        >>> c = K.to_dense(b)
        >>> print(K.is_sparse(c))
        False
"""
if is_sparse(tensor):
    return tf.sparse_tensor_to_dense(tensor)
else:
    return tensor

```

seq2seq_helpers.py (project: DeepDeepParser, author: janmbuys)
def init_thin_stack(batch_size, max_num_concepts):
  """Initializes the thin stack.
  Returns:
    thin_stack: Tensor with the stack content.
    thin_stack_head_next: Index pointers to element after stack head.
  """
  # Stack initialized to -1, points to initial state.
  thin_stack = -tf.ones(tf.pack([batch_size, max_num_concepts]),
      dtype=tf.int32)
  # Reshape to ensure dimension 1 is known.
  thin_stack = tf.reshape(thin_stack, [-1, max_num_concepts])
  # Set to 0 at position 0.
  inds = tf.transpose(tf.to_int64(tf.pack(
   [tf.range(batch_size), tf.zeros(tf.pack([batch_size]), dtype=tf.int32)])))
  delta = tf.SparseTensor(inds, tf.ones(tf.pack([batch_size]), dtype=tf.int32),
      tf.pack([tf.to_int64(batch_size), max_num_concepts]))
  new_thin_stack = thin_stack + tf.sparse_tensor_to_dense(delta)
  # Position 0 is for empty stack; position after head always >= 1.
  thin_stack_head_next = tf.ones(tf.pack([batch_size]),
      dtype=tf.int32)
  return new_thin_stack, thin_stack_head_next
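
The sparse-delta pattern above (add a densified SparseTensor instead of using scatter updates) recurs throughout this page; a minimal standalone sketch, assuming TF 1.x:

import tensorflow as tf

batch_size = 3
width = 4
base = -tf.ones([batch_size, width], dtype=tf.int32)

# Set column 0 of every row to 0 by adding a sparse delta of ones.
inds = tf.stack([tf.range(batch_size, dtype=tf.int64),
                 tf.zeros([batch_size], dtype=tf.int64)], axis=1)
delta = tf.SparseTensor(inds, tf.ones([batch_size], dtype=tf.int32),
                        [batch_size, width])
updated = base + tf.sparse_tensor_to_dense(delta)

with tf.Session() as sess:
    print(sess.run(updated))
    # [[ 0 -1 -1 -1]
    #  [ 0 -1 -1 -1]
    #  [ 0 -1 -1 -1]]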
seq2seq_helpers.py (project: DeepDeepParser, author: janmbuys)
def mask_decoder_reduce(logit, thin_stack_head_next, logit_size, batch_size):
  """Ensures that we can only reduce when the stack has at least 1 item.

  For each batch entry k:
    if thin_stack_head_next[k] is 0 or 1,
      set logit[k][reduce_index] = -np.inf;
    otherwise leave it unchanged.
  """
  # Allow reduce only if at least 1 item on stack, i.e., pointer >= 2.
  update_vals = tf.pack([-np.inf, -np.inf, 0.0])
  update_val = tf.gather(update_vals, 
      tf.minimum(thin_stack_head_next,
      2*tf.ones(tf.pack([batch_size]), dtype=tf.int32)))

  re_filled = tf.fill(tf.pack([batch_size]),
      tf.to_int64(data_utils.REDUCE_ID))
  re_inds = tf.transpose(tf.pack(
      [tf.to_int64(tf.range(batch_size)), re_filled]))
  re_delta = tf.SparseTensor(re_inds, update_val, tf.to_int64(
      tf.pack([batch_size, logit_size])))
  new_logit = logit + tf.sparse_tensor_to_dense(re_delta)
  return new_logit
cluster_measurements.py (project: scalable_analytics, author: broadinstitute)
def _raw_features_to_dense_tensor(raw_features):
  """Convert the raw features expressing a sparse vector to a dense tensor.

  Args:
    raw_features: Parsed features in sparse matrix format.
  Returns:
    A dense tensor populated with the raw features.
  """
  # Load the vocabulary here as each batch of examples is parsed to ensure that
  # the examples and the mapping table are located in the same TensorFlow graph.
  measurement_table = tf.contrib.lookup.index_table_from_file(
      vocabulary_file=FLAGS.vocabulary_file)
  tf.logging.info("Loaded vocabulary file %s with %s terms.",
                  FLAGS.vocabulary_file, str(measurement_table.size()))

  indices = measurement_table.lookup(raw_features[MEASUREMENTS_FEATURE])

  merged = tf.sparse_merge(
      indices,
      raw_features[VALUES_FEATURE],
      vocab_size=measurement_table.size())
  return tf.sparse_tensor_to_dense(merged)
tf_example_decoder.py (project: tensorflow, author: luyishisi)
def _reshape_instance_masks(self, keys_to_tensors):
    """Reshape instance segmentation masks.

    The instance segmentation masks are reshaped to [num_instances, height,
    width] and cast to boolean type to save memory.

    Args:
      keys_to_tensors: a dictionary from keys to tensors.

    Returns:
      A 3-D boolean tensor of shape [num_instances, height, width].
    """
    masks = keys_to_tensors['image/segmentation/object']
    if isinstance(masks, tf.SparseTensor):
      masks = tf.sparse_tensor_to_dense(masks)
    height = keys_to_tensors['image/height']
    width = keys_to_tensors['image/width']
    to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)

    return tf.cast(tf.reshape(masks, to_shape), tf.bool)
Metrics.py (project: How-to-Learn-from-Little-Data, author: llSourcell)
def accuracy_instance(predictions, targets, n=[1, 2, 3, 4, 5, 10], nb_classes=5, nb_samples_per_class=10, batch_size=1):
    targets = tf.cast(targets, predictions.dtype)

    accuracy = tf.constant(value=0, shape=(batch_size, nb_samples_per_class), dtype=tf.float32)
    indices = tf.constant(value=0, shape=(batch_size, nb_classes+1), dtype=tf.float32)

    def step_((accuracy, indices), (p, t)):
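        # NOTE: the tuple parameters in this signature are Python 2 only;
        # Python 3 removed tuple unpacking in function definitions.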
        """with tf.variable_scope("Metric_step_var", reuse=True):
            accuracy = tf.get_variable(name="accuracy", shape=(batch_size, nb_samples_per_class),
                                       initializer=tf.constant_initializer(0), dtype=tf.float32)
            indices = tf.get_variable(name="indices", shape=(batch_size, nb_classes + 1),
                                      initializer=tf.constant_initializer(0), dtype=tf.float32)"""

        p = tf.cast(p, tf.int32)
        t = tf.cast(t, tf.int32)
        ##Accuracy Update
        batch_range = tf.cast(tf.range(0, batch_size), dtype=tf.int32)
        gather = tf.cast(tf.gather_nd(indices,tf.pack([tf.range(0,p.get_shape().as_list()[0]), t], axis=1)), tf.int32)
        index = tf.cast(tf.pack([batch_range, gather], axis=1), dtype=tf.int64)
        val = tf.cast(tf.equal(p, t), tf.float32)
        delta = tf.SparseTensor(indices=index, values=val, shape=tf.cast(accuracy.get_shape().as_list(), tf.int64))
        accuracy = accuracy + tf.sparse_tensor_to_dense(delta)
        ##Index Update
        index = tf.cast(tf.pack([batch_range, t], axis=1), dtype=tf.int64)
        val = tf.constant(1.0, shape=[batch_size])
        delta = tf.SparseTensor(indices=index, values=val, shape=tf.cast(indices.get_shape().as_list(), dtype=tf.int64))
        indices = indices + tf.sparse_tensor_to_dense(delta)
        return [accuracy, indices]
Metrics.py (project: NTM-One-Shot-TF, author: hmishra2250)
def accuracy_instance(predictions, targets, n=[1, 2, 3, 4, 5, 10], nb_classes=5, nb_samples_per_class=10, batch_size=1):
    targets = tf.cast(targets, predictions.dtype)

    accuracy = tf.constant(value=0, shape=(batch_size, nb_samples_per_class), dtype=tf.float32)
    indices = tf.constant(value=0, shape=(batch_size, nb_classes+1), dtype=tf.float32)

    def step_((accuracy, indices), (p, t)):
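        # NOTE: the tuple parameters in this signature are Python 2 only;
        # Python 3 removed tuple unpacking in function definitions.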
        """with tf.variable_scope("Metric_step_var", reuse=True):
            accuracy = tf.get_variable(name="accuracy", shape=(batch_size, nb_samples_per_class),
                                       initializer=tf.constant_initializer(0), dtype=tf.float32)
            indices = tf.get_variable(name="indices", shape=(batch_size, nb_classes + 1),
                                      initializer=tf.constant_initializer(0), dtype=tf.float32)"""

        p = tf.cast(p, tf.int32)
        t = tf.cast(t, tf.int32)
        ##Accuracy Update
        batch_range = tf.cast(tf.range(0, batch_size), dtype=tf.int32)
        gather = tf.cast(tf.gather_nd(indices,tf.stack([tf.range(0,p.get_shape().as_list()[0]), t], axis=1)), tf.int32)
        index = tf.cast(tf.stack([batch_range, gather], axis=1), dtype=tf.int64)
        val = tf.cast(tf.equal(p, t), tf.float32)
        delta = tf.SparseTensor(indices=index, values=val, dense_shape=tf.cast(accuracy.get_shape().as_list(), tf.int64))
        accuracy = accuracy + tf.sparse_tensor_to_dense(delta)
        ##Index Update
        index = tf.cast(tf.stack([batch_range, t], axis=1), dtype=tf.int64)
        val = tf.constant(1.0, shape=[batch_size])
        delta = tf.SparseTensor(indices=index, values=val, dense_shape=tf.cast(indices.get_shape().as_list(), dtype=tf.int64))
        indices = indices + tf.sparse_tensor_to_dense(delta)
        return [accuracy, indices]
ocr.py (project: tf-cnn-lstm-ocr-captcha, author: Luonic)
def check_decoder(logits, labels, timesteps):
  with tf.variable_scope('check_decoder'):
    decoded, log_prob = tf.nn.ctc_greedy_decoder(logits, timesteps)
    decoded = tf.cast(decoded[0], tf.int32)
    decoded = tf.sparse_tensor_to_dense(decoded)
    # decoded = tf.Print(decoded, [decoded], "Decoded")    
    return decoded
data.py (project: seglink, author: bgshih)
def input_stream(record_path, scope=None):
  """
  Input data stream
  ARGS
    `record_path`: tf records file path
  RETURN
    `streams`: data streams
  """
  with tf.device('/cpu:0'):
    with tf.variable_scope(scope or 'input_stream'):
      reader = tf.TFRecordReader()
      filename_queue = tf.train.string_input_producer([record_path], None)
      _, record_value = reader.read(filename_queue)
      features = tf.parse_single_example(record_value,
        {
          'image_jpeg': tf.FixedLenFeature([], tf.string),
          'image_name': tf.FixedLenFeature([], tf.string),
          'word_polygons': tf.VarLenFeature(tf.float32),
          # 'words': tf.VarLenFeature(tf.string) // FIXME: problem with parsing words
        })
      # decode jpeg image
      image = tf.cast(tf.image.decode_jpeg(features['image_jpeg'], channels=3), tf.float32)

      # extract bounding polygons
      word_polygons = tf.sparse_tensor_to_dense(features['word_polygons'])
      word_polygons = tf.reshape(word_polygons, [-1, WORD_POLYGON_DIM])

      # extract words
      # words = tf.sparse_tensor_to_dense(features['words'])

      # output streams
      streams = {'image': image,
                 'image_name': features['image_name'],
                 'image_jpeg': features['image_jpeg'],
                 'word_polygons': word_polygons}
      return streams
tensorflow_backend.py (project: keras, author: GeekLiB)
def to_dense(tensor):
    if is_sparse(tensor):
        return tf.sparse_tensor_to_dense(tensor)
    else:
        return tensor
common.py (project: HyperGAN, author: 255BITS)
def __init__(self, config, batch_size, one_hot=False):
        self.lookup = None
        reader = tf.TextLineReader()
        filename_queue = tf.train.string_input_producer(["chargan.txt"])
        key, x = reader.read(filename_queue)
        vocabulary = self.get_vocabulary()

        table = tf.contrib.lookup.string_to_index_table_from_tensor(
            mapping = vocabulary, default_value = 0)

        x = tf.string_join([x, tf.constant(" " * 64)]) 
        x = tf.substr(x, [0], [64])
        x = tf.string_split(x,delimiter='')
        x = tf.sparse_tensor_to_dense(x, default_value=' ')
        x = tf.reshape(x, [64])
        x = table.lookup(x)
        self.one_hot = one_hot
        if one_hot:
            x = tf.one_hot(x, len(vocabulary))
            x = tf.cast(x, dtype=tf.float32)
            x = tf.reshape(x, [1, int(x.get_shape()[0]), int(x.get_shape()[1]), 1])
        else:
            x = tf.cast(x, dtype=tf.float32)
            x -= len(vocabulary)/2.0
            x /= len(vocabulary)/2.0
            x = tf.reshape(x, [1,1, 64, 1])

        num_preprocess_threads = 8

        x = tf.train.shuffle_batch(
          [x],
          batch_size=batch_size,
          num_threads=num_preprocess_threads,
          capacity= 5000,
          min_after_dequeue=500,
          enqueue_many=True)

        self.x = x
        self.table = table
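
The character pipeline above pads each line with spaces, clips it to a fixed width, splits it into characters (a SparseTensor), and densifies with a blank default. A condensed sketch of those steps, assuming the TF 1.x string ops used here:

import tensorflow as tf

s = tf.constant(["ab"])
s = tf.string_join([s, tf.constant(" " * 8)])  # right-pad with spaces
s = tf.substr(s, [0], [8])                     # clip to a fixed width of 8
chars = tf.string_split(s, delimiter='')       # one character per sparse entry
dense = tf.sparse_tensor_to_dense(chars, default_value=' ')

with tf.Session() as sess:
    print(sess.run(dense))  # [[b'a' b'b' b' ' b' ' b' ' b' ' b' ' b' ']]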
cnn_lstm_otc_ocr.py (project: CNN_LSTM_CTC_Tensorflow, author: watsonyanghx)
def _build_train_op(self):
        self.global_step = tf.Variable(0, trainable=False)

        self.loss = tf.nn.ctc_loss(labels=self.labels,
                                   inputs=self.logits,
                                   sequence_length=self.seq_len)
        self.cost = tf.reduce_mean(self.loss)
        tf.summary.scalar('cost', self.cost)

        self.lrn_rate = tf.train.exponential_decay(FLAGS.initial_learning_rate,
                                                   self.global_step,
                                                   FLAGS.decay_steps,
                                                   FLAGS.decay_rate,
                                                   staircase=True)

        # self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.lrn_rate,
        #                                            momentum=FLAGS.momentum).minimize(self.cost,
        #                                                                              global_step=self.global_step)
        # self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.lrn_rate,
        #                                             momentum=FLAGS.momentum,
        #                                             use_nesterov=True).minimize(self.cost,
        #                                                                         global_step=self.global_step)

        self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.initial_learning_rate,
                                                beta1=FLAGS.beta1,
                                                beta2=FLAGS.beta2).minimize(self.loss,
                                                                            global_step=self.global_step)
        train_ops = [self.optimizer] + self._extra_train_ops
        self.train_op = tf.group(*train_ops)

        # Option 2: tf.contrib.ctc.ctc_beam_search_decoder
        # (it's slower but you'll get better results)
        # decoded, log_prob = tf.nn.ctc_greedy_decoder(logits, seq_len,merge_repeated=False)
        self.decoded, self.log_prob = tf.nn.ctc_beam_search_decoder(self.logits,
                                                                    self.seq_len,
                                                                    merge_repeated=False)
        self.dense_decoded = tf.sparse_tensor_to_dense(self.decoded[0], default_value=-1)
test_input.py (project: LiTeFlow, author: petrux)
def _decode(message):
    features = {
        'key': tf.FixedLenFeature([], tf.int64),
        'vector': tf.VarLenFeature(tf.int64)
    }
    parsed = tf.parse_single_example(
        serialized=message,
        features=features)
    key = parsed['key']
    vector = tf.sparse_tensor_to_dense(parsed['vector'])
    return key, vector
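
A quick usage sketch for _decode above, feeding it one hand-built tf.train.Example (the feature values are hypothetical):

import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    'key': tf.train.Feature(int64_list=tf.train.Int64List(value=[7])),
    'vector': tf.train.Feature(int64_list=tf.train.Int64List(value=[1, 2, 3])),
}))

key, vector = _decode(example.SerializeToString())
with tf.Session() as sess:
    print(sess.run([key, vector]))  # [7, array([1, 2, 3])]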
core_utils.py (project: sbrt2017, author: igormq)
def to_dense(x):
    if K.is_sparse(x):
        return tf.sparse_tensor_to_dense(x, default_value=-1)
    return x
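
With default_value=-1, ragged decode results are padded on the right; a minimal illustration:

import tensorflow as tf

# Two decoded label sequences of different lengths in one SparseTensor.
sp = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                     values=[3, 5, 8], dense_shape=[2, 2])

with tf.Session() as sess:
    print(sess.run(tf.sparse_tensor_to_dense(sp, default_value=-1)))
    # [[ 3  5]
    #  [ 8 -1]]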
ucf101.py (project: tensorlight, author: bsautermeister)
def _read_record(self, filename_queue):

        class FrameSeqRecord(object):
            pass

        record = FrameSeqRecord()
        record.height = self._data_img_size[0]
        record.width = self._data_img_size[1]
        record.depth = self._data_img_size[2]

        input_seq_length = self.input_shape[0]
        target_seq_length = self.target_shape[0]
        total_seq_length = input_seq_length + target_seq_length

        frame_bytes = record.height * record.width * record.depth
        record_bytes = frame_bytes * (total_seq_length)
        total_file_bytes = frame_bytes * self._serialized_sequence_length

        with tf.name_scope('read_record'):
            reader = tf.FixedLengthRecordReader(total_file_bytes)

            record.key, value = reader.read(filename_queue)
            decoded_record_bytes = tf.decode_raw(value, tf.uint8)

            decoded_record_bytes = tf.reshape(decoded_record_bytes,
                                              [self._serialized_sequence_length, record.height, record.width, record.depth])

            # calculate the begin tensor [start, 0, 0, 0]
            rnd_start_index = tf.to_int32(tf.random_uniform([1], 0, self._serialized_sequence_length - (total_seq_length), 
                                                            tf.int32))
            seq_start_offset = tf.SparseTensor(indices=[[0]], values=rnd_start_index, dense_shape=[4])
            sequence_start = tf.sparse_tensor_to_dense(seq_start_offset)

            # take a random slice of frames as input
            record.data = tf.slice(decoded_record_bytes, sequence_start,
                                   [total_seq_length, record.height, record.width, record.depth])
            return record
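
Here the SparseTensor merely builds the begin vector [start, 0, 0, 0] for tf.slice without any concatenation ops; a tiny sketch of the trick:

import tensorflow as tf

start = tf.constant([5], dtype=tf.int32)
offset = tf.SparseTensor(indices=[[0]], values=start, dense_shape=[4])
begin = tf.sparse_tensor_to_dense(offset)

with tf.Session() as sess:
    print(sess.run(begin))  # [5 0 0 0]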
ucf11.py (project: tensorlight, author: bsautermeister)
def _read_record(self, filename_queue):

        class FrameSeqRecord(object):
            pass

        record = FrameSeqRecord()
        record.height = self._data_img_size[0]
        record.width = self._data_img_size[1]
        record.depth = self._data_img_size[2]

        input_seq_length = self.input_shape[0]
        target_seq_length = self.target_shape[0]
        total_seq_length = input_seq_length + target_seq_length

        frame_bytes = record.height * record.width * record.depth
        record_bytes = frame_bytes * (total_seq_length)
        total_file_bytes = frame_bytes * self._serialized_sequence_length

        with tf.name_scope('read_record'):
            reader = tf.FixedLengthRecordReader(total_file_bytes)

            record.key, value = reader.read(filename_queue)
            decoded_record_bytes = tf.decode_raw(value, tf.uint8)

            decoded_record_bytes = tf.reshape(decoded_record_bytes,
                                              [self._serialized_sequence_length, record.height, record.width, record.depth])

            # calculate the begin tensor [start, 0, 0, 0]
            rnd_start_index = tf.to_int32(tf.random_uniform([1], 0, self._serialized_sequence_length - (total_seq_length), 
                                                            tf.int32))
            seq_start_offset = tf.SparseTensor(indices=[[0]], values=rnd_start_index, dense_shape=[4])
            sequence_start = tf.sparse_tensor_to_dense(seq_start_offset)

            # take a random slice of frames as input
            record.data = tf.slice(decoded_record_bytes, sequence_start,
                                   [total_seq_length, record.height, record.width, record.depth])
            return record
losses.py (project: fast-neural-style, author: coder-james)
def pixel_loss(layer, FLAGS):
    generated_images, content_images = tf.split(0, 2, layer)

    #img_bytes = tf.read_file(FLAGS.mask_file)
    #maskimage = tf.image.decode_jpeg(img_bytes)
    #maskimage = tf.to_float(maskimage)
    #m_mean = tf.reduce_mean(maskimage, axis=(1,2))
    #index = tf.where(m_mean < 1.5)
    #top_index = index + tf.to_int64(1)
    #down_index = index - tf.to_int64(1)

    #select = tf.zeros_like(m_mean, dtype=tf.float32)
    #values = tf.squeeze(tf.ones_like(index, dtype=tf.float32))
    #topvalues = tf.squeeze(tf.ones_like(top_index, dtype=tf.float32))
    #downvalues = tf.squeeze(tf.ones_like(down_index, dtype=tf.float32))
    #delta = tf.SparseTensor(index, values, [FLAGS.image_size])
    #topdelta = tf.SparseTensor(index, topvalues, [FLAGS.image_size])
    #downdelta = tf.SparseTensor(index, downvalues, [FLAGS.image_size])
    #black_select = select + tf.sparse_tensor_to_dense(delta)
    #top_select = select + tf.sparse_tensor_to_dense(topdelta)
    #down_select = select + tf.sparse_tensor_to_dense(downdelta)

    #black_select = tf.mul(black_select, top_select)
    #black_select = tf.mul(black_select, down_select)
    #black_select = tf.expand_dims(black_select, -1)
    #black_select = tf.matmul(black_select, tf.ones([1, FLAGS.image_size]))
    #black_select = tf.expand_dims(black_select, -1)

    #generated_images = tf.mul(generated_images, black_select)
    #content_images = tf.mul(content_images, black_select)

    size = tf.size(generated_images)
    pixel_loss = tf.nn.l2_loss(generated_images - content_images) * 2 / tf.to_float(size)
    return pixel_loss
ctc_decoder.py (project: neuralmonkey, author: ufal)
def decoded(self) -> tf.Tensor:
        if self.beam_width == 1:
            decoded, _ = tf.nn.ctc_greedy_decoder(
                inputs=self.logits, sequence_length=self.input_lengths,
                merge_repeated=self.merge_repeated_outputs)
        else:
            decoded, _ = tf.nn.ctc_beam_search_decoder(
                inputs=self.logits, sequence_length=self.input_lengths,
                beam_width=self.beam_width,
                merge_repeated=self.merge_repeated_outputs)

        return tf.sparse_tensor_to_dense(
            tf.sparse_transpose(decoded[0]),
            default_value=self.vocabulary.get_word_index(END_TOKEN))
DataHandeling.py (project: DeepCellSeg, author: arbellea)
def _get_image(self):

        filename = tf.sparse_tensor_to_dense(tf.string_split(tf.expand_dims(self.raw_queue.dequeue(), 0), ':'), '')
        filename.set_shape([1, 2])
        # seg_filename = self.seg_queue.dequeue()

        im_raw = tf.read_file(self.base_folder+filename[0][0])
        seg_raw = tf.read_file(self.base_folder+filename[0][1])
        image = tf.reshape(tf.cast(tf.image.decode_png(im_raw, channels=1, dtype=tf.uint16), tf.float32),
                           self.image_size, name='input_image')
        seg = tf.reshape(tf.cast(tf.image.decode_png(seg_raw, channels=1, dtype=tf.uint8), tf.float32), self.image_size,
                         name='input_seg')

        return image, seg, filename[0][0], filename[0][1]
evaluation_metrics_tf.py (project: deep-mlsa, author: spinningbytes)
def f1_score_keras(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # indices_x = K.arange(start=0, stop=y_true.get_shape()[0])
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true)[0], dtype='int64'), dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
                          name='precision_f1_semeval')

    # recall for each class
    recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
                       name='racall_f1_semeval')

    # f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
                         2 * (precision * recall) / (precision + recall), name='precision_f1_semeval')

    # return average f1 score over all classes
    return K.mean(f1_class)
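
f1_score_keras (and the two variants below) builds the 0/1 prediction matrix by densifying a SparseTensor of (row, argmax) index pairs. A standalone sketch with hypothetical 3-class scores:

import tensorflow as tf

y_pred = tf.constant([[0.1, 0.7, 0.2],
                      [0.8, 0.1, 0.1]])
rows = tf.range(tf.shape(y_pred, out_type=tf.int64)[0])
cols = tf.argmax(y_pred, axis=-1)
delta = tf.SparseTensor(tf.stack([rows, cols], axis=1),
                        tf.ones_like(rows, dtype=tf.float32),
                        tf.shape(y_pred, out_type=tf.int64))
one_hot = tf.sparse_tensor_to_dense(delta)

with tf.Session() as sess:
    print(sess.run(one_hot))
    # [[0. 1. 0.]
    #  [1. 0. 0.]]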
evaluation_metrics_tf.py (project: deep-mlsa, author: spinningbytes)
def f1_score_semeval(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # indices_x = K.arange(start=0, stop=y_true.get_shape()[0])
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true, name='get_indicec_x_shape')[0], dtype='int64'),
                              dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
                          name='precision_f1_semeval')

    # recall for each class
    recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
                       name='racall_f1_semeval')

    # f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
                         2 * (precision * recall) / (precision + recall), name='precision_f1_semeval')

    # return average f1 score over all classes
    return (f1_class[0] + f1_class[2]) / 2.0
evaluation_metrics_tf.py (project: deep-mlsa, author: spinningbytes)
def f1_score_task3(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true, name='get_indicec_x_shape')[0], dtype='int64'),
                              dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
                          name='precision_f1_semeval')

    # recall for each class
    recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
                       name='racall_f1_semeval')

    # f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
                         2 * (precision * recall) / (precision + recall), name='precision_f1_semeval')

    # return average f1 score over all classes
    return f1_class[1]

