Example source code for the Python class FixedLenSequenceFeature()

batch_inputs.py (project: sample-cnn, author: tae-jun)
def _read_sequence_example(filename_queue,
                           n_labels=50, n_samples=59049, n_segments=10):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  context, sequence = tf.parse_single_sequence_example(
    serialized_example,
    context_features={
      'raw_labels': tf.FixedLenFeature([], dtype=tf.string)
    },
    sequence_features={
      'raw_segments': tf.FixedLenSequenceFeature([], dtype=tf.string)
    })

  segments = tf.decode_raw(sequence['raw_segments'], tf.float32)
  segments.set_shape([n_segments, n_samples])

  labels = tf.decode_raw(context['raw_labels'], tf.uint8)
  labels.set_shape([n_labels])
  labels = tf.cast(labels, tf.float32)

  return segments, labels
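For context, a matching writer would store one raw float32 segment per entry of the 'raw_segments' feature list and the uint8 label vector as a single bytes feature in the context. A minimal writer-side sketch, assuming NumPy inputs (make_sequence_example is a hypothetical helper, not part of sample-cnn):

import numpy as np
import tensorflow as tf

def make_sequence_example(segments, labels):
  # segments: float32 array of shape [n_segments, n_samples]
  # labels: uint8 array of shape [n_labels]
  example = tf.train.SequenceExample()
  example.context.feature['raw_labels'].bytes_list.value.append(
      labels.astype(np.uint8).tobytes())
  raw_segments = example.feature_lists.feature_list['raw_segments']
  for segment in segments.astype(np.float32):
    # one raw float32 buffer per FeatureList entry
    raw_segments.feature.add().bytes_list.value.append(segment.tobytes())
  return example.SerializeToString()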
dataset_schema_test.py (project: transform, author: tensorflow)
def test_sequence_feature_not_supported(self):
    feature_spec = {
        # FixedLenSequenceFeatures
        'fixed_seq_bool':
            tf.FixedLenSequenceFeature(shape=[10], dtype=tf.bool),
        'fixed_seq_bool_allow_missing':
            tf.FixedLenSequenceFeature(
                shape=[5], dtype=tf.bool, allow_missing=True),
        'fixed_seq_int':
            tf.FixedLenSequenceFeature(shape=[5], dtype=tf.int64),
        'fixed_seq_float':
            tf.FixedLenSequenceFeature(shape=[5], dtype=tf.float32),
        'fixed_seq_string':
            tf.FixedLenSequenceFeature(shape=[5], dtype=tf.string),
    }

    with self.assertRaisesRegexp(ValueError,
                                 'DatasetSchema does not support '
                                 'FixedLenSequenceFeature yet.'):
      sch.from_feature_spec(feature_spec)
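For contrast, the feature types that _from_parse_feature (excerpted from dataset_schema.py further down this page) does convert parse without error. A hypothetical sketch using the same sch module:

# Hypothetical sketch: FixedLenFeature and VarLenFeature are among the cases
# _from_parse_feature handles, so this spec should convert cleanly.
supported_spec = {
    'fixed_int': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=0),
    'var_string': tf.VarLenFeature(dtype=tf.string),
}
schema = sch.from_feature_spec(supported_spec)  # no ValueError raised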
dataprocessor.py (project: tdlstm, author: bluemonk482)
def read_tfrecord(filename_queue):
    reader = tf.TFRecordReader()
    _, examples = reader.read(filename_queue)

    context_features = {
        "length": tf.FixedLenFeature([], dtype=tf.int64)
    }
    sequence_features = {
        "tokens": tf.FixedLenSequenceFeature([], dtype=tf.int64),
        "labels": tf.FixedLenSequenceFeature([], dtype=tf.int64)
    }
    context_parsed, sequence_parsed = tf.parse_single_sequence_example(
        serialized=examples,
        context_features=context_features,
        sequence_features=sequence_features
    )
    return context_parsed, sequence_parsed
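Since 'tokens' and 'labels' come back with dynamic length, a common next step is batching with dynamic padding. A minimal sketch, assuming the same queue-runner API used throughout this page (batch_examples is a hypothetical helper):

def batch_examples(context_parsed, sequence_parsed, batch_size=32):
    # dynamic_pad=True pads 'tokens' and 'labels' to the longest
    # sequence in each batch; 'length' stays a scalar per example.
    return tf.train.batch(
        tensors=[context_parsed['length'],
                 sequence_parsed['tokens'],
                 sequence_parsed['labels']],
        batch_size=batch_size,
        dynamic_pad=True)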
data_utils.py (project: dilated-cnn-ner, author: iesl)
def example_parser(self, filename_queue):
        reader = tf.TFRecordReader()
        key, record_string = reader.read(filename_queue)
        features = {
            'labels': tf.FixedLenSequenceFeature([], tf.int64),
            'tokens': tf.FixedLenSequenceFeature([], tf.int64),
            'shapes': tf.FixedLenSequenceFeature([], tf.int64),
            'chars': tf.FixedLenSequenceFeature([], tf.int64),
            'seq_len': tf.FixedLenSequenceFeature([], tf.int64),
            'tok_len': tf.FixedLenSequenceFeature([], tf.int64),
        }

        _, example = tf.parse_single_sequence_example(serialized=record_string, sequence_features=features)
        labels = example['labels']
        tokens = example['tokens']
        shapes = example['shapes']
        chars = example['chars']
        seq_len = example['seq_len']
        tok_len = example['tok_len']
        # context = c['context']
        return labels, tokens, shapes, chars, seq_len, tok_len
        # return labels, tokens, labels, labels, labels
utils.py (project: VideoImagination, author: gitpub327)
def ReadInput(self, num_epochs=None, val=False, test=False):
  if val:
    filenames = tf.gfile.Glob(self.data_dir+'/surfing_val.tfrecords')
  elif test:
    filenames = tf.gfile.Glob(self.data_dir+'/surfing_test.tfrecords')
  else:
    filenames = tf.gfile.Glob(self.data_dir+'/surfing.tfrecords')
  filename_queue = tf.train.string_input_producer(
      filenames, num_epochs=num_epochs, shuffle=True)
  reader = tf.TFRecordReader()
  _, example = reader.read(filename_queue)
  feature_spec = {
      self.features: tf.FixedLenSequenceFeature(
          shape=[self.image_width * self.image_width * self.c_dim], dtype=tf.float32)}
  _, features = tf.parse_single_sequence_example(
      example, sequence_features=feature_spec)
  moving_objs = tf.reshape(
      features[self.features], [self.video_len, self.image_width, self.image_width, self.c_dim])
  examples = tf.train.shuffle_batch(
        [moving_objs],
        batch_size=self.batch_size,
        num_threads=self.batch_size,
        capacity=self.batch_size * 100,
        min_after_dequeue=self.batch_size * 4)
  return examples
readers.py (project: youtube-8m, author: wangheda)
def prepare_reader(self, filename_queue):

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    contexts, features = tf.parse_single_sequence_example(
        serialized_example,
        context_features={
            "video_id": tf.FixedLenFeature([], tf.string),
            "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            "rgb": tf.FixedLenSequenceFeature([], dtype=tf.string),
            "audio": tf.FixedLenSequenceFeature([], dtype=tf.string),
        })

    # read ground truth labels
    labels = (tf.cast(
        tf.sparse_to_dense(contexts["labels"].values, (self.num_classes,), 1,
            validate_indices=False),
        tf.bool))

    rgbs, num_frames = self.get_video_matrix(features["rgb"], 1024, self.max_frames)
    audios, num_frames = self.get_video_matrix(features["audio"], 1024, self.max_frames)

    batch_video_ids = tf.expand_dims(contexts["video_id"], 0)
    batch_rgbs = tf.expand_dims(rgbs, 0)
    batch_audios = tf.expand_dims(audios, 0)
    batch_labels = tf.expand_dims(labels, 0)
    batch_frames = tf.expand_dims(num_frames, 0)

    return batch_video_ids, batch_rgbs, batch_audios, batch_labels, batch_frames
dataset_schema.py (project: transform, author: tensorflow)
def as_batched_placeholder(self, column):
    """Returns the representation of this column as a placeholder Tensor.

    Args:
      column: The column to be represented.
    """
    raise NotImplementedError()

# note we don't provide tf.FixedLenSequenceFeature yet, because that is
# only used to parse tf.SequenceExample.
dataset_schema.py (project: transform, author: tensorflow)
def _from_parse_feature(parse_feature):
  """Convert a single feature spec to a ColumnSchema."""

  # FixedLenFeature
  if isinstance(parse_feature, tf.FixedLenFeature):
    representation = FixedColumnRepresentation(parse_feature.default_value)
    return ColumnSchema(parse_feature.dtype, parse_feature.shape,
                        representation)

  # FixedLenSequenceFeature
  if isinstance(parse_feature, tf.FixedLenSequenceFeature):
    raise ValueError('DatasetSchema does not support '
                     'FixedLenSequenceFeature yet.')

  # VarLenFeature
  if isinstance(parse_feature, tf.VarLenFeature):
    representation = ListColumnRepresentation()
    return ColumnSchema(parse_feature.dtype, [None], representation)

  # SparseFeature
  if isinstance(parse_feature, tf.SparseFeature):
    index_field = SparseIndexField(name=parse_feature.index_key,
                                   is_sorted=parse_feature.already_sorted)
    representation = SparseColumnRepresentation(
        value_field_name=parse_feature.value_key,
        index_fields=[index_field])
    return ColumnSchema(parse_feature.dtype, [parse_feature.size],
                        representation)

  raise ValueError('Cannot interpret feature spec {} with type {}'.format(
      parse_feature, type(parse_feature)))
split_video.py (project: Y8M, author: mpekalski)
def frame_example_2_np(seq_example_bytes, 
                       max_quantized_value=2,
                       min_quantized_value=-2):
  feature_names=['rgb','audio']
  feature_sizes = [1024, 128]
  with tf.Graph().as_default():
    contexts, features = tf.parse_single_sequence_example(
        seq_example_bytes,
        context_features={"video_id": tf.FixedLenFeature(
            [], tf.string),
                          "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            feature_name : tf.FixedLenSequenceFeature([], dtype=tf.string)
            for feature_name in feature_names
        })

    decoded_features = { name: tf.reshape(
        tf.cast(tf.decode_raw(features[name], tf.uint8), tf.float32),
        [-1, size]) for name, size in zip(feature_names, feature_sizes)
        }
    feature_matrices = {
        name: utils.Dequantize(decoded_features[name],
          max_quantized_value, min_quantized_value) for name in feature_names}

    with tf.Session() as sess:
      vid = sess.run(contexts['video_id'])
      labs = sess.run(contexts['labels'].values)
      rgb = sess.run(feature_matrices['rgb'])
      audio = sess.run(feature_matrices['audio'])

  return vid, labs, rgb, audio


#%% Split frame level file into three video level files: all, 1st half, 2nd half.
split_video.py (project: Y8M, author: mpekalski)
def build_graph():
    feature_names=['rgb','audio']
    feature_sizes = [1024, 128] 
    max_quantized_value=2
    min_quantized_value=-2

    seq_example_bytes = tf.placeholder(tf.string)
    contexts, features = tf.parse_single_sequence_example(
        seq_example_bytes,
        context_features={"video_id": tf.FixedLenFeature(
            [], tf.string),
                          "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            feature_name : tf.FixedLenSequenceFeature([], dtype=tf.string)
            for feature_name in feature_names
        })

    decoded_features = { name: tf.reshape(
        tf.cast(tf.decode_raw(features[name], tf.uint8), tf.float32),
        [-1, size]) for name, size in zip(feature_names, feature_sizes)
        }
    feature_matrices = {
        name: utils.Dequantize(decoded_features[name],
          max_quantized_value, min_quantized_value) for name in feature_names}

    tf.add_to_collection("vid_tsr", contexts['video_id'])
    tf.add_to_collection("labs_tsr", contexts['labels'].values)
    tf.add_to_collection("rgb_tsr", feature_matrices['rgb'])
    tf.add_to_collection("audio_tsr", feature_matrices['audio'])
    tf.add_to_collection("seq_example_bytes", seq_example_bytes)

#   with tf.Session() as sess:
#       writer = tf.summary.FileWriter('./graphs', sess.graph)
dataset.py (project: instacart-basket-prediction, author: colinmorris)
def record_spec(self):
    field_dtype = lambda fname: tf.float32 if data_fields.FIELD_LOOKUP[fname].dtype==float \
        else tf.int64
    context_feats = {featname: tf.FixedLenFeature([], field_dtype(featname))
      for featname in context_fields}
    seq_feats = {featname: tf.FixedLenSequenceFeature([], field_dtype(featname))
        for featname in sequence_fields}
    return dict(context_features=context_feats, sequence_features=seq_feats)
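The returned dict can be splatted straight into tf.parse_single_sequence_example. A hypothetical usage sketch:

# Hypothetical caller, inside the same class:
spec = self.record_spec()
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
    serialized_example, **spec)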
inputs.py (project: JetsonTX1_im2txt, author: Netzeband; byte-identical copies of this function also appear in im2txt_api by mainyaa, im2latex by dhoman01, semsearch by sanjana-Bijoe, jacksearch by srome, im2txt by huichen, im2txt_demo by siavash9000, and tf-tutorial by zchen0211)
def parse_sequence_example(serialized, image_feature, caption_feature):
  """Parses a tensorflow.SequenceExample into an image and caption.

  Args:
    serialized: A scalar string Tensor; a single serialized SequenceExample.
    image_feature: Name of SequenceExample context feature containing image
      data.
    caption_feature: Name of SequenceExample feature list containing integer
      captions.

  Returns:
    encoded_image: A scalar string Tensor containing a JPEG encoded image.
    caption: A 1-D int64 Tensor with dynamically specified length.
  """
  context, sequence = tf.parse_single_sequence_example(
      serialized,
      context_features={
          image_feature: tf.FixedLenFeature([], dtype=tf.string)
      },
      sequence_features={
          caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),
      })

  encoded_image = context[image_feature]
  caption = sequence[caption_feature]
  return encoded_image, caption
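A typical caller decodes the JPEG and leaves the caption at dynamic length. A minimal sketch; the feature names below are im2txt-style defaults and are assumptions here:

encoded_image, caption = parse_sequence_example(
    serialized_example,
    image_feature='image/data',
    caption_feature='image/caption_ids')
image = tf.image.decode_jpeg(encoded_image, channels=3)
caption_length = tf.shape(caption)[0]  # dynamic, per example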
tfrecord_read.py (project: Youtube8mdataset_kagglechallenge, author: jasonlee27)
def prepare_reader(self,
                       filename_queue,
                       max_quantized_value=2,
                       min_quantized_value=-2):
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)
        context_features, sequence_features = {"video_id": tf.FixedLenFeature([], tf.string),
                                               "labels": tf.VarLenFeature(tf.int64)}, None
        if self.sequence_data:
            sequence_features = {self.feature_name[0]: tf.FixedLenSequenceFeature([], dtype=tf.string),
                                 self.feature_name[1]: tf.FixedLenSequenceFeature([], dtype=tf.string), }
        else:
            context_features[self.feature_name[0]] = tf.FixedLenFeature(self.feature_size[0], tf.float32)
            context_features[self.feature_name[1]] = tf.FixedLenFeature(self.feature_size[1], tf.float32)

        contexts, features = tf.parse_single_sequence_example(serialized_example,
                                                              context_features=context_features,
                                                              sequence_features=sequence_features)
        labels = (tf.cast(contexts["labels"].values, tf.int64))

        if self.sequence_data:
            decoded_features = tf.reshape(tf.cast(tf.decode_raw(features[self.feature_name[0]], tf.uint8), tf.float32),
                                          [-1, self.feature_size[0]])
            video_matrix = Dequantize(decoded_features, max_quantized_value, min_quantized_value)

            decoded_features = tf.reshape(tf.cast(tf.decode_raw(features[self.feature_name[1]], tf.uint8), tf.float32),
                                          [-1, self.feature_size[1]])
            audio_matrix = Dequantize(decoded_features, max_quantized_value, min_quantized_value)

            num_frames = tf.minimum(tf.shape(decoded_features)[0], self.max_frames)
        else:
            video_matrix = contexts[self.feature_name[0]]
            audio_matrix = contexts[self.feature_name[1]]
            num_frames = tf.constant(-1)

        # Pad or truncate to 'max_frames' frames.
        # video_matrix = resize_axis(video_matrix, 0, self.max_frames)
        return contexts["video_id"], video_matrix, audio_matrix, labels, num_frames
feature_column_test.py (project: lsdc, author: febert)
def testCreateSequenceFeatureSpec(self):
    sparse_col = tf.contrib.layers.sparse_column_with_hash_bucket(
        "sparse_column", hash_bucket_size=100)
    embedding_col = tf.contrib.layers.embedding_column(
        tf.contrib.layers.sparse_column_with_hash_bucket(
            "sparse_column_for_embedding",
            hash_bucket_size=10),
        dimension=4)
    sparse_id_col = tf.contrib.layers.sparse_column_with_keys(
        "id_column", ["marlo", "omar", "stringer"])
    weighted_id_col = tf.contrib.layers.weighted_sparse_column(
        sparse_id_col, "id_weights_column")
    real_valued_col1 = tf.contrib.layers.real_valued_column(
        "real_valued_column", dimension=2)
    real_valued_col2 = tf.contrib.layers.real_valued_column(
        "real_valued_default_column", dimension=5, default_value=3.0)

    feature_columns = set([sparse_col, embedding_col, weighted_id_col,
                           real_valued_col1, real_valued_col2])

    feature_spec = fc._create_sequence_feature_spec_for_parsing(feature_columns)

    expected_feature_spec = {
        "sparse_column": tf.VarLenFeature(tf.string),
        "sparse_column_for_embedding": tf.VarLenFeature(tf.string),
        "id_column": tf.VarLenFeature(tf.string),
        "id_weights_column": tf.VarLenFeature(tf.float32),
        "real_valued_column": tf.FixedLenSequenceFeature(
            shape=[2], dtype=tf.float32, allow_missing=False),
        "real_valued_default_column": tf.FixedLenSequenceFeature(
            shape=[5], dtype=tf.float32, allow_missing=True)}

    self.assertDictEqual(expected_feature_spec, feature_spec)
input_ops.py (project: cws-tensorflow, author: JayYip)
def parse_example_queue(example_queue, config):
    """ Read one example.
      This function read one example and return context sequence and tag sequence
      correspondingly. 

      Args:
        filename_queue: A filename queue returned by string_input_producer
        context_feature_name: Context feature name in TFRecord. Set in ModelConfig
        tag_feature_name: Tag feature name in TFRecord. Set in ModelConfig

      Returns:
        input_seq: An int32 Tensor with different length.
        tag_seq: An int32 Tensor with different length.
      """

    #Parse one example
    context, features = tf.parse_single_sequence_example(
        example_queue,
        context_features={
            config.length_name: tf.FixedLenFeature([], dtype=tf.int64)
        },
        sequence_features={
            config.context_feature_name:
            tf.FixedLenSequenceFeature([], dtype=tf.int64),
            config.tag_feature_name:
            tf.FixedLenSequenceFeature([], dtype=tf.int64)
        })

    return (features[config.context_feature_name],
            features[config.tag_feature_name], context[config.length_name])
sequence_example.py (project: master-thesis, author: AndreasMadsen)
def parse_sequence_example(serialized_example):
    context, sequence = tf.parse_single_sequence_example(
        serialized=serialized_example,
        context_features={
             "length": tf.FixedLenFeature([], dtype=tf.int64)
        },
        sequence_features={
             "source": tf.FixedLenSequenceFeature([], dtype=tf.int64),
             "target": tf.FixedLenSequenceFeature([], dtype=tf.int64)
        }
    )

    return (context['length'], sequence['source'], sequence['target'])
pipelines.py (project: polyaxon, author: polyaxon)
def _create_sequence_features(self):
        sequences = {}
        for feature, feature_type in self.meta_data['sequence_features_types'].items():
            if feature_type == 'int':
                sequences[feature] = tf.FixedLenSequenceFeature([], dtype=tf.int64)
            elif feature_type == 'float':
                sequences[feature] = tf.FixedLenSequenceFeature([], dtype=tf.float32)
            elif feature_type == 'bytes':
                sequences[feature] = tf.FixedLenSequenceFeature([], dtype=tf.string)
            else:
                raise TypeError("The feature type `{}` is not supported.".format({feature_type}))

        return sequences
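A hypothetical meta_data layout that exercises all three branches:

# Hypothetical meta_data; the real schema comes from the pipeline config.
meta_data = {'sequence_features_types': {
    'token_ids': 'int', 'scores': 'float', 'frames': 'bytes'}}
# With self.meta_data set as above, _create_sequence_features() returns:
# {'token_ids': tf.FixedLenSequenceFeature([], dtype=tf.int64),
#  'scores':    tf.FixedLenSequenceFeature([], dtype=tf.float32),
#  'frames':    tf.FixedLenSequenceFeature([], dtype=tf.string)}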
utility.py (project: vessel-classification, author: GlobalFishingWatch)
def single_feature_file_reader(filename_queue, num_features):
    """ Read and interpret data from a set of TFRecord files.

  Args:
    filename_queue: a queue of filenames to read through.
    num_features: the depth of the features.

  Returns:
    A pair of tuples:
      1. a context dictionary for the feature
      2. the vessel movement features, tensor of dimension [width, num_features].
  """

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    # The serialized example is converted back to actual values.
    context_features, sequence_features = tf.parse_single_sequence_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        context_features={'mmsi': tf.FixedLenFeature([], tf.int64), },
        sequence_features={
            'movement_features': tf.FixedLenSequenceFeature(
                shape=(num_features, ), dtype=tf.float32)
        })

    return context_features, sequence_features
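Note the non-scalar shape=(num_features,): each FeatureList entry is a whole float vector rather than a single value. A writer-side sketch of one matching record (an assumption, not GlobalFishingWatch code):

import numpy as np
import tensorflow as tf

def make_movement_example(mmsi, movement):
  # movement: float32 array of shape [width, num_features]
  example = tf.train.SequenceExample()
  example.context.feature['mmsi'].int64_list.value.append(int(mmsi))
  feature_list = example.feature_lists.feature_list['movement_features']
  for row in movement.astype(np.float32):
    # one length-num_features float vector per time step
    feature_list.feature.add().float_list.value.extend(row)
  return example.SerializeToString()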
input_pipeline.py (project: seq2seq, author: google)
def make_data_provider(self, **kwargs):

    context_keys_to_features = {
        self.params["image_field"]: tf.FixedLenFeature(
            [], dtype=tf.string),
        "image/format": tf.FixedLenFeature(
            [], dtype=tf.string, default_value=self.params["image_format"]),
    }

    sequence_keys_to_features = {
        self.params["caption_ids_field"]: tf.FixedLenSequenceFeature(
            [], dtype=tf.int64),
        self.params["caption_tokens_field"]: tf.FixedLenSequenceFeature(
            [], dtype=tf.string)
    }

    items_to_handlers = {
        "image": tfexample_decoder.Image(
            image_key=self.params["image_field"],
            format_key="image/format",
            channels=3),
        "target_ids":
        tfexample_decoder.Tensor(self.params["caption_ids_field"]),
        "target_tokens":
        tfexample_decoder.Tensor(self.params["caption_tokens_field"]),
        "target_len": tfexample_decoder.ItemHandlerCallback(
            keys=[self.params["caption_tokens_field"]],
            func=lambda x: tf.size(x[self.params["caption_tokens_field"]]))
    }

    decoder = TFSEquenceExampleDecoder(
        context_keys_to_features, sequence_keys_to_features, items_to_handlers)

    dataset = tf.contrib.slim.dataset.Dataset(
        data_sources=self.params["files"],
        reader=tf.TFRecordReader,
        decoder=decoder,
        num_samples=None,
        items_to_descriptions={})

    return tf.contrib.slim.dataset_data_provider.DatasetDataProvider(
        dataset=dataset,
        shuffle=self.params["shuffle"],
        num_epochs=self.params["num_epochs"],
        **kwargs)
readers.py (project: yt8m, author: forwchen; byte-identical copies of this function also appear in youtube-8m by google and Video-Classification by boyaolin)
def prepare_serialized_examples(self, serialized_example,
      max_quantized_value=2, min_quantized_value=-2):

    contexts, features = tf.parse_single_sequence_example(
        serialized_example,
        context_features={"video_id": tf.FixedLenFeature(
            [], tf.string),
                          "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            feature_name : tf.FixedLenSequenceFeature([], dtype=tf.string)
            for feature_name in self.feature_names
        })

    # read ground truth labels
    labels = (tf.cast(
        tf.sparse_to_dense(contexts["labels"].values, (self.num_classes,), 1,
            validate_indices=False),
        tf.bool))

    # loads (potentially) different types of features and concatenates them
    num_features = len(self.feature_names)
    assert num_features > 0, "No feature selected: feature_names is empty!"

    assert len(self.feature_names) == len(self.feature_sizes), \
    "length of feature_names (={}) != length of feature_sizes (={})".format( \
    len(self.feature_names), len(self.feature_sizes))

    num_frames = -1  # the number of frames in the video
    feature_matrices = [None] * num_features  # an array of different features
    for feature_index in range(num_features):
      feature_matrix, num_frames_in_this_feature = self.get_video_matrix(
          features[self.feature_names[feature_index]],
          self.feature_sizes[feature_index],
          self.max_frames,
          max_quantized_value,
          min_quantized_value)
      if num_frames == -1:
        num_frames = num_frames_in_this_feature
      else:
        tf.assert_equal(num_frames, num_frames_in_this_feature)

      feature_matrices[feature_index] = feature_matrix

    # cap the number of frames at self.max_frames
    num_frames = tf.minimum(num_frames, self.max_frames)

    # concatenate different features
    video_matrix = tf.concat(feature_matrices, 1)

    # convert to batch format.
    # TODO: Do proper batch reads to remove the IO bottleneck.
    batch_video_ids = tf.expand_dims(contexts["video_id"], 0)
    batch_video_matrix = tf.expand_dims(video_matrix, 0)
    batch_labels = tf.expand_dims(labels, 0)
    batch_frames = tf.expand_dims(num_frames, 0)

    return batch_video_ids, batch_video_matrix, batch_labels, batch_frames
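The max_quantized_value/min_quantized_value arguments threaded through these readers feed a Dequantize helper (called directly in the split_video.py and tfrecord_read.py excerpts above). A sketch of that helper as it appears in the YouTube-8M starter code, reproduced from memory and so only an approximation:

def Dequantize(feat_vector, max_quantized_value=2, min_quantized_value=-2):
  # Map uint8-derived values in [0, 255] back to floats in
  # [min_quantized_value, max_quantized_value].
  quantized_range = max_quantized_value - min_quantized_value
  scalar = quantized_range / 255.0
  bias = (quantized_range / 512.0) + min_quantized_value
  return feat_vector * scalar + bias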
utilities.py (project: unsupervised-2017-cvprw, author: imatge-upc)
def read_my_file_format_dis(filename_queue, is_training):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    context_features = {
        "height": tf.FixedLenFeature([], dtype=tf.int64),
        "width": tf.FixedLenFeature([], dtype=tf.int64),
        "sequence_length": tf.FixedLenFeature([], dtype=tf.int64),
        "text": tf.FixedLenFeature([], dtype=tf.string),
        "label": tf.FixedLenFeature([], dtype=tf.int64)
    }
    sequence_features = {
        "frames": tf.FixedLenSequenceFeature([], dtype=tf.string),
        "masks": tf.FixedLenSequenceFeature([], dtype=tf.string)
    }
    context_parsed, sequence_parsed = tf.parse_single_sequence_example(
        serialized=serialized_example,
        context_features=context_features,
        sequence_features=sequence_features
    )

    height = 128           # context_parsed['height'].eval()
    width  = 128           # context_parsed['width'].eval()
    sequence_length = 32   # context_parsed['sequence_length'].eval()

    clip  = decode_frames(sequence_parsed['frames'], height, width, sequence_length)

    # generate one hot vector
    label = context_parsed['label']
    label = tf.one_hot(label-1, FLAGS.num_class)
    text  = context_parsed['text']

    # randomly sample clips of 16 frames
    if is_training:
        idx = tf.squeeze(tf.random_uniform([1], 0, sequence_length-FLAGS.seq_length+1, dtype=tf.int32))
    else:
        idx = 8
    clip = clip[idx:idx+FLAGS.seq_length] / 255.0 * 2 - 1

    if is_training:
        # randomly reverse data
        reverse   = tf.squeeze(tf.random_uniform([1], 0, 2, dtype=tf.int32))
        clip      = tf.cond(tf.equal(reverse,0), lambda: clip, lambda: clip[::-1])

        # randomly horizontally flip data
        flip      = tf.squeeze(tf.random_uniform([1], 0, 2, dtype=tf.int32))
        clip      = tf.cond(tf.equal(flip,0), lambda: clip, lambda: \
                            tf.map_fn(lambda img: tf.image.flip_left_right(img), clip))

    clip.set_shape([FLAGS.seq_length, height, width, 3])

    return clip, label, text