Python int32() usage examples (source code)

model_utils.py (project: youtube-8m, author: wangheda)
def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
  frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, num_samples]),
          tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32)
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
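
A minimal usage sketch for SampleRandomFrames; the shapes and values below are illustrative, not taken from the youtube-8m project:

import tensorflow as tf

model_input = tf.random_uniform([4, 10, 128])         # batch_size=4, max_frames=10, feature_size=128
num_frames = tf.constant([[10.], [7.], [5.], [10.]])  # valid frame count per example
sampled = SampleRandomFrames(model_input, num_frames, num_samples=2)
# sampled has shape [4, 2, 128]: two frames drawn uniformly from each example's valid range
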
model.py (project: ISLES2017, author: MiguelMonteiro)
def parse_example(serialized_example):
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'shape': tf.FixedLenFeature([], tf.string),
            'img_raw': tf.FixedLenFeature([], tf.string),
            'gt_raw': tf.FixedLenFeature([], tf.string),
            'example_name': tf.FixedLenFeature([], tf.string)
        })

    with tf.variable_scope('decoder'):
        shape = tf.decode_raw(features['shape'], tf.int32)
        image = tf.decode_raw(features['img_raw'], tf.float32)
        ground_truth = tf.decode_raw(features['gt_raw'], tf.uint8)
        example_name = features['example_name']

    with tf.variable_scope('image'):
        # reshape and add a leading batch dimension
        image = tf.expand_dims(tf.reshape(image, shape), 0)
    with tf.variable_scope('ground_truth'):
        # reshape
        ground_truth = tf.cast(tf.reshape(ground_truth, shape[:-1]), tf.float32)
    return image, ground_truth, example_name
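
A hedged sketch of wiring parse_example into a queue-based input pipeline; the TFRecord filename is hypothetical, not from the ISLES2017 repository:

import tensorflow as tf

filename_queue = tf.train.string_input_producer(['examples.tfrecords'])  # hypothetical file name
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
image, ground_truth, example_name = parse_example(serialized_example)
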
TestUpd.py (project: How-to-Learn-from-Little-Data, author: llSourcell)
def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print 'Hello'"""

    def update_tensor(V, dim2, val):  # Update tensor V so that V[i, dim2[i]] = val[i] for every row i
        val = tf.cast(val, V.dtype)
        def body(_, elems):
            v, d2, chg = elems  # Python 3 compatible unpacking (the original used Python 2 tuple parameters)
            d2_int = tf.cast(d2, tf.int32)
            return tf.slice(tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0), [0], [v.get_shape().as_list()[0]])
        Z = tf.scan(body, elems=(V, dim2, val), initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32), name="Scan_Update")
        return Z
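
An illustrative call to update_tensor under assumed shapes (a 3x4 matrix); this also assumes the helper is lifted out of omniglot() to module scope:

import tensorflow as tf

V = tf.ones([3, 4])               # matrix to update
dim2 = tf.constant([0., 2., 3.])  # per-row column index
val = tf.constant([9., 8., 7.])   # per-row replacement value
updated = update_tensor(V, dim2, val)
# row 0 -> [9, 1, 1, 1], row 1 -> [1, 1, 8, 1], row 2 -> [1, 1, 1, 7]
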
seq2seq_helpers.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def decode(self, cell_dec, enc_final_state, output_size, output_embed_matrix, training, grammar_helper=None):
        if self.config.use_dot_product_output:
            output_layer = DotProductLayer(output_embed_matrix)
        else:
            output_layer = tf.layers.Dense(output_size, use_bias=False)

        go_vector = tf.ones((self.batch_size,), dtype=tf.int32) * self.config.grammar.start
        if training:
            output_ids_with_go = tf.concat([tf.expand_dims(go_vector, axis=1), self.output_placeholder], axis=1)
            outputs = tf.nn.embedding_lookup([output_embed_matrix], output_ids_with_go)
            helper = TrainingHelper(outputs, self.output_length_placeholder+1)
        else:
            helper = GreedyEmbeddingHelper(output_embed_matrix, go_vector, self.config.grammar.end)

        if self.config.use_grammar_constraints:
            decoder = GrammarBasicDecoder(self.config.grammar, cell_dec, helper, enc_final_state, output_layer=output_layer, training_output = self.output_placeholder if training else None,
                                          grammar_helper=grammar_helper)
        else:
            decoder = BasicDecoder(cell_dec, helper, enc_final_state, output_layer=output_layer)

        final_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, impute_finished=True, maximum_iterations=self.max_length)

        return final_outputs
eval_output_embeddings.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def bag_of_tokens(config, labels, label_lengths):
    if config.train_output_embeddings:
        with tf.variable_scope('embed', reuse=True):
            output_embeddings = tf.get_variable('output_embedding')
    else:
        output_embeddings = tf.constant(config.output_embedding_matrix)

    #everything_label_placeholder = tf.placeholder(shape=(None, config.max_length,), dtype=tf.int32)
    #everything_label_length_placeholder = tf.placeholder(shape=(None,), dtype=tf.int32)

    labels = tf.constant(np.array(labels))
    embedded_output = tf.gather(output_embeddings, labels)
    print('embedded_output before', embedded_output)
    #mask = tf.sequence_mask(label_lengths, maxlen=config.max_length, dtype=tf.float32)
    # note: this multiplication will broadcast the mask along all elements of the depth dimension
    # (which is why we run the expand_dims to choose how to broadcast)
    #embedded_output = embedded_output * tf.expand_dims(mask, axis=2)
    #print('embedded_output after', embedded_output)

    return tf.reduce_sum(embedded_output, axis=1)
data_set_helpers.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def __init__(self, files_list, thread_count, batch_size, numcep, numcontext, next_index=lambda x: x + 1):
        self._coord = None
        self._numcep = numcep
        self._x = tf.placeholder(tf.float32, [None, numcep + (2 * numcep * numcontext)])
        self._x_length = tf.placeholder(tf.int32, [])
        self._y = tf.placeholder(tf.int32, [None,])
        self._y_length = tf.placeholder(tf.int32, [])
        self.example_queue = tf.PaddingFIFOQueue(shapes=[[None, numcep + (2 * numcep * numcontext)], [], [None,], []],
                                                  dtypes=[tf.float32, tf.int32, tf.int32, tf.int32],
                                                  capacity=2 * self._get_device_count() * batch_size)
        self._enqueue_op = self.example_queue.enqueue([self._x, self._x_length, self._y, self._y_length])
        self._close_op = self.example_queue.close(cancel_pending_enqueues=True)
        self.batch_size = batch_size
        self._numcontext = numcontext
        self._thread_count = thread_count
        self._files_list = self._create_files_list(files_list)
        self._next_index = next_index
data_set_helpers_RHL.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def __init__(self, files_list, thread_count, batch_size, numcep, numcontext, next_index=lambda x: x + 1):
        self._coord = None
        self._numcep = numcep
        self._x = tf.placeholder(tf.float32, [None, numcep + (2 * numcep * numcontext)])
        self._x_length = tf.placeholder(tf.int32, [])
        self._y = tf.placeholder(tf.int32, [None,])
        self._y_length = tf.placeholder(tf.int32, [])
        self.example_queue = tf.PaddingFIFOQueue(shapes=[[None, numcep + (2 * numcep * numcontext)], [], [None,], []],
                                                  dtypes=[tf.float32, tf.int32, tf.int32, tf.int32],
                                                  capacity=2 * self._get_device_count() * batch_size)
        self._enqueue_op = self.example_queue.enqueue([self._x, self._x_length, self._y, self._y_length])
        self._close_op = self.example_queue.close(cancel_pending_enqueues=True)
        self.batch_size = batch_size
        self._numcontext = numcontext
        self._thread_count = thread_count
        self._files_list = self._create_files_list(files_list)
        self._next_index = next_index
data_load.py (project: neurobind, author: Kyubyong)
def get_batch_data():
    # Load data
    X, Y = load_data()

    # calc total batch count
    num_batch = len(X) // hp.batch_size

    # Convert to tensor
    X = tf.convert_to_tensor(X, tf.int32)
    Y = tf.convert_to_tensor(Y, tf.float32)

    # Create Queues
    input_queues = tf.train.slice_input_producer([X, Y])

    # create batch queues
    x, y = tf.train.batch(input_queues,
                          num_threads=8,
                          batch_size=hp.batch_size,
                          capacity=hp.batch_size * 64,
                          allow_smaller_final_batch=False)

    return x, y, num_batch  # (N, T), (N, T), ()
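
An illustrative driver for get_batch_data; the session and queue-runner boilerplate below is an assumption, not part of the neurobind code:

import tensorflow as tf

x, y, num_batch = get_batch_data()
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    x_batch, y_batch = sess.run([x, y])  # one batch of shape (hp.batch_size, T)
    coord.request_stop()
    coord.join(threads)
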
distributions.py (project: distributional_perspective_on_RL, author: Kiwoo)
def sample_dtype(self):
        return tf.int32

# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
#     def __init__(self, logits):
#         self.logits = logits
#         self.ps = tf.nn.softmax(logits)
#     @classmethod
#     def fromflat(cls, flat):
#         return cls(flat)
#     def flatparam(self):
#         return self.logits
#     def mode(self):
#         return U.argmax(self.logits, axis=1)
#     def logp(self, x):
#         return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
#     def kl(self, other):
#         return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
#                 - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def entropy(self):
#         return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def sample(self):
#         u = tf.random_uniform(tf.shape(self.logits))
#         return U.argmax(self.logits - tf.log(-tf.log(u)), axis=1)
beam_search.py (project: seq2seq, author: google)
def create_initial_beam_state(config):
  """Creates an instance of `BeamState` that can be used on the first
  call to `beam_step`.

  Args:
    config: A BeamSearchConfig

  Returns:
    An instance of `BeamSearchState`.
  """
  return BeamSearchState(
      log_probs=tf.zeros([config.beam_width]),
      finished=tf.zeros(
          [config.beam_width], dtype=tf.bool),
      lengths=tf.zeros(
          [config.beam_width], dtype=tf.int32))
rnn_encoder_test.py (project: seq2seq, author: google)
def test_encode(self):
    inputs = tf.random_normal(
        [self.batch_size, self.sequence_length, self.input_depth])
    example_length = tf.ones(
        self.batch_size, dtype=tf.int32) * self.sequence_length

    encode_fn = rnn_encoder.UnidirectionalRNNEncoder(self.params, self.mode)
    encoder_output = encode_fn(inputs, example_length)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      encoder_output_ = sess.run(encoder_output)

    np.testing.assert_array_equal(encoder_output_.outputs.shape,
                                  [self.batch_size, self.sequence_length, 32])
    self.assertIsInstance(encoder_output_.final_state,
                          tf.contrib.rnn.LSTMStateTuple)
    np.testing.assert_array_equal(encoder_output_.final_state.h.shape,
                                  [self.batch_size, 32])
    np.testing.assert_array_equal(encoder_output_.final_state.c.shape,
                                  [self.batch_size, 32])
rnn_encoder_test.py (project: seq2seq, author: google)
def _test_encode_with_params(self, params):
    """Tests the StackBidirectionalRNNEncoder with a specific cell"""
    inputs = tf.random_normal(
        [self.batch_size, self.sequence_length, self.input_depth])
    example_length = tf.ones(
        self.batch_size, dtype=tf.int32) * self.sequence_length

    encode_fn = rnn_encoder.StackBidirectionalRNNEncoder(params, self.mode)
    encoder_output = encode_fn(inputs, example_length)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      encoder_output_ = sess.run(encoder_output)

    output_size = encode_fn.params["rnn_cell"]["cell_params"]["num_units"]

    np.testing.assert_array_equal(
        encoder_output_.outputs.shape,
        [self.batch_size, self.sequence_length, output_size * 2])

    return encoder_output_
decoder_test.py (project: seq2seq, author: google)
def test_with_fixed_inputs(self):
    inputs = tf.random_normal(
        [self.batch_size, self.sequence_length, self.input_depth])
    seq_length = tf.ones(self.batch_size, dtype=tf.int32) * self.sequence_length

    helper = decode_helper.TrainingHelper(
        inputs=inputs, sequence_length=seq_length)
    decoder_fn = self.create_decoder(
        helper=helper, mode=tf.contrib.learn.ModeKeys.TRAIN)
    initial_state = decoder_fn.cell.zero_state(
        self.batch_size, dtype=tf.float32)
    decoder_output, _ = decoder_fn(initial_state, helper)

    #pylint: disable=E1101
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      decoder_output_ = sess.run(decoder_output)

    np.testing.assert_array_equal(
        decoder_output_.logits.shape,
        [self.sequence_length, self.batch_size, self.vocab_size])
    np.testing.assert_array_equal(decoder_output_.predicted_ids.shape,
                                  [self.sequence_length, self.batch_size])

    return decoder_output_
pooling_encoder_test.py (project: seq2seq, author: google)
def _test_with_params(self, params):
    """Tests the encoder with a given parameter configuration"""
    inputs = tf.random_normal(
        [self.batch_size, self.sequence_length, self.input_depth])
    example_length = tf.ones(
        self.batch_size, dtype=tf.int32) * self.sequence_length

    encode_fn = PoolingEncoder(params, self.mode)
    encoder_output = encode_fn(inputs, example_length)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      encoder_output_ = sess.run(encoder_output)

    np.testing.assert_array_equal(
        encoder_output_.outputs.shape,
        [self.batch_size, self.sequence_length, self.input_depth])
    np.testing.assert_array_equal(
        encoder_output_.attention_values.shape,
        [self.batch_size, self.sequence_length, self.input_depth])
    np.testing.assert_array_equal(encoder_output_.final_state.shape,
                                  [self.batch_size, self.input_depth])
bbbc006_input.py (project: dcan-tensorflow, author: lisjin)
def get_read_input(eval_data=False):
    """
    Fetch input data row by row from CSV files.
    Args:
        eval_data: Bool representing whether to read from train or test directories.
    Returns:
        read_input: An object representing a single example.
        reshaped_image: Image of type tf.float32, reshaped to correct dimensions.
    """
    # Create queues that produce the filenames and labels to read.
    pref = 'test' if eval_data else 'train'
    all_files_queue = tf.train.string_input_producer([pref + '.csv'])

    # Read examples from files in the filename queue.
    read_input = read_bbbc006(all_files_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)
    read_input.label = tf.cast(read_input.label, tf.int32)

    return read_input, reshaped_image
network_vgg16.py (project: HandDetection, author: YunqiuXu)
def _anchor_component(self):
    with tf.variable_scope('ANCHOR_' + self._tag) as scope:
      # just to get the shape right
      height = tf.to_int32(tf.ceil(self._im_info[0] / np.float32(self._feat_stride[0])))
      width = tf.to_int32(tf.ceil(self._im_info[1] / np.float32(self._feat_stride[0])))
      anchors, anchor_length = tf.py_func(generate_anchors_pre,
                                          [height, width,
                                           self._feat_stride, self._anchor_scales, self._anchor_ratios],
                                          [tf.float32, tf.int32], name="generate_anchors")
      anchors.set_shape([None, 4])
      anchor_length.set_shape([])
      self._anchors = anchors
      self._anchor_length = anchor_length

  # [Hand Detection] Batch normalization
  # http://stackoverflow.com/a/34634291/2267819
  # Note that this is different from the paper(they use another method)
sparse_test.py (project: cxflow-tensorflow, author: Cognexa)
def test_dense_to_sparse(self):
        """ Test if `dense_to_sparse` works properly."""

        with tf.Session().as_default():
            dense = tf.constant([[1., 2., 0.], [0., 0., 3.]], dtype=tf.float32)

            sparse = dense_to_sparse(dense)

            self.assertTrue(np.array_equal(sparse.indices.eval(), np.array([[0, 0], [0, 1], [1, 2]])))
            self.assertTrue(np.array_equal(sparse.values.eval(), np.array([1., 2., 3.])))

            mask = tf.constant([[0, 1, 0], [1, 0, 0]], dtype=tf.int32)

            masked = dense_to_sparse(dense, mask)
            self.assertTrue(np.array_equal(masked.indices.eval(), np.array([[0, 1], [1, 0]])))
            self.assertTrue(np.array_equal(masked.values.eval(), np.array([2., 0.])))
repeat_test.py (project: cxflow-tensorflow, author: Cognexa)
def test_repeat(self):
        """ Test if `repeat` works the same as np.repeat."""

        with tf.Session().as_default():
            # try different tensor types
            for npdtype, tfdtype in [(np.int32, tf.int32), (np.float32, tf.float32)]:
                for init_value in [np.array([0, 1, 2, 3], dtype=npdtype),
                                   np.array([[0, 1], [2, 3], [4, 5]], dtype=npdtype)]:
                    # and all their axes
                    for axis in range(len(init_value.shape)):
                        for repeats in [1, 2, 3, 11]:
                            tensor = tf.constant(init_value, dtype=tfdtype)

                            repeated_value = repeat(tensor, repeats=repeats, axis=axis).eval()
                            expected_value = np.repeat(init_value, repeats=repeats, axis=axis)

                            self.assertTrue(np.all(repeated_value == expected_value))
bin.py (project: cxflow-tensorflow, author: Cognexa)
def bin_stats(predictions: tf.Tensor, labels: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
    """
    Calculate f1, precision and recall from binary classification expected and predicted values.

    :param predictions: 2-d tensor (batch, predictions) of predicted 0/1 classes
    :param labels: 2-d tensor (batch, labels) of expected 0/1 classes
    :return: a tuple of batched (f1, precision and recall) values
    """
    predictions = tf.cast(predictions, tf.int32)
    labels = tf.cast(labels, tf.int32)

    true_positives = tf.reduce_sum((predictions * labels), axis=1)
    false_positives = tf.reduce_sum(tf.cast(tf.greater(predictions, labels), tf.int32), axis=1)
    false_negatives = tf.reduce_sum(tf.cast(tf.greater(labels, predictions), tf.int32), axis=1)

    recall = true_positives / (true_positives + false_negatives)
    precision = true_positives / (true_positives + false_positives)
    f1_score = 2 / (1 / precision + 1 / recall)

    return f1_score, precision, recall
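
A small numeric sanity check for bin_stats; the tensors are illustrative:

import tensorflow as tf

with tf.Session() as sess:
    predictions = tf.constant([[1, 0, 1, 0]], dtype=tf.int32)
    labels = tf.constant([[1, 1, 0, 0]], dtype=tf.int32)
    f1, precision, recall = sess.run(bin_stats(predictions, labels))
    # one true positive, one false positive, one false negative:
    # precision = recall = f1 = 0.5
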
bin.py (project: cxflow-tensorflow, author: Cognexa)
def bin_dice(predictions: tf.Tensor, labels: tf.Tensor) -> tf.Tensor:
    """
    Calculate Sorensen–Dice coefficient from the given binary classification expected and predicted values.

    The coefficient is defined as :math:`2*|X \cap Y| / (|X| + |Y|)`.

    :param predictions: 2-d tensor (batch, predictions) of predicted 0/1 classes
    :param labels: 2-d tensor (batch, labels) of expected 0/1 classes
    :return: batched Sørensen–Dice coefficients
    """
    predictions = tf.cast(predictions, tf.int32)
    labels = tf.cast(labels, tf.int32)

    true_positives = tf.reduce_sum((predictions * labels), axis=1)
    pred_positives = tf.reduce_sum(predictions, axis=1)
    label_positives = tf.reduce_sum(labels, axis=1)

    return 2 * true_positives / (pred_positives + label_positives)
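
A matching numeric check for bin_dice, using the same illustrative tensors:

import tensorflow as tf

with tf.Session() as sess:
    predictions = tf.constant([[1, 0, 1, 0]], dtype=tf.int32)
    labels = tf.constant([[1, 1, 0, 0]], dtype=tf.int32)
    dice = sess.run(bin_dice(predictions, labels))
    # 2 * 1 true positive / (2 predicted positives + 2 label positives) = 0.5
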
utilities.py (project: sea-lion-counter, author: rdinse)
def __init__(self, tag, x, summary_fn=tf.summary.scalar, summary_args=(), scope=None):
    """
    Initializes an Average.

    Arguments
      x: Tensor to be averaged over multiple runs.
      tag: Tag for the summary.
      summary_fn: Function used for creating a summary.
      summary_args: Arguments passed to the summary function.
    """

    with tf.variable_scope(scope or type(self).__name__):
      counter = tf.Variable(name="counter", initial_value=tf.constant(0),
                            dtype=tf.int32, trainable=False)
      running_sum = tf.Variable(name="running_sum", initial_value=tf.constant(0.),
                                dtype=tf.float32, trainable=False)

      self._running_average = running_sum / tf.cast(counter, tf.float32)
      self._summary = summary_fn(tag or x.name + '_avg', self._running_average, **summary_args)
      self._update_op = tf.group(counter.assign_add(1), running_sum.assign_add(x))
      self._reset_op = tf.group(counter.assign(0), running_sum.assign(0.))
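
A hedged usage sketch: the class name Average and the update loop below are assumptions based on the docstring, not code from the sea-lion-counter project:

import tensorflow as tf

loss = tf.placeholder(tf.float32, shape=[])
avg_loss = Average('loss', loss)  # assumed class name
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for value in [1.0, 2.0, 3.0]:
        sess.run(avg_loss._update_op, feed_dict={loss: value})
    print(sess.run(avg_loss._running_average))  # 2.0
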
nn.py (project: kaggle-review, author: daxiongshu)
def _build(self):
        V = self.V
        M = self.flags.embedding_size # 64
        H = self.flags.num_units 
        C = self.flags.classes
        D = self.flags.d2v_size # embedding for d2v

        netname = "D2V"
        with tf.variable_scope(netname):
            self.inputs = tf.placeholder(dtype=tf.int32,shape=[None]) #[B]
            layer_name = "{}/embedding".format(netname)
            x = self._get_embedding(layer_name, self.inputs, V, D, reuse=False) # [B, D]

        netname = "NN"
        cell_name = self.flags.cell
        H1,H2 = 32,16
        with tf.variable_scope(netname):
            net = self._fc(x, fan_in=D, fan_out=H1, layer_name="%s/fc1"%netname, activation='relu')
            net = self._dropout(net)
            net = self._fc(net, fan_in=H1, fan_out=H2, layer_name="%s/fc2"%netname, activation='relu')
            net = self._dropout(net)
            net = self._fc(net, fan_in=H2, fan_out=C, layer_name="%s/fc3"%netname, activation=None)
            self.logit = net
rnn.py (project: kaggle-review, author: daxiongshu)
def _build(self):
        V = self.V
        M = self.flags.embedding_size # 64
        H = self.flags.num_units 
        C = self.flags.classes

        netname = "CBOW"
        with tf.variable_scope(netname):
            self.inputs = tf.placeholder(dtype=tf.int32,shape=[None, None]) #[B,S]
            layer_name = "{}/embedding".format(netname)
            x = self._get_embedding(layer_name, self.inputs, V, M, reuse=False) # [B, S, M]

        netname = "RNN"
        cell_name = self.flags.cell
        with tf.variable_scope(netname):
            args = {"num_units":H,"num_proj":C}
            cell_f = self._get_rnn_cell(cell_name=cell_name, args=args)
            cell_b = self._get_rnn_cell(cell_name=cell_name, args=args)
            (out_f, out_b), _ = tf.nn.bidirectional_dynamic_rnn(cell_f,cell_b,x,dtype=tf.float32)
            #logit = (out_f[:,-1,:] + out_b[:,-1,:])*0.5  # [B,1,C]
            logit = tf.reduce_mean(out_f+out_b,axis=1)
            logit = tf.squeeze(logit) # [B,C]
            self.logit = logit
a8_dynamic_memory_network.py (project: text_classification, author: brightmart)
    def smoothing_cross_entropy(self, logits, labels, vocab_size, confidence=0.9):  # confidence = 1.0 - label_smoothing, where label_smoothing = 0.1; from http://github.com/tensorflow/tensor2tensor
        """Cross entropy with label smoothing to limit over-confidence."""
        with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
            # Low confidence is given to all non-true labels, uniformly.
            low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
            # Normalizing constant is the best cross-entropy value with soft targets.
            # We subtract it just for readability, makes no difference on learning.
            normalizing = -(confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) * low_confidence * tf.log(low_confidence + 1e-20))
            # Soft targets.
            soft_targets = tf.one_hot(
                tf.cast(labels, tf.int32),
                depth=vocab_size,
                on_value=confidence,
                off_value=low_confidence)
            xentropy = tf.nn.softmax_cross_entropy_with_logits(
                logits=logits, labels=soft_targets)
        return xentropy - normalizing
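
A standalone sketch of the soft-target construction used above; the tiny vocabulary and label are illustrative:

import tensorflow as tf

vocab_size = 4
confidence = 0.9
low_confidence = (1.0 - confidence) / (vocab_size - 1)
labels = tf.constant([2], dtype=tf.int32)
soft_targets = tf.one_hot(labels, depth=vocab_size,
                          on_value=confidence, off_value=low_confidence)
# soft_targets evaluates to roughly [[0.033, 0.033, 0.9, 0.033]]
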
p71_TextRCNN_model.py (project: text_classification, author: brightmart)
def test():
    # Below is a quick functional test; to use this for text classification, first transform each sentence into vocabulary indices, then feed the data to the graph.
    num_classes=10
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1#0.5
    textRNN=TextRCNN(num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length)) #[None, self.sequence_length]
            input_y=np.array([1,0,1,1,1,2,1,1]) #np.zeros((batch_size),dtype=np.int32) # labels, shape [batch_size]
            loss,acc,predict,_=sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.predictions,textRNN.train_op],
                                        feed_dict={textRNN.input_x:input_x,textRNN.input_y:input_y,textRNN.dropout_keep_prob:dropout_keep_prob})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
#test()
p71_TextRCNN_mode2.py (project: text_classification, author: brightmart)
def test():
    # Below is a quick functional test; to use this for text classification, first transform each sentence into vocabulary indices, then feed the data to the graph.
    num_classes=10
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1#0.5
    textRNN=TextRCNN(num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length)) #[None, self.sequence_length]
            input_y=np.array([1,0,1,1,1,2,1,1]) #np.zeros((batch_size),dtype=np.int32) # labels, shape [batch_size]
            loss,acc,predict,_=sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.predictions,textRNN.train_op],
                                        feed_dict={textRNN.input_x:input_x,textRNN.input_y:input_y,textRNN.dropout_keep_prob:dropout_keep_prob})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
#test()
p5_fastTextB_model.py (project: text_classification, author: brightmart)
def test():
    # Below is a quick functional test; to use this for text classification, first transform each sentence into vocabulary indices, then feed the data to the graph.
    num_classes=19
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1
    fastText=fastTextB(num_classes, learning_rate, batch_size, decay_steps, decay_rate,5,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length),dtype=np.int32) #[None, self.sequence_length]
            input_y=np.array([1,0,1,1,1,2,1,1],dtype=np.int32) #np.zeros((batch_size),dtype=np.int32) # labels, shape [batch_size]
            loss,acc,predict,_=sess.run([fastText.loss_val,fastText.accuracy,fastText.predictions,fastText.train_op],
                                        feed_dict={fastText.sentence:input_x,fastText.labels:input_y})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
#test()
p8_TextRNN_model.py (project: text_classification, author: brightmart)
def test():
    # Below is a quick functional test; to use this for text classification, first transform each sentence into vocabulary indices, then feed the data to the graph.
    num_classes=10
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1#0.5
    textRNN=TextRNN(num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length)) #[None, self.sequence_length]
            input_y=np.array([1,0,1,1,1,2,1,1]) #np.zeros((batch_size),dtype=np.int32) # labels, shape [batch_size]
            loss,acc,predict,_=sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.predictions,textRNN.train_op],feed_dict={textRNN.input_x:input_x,textRNN.input_y:input_y,textRNN.dropout_keep_prob:dropout_keep_prob})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)

