Python zeros(): example usages from open-source projects

base_model.py (project: magenta, author: tensorflow)
def sample(self, n, max_length=None, z=None, **kwargs):
    """Sample with an optional conditional embedding `z`."""
    if z is not None and z.shape[0].value != n:
      raise ValueError(
          '`z` must have a first dimension that equals `n` when given. '
          'Got: %d vs %d' % (z.shape[0].value, n))

    if self.hparams.conditional and z is None:
      tf.logging.warning(
          'Sampling from conditional model without `z`. Using random `z`.')
      normal_shape = [n, self.hparams.z_size]
      normal_dist = tf.contrib.distributions.Normal(
          loc=tf.zeros(normal_shape), scale=tf.ones(normal_shape))
      z = normal_dist.sample()

    return self.decoder.sample(n, max_length, z, **kwargs)
utils.py (project: magenta, author: tensorflow)
def tf_ispecgram(spec,
                 n_fft=512,
                 hop_length=None,
                 mask=True,
                 pad=True,
                 log_mag=True,
                 re_im=False,
                 dphase=True,
                 mag_only=False,
                 num_iters=1000):
  dims = spec.get_shape().as_list()
  # Add back the Nyquist frequency bin
  x = spec if not pad else tf.concat(
      [spec, tf.zeros([dims[0], 1, dims[2], dims[3]])], 1)
  audio = tf.py_func(batch_ispecgram, [
      x, n_fft, hop_length, mask, log_mag, re_im, dphase, mag_only, num_iters
  ], tf.float32)
  return audio


tensorbox.py (project: cancer, author: yancz1989)
def build_lstm_inner(H, lstm_input):
  '''
  Build the (optionally stacked) LSTM decoder.
  '''
  lstm_cell = rnn_cell.BasicLSTMCell(H['lstm_size'], forget_bias=0.0, state_is_tuple=False)
  if H['num_lstm_layers'] > 1:
    lstm = rnn_cell.MultiRNNCell([lstm_cell] * H['num_lstm_layers'], state_is_tuple=False)
  else:
    lstm = lstm_cell

  batch_size = H['batch_size'] * H['grid_height'] * H['grid_width']
  state = tf.zeros([batch_size, lstm.state_size])

  outputs = []
  with tf.variable_scope('RNN', initializer=tf.random_uniform_initializer(-0.1, 0.1)):
    for time_step in range(H['rnn_len']):
      if time_step > 0: tf.get_variable_scope().reuse_variables()
      output, state = lstm(lstm_input, state)
      outputs.append(output)
  return outputs
utils.py (project: NER-LSTM-CRF, author: liu-nlper)
def map_item2id(items, voc, max_len, none_word=1, lower=False, init_value=0, allow_error=True):
    """
    Map a sequence of word/POS items to ids.
    Args:
        items: list, the sequence to map
        voc: dict, the vocabulary mapping items to ids
        max_len: int, maximum sequence length
        none_word: int, id assigned to items missing from the vocabulary
        lower: bool, whether to lowercase items before lookup
        init_value: int, fill value for padding positions, default is 0
        allow_error: bool, if False, raise KeyError on out-of-vocabulary items
    Returns:
        arr: np.array, dtype=int32, shape=[max_len,]
    """
    assert isinstance(none_word, int)
    arr = np.zeros((max_len,), dtype='int32') + init_value
    min_range = min(max_len, len(items))
    for i in range(min_range):  # truncate items beyond max_len
        item = items[i] if not lower else items[i].lower()
        if allow_error:
            arr[i] = voc[item] if item in voc else none_word
        else:
            arr[i] = voc[item]
    return arr
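
A minimal usage sketch (the vocabulary and tokens below are invented for illustration): out-of-vocabulary items fall back to none_word, and trailing positions keep init_value.

import numpy as np

voc = {'EU': 2, 'rejects': 3, 'German': 4}  # hypothetical vocabulary
ids = map_item2id(['EU', 'rejects', 'call'], voc, max_len=5)
print(ids)  # [2 3 1 0 0] -- 'call' is OOV and maps to none_word=1; padding keeps init_value=0
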
concrete_latent_vae.py (project: zhusuan, author: thu-ml)
def vae(observed, n, n_x, n_z, n_k, tau, n_particles, relaxed=False):
    with zs.BayesianNet(observed=observed) as model:
        z_stacked_logits = tf.zeros([n, n_z, n_k])
        if relaxed:
            z = zs.ExpConcrete('z', tau, z_stacked_logits,
                               n_samples=n_particles, group_ndims=1)
            z = tf.exp(tf.reshape(z, [n_particles, n, n_z * n_k]))
        else:
            z = zs.OnehotCategorical(
                'z', z_stacked_logits, n_samples=n_particles, group_ndims=1,
                dtype=tf.float32)
            z = tf.reshape(z, [n_particles, n, n_z * n_k])
        lx_z = tf.layers.dense(z, 200, activation=tf.tanh)
        lx_z = tf.layers.dense(lx_z, 200, activation=tf.tanh)
        x_logits = tf.layers.dense(lx_z, n_x)
        x = zs.Bernoulli('x', x_logits, group_ndims=1)
    return model
test_stochastic.py (project: zhusuan, author: thu-ml)
def test_Normal(self):
        with BayesianNet():
            mean = tf.zeros([2, 3])
            logstd = tf.zeros([2, 3])
            std = tf.exp(logstd)
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_ndims = tf.placeholder(tf.int32, shape=[])
            a = Normal('a', mean, logstd=logstd, n_samples=n_samples,
                       group_ndims=group_ndims)
            b = Normal('b', mean, std=std, n_samples=n_samples,
                       group_ndims=group_ndims)

        for st in [a, b]:
            sample_ops = set(get_backward_ops(st.tensor))
            for i in [mean, logstd, n_samples]:
                self.assertTrue(i.op in sample_ops)
            log_p = st.log_prob(np.ones([2, 3]))
            log_p_ops = set(get_backward_ops(log_p))
            for i in [mean, logstd, group_ndims]:
                self.assertTrue(i.op in log_p_ops)
            self.assertEqual(st.get_shape()[1:], mean.get_shape())
test_stochastic.py (project: zhusuan, author: thu-ml)
def test_Binomial(self):
        with BayesianNet():
            logits = tf.zeros([2, 3])
            n_experiments = tf.placeholder(tf.int32, shape=[])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_ndims = tf.placeholder(tf.int32, shape=[])
            a = Binomial('a', logits, n_experiments, n_samples,
                         group_ndims)
        sample_ops = set(get_backward_ops(a.tensor))
        for i in [logits, n_experiments, n_samples]:
            self.assertTrue(i.op in sample_ops)
        log_p = a.log_prob(np.ones([2, 3], dtype=np.int32))
        log_p_ops = set(get_backward_ops(log_p))
        for i in [logits, n_experiments, group_ndims]:
            self.assertTrue(i.op in log_p_ops)
        self.assertEqual(a.get_shape()[1:], logits.get_shape())
test_univariate.py (project: zhusuan, author: thu-ml)
def test_init(self):
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(
                    ValueError, "Either.*should be passed but not both"):
                Normal(mean=tf.ones([2, 1]))
            with self.assertRaisesRegexp(
                    ValueError, "Either.*should be passed but not both"):
                Normal(mean=tf.ones([2, 1]), std=1., logstd=0.)
            with self.assertRaisesRegexp(ValueError,
                                         "should be broadcastable to match"):
                Normal(mean=tf.ones([2, 1]), logstd=tf.zeros([2, 4, 3]))
            with self.assertRaisesRegexp(ValueError,
                                         "should be broadcastable to match"):
                Normal(mean=tf.ones([2, 1]), std=tf.ones([2, 4, 3]))

        Normal(mean=tf.placeholder(tf.float32, [None, 1]),
               logstd=tf.placeholder(tf.float32, [None, 1, 3]))
        Normal(mean=tf.placeholder(tf.float32, [None, 1]),
               std=tf.placeholder(tf.float32, [None, 1, 3]))
stacked_simple.py (project: deep-summarization, author: harpribot)
def _load_model(self):
        """
        Creates the encoder decoder model

        :return: None
        """
        # Initial memory value for recurrence.
        self.prev_mem = tf.zeros((self.train_batch_size, self.memory_dim))

        # choose RNN/GRU/LSTM cell
        with tf.variable_scope("train_test", reuse=True):
            cell = self.get_cell()
            # Stacks layers of RNN's to form a stacked decoder
            self.cell = tf.nn.rnn_cell.MultiRNNCell([cell] * self.num_layers)

        # embedding model
        if not self.attention:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_rnn_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse=True):
                self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

        else:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_attention_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse=True):
                self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)
simple.py (project: deep-summarization, author: harpribot)
def _load_model(self):
        """
        Creates the encoder decoder model

        :return: None
        """
        # Initial memory value for recurrence.
        self.prev_mem = tf.zeros((self.train_batch_size, self.memory_dim))

        # choose RNN/GRU/LSTM cell
        with tf.variable_scope("train_test", reuse=True):
            self.cell = self.get_cell()

        # embedding model
        if not self.attention:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_rnn_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse=True):
                self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

        else:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_attention_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse=True):
                self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)
abstract.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def get_init_state(self, batch_size):
        '''
        Construct the initial state of the grammar state machine.

        Returns:
            A tensor of dtype tf.int32 with shape (batch_size,)
        '''
        return tf.zeros((batch_size,), dtype=tf.int32)
beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def initialize(self):
        """Initialize the decoder.
        Args:
          name: Name scope for any created operations.
        Returns:
          `(finished, start_inputs, initial_state)`.
        """
        start_inputs = self._embedding_fn(self._tiled_start_tokens)
        print('start_inputs', start_inputs)
        finished = tf.zeros((self.batch_size, self._beam_width), dtype=tf.bool)

        self._initial_num_available_beams = tf.ones((self._batch_size,), dtype=tf.int32)
        self._full_num_available_beams = tf.fill((self._batch_size,), self._beam_width)

        with tf.name_scope('first_beam_mask'):
            self._first_beam_mask = self._make_beam_mask(self._initial_num_available_beams)
        with tf.name_scope('full_beam_mask'):
            self._full_beam_mask = self._make_beam_mask(self._full_num_available_beams)
        with tf.name_scope('minus_infinity_scores'):
            self._minus_infinity_scores = tf.fill((self.batch_size, self._beam_width, self._output_size), -1e+8)

        self._batch_size_range = tf.range(self.batch_size)
        initial_state = BeamSearchOptimizationDecoderState(
            cell_state=self._tiled_initial_cell_state,
            previous_logits=tf.zeros([self.batch_size, self._beam_width, self._output_size], dtype=tf.float32),
            previous_score=tf.zeros([self.batch_size, self._beam_width], dtype=tf.float32),
            # During the first time step we only consider the initial beam
            num_available_beams=self._initial_num_available_beams,
            gold_beam_id=tf.zeros([self.batch_size], dtype=tf.int32),
            finished=finished)

        return (finished, start_inputs, initial_state)
tree_encoder.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def zero_state(self, batch_size, dtype=tf.float32):
        zeros = tf.zeros((batch_size, self._num_cells), dtype=dtype)
        return LSTMStateTuple(zeros, zeros)
frame_level_models.py (project: youtube-8m, author: wangheda)
def create_model(self,
                     model_input,
                     vocab_size,
                     num_frames,
                     **unused_params):

        shape = model_input.get_shape().as_list()
        # Mark frames whose feature sum is non-zero as valid (1.0); padded frames get 0.0.
        frames_sum = tf.reduce_sum(tf.abs(model_input), axis=2)
        frames_true = tf.ones(tf.shape(frames_sum))
        frames_false = tf.zeros(tf.shape(frames_sum))
        frames_bool = tf.reshape(
            tf.where(tf.greater(frames_sum, frames_false), frames_true, frames_false),
            [-1, shape[1], 1])

        # Max-, mean- (over valid frames only), and min-pool the frame features.
        activation_1 = tf.reduce_max(model_input, axis=1)
        activation_2 = tf.reduce_sum(model_input * frames_bool, axis=1) / (tf.reduce_sum(frames_bool, axis=1) + 1e-6)
        activation_3 = tf.reduce_min(model_input, axis=1)

        model_input_1, final_probabilities_1 = self.sub_moe(activation_1, vocab_size, scopename="_max")
        model_input_2, final_probabilities_2 = self.sub_moe(activation_2, vocab_size, scopename="_mean")
        model_input_3, final_probabilities_3 = self.sub_moe(activation_3, vocab_size, scopename="_min")
        final_probabilities = tf.stack(
            (final_probabilities_1, final_probabilities_2, final_probabilities_3), axis=1)
        weight2d = tf.get_variable("ensemble_weight2d",
                                   shape=[shape[2], 3, vocab_size],
                                   regularizer=slim.l2_regularizer(1.0e-8))
        activations = tf.stack((model_input_1, model_input_2, model_input_3), axis=2)
        weight = tf.nn.softmax(tf.einsum("aij,ijk->ajk", activations, weight2d), dim=1)
        result = {}
        result["prediction_frames"] = tf.reshape(final_probabilities, [-1, vocab_size])
        result["predictions"] = tf.reduce_sum(final_probabilities * weight, axis=1)
        return result
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, b=1.0, **unused_params):
    with tf.name_scope("loss_hinge"):
      float_labels = tf.cast(labels, tf.float32)
      all_zeros = tf.zeros(tf.shape(float_labels), dtype=tf.float32)
      all_ones = tf.ones(tf.shape(float_labels), dtype=tf.float32)
      sign_labels = tf.subtract(tf.scalar_mul(2, float_labels), all_ones)
      hinge_loss = tf.maximum(
          all_zeros, tf.scalar_mul(b, all_ones) - sign_labels * predictions)
      return tf.reduce_mean(tf.reduce_sum(hinge_loss, 1))
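
As a sanity check, a NumPy sketch (toy values, not from the source) of the same hinge computation: labels in {0, 1} are mapped to {-1, +1} signs, and the per-example hinge terms are summed before averaging over the batch.

import numpy as np

predictions = np.array([[0.9, -0.2, 0.4]])
labels = np.array([[1, 0, 1]], dtype=np.float32)
sign_labels = 2.0 * labels - 1.0                          # {0,1} -> {-1,+1}
hinge = np.maximum(0.0, 1.0 - sign_labels * predictions)  # margin b = 1.0
print(hinge.sum(axis=1).mean())                           # 1.5 for this toy batch
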
losses.py (project: youtube-8m, author: wangheda)
def get_support(self, labels, support_type=None):
    if support_type is None:
      support_type = FLAGS.support_type
    if "," in support_type:
      new_labels = []
      for st in support_type.split(","):
        new_labels.append(tf.cast(self.get_support(labels, st), dtype=tf.float32))
      support_labels = tf.concat(new_labels, axis=1)
      return support_labels
    elif support_type == "vertical":
      num_classes = FLAGS.num_classes
      num_verticals = FLAGS.num_verticals
      vertical_file = FLAGS.vertical_file
      vertical_mapping = np.zeros([num_classes, num_verticals], dtype=np.float32)
      float_labels = tf.cast(labels, dtype=tf.float32)
      with open(vertical_file) as F:
        for line in F:
          group = list(map(int, line.strip().split()))  # list() so len() works on Python 3
          if len(group) == 2:
            x, y = group
            vertical_mapping[x, y] = 1
      vm_init = tf.constant_initializer(vertical_mapping)
      vm = tf.get_variable("vm", shape=[num_classes, num_verticals],
                           trainable=False, initializer=vm_init)
      vertical_labels = tf.matmul(float_labels, vm)
      return tf.cast(vertical_labels > 0.2, tf.float32)
    elif support_type == "frequent":
      num_frequents = FLAGS.num_frequents
      frequent_labels = tf.slice(labels, begin=[0, 0], size=[-1, num_frequents])
      frequent_labels = tf.cast(frequent_labels, dtype=tf.float32)
      return frequent_labels
    elif support_type == "label":
      float_labels = tf.cast(labels, dtype=tf.float32)
      return float_labels
    else:
      raise NotImplementedError()
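
To make the "vertical" branch concrete, a NumPy sketch with invented class-to-vertical pairs: multi-hot labels are projected through the indicator matrix and thresholded at 0.2.

import numpy as np

vertical_mapping = np.zeros([4, 2], dtype=np.float32)  # 4 classes, 2 verticals (toy sizes)
for x, y in [(0, 0), (1, 0), (2, 1)]:                  # assumed class -> vertical pairs
    vertical_mapping[x, y] = 1
labels = np.array([[0, 1, 0, 0]], dtype=np.float32)    # example hits class 1 only
vertical_labels = labels.dot(vertical_mapping)         # [[1., 0.]]
print((vertical_labels > 0.2).astype(np.float32))      # vertical 0 becomes active
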
graph_definition.py (project: skiprnn-2017-telecombcn, author: imatge-upc)
def compute_budget_loss(model, loss, updated_states, cost_per_sample):
    """
    Compute penalization term on the number of updated states (i.e. used samples)
    """
    if using_skip_rnn(model):
        return tf.reduce_mean(tf.reduce_sum(cost_per_sample * updated_states, 1), 0)
    else:
        return tf.zeros(loss.get_shape())
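
A NumPy sketch of the penalty on assumed inputs: updated_states is a binary (batch, time) matrix marking which steps actually updated the RNN state.

import numpy as np

updated_states = np.array([[1., 0., 1., 1.],
                           [1., 1., 0., 0.]])  # 1 = state updated at this step
cost_per_sample = 1e-3
budget_loss = np.mean(np.sum(cost_per_sample * updated_states, axis=1))
print(budget_loss)  # 0.0025: average per-sequence cost of the used samples
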
bridges.py (project: seq2seq, author: google)
def _create(self):
    zero_state = nest.map_structure(
        lambda x: tf.zeros([self.batch_size, x], dtype=tf.float32),
        self.decoder_state_size)
    return zero_state
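
For context, a standalone sketch (TF 1.x, with an assumed nested decoder_state_size) of how nest.map_structure builds a zero tensor for every leaf of the state structure:

import tensorflow as tf
from tensorflow.python.util import nest

batch_size = 4
decoder_state_size = (256, (128, 128))  # hypothetical nested state sizes
zero_state = nest.map_structure(
    lambda x: tf.zeros([batch_size, x], dtype=tf.float32),
    decoder_state_size)
# zero_state mirrors the structure: (Tensor(4, 256), (Tensor(4, 128), Tensor(4, 128)))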

