Python ones() usage examples
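ones() fills a tensor or array of a given shape with the value 1. Both np.ones and tf.ones appear throughout the snippets below; here is a minimal sketch, assuming TensorFlow 1.x (which all of these projects target):

import numpy as np
import tensorflow as tf

a = np.ones([2, 3], dtype=np.float32)  # NumPy: array of 1.0s, shape (2, 3)
b = tf.ones([2, 3], dtype=tf.float32)  # TensorFlow: graph op with the same result

with tf.Session() as sess:
    print(sess.run(b))  # [[1. 1. 1.], [1. 1. 1.]]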

readers.py (project: Video-Classification, author: boyaolin)
def prepare_serialized_examples(self, serialized_examples):
    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), (
        "length of feature_names (={}) != length of feature_sizes (={})".format(
            len(self.feature_names), len(self.feature_sizes)))

    feature_map = {"video_id": tf.FixedLenFeature([], tf.string),
                   "labels": tf.VarLenFeature(tf.int64)}
    for feature_index in range(num_features):
      feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
          [self.feature_sizes[feature_index]], tf.float32)

    features = tf.parse_example(serialized_examples, features=feature_map)
    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    concatenated_features = tf.concat([
        features[feature_name] for feature_name in self.feature_names], 1)

    return features["video_id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]])
policies.py (project: rl_algorithms, author: DanielTakeshi)
def update_policy(self, ob_no, ac_n, std_adv_n, stepsize):
        """ 
        The input is the same for the discrete control case, except we return a
        single log standard deviation vector in addition to our logits. In this
        case, the logits are really the mean vectors of the Gaussians, which
        differ among components (observations) in the minibatch. We return the
        *old* ones: they are evaluated first, then `self.update_op` runs, which
        makes them outdated.
        """
        feed = {self.ob_no: ob_no,
                self.ac_na: ac_n,
                self.adv_n: std_adv_n,
                self.stepsize: stepsize}
        _, surr_loss, oldmean_na, oldlogstd_a = self.sess.run(
                [self.update_op, self.surr_loss, self.mean_na, self.logstd_a],
                feed_dict=feed)
        return surr_loss, oldmean_na, oldlogstd_a
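The key point is that self.mean_na and self.logstd_a are fetched in the same sess.run call as self.update_op, so the values returned are the pre-update ones. A minimal sketch of this fetch-alongside-update pattern with toy shapes (all names here are illustrative, not from the original class):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 4])
w = tf.Variable(tf.ones([4, 2]))
y = tf.matmul(x, w)
loss = tf.reduce_mean(tf.square(y))
update_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # y and loss are computed from the weights as they were when the step ran.
    _, y_val, loss_val = sess.run([update_op, y, loss],
                                  feed_dict={x: [[1., 2., 3., 4.]]})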
wasserstein_gan.py (project: zhusuan, author: thu-ml)
def generator(observed, n, n_z, is_training):
    with zs.BayesianNet(observed=observed) as generator:
        z_min = -tf.ones([n, n_z])
        z_max = tf.ones([n, n_z])
        z = zs.Uniform('z', z_min, z_max)
        lx_z = tf.reshape(z, [-1, 1, 1, n_z])
        ngf = 32
        lx_z = tf.layers.conv2d_transpose(lx_z, ngf * 4, 3, use_bias=False)
        lx_z = tf.layers.batch_normalization(lx_z, training=is_training,
                                             scale=False)
        lx_z = tf.nn.relu(lx_z)
        lx_z = tf.layers.conv2d_transpose(lx_z, ngf * 2, 5, use_bias=False)
        lx_z = tf.layers.batch_normalization(lx_z, training=is_training,
                                             scale=False)
        lx_z = tf.nn.relu(lx_z)
        lx_z = tf.layers.conv2d_transpose(lx_z, ngf, 5, strides=(2, 2),
                                          padding='same', use_bias=False)
        lx_z = tf.layers.batch_normalization(lx_z, training=is_training,
                                             scale=False)
        lx_z = tf.nn.relu(lx_z)
        lx_z = tf.layers.conv2d_transpose(
            lx_z, 1, 5, strides=(2, 2), padding='same', activation=tf.sigmoid)
    return generator, lx_z
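Here -tf.ones and tf.ones set the bounds of the uniform prior over z to [-1, 1] in every latent dimension. Outside of ZhuSuan the same draw can be sketched in plain TF (tf.random_uniform standing in for zs.Uniform; n and n_z are assumed values):

import tensorflow as tf

n, n_z = 64, 100                      # assumed batch size and latent dimension
z_min = -tf.ones([n, n_z])            # lower bound: -1 everywhere
z_max = tf.ones([n, n_z])             # upper bound: +1 everywhere
z = tf.random_uniform([n, n_z], minval=-1., maxval=1.)  # plain-TF equivalent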
dcgan.py (project: zhusuan, author: thu-ml)
def generator(observed, n, n_z, is_training):
    with zs.BayesianNet(observed=observed) as generator:
        ngf = 64
        z_min = -tf.ones([n, n_z])
        z_max = tf.ones([n, n_z])
        z = zs.Uniform('z', z_min, z_max)
        lx_z = tf.layers.dense(z, ngf * 8 * 4 * 4, use_bias=False)
        lx_z = tf.layers.batch_normalization(lx_z, training=is_training)
        lx_z = tf.nn.relu(lx_z)
        lx_z = tf.reshape(lx_z, [-1, 4, 4, ngf * 8])
        lx_z = tf.layers.conv2d_transpose(lx_z, ngf * 4, 5, strides=(2, 2),
                                          padding='same', use_bias=False)
        lx_z = tf.layers.batch_normalization(lx_z, training=is_training)
        lx_z = tf.nn.relu(lx_z)
        lx_z = tf.layers.conv2d_transpose(lx_z, ngf * 2, 5, strides=(2, 2),
                                          padding='same', use_bias=False)
        lx_z = tf.layers.batch_normalization(lx_z, training=is_training)
        lx_z = tf.nn.relu(lx_z)
        lx_z = tf.layers.conv2d_transpose(lx_z, 3, 5, strides=(2, 2),
                                          padding='same', activation=tf.sigmoid)
    return generator, lx_z
variational_dropout.py (project: zhusuan, author: thu-ml)
def var_dropout(observed, x, n, net_size, n_particles, is_training):
    with zs.BayesianNet(observed=observed) as model:
        h = x
        normalizer_params = {'is_training': is_training,
                             'updates_collections': None}
        for i, [n_in, n_out] in enumerate(zip(net_size[:-1], net_size[1:])):
            eps_mean = tf.ones([n, n_in])
            eps = zs.Normal(
                'layer' + str(i) + '/eps', eps_mean, std=1.,
                n_samples=n_particles, group_ndims=1)
            h = layers.fully_connected(
                h * eps, n_out, normalizer_fn=layers.batch_norm,
                normalizer_params=normalizer_params)
            if i < len(net_size) - 2:
                h = tf.nn.relu(h)
        y = zs.OnehotCategorical('y', h)
    return model, h
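eps_mean = tf.ones([n, n_in]) centers the multiplicative Gaussian noise at 1, so the layer input is unchanged in expectation. A plain-TF sketch of just the noise, with assumed sizes and tf.random_normal standing in for zs.Normal:

import tensorflow as tf

n, n_in = 32, 128                              # assumed sizes
eps = tf.ones([n, n_in]) + tf.random_normal([n, n_in])  # mean 1, std 1
h = tf.placeholder(tf.float32, [n, n_in])
h_noisy = h * eps                              # E[h_noisy] == h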
test_stochastic.py (project: zhusuan, author: thu-ml)
def test_Normal(self):
        with BayesianNet():
            mean = tf.zeros([2, 3])
            logstd = tf.zeros([2, 3])
            std = tf.exp(logstd)
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_ndims = tf.placeholder(tf.int32, shape=[])
            a = Normal('a', mean, logstd=logstd, n_samples=n_samples,
                       group_ndims=group_ndims)
            b = Normal('b', mean, std=std, n_samples=n_samples,
                       group_ndims=group_ndims)

        for st in [a, b]:
            sample_ops = set(get_backward_ops(st.tensor))
            for i in [mean, logstd, n_samples]:
                self.assertTrue(i.op in sample_ops)
            log_p = st.log_prob(np.ones([2, 3]))
            log_p_ops = set(get_backward_ops(log_p))
            for i in [mean, logstd, group_ndims]:
                self.assertTrue(i.op in log_p_ops)
            self.assertEqual(st.get_shape()[1:], mean.get_shape())
test_stochastic.py (project: zhusuan, author: thu-ml)
def test_Binomial(self):
        with BayesianNet():
            logits = tf.zeros([2, 3])
            n_experiments = tf.placeholder(tf.int32, shape=[])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_ndims = tf.placeholder(tf.int32, shape=[])
            a = Binomial('a', logits, n_experiments, n_samples,
                         group_ndims)
        sample_ops = set(get_backward_ops(a.tensor))
        for i in [logits, n_experiments, n_samples]:
            self.assertTrue(i.op in sample_ops)
        log_p = a.log_prob(np.ones([2, 3], dtype=np.int32))
        log_p_ops = set(get_backward_ops(log_p))
        for i in [logits, n_experiments, group_ndims]:
            self.assertTrue(i.op in log_p_ops)
        self.assertEqual(a.get_shape()[1:], logits.get_shape())
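In both tests np.ones serves only to make a correctly shaped and typed observation for log_prob; note the explicit int32 dtype for the Binomial, whose support is integer counts:

import numpy as np

given_normal = np.ones([2, 3])                    # float observation for Normal
given_binomial = np.ones([2, 3], dtype=np.int32)  # integer counts for Binomial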
test_univariate.py (project: zhusuan, author: thu-ml)
def test_init(self):
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(
                    ValueError, "Either.*should be passed but not both"):
                Normal(mean=tf.ones([2, 1]))
            with self.assertRaisesRegexp(
                    ValueError, "Either.*should be passed but not both"):
                Normal(mean=tf.ones([2, 1]), std=1., logstd=0.)
            with self.assertRaisesRegexp(ValueError,
                                         "should be broadcastable to match"):
                Normal(mean=tf.ones([2, 1]), logstd=tf.zeros([2, 4, 3]))
            with self.assertRaisesRegexp(ValueError,
                                         "should be broadcastable to match"):
                Normal(mean=tf.ones([2, 1]), std=tf.ones([2, 4, 3]))

        Normal(mean=tf.placeholder(tf.float32, [None, 1]),
               logstd=tf.placeholder(tf.float32, [None, 1, 3]))
        Normal(mean=tf.placeholder(tf.float32, [None, 1]),
               std=tf.placeholder(tf.float32, [None, 1, 3]))
test_univariate.py (project: zhusuan, author: thu-ml)
def test_init(self):
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(
                    ValueError, "Either.*should be passed but not both"):
                FoldNormal(mean=tf.ones([2, 1]))
            with self.assertRaisesRegexp(
                    ValueError, "Either.*should be passed but not both"):
                FoldNormal(mean=tf.ones([2, 1]), std=1., logstd=0.)
            with self.assertRaisesRegexp(ValueError,
                                         "should be broadcastable to match"):
                FoldNormal(mean=tf.ones([2, 1]), logstd=tf.zeros([2, 4, 3]))
            with self.assertRaisesRegexp(ValueError,
                                         "should be broadcastable to match"):
                FoldNormal(mean=tf.ones([2, 1]), std=tf.ones([2, 4, 3]))

        FoldNormal(mean=tf.placeholder(tf.float32, [None, 1]),
                   logstd=tf.placeholder(tf.float32, [None, 1, 3]))
        FoldNormal(mean=tf.placeholder(tf.float32, [None, 1]),
                   std=tf.placeholder(tf.float32, [None, 1, 3]))
test_univariate.py (project: zhusuan, author: thu-ml)
def test_check_numerics(self):
        norm1 = FoldNormal(tf.ones([1, 2]), logstd=-1e10, check_numerics=True)
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "precision.*Tensor had Inf"):
                norm1.log_prob(0.).eval()

        norm2 = FoldNormal(tf.ones([1, 2]), logstd=1e3, check_numerics=True)
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "exp\(logstd\).*Tensor had Inf"):
                norm2.sample().eval()

        norm3 = FoldNormal(tf.ones([1, 2]), std=0., check_numerics=True)
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "log\(std\).*Tensor had Inf"):
                norm3.log_prob(0.).eval()
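These tests drive exp(logstd) and log(std) to Inf with extreme values; tf.ones([1, 2]) merely supplies a well-formed mean. The underlying mechanism is TensorFlow's check_numerics op, sketched here standalone:

import tensorflow as tf

x = tf.ones([1, 2]) * 1e3
y = tf.check_numerics(tf.exp(x), "exp overflowed")  # exp(1000.) is Inf in float32

with tf.Session() as sess:
    try:
        sess.run(y)
    except tf.errors.InvalidArgumentError as e:
        print(e.message)  # message ends with "Tensor had Inf values"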
test_univariate.py (project: zhusuan, author: thu-ml)
def test_value(self):
        with self.test_session(use_gpu=True):
            def _test_value(logits, given):
                logits = np.array(logits, np.float32)
                given = np.array(given, np.float32)
                bernoulli = Bernoulli(logits)
                log_p = bernoulli.log_prob(given)
                target_log_p = stats.bernoulli.logpmf(
                    given, 1. / (1. + np.exp(-logits)))
                self.assertAllClose(log_p.eval(), target_log_p)
                p = bernoulli.prob(given)
                target_p = stats.bernoulli.pmf(
                    given, 1. / (1. + np.exp(-logits)))
                self.assertAllClose(p.eval(), target_p)

            _test_value(0., [0, 1])
            _test_value([-50., -10., -50.], [1, 1, 0])
            _test_value([0., 4.], [[0, 1], [0, 1]])
            _test_value([[2., 3., 1.], [5., 7., 4.]],
                        np.ones([3, 1, 2, 3], dtype=np.int32))
test_univariate.py (project: zhusuan, author: thu-ml)
def test_init_n(self):
        dist = Binomial(tf.ones([2]), 10)
        self.assertTrue(isinstance(dist.n_experiments, int))
        self.assertEqual(dist.n_experiments, 10)
        with self.assertRaisesRegexp(ValueError, "must be positive"):
            _ = Binomial(tf.ones([2]), 0)

        with self.test_session(use_gpu=True):
            logits = tf.placeholder(tf.float32, None)
            n_experiments = tf.placeholder(tf.int32, None)
            dist2 = Binomial(logits, n_experiments)
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "should be a scalar"):
                dist2.n_experiments.eval(feed_dict={logits: [1.],
                                                    n_experiments: [10]})
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "must be positive"):
                dist2.n_experiments.eval(feed_dict={logits: [1.],
                                                    n_experiments: 0})
test_utils.py (project: zhusuan, author: thu-ml)
def test_explicit_broadcast(self):
        with self.test_session(use_gpu=True):
            def _test_func(a_shape, b_shape, target_shape):
                a = tf.ones(a_shape)
                b = tf.ones(b_shape)
                a, b = explicit_broadcast(a, b, 'a', 'b')
                self.assertEqual(a.eval().shape, b.eval().shape)
                self.assertEqual(a.eval().shape, target_shape)

            _test_func((5, 4), (1,), (5, 4))
            _test_func((5, 4), (4,), (5, 4))
            _test_func((2, 3, 5), (2, 1, 5), (2, 3, 5))
            _test_func((2, 3, 5), (3, 5), (2, 3, 5))
            _test_func((2, 3, 5), (3, 1), (2, 3, 5))

            with self.assertRaisesRegexp(ValueError, "cannot broadcast"):
                _test_func((3,), (4,), None)
            with self.assertRaisesRegexp(ValueError, "cannot broadcast"):
                _test_func((2, 1), (2, 4, 3), None)
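tf.ones is used here purely as a cheap stand-in tensor of a given shape to exercise broadcasting. The shapes follow standard NumPy-style broadcasting rules, e.g.:

import tensorflow as tf

a = tf.ones((2, 3, 5))
b = tf.ones((3, 1))
c = a + b  # broadcasts to shape (2, 3, 5)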
test_multivariate.py (project: zhusuan, author: thu-ml)
def test_init_check_shape(self):
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(ValueError, "should have rank"):
                MultivariateNormalCholesky(tf.zeros([]), tf.zeros([]))
            with self.assertRaisesRegexp(ValueError, "should have rank"):
                MultivariateNormalCholesky(tf.zeros([1]), tf.zeros([1]))
            with self.assertRaisesRegexp(ValueError, 'compatible'):
                MultivariateNormalCholesky(
                    tf.zeros([1, 2]), tf.placeholder(tf.float32, [1, 2, 3]))
            u = tf.placeholder(tf.float32, [None])
            len_u = tf.shape(u)[0]
            dst = MultivariateNormalCholesky(
                tf.zeros([2]), tf.zeros([len_u, len_u]))
            with self.assertRaisesRegexp(
                    tf.errors.InvalidArgumentError, 'compatible'):
                dst.sample().eval(feed_dict={u: np.ones((3,))})
test_multivariate.py (project: zhusuan, author: thu-ml)
def test_shape_inference(self):
        with self.test_session(use_gpu=True):
            # Static
            mean = 10 * np.random.normal(size=(10, 11, 2)).astype('d')
            cov = np.zeros((10, 11, 2, 2))
            dst = MultivariateNormalCholesky(
                tf.constant(mean), tf.constant(cov))
            self.assertEqual(dst.get_batch_shape().as_list(), [10, 11])
            self.assertEqual(dst.get_value_shape().as_list(), [2])
            # Dynamic
            unk_mean = tf.placeholder(tf.float32, None)
            unk_cov = tf.placeholder(tf.float32, None)
            dst = MultivariateNormalCholesky(unk_mean, unk_cov)
            self.assertEqual(dst.get_value_shape().as_list(), [None])
            feed_dict = {unk_mean: np.ones(2), unk_cov: np.eye(2)}
            self.assertEqual(list(dst.batch_shape.eval(feed_dict)), [])
            self.assertEqual(list(dst.value_shape.eval(feed_dict)), [2])
thingtalk.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def get_init_state(self, batch_size):
        return tf.ones((batch_size,), dtype=tf.int32) * self.start_state
threepart_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def get_init_state(self, batch_size):
        return tf.ones((batch_size,), dtype=tf.int32) * self.grammar.bookeeping_state_id
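In both aligners, tf.ones((batch_size,), dtype=tf.int32) * state_id is the idiom for an integer vector filled with one constant. tf.fill (which beam_aligner below uses for its beam widths) produces the same result without the multiply; a quick sketch with an assumed state id:

import tensorflow as tf

batch_size = tf.placeholder(tf.int32, [])
start_state = 7                                         # assumed state id
init_a = tf.ones((batch_size,), dtype=tf.int32) * start_state
init_b = tf.fill((batch_size,), start_state)            # equivalent, one op fewer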
beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def initialize(self):
        """Initialize the decoder.
        Args:
          name: Name scope for any created operations.
        Returns:
          `(finished, start_inputs, initial_state)`.
        """
        start_inputs = self._embedding_fn(self._tiled_start_tokens)
        print('start_inputs', start_inputs)
        finished = tf.zeros((self.batch_size, self._beam_width), dtype=tf.bool)

        self._initial_num_available_beams = tf.ones((self._batch_size,), dtype=tf.int32)
        self._full_num_available_beams = tf.fill((self._batch_size,), self._beam_width)

        with tf.name_scope('first_beam_mask'):
            self._first_beam_mask = self._make_beam_mask(self._initial_num_available_beams)
        with tf.name_scope('full_beam_mask'):
            self._full_beam_mask = self._make_beam_mask(self._full_num_available_beams)
        with tf.name_scope('minus_inifinity_scores'):
            self._minus_inifinity_scores = tf.fill((self.batch_size, self._beam_width, self._output_size), -1e+8)

        self._batch_size_range = tf.range(self.batch_size)
        initial_state = BeamSearchOptimizationDecoderState(
            cell_state=self._tiled_initial_cell_state,
            previous_logits=tf.zeros([self.batch_size, self._beam_width, self._output_size], dtype=tf.float32),
            previous_score=tf.zeros([self.batch_size, self._beam_width], dtype=tf.float32),
            # During the first time step we only consider the initial beam
            num_available_beams=self._initial_num_available_beams,
            gold_beam_id=tf.zeros([self.batch_size], dtype=tf.int32),
            finished=finished)

        return (finished, start_inputs, initial_state)
frame_level_models.py (project: youtube-8m, author: wangheda)
def create_model(self,
                     model_input,
                     vocab_size,
                     num_frames,
                     **unused_params):

        shape = model_input.get_shape().as_list()
        frames_sum = tf.reduce_sum(tf.abs(model_input), axis=2)
        frames_true = tf.ones(tf.shape(frames_sum))
        frames_false = tf.zeros(tf.shape(frames_sum))
        frames_bool = tf.reshape(
            tf.where(tf.greater(frames_sum, frames_false), frames_true, frames_false),
            [-1, shape[1], 1])

        activation_1 = tf.reduce_max(model_input, axis=1)
        activation_2 = (tf.reduce_sum(model_input * frames_bool, axis=1) /
                        (tf.reduce_sum(frames_bool, axis=1) + 1e-6))
        activation_3 = tf.reduce_min(model_input, axis=1)

        model_input_1, final_probilities_1 = self.sub_moe(
            activation_1, vocab_size, scopename="_max")
        model_input_2, final_probilities_2 = self.sub_moe(
            activation_2, vocab_size, scopename="_mean")
        model_input_3, final_probilities_3 = self.sub_moe(
            activation_3, vocab_size, scopename="_min")
        final_probilities = tf.stack(
            (final_probilities_1, final_probilities_2, final_probilities_3), axis=1)
        weight2d = tf.get_variable("ensemble_weight2d",
                                   shape=[shape[2], 3, vocab_size],
                                   regularizer=slim.l2_regularizer(1.0e-8))
        activations = tf.stack((model_input_1, model_input_2, model_input_3), axis=2)
        weight = tf.nn.softmax(tf.einsum("aij,ijk->ajk", activations, weight2d), dim=1)
        result = {}
        result["prediction_frames"] = tf.reshape(final_probilities, [-1, vocab_size])
        result["predictions"] = tf.reduce_sum(final_probilities * weight, axis=1)
        return result
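frames_bool marks frames whose features are not all zero (real frames rather than padding), and activation_2 is then a masked mean over frames. The tf.ones/tf.zeros/tf.where combination can be sketched standalone; a cast of the comparison gives the same mask (shapes assumed):

import tensorflow as tf

model_input = tf.placeholder(tf.float32, [None, 300, 1024])  # batch x frames x features (assumed)
frames_sum = tf.reduce_sum(tf.abs(model_input), axis=2)
# 1.0 for frames with any non-zero feature, 0.0 for padding:
frames_bool = tf.expand_dims(tf.cast(tf.greater(frames_sum, 0.), tf.float32), -1)
masked_mean = (tf.reduce_sum(model_input * frames_bool, axis=1) /
               (tf.reduce_sum(frames_bool, axis=1) + 1e-6))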
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss_mix(self, predictions, predictions_class, labels, **unused_params):
    with tf.name_scope("loss_mix"):
      float_labels = tf.cast(labels, tf.float32)
      if FLAGS.support_type == "class":
        seq = np.loadtxt(FLAGS.class_file)
        tf_seq = tf.one_hot(tf.constant(seq, dtype=tf.int32), FLAGS.encoder_size)
        float_classes_org = tf.matmul(float_labels, tf_seq)
        class_true = tf.ones(tf.shape(float_classes_org))
        class_false = tf.zeros(tf.shape(float_classes_org))
        float_classes = tf.where(
            tf.greater(float_classes_org, class_false), class_true, class_false)
        cross_entropy_class = self.calculate_loss(predictions_class, float_classes)
      elif FLAGS.support_type == "frequent":
        float_classes = float_labels[:, 0:FLAGS.encoder_size]
        cross_entropy_class = self.calculate_loss(predictions_class, float_classes)
      elif FLAGS.support_type == "encoder":
        float_classes = float_labels
        for i in range(FLAGS.encoder_layers):
          var_i = np.loadtxt(FLAGS.autoencoder_dir + 'autoencoder_layer%d.model' % i)
          weight_i = tf.constant(var_i[:-1, :], dtype=tf.float32)
          bias_i = tf.reshape(tf.constant(var_i[-1, :], dtype=tf.float32), [-1])
          float_classes = tf.nn.xw_plus_b(float_classes, weight_i, bias_i)
          if i < FLAGS.encoder_layers - 1:
            float_classes = tf.nn.relu(float_classes)
          else:
            float_classes = tf.nn.sigmoid(float_classes)
            # float_classes = tf.nn.relu(tf.sign(float_classes - 0.5))
        cross_entropy_class = self.calculate_mseloss(predictions_class, float_classes)
      else:
        float_classes = float_labels
        for i in range(FLAGS.moe_layers - 1):
          float_classes = tf.concat((float_classes, float_labels), axis=1)
        cross_entropy_class = self.calculate_loss(predictions_class, float_classes)
      cross_entropy_loss = self.calculate_loss(predictions, labels)
      return cross_entropy_loss + 0.1 * cross_entropy_class

