Python initialize_variables() example source code

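tf.initialize_variables(var_list) returns an op that initializes only the variables in var_list, in contrast to tf.initialize_all_variables(), which initializes every variable in the graph. In TensorFlow 1.x both are deprecated in favor of tf.variables_initializer() and tf.global_variables_initializer(). The snippets below use the op for partial initialization: local variables backing streaming metrics, variables added after restoring a checkpoint, and variables that a probe run reports as uninitialized. A minimal usage sketch follows; it is not taken from any of the projects below and is written against the same TF 0.x/1.x graph API:

import tensorflow as tf

a = tf.Variable(tf.zeros([3]), name='a')
b = tf.Variable(tf.ones([3]), name='b')

with tf.Session() as sess:
    # Initialize only `a`; `b` stays uninitialized until its own initializer runs.
    sess.run(tf.initialize_variables([a]))       # deprecated name
    # sess.run(tf.variables_initializer([a]))    # TF >= 1.0 equivalent
    print(sess.run(a))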
tuple_rect.py (project: failures_of_DL, author: shakedshammah)
def Affine(name_scope,input_tensor,out_channels, relu=True):
    input_shape = input_tensor.get_shape().as_list()
    input_channels = input_shape[-1]
    with tf.name_scope(name_scope):
        weights = tf.Variable(
            tf.truncated_normal([input_channels, out_channels],
                                stddev=1.0 / math.sqrt(float(input_channels))),name='weights')
        biases = tf.Variable(tf.zeros([out_channels]),name='biases')
#         initializer = tf.initialize_variables([weights,biases])
        if relu: return tf.nn.relu(tf.matmul(input_tensor, weights) + biases)#,initializer
        else: return tf.matmul(input_tensor, weights) + biases#,initializer

metric_ops_test.py (project: lsdc, author: febert)
def _test_streaming_sparse_precision_at_k(self,
                                            predictions,
                                            labels,
                                            k,
                                            expected,
                                            class_id=None,
                                            ignore_mask=None,
                                            weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
      if ignore_mask is not None:
        ignore_mask = tf.constant(ignore_mask, tf.bool)
      if weights is not None:
        weights = tf.constant(weights, tf.float32)
      metric, update = metrics.streaming_sparse_precision_at_k(
          predictions=tf.constant(predictions, tf.float32), labels=labels,
          k=k, class_id=class_id, ignore_mask=ignore_mask, weights=weights)

      # Fails without initialized vars.
      self.assertRaises(tf.OpError, metric.eval)
      self.assertRaises(tf.OpError, update.eval)
      tf.initialize_variables(tf.local_variables()).run()

      # Run per-step op and assert expected values.
      if math.isnan(expected):
        self.assertTrue(math.isnan(update.eval()))
        self.assertTrue(math.isnan(metric.eval()))
      else:
        self.assertEqual(expected, update.eval())
        self.assertEqual(expected, metric.eval())

metric_ops_test.py (project: lsdc, author: febert)
def test_sparse_tensor_value(self):
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
    expected_precision = 0.5
    with self.test_session():
      _, precision = metrics.streaming_sparse_precision_at_k(
          predictions=tf.constant(predictions, tf.float32),
          labels=_binary_2d_label_to_sparse_value(labels), k=1)

      tf.initialize_variables(tf.local_variables()).run()

      self.assertEqual(expected_precision, precision.eval())

metric_ops_test.py (project: lsdc, author: febert)
def _test_streaming_sparse_recall_at_k(self,
                                         predictions,
                                         labels,
                                         k,
                                         expected,
                                         class_id=None,
                                         ignore_mask=None,
                                         weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
      if ignore_mask is not None:
        ignore_mask = tf.constant(ignore_mask, tf.bool)
      if weights is not None:
        weights = tf.constant(weights, tf.float32)
      metric, update = metrics.streaming_sparse_recall_at_k(
          predictions=tf.constant(predictions, tf.float32),
          labels=labels, k=k, class_id=class_id, ignore_mask=ignore_mask,
          weights=weights)

      # Fails without initialized vars.
      self.assertRaises(tf.OpError, metric.eval)
      self.assertRaises(tf.OpError, update.eval)
      tf.initialize_variables(tf.local_variables()).run()

      # Run per-step op and assert expected values.
      if math.isnan(expected):
        self.assertTrue(math.isnan(update.eval()))
        self.assertTrue(math.isnan(metric.eval()))
      else:
        self.assertEqual(expected, update.eval())
        self.assertEqual(expected, metric.eval())

metric_ops_test.py (project: lsdc, author: febert)
def test_sparse_tensor_value(self):
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    labels = [[0, 0, 1, 0], [0, 0, 0, 1]]
    expected_recall = 0.5
    with self.test_session():
      _, recall = metrics.streaming_sparse_recall_at_k(
          predictions=tf.constant(predictions, tf.float32),
          labels=_binary_2d_label_to_sparse_value(labels), k=1)

      tf.initialize_variables(tf.local_variables()).run()

      self.assertEqual(expected_recall, recall.eval())

metric_ops_test.py (project: lsdc, author: febert)
def _test_streaming_sparse_precision_at_k(self,
                                            predictions,
                                            labels,
                                            k,
                                            expected,
                                            class_id=None,
                                            weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
      if weights is not None:
        weights = tf.constant(weights, tf.float32)
      metric, update = metrics.streaming_sparse_precision_at_k(
          predictions=tf.constant(predictions, tf.float32), labels=labels,
          k=k, class_id=class_id, weights=weights)

      # Fails without initialized vars.
      self.assertRaises(tf.OpError, metric.eval)
      self.assertRaises(tf.OpError, update.eval)
      tf.initialize_variables(tf.local_variables()).run()

      # Run per-step op and assert expected values.
      if math.isnan(expected):
        _assert_nan(self, update.eval())
        _assert_nan(self, metric.eval())
      else:
        self.assertEqual(expected, update.eval())
        self.assertEqual(expected, metric.eval())

metric_ops_test.py (project: lsdc, author: febert)
def test_top_k_rank_invalid(self):
    with self.test_session():
      # top_k_predictions has rank < 2.
      top_k_predictions = [9, 4, 6, 2, 0]
      sp_labels = tf.SparseTensorValue(
          indices=np.array([[0,], [1,], [2,]], np.int64),
          values=np.array([2, 7, 8], np.int64),
          shape=np.array([10,], np.int64))

      with self.assertRaises(ValueError):
        precision, _ = metrics.streaming_sparse_precision_at_top_k(
            top_k_predictions=tf.constant(top_k_predictions, tf.int64),
            labels=sp_labels)
        tf.initialize_variables(tf.local_variables()).run()
        precision.eval()

metric_ops_test.py (project: lsdc, author: febert)
def test_sparse_tensor_value(self):
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
    expected_precision = 0.5
    with self.test_session():
      _, precision = metrics.streaming_sparse_precision_at_k(
          predictions=tf.constant(predictions, tf.float32),
          labels=_binary_2d_label_to_sparse_value(labels), k=1)

      tf.initialize_variables(tf.local_variables()).run()

      self.assertEqual(expected_precision, precision.eval())

metric_ops_test.py (project: lsdc, author: febert)
def test_sparse_tensor_value(self):
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    labels = [[0, 0, 1, 0], [0, 0, 0, 1]]
    expected_recall = 0.5
    with self.test_session():
      _, recall = metrics.streaming_sparse_recall_at_k(
          predictions=tf.constant(predictions, tf.float32),
          labels=_binary_2d_label_to_sparse_value(labels), k=1)

      tf.initialize_variables(tf.local_variables()).run()

      self.assertEqual(expected_recall, recall.eval())

factorization_ops.py (project: lsdc, author: febert)
def initialize_op(self):
    """Returns an op for initializing tensorflow variables."""
    all_vars = self._row_factors + self._col_factors
    all_vars.extend([self._row_gramian, self._col_gramian])
    if self._row_weights is not None:
      assert self._col_weights is not None
      all_vars.extend(self._row_weights + self._col_weights)
    return tf.initialize_variables(all_vars)

core.py (project: tensorlight, author: bsautermeister)
def initialize_uninitialized_variables(session, var_list=None):
    """Initializes all uninitialized variables.
    Parameters
    ----------
    session: tf.Session
        The TensorFlow session to scan for uninitialized variables
    var_list: list(tf.Varaible) or None
        The list of variables to filter for uninitialized ones.
        Defaults to tf.all_variables() is used.
    """
    uninit_vars = uninitialized_variables(session, var_list)
    session.run(tf.initialize_variables(uninit_vars))

rbm_train_by_pair_layers.py (project: rbm_based_autoencoders_with_tensorflow, author: ikhlestov)
def _train_layer_pair(self):
        self.build_model()
        prev_run_no = self.params.get('run_no', None)
        self.define_runner_folders()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            self.sess = sess

            if prev_run_no:
                print("Restore variables from previous run:")
                restore_vars_dict = self._get_restored_variables_names()
                for var_name in restore_vars_dict.keys():
                    print("\t%s" % var_name)
                restorer = tf.train.Saver(restore_vars_dict)
                restorer.restore(sess, self.saves_path)
                print("Initialize not restored variables:")
                new_variables = self._get_new_variables_names()
                for var in new_variables:
                    print("\t%s" % var.name)
                sess.run(tf.initialize_variables(new_variables))

            else:
                print("Initialize new variables")
                tf.initialize_all_variables().run()
            self.summary_writer = tf.train.SummaryWriter(
                self.logs_dir, sess.graph)
            for epoch in range(self.params['epochs']):
                start = time.time()
                self._epoch_train_step()
                time_cons = time.time() - start
                time_cons = str(datetime.timedelta(seconds=time_cons))
                print("Epoch: %d, time consumption: %s" % (epoch, time_cons))

            # Save all trained variables
            saver = tf.train.Saver()
            saver.save(sess, self.saves_path)

batch_norm.py (project: DDPG, author: MOCR)
def __init__(self, x, size, selectTrain, sess, toTarget=None, ts=0.001):

        self.sess = sess
        self.mean_x_train, self.variance_x_train = moments(x, [0])

        #self.mean_x_ma, self.variance_x_ma = moments(self.x_splh, [0])

        self.mean_x_ma = tf.Variable(tf.zeros([size]))
        self.variance_x_ma = tf.Variable(tf.ones([size]))


        self.update = tf.tuple([self.variance_x_ma.assign(0.95*self.variance_x_ma+ 0.05*self.variance_x_train)] , control_inputs=[self.mean_x_ma.assign(0.95*self.mean_x_ma+ 0.05*self.mean_x_train)])[0]
        self.mean_x_ma_update = tf.tuple([self.mean_x_train] , control_inputs=[])[0]
        self.printUp = tf.Print(self.mean_x_ma_update, [selectTrain], message="selectTrain value : ")
        self.variance_x_ma_update = tf.tuple([self.variance_x_train], control_inputs=[])[0]

        def getxmau(): return self.mean_x_ma_update
        def getxma(): return self.mean_x_ma    

        def getvxmau(): return self.variance_x_ma_update
        def getvxma(): return self.variance_x_ma

        self.mean_x = tf.cond(selectTrain, getxmau, getxma)
        self.variance_x = tf.cond(selectTrain, getvxmau, getvxma)

        self.beta = tf.Variable(tf.zeros([size]))
        self.gamma = tf.Variable(tf.ones([size]))

        #tfs.tfs.session.run(tf.initialize_variables([self.beta, self.gamma]))#, self.mean_x_ma, self.variance_x_ma]))
        self.xNorm = tf.reshape(tf.nn.batch_norm_with_global_normalization(tf.reshape(x, [-1, 1, 1, size]), self.mean_x, self.variance_x, self.beta, self.gamma, 0.01, True), [-1, size])

        if toTarget!=None:
            self.isTracking = toTarget
            self.updateBeta = self.beta.assign(self.beta*(1-ts)+self.isTracking.beta*ts)
            self.updateGamma = self.gamma.assign(self.gamma*(1-ts)+self.isTracking.gamma*ts)
            self.updateTarget = tf.group(self.updateBeta, self.updateGamma)

parameterized.py (project: gail-driver, author: sisl)
def __setstate__(self, d):
        Serializable.__setstate__(self, d)
        global load_params
        if load_params:
            tf.get_default_session().run(tf.initialize_variables(self.get_params()))
            self.set_param_values(d["params"])

trainer.py (project: ssd_tensorflow, author: seann999)
def __init__(self, model_dir=None, gpu_fraction=0.7):
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.per_process_gpu_memory_fraction=gpu_fraction
        self.sess = tf.Session(config=config)
        self.imgs_ph, self.bn, self.output_tensors, self.pred_labels, self.pred_locs = model.model(self.sess)
        total_boxes = self.pred_labels.get_shape().as_list()[1]
        self.positives_ph, self.negatives_ph, self.true_labels_ph, self.true_locs_ph, self.total_loss, self.class_loss, self.loc_loss = \
            model.loss(self.pred_labels, self.pred_locs, total_boxes)
        out_shapes = [out.get_shape().as_list() for out in self.output_tensors]
        c.out_shapes = out_shapes
        c.defaults = model.default_boxes(out_shapes)

        # variables in model are already initialized, so only initialize those declared after
        with tf.variable_scope("optimizer"):
            self.global_step = tf.Variable(0)
            self.lr_ph = tf.placeholder(tf.float32, shape=[])

            self.optimizer = tf.train.AdamOptimizer(1e-3).minimize(self.total_loss, global_step=self.global_step)
        new_vars = tf.get_collection(tf.GraphKeys.VARIABLES, scope="optimizer")
        self.sess.run(tf.initialize_variables(new_vars))

        if model_dir is None:
            model_dir = FLAGS.model_dir

        ckpt = tf.train.get_checkpoint_state(model_dir)
        self.saver = tf.train.Saver()

        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            print("restored %s" % ckpt.model_checkpoint_path)

test_thin_stack.py (project: thinstack-rl, author: hans)
def test_basic_ff(self):
        self._make_stack(seq_length=5)

        X = np.array([
            [3, 1,  2],
            [3, 2,  4]
        ], dtype=np.int32).T

        transitions = np.array([
            [0, 0, 0, 1, 1],
            [0, 0, 1, 0, 1]
        ], dtype=np.float32)

        num_transitions = np.array([4, 4], dtype=np.int32)

        expected = np.array([[ 3.,  3.,  3.],
                             [ 3.,  3.,  3.],
                             [ 1.,  1.,  1.],
                             [ 2.,  2.,  2.],
                             [ 2.,  2.,  2.],
                             [ 5.,  5.,  5.],
                             [ 3.,  3.,  3.],
                             [ 4.,  4.,  4.],
                             [ 6.,  6.,  6.],
                             [ 9.,  9.,  9.]])

        # Run twice to make sure first state is properly erased
        with self.test_session() as s:
            s.run(tf.initialize_variables(tf.trainable_variables()))
            ts = self.stack

            feed = {ts.transitions[t]: transitions[:, t]
                    for t in range(self.seq_length)}
            feed[ts.buff] = X
            feed[ts.num_transitions] = num_transitions

            for _ in range(2):
                ts.reset(s)

                ret = s.run(ts.stack, feed)
                np.testing.assert_almost_equal(ret, expected)

func.py (project: tf_img_tech, author: david-berthelot)
def init(self):
        if not self.is_initialized:
            print('Initializing...')
            uninitialized_vars = []
            for var in tf.all_variables():
                try:
                    self.session.run(var)
                except tf.errors.FailedPreconditionError:
                    uninitialized_vars.append(var)
            tf.initialize_variables(uninitialized_vars).run()
            self.is_initialized = True

model_me.py (project: crnn_tf, author: liuhu-bigeye)
def assign_from_pkl(self, pkl_path):
    with open(pkl_path, 'rb') as f:
      load_variables = pickle.load(f)

    uninitialized_vars = []
    for i, variable in enumerate(tf.global_variables()):
      # 0 -41
      # 42-77 + 10
      # 78-117+ 20
      if i<=41:
        idx = i
      elif i<=77:
        idx = i + 10
      elif i<=117:
        idx = i + 20
      else:
        uninitialized_vars.append(variable)
        continue

      variable_shape = load_variables[idx].shape
      if len(variable_shape) == 1:
        load_variable = load_variables[idx]
      elif len(variable_shape) == 4:
        load_variable = np.transpose(load_variables[idx], [3, 2, 1, 0])
      elif len(variable_shape) == 3:
        load_variable = np.transpose(load_variables[idx], [2, 1, 0])
      else:
        assert False

      print variable.name, variable.get_shape(), load_variable.shape
      variable.assign(load_variable).op.run()

    pdb.set_trace()
    tf.initialize_variables(uninitialized_vars).op.run()
    return

train.py (project: Face-Recognition, author: aswl01)
def restore_variables(sess, saver, pretrained_model):
    saver.restore(sess, pretrained_model)
    uninit_vars = []
    for var in tf.all_variables():
        try:
            sess.run(var)
        except tf.errors.FailedPreconditionError:
            uninit_vars.append(var)

    init_new_vars_op = tf.initialize_variables(uninit_vars)
    sess.run(init_new_vars_op)

prune.py (project: ternarynet, author: czhu95)
def _setup_graph(self):
        self._init_mask_op = tf.initialize_variables(tf.get_collection('masks'))
        self._init_thre_op = tf.initialize_variables(tf.get_collection('thresholds'))

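Several of the snippets above (core.py, func.py, train.py) locate uninitialized variables by running each variable and catching tf.errors.FailedPreconditionError before passing the survivors to tf.initialize_variables(). Below is a minimal sketch of the same pattern using tf.report_uninitialized_variables(), which is part of the standard TF 1.x API but is not used in any of the projects above:

import tensorflow as tf

def init_uninitialized(sess):
    # Names (as bytes) of variables that do not yet hold a value.
    uninit_names = set(sess.run(tf.report_uninitialized_variables()))
    uninit_vars = [v for v in tf.global_variables()
                   if v.name.split(':')[0].encode() in uninit_names]
    sess.run(tf.variables_initializer(uninit_vars))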
