Python: example source code for initialize_variables()

dqn_utils.py (project: deep-q-learning, author: alvinwan)
def initialize_interdependent_variables(session, vars_list, feed_dict):
    """Initialize a list of variables one at a time, which is useful if
    initialization of some variables depends on initialization of the others.
    """
    vars_left = vars_list
    while len(vars_left) > 0:
        new_vars_left = []
        for v in vars_left:
            try:
                # If using an older version of TensorFlow, uncomment the line
                # below and comment out the line after it.
                # session.run(tf.initialize_variables([v]), feed_dict)
                session.run(tf.variables_initializer([v]), feed_dict)
            except tf.errors.FailedPreconditionError:
                new_vars_left.append(v)
        if len(new_vars_left) >= len(vars_left):
            # This can happen if the variables all depend on each other, or more
            # likely if there's another variable outside of the list that still
            # needs to be initialized. This could be detected here, but life's finite.
            raise Exception("Cycle in variable dependencies, or external precondition unsatisfied.")
        else:
            vars_left = new_vars_left
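
A minimal usage sketch for the helper above (my own example, assuming TensorFlow 1.x): b's initial value reads a directly, so a single tf.global_variables_initializer() run can fail with FailedPreconditionError depending on op ordering, which is exactly the case the retry loop handles.

import tensorflow as tf

a = tf.Variable(tf.ones([3]), name="a")
b = tf.Variable(2.0 * a, name="b")  # initializer reads a, so a must be initialized first

sess = tf.Session()
initialize_interdependent_variables(sess, [a, b], {})
print(sess.run(b))  # [2. 2. 2.]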
tensorflow_backend.py (project: keras, author: GeekLiB)
def _initialize_variables():
    if hasattr(tf, 'global_variables'):
        variables = tf.global_variables()
    else:
        variables = tf.all_variables()

    uninitialized_variables = []
    for v in variables:
        if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
            uninitialized_variables.append(v)
            v._keras_initialized = True
    if uninitialized_variables:
        sess = get_session()
        if hasattr(tf, 'variables_initializer'):
            sess.run(tf.variables_initializer(uninitialized_variables))
        else:
            sess.run(tf.initialize_variables(uninitialized_variables))
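
The hasattr checks above feature-detect the rename of tf.initialize_variables to tf.variables_initializer (around TF 0.12). A standalone sketch of the same compatibility shim (the helper name is hypothetical):

import tensorflow as tf

def compat_variables_initializer(var_list):
    # Prefer the current API name; fall back to the deprecated pre-0.12 one.
    if hasattr(tf, 'variables_initializer'):
        return tf.variables_initializer(var_list)
    return tf.initialize_variables(var_list)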
dqn_utils.py (project: rl_algorithms, author: DanielTakeshi)
def initialize_interdependent_variables(session, vars_list, feed_dict):
    """Initialize a list of variables one at a time, which is useful if
    initialization of some variables depends on initialization of the others.
    """
    vars_left = vars_list
    while len(vars_left) > 0:
        new_vars_left = []
        for v in vars_left:
            try:
                # If using an older version of TensorFlow, uncomment the line
                # below and comment out the line after it.
                # session.run(tf.initialize_variables([v]), feed_dict)
                session.run(tf.variables_initializer([v]), feed_dict)
            except tf.errors.FailedPreconditionError:
                new_vars_left.append(v)
        if len(new_vars_left) >= len(vars_left):
            # This can happen if the variables all depend on each other, or more
            # likely if there's another variable outside of the list that still
            # needs to be initialized. This could be detected here, but life's finite.
            raise Exception("Cycle in variable dependencies, or external precondition unsatisfied.")
        else:
            vars_left = new_vars_left
tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def _initialize_variables():
    if hasattr(tf, 'global_variables'):
        variables = tf.global_variables()
    else:
        variables = tf.all_variables()

    uninitialized_variables = []
    for v in variables:
        if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
            uninitialized_variables.append(v)
            v._keras_initialized = True
    if uninitialized_variables:
        sess = get_session()
        if hasattr(tf, 'variables_initializer'):
            sess.run(tf.variables_initializer(uninitialized_variables))
        else:
            sess.run(tf.initialize_variables(uninitialized_variables))
resnet.py (project: bone-age, author: radinformatics)
def __init__(self, checkpoint_path):
        layers = 50
        num_blocks = [3, 4, 6, 3]
        self.inference = lambda images, is_train : inference(images, 
                                                   is_training=is_train, 
                                                   num_classes=NUM_AGES*2,
                                                   num_blocks=num_blocks, 
                                                   bottleneck=True)

        self.x = tf.placeholder(tf.uint8, shape=(256,256,3), name='input_image')
        self.crops = fixed_crops(self.x)
        self.logits = self.inference(self.crops, is_train=False)
        self.pred = tf.nn.softmax(self.logits, name='prediction')

        # Restore saved weights
        restore_variables = tf.trainable_variables() \
                + tf.moving_average_variables()
        self.saver = tf.train.Saver(restore_variables)
        self.sess = tf.Session()
        self.saver.restore(self.sess, checkpoint_path)

        #self.sess.run(tf.initialize_variables([var for var \
        #        in tf.all_variables() if var not in restore_variables]))
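
Design note: tf.trainable_variables() plus tf.moving_average_variables() covers the learned weights and (typically, in ResNet implementations) the batch-norm running statistics, so Saver.restore brings back everything inference needs; the commented-out lines show how any leftover variables could instead be initialized explicitly.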
rnn-speed.py (project: LSTM-TensorSpark, author: EmanuelOverflow)
def evaluate(self, t_data, t_label, s):
        state = self.fit_next(t_data, s, train=False)
        label = tf.Variable(t_label, name="label", trainable=False, dtype=tf.float32)
        s.run(tf.initialize_variables([label]))
        with tf.name_scope('evaluate'):
            return self.output_layer.evaluate(tf.transpose(state[0]), label)


        # decay_fn = tf.train.exponential_decay
        # Tutta sta roba da aggiornare???
        # loss = tf.argmax(self.ht, 1)
        # learning_rate_decay_fn=decay_fn
        # optimization = tf.contrib.layers.optimize_loss(self.ht, global_step=tf.Variable([1, 1]), optimizer=optimizer,
        #                                                learning_rate=0.01,
        #                                                variables=[self.weight_forget, self.weight_input, self.weight_output,
        #                                                           self.weight_C, self.biases_forget, self.biases_input,
        #                                                           self.biases_C, self.biases_output])
        # opt_op = optimizer.minimize(loss, var_list=[self.weight_forget, self.weight_input, self.weight_output,
        # self.weight_C, self.biases_forget, self.biases_input, self.biases_C,
        # self.biases_output])

metric_ops_test.py (project: lsdc, author: febert)
def _test_streaming_sparse_average_precision_at_k(
      self, predictions, labels, k, expected, weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
      if weights is not None:
        weights = tf.constant(weights, tf.float32)
      predictions = tf.constant(predictions, tf.float32)
      metric, update = metrics.streaming_sparse_average_precision_at_k(
          predictions=predictions, labels=labels, k=k, weights=weights)

      # Fails without initialized vars.
      self.assertRaises(tf.OpError, metric.eval)
      self.assertRaises(tf.OpError, update.eval)
      local_variables = tf.local_variables()
      tf.initialize_variables(local_variables).run()

      # Run per-step op and assert expected values.
      if math.isnan(expected):
        self.assertTrue(math.isnan(update.eval()))
        self.assertTrue(math.isnan(metric.eval()))
      else:
        self.assertAlmostEqual(expected, update.eval())
        self.assertAlmostEqual(expected, metric.eval())
metric_ops_test.py (project: lsdc, author: febert)
def _test_streaming_sparse_precision_at_top_k(self,
                                                top_k_predictions,
                                                labels,
                                                expected,
                                                class_id=None,
                                                weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
      if weights is not None:
        weights = tf.constant(weights, tf.float32)
      metric, update = metrics.streaming_sparse_precision_at_top_k(
          top_k_predictions=tf.constant(top_k_predictions, tf.int32),
          labels=labels, class_id=class_id, weights=weights)

      # Fails without initialized vars.
      self.assertRaises(tf.OpError, metric.eval)
      self.assertRaises(tf.OpError, update.eval)
      tf.initialize_variables(tf.local_variables()).run()

      # Run per-step op and assert expected values.
      if math.isnan(expected):
        self.assertTrue(math.isnan(update.eval()))
        self.assertTrue(math.isnan(metric.eval()))
      else:
        self.assertEqual(expected, update.eval())
        self.assertEqual(expected, metric.eval())
metric_ops_test.py (project: lsdc, author: febert)
def _test_streaming_sparse_average_precision_at_k(
      self, predictions, labels, k, expected, weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
      if weights is not None:
        weights = tf.constant(weights, tf.float32)
      predictions = tf.constant(predictions, tf.float32)
      metric, update = metrics.streaming_sparse_average_precision_at_k(
          predictions, labels, k, weights=weights)

      # Fails without initialized vars.
      self.assertRaises(tf.OpError, metric.eval)
      self.assertRaises(tf.OpError, update.eval)
      local_variables = tf.local_variables()
      tf.initialize_variables(local_variables).run()

      # Run per-step op and assert expected values.
      if math.isnan(expected):
        _assert_nan(self, update.eval())
        _assert_nan(self, metric.eval())
      else:
        self.assertAlmostEqual(expected, update.eval())
        self.assertAlmostEqual(expected, metric.eval())
tensorflow_backend.py (project: keras-customized, author: ambrite)
def _initialize_variables():
    if hasattr(tf, 'global_variables'):
        variables = tf.global_variables()
    else:
        variables = tf.all_variables()

    uninitialized_variables = []
    for v in variables:
        if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
            uninitialized_variables.append(v)
            v._keras_initialized = True
    if uninitialized_variables:
        sess = get_session()
        if hasattr(tf, 'variables_initializer'):
            sess.run(tf.variables_initializer(uninitialized_variables))
        else:
            sess.run(tf.initialize_variables(uninitialized_variables))
test_optimizer.py (project: tensorprob, author: tensorprob)
def test_scipy_lbfgsb():
    sess = tf.Session()
    x = tf.Variable(np.float64(2), name='x')
    sess.run(tf.initialize_variables([x]))
    optimizer = ScipyLBFGSBOptimizer(verbose=True, session=sess)
    # With gradient
    results = optimizer.minimize([x], x**2, [2 * x])
    assert results.success
    # Without gradient
    results = optimizer.minimize([x], x**2)
    assert results.success
    # Test callback
    def callback(xs):
        pass
    optimizer = ScipyLBFGSBOptimizer(verbose=True, session=sess, callback=callback)
    assert optimizer.minimize([x], x**2).success
    @raises(ValueError)
    def test_illegal_parameter_as_variable1():
        optimizer.minimize([42], x**2)
    test_illegal_parameter_as_variable1()
    @raises(ValueError)
    def test_illegal_parameter_as_variable2():
        optimizer.minimize(42, x**2)
    test_illegal_parameter_as_variable2()
test_optimizer.py (project: tensorprob, author: tensorprob)
def test_migrad():
    sess = tf.Session()
    x = tf.Variable(np.float64(2), name='x')
    sess.run(tf.initialize_variables([x]))
    optimizer = MigradOptimizer(session=sess)
    # With gradient
    results = optimizer.minimize([x], x**2, [2 * x])
    assert results.success
    # Without gradient
    results = optimizer.minimize([x], x**2)
    assert results.success
    @raises(ValueError)
    def test_illegal_parameter_as_variable1():
        optimizer.minimize([42], x**2)
    test_illegal_parameter_as_variable1()
    @raises(ValueError)
    def test_illegal_parameter_as_variable2():
        optimizer.minimize(42, x**2)
    test_illegal_parameter_as_variable2()
thin_stack.py (project: thinstack-rl, author: hans)
def _create_state(self):
        """Prepare stateful variables modified during the recurrence."""

        # Both the queue and the stack are flattened stack_size * batch_size
        # tensors. `stack_size` many blocks of `batch_size` values
        stack_shape = (self.stack_size * self.batch_size, self.model_dim)
        self.stack = tf.Variable(tf.zeros(stack_shape, dtype=tf.float32),
                                 trainable=False, name="stack")
        self.queue = tf.Variable(tf.zeros((self.stack_size * self.batch_size,), dtype=tf.float32),
                                 trainable=False, name="queue")

        self.buff_cursors = tf.Variable(tf.zeros((self.batch_size,), dtype=tf.float32),
                                          trainable=False, name="buff_cursors")
        self.cursors = tf.Variable(tf.ones((self.batch_size,), dtype=tf.float32) * -1,
                                   trainable=False, name="cursors")

        # TODO make parameterizable
        self.tracking_value = tf.Variable(tf.zeros((self.batch_size, self.tracking_dim), dtype=tf.float32),
                                          trainable=False, name="tracking_value")

        # Create an Op which will (re-)initialize the auxiliary variables
        # declared above.
        self._aux_vars = [self.stack, self.queue, self.buff_cursors, self.cursors,
                          self.tracking_value]
        self.variable_initializer = tf.initialize_variables(self._aux_vars)
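
Design note: grouping the auxiliary state into one initializer Op means the recurrence state can be reset with a single session.run(self.variable_initializer) call between examples, without re-initializing any trainable parameters.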
tensorflow_backend.py (project: keras, author: NVIDIA)
def _initialize_variables():
    if hasattr(tf, 'global_variables'):
        variables = tf.global_variables()
    else:
        variables = tf.all_variables()

    uninitialized_variables = []
    for v in variables:
        if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
            uninitialized_variables.append(v)
            v._keras_initialized = True
    if uninitialized_variables:
        sess = get_session()
        if hasattr(tf, 'variables_initializer'):
            sess.run(tf.variables_initializer(uninitialized_variables))
        else:
            sess.run(tf.initialize_variables(uninitialized_variables))
graphs.py (project: AuthoringDecompositions, author: jrock08)
def __init__(self, codes_shape, scipy_gmm):
        self.graph = tf.Graph()
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        self.sess = tf.Session(graph=self.graph, config=sess_config)

        print self.graph
        n,c = codes_shape

        with self.graph.as_default():
            self.pl = PlaceholderManager()
            self.pl.add_placeholder('codes', tf.float32, codes_shape)
            self.pl.add_placeholder('phase_train', tf.bool, [])

            with tf.variable_scope('GMM'):
                self.gmm = models.GMM(self.pl['codes'], scipy_gmm, self.pl['phase_train'])

        print var_collect.collect_all(self.graph)
        #tf.initialize_variables(
        #    var_list=var_collect.collect_all(self.graph)).run(session=self.sess)
model.py (project: tfplus, author: renmengye)
def init(self, sess):
        if not self.has_built_all:
            raise Exception(
                'Need to call build_all or build_eval before init')
        self._has_init = True
        my_var_list = self.get_all_vars()
        sess.run(tf.initialize_variables(my_var_list))
        return self
block_compiler_test.py (project: fold, author: tensorflow)
def test_all_initialized(self):
    with self.test_session() as sess:
      x = tf.Variable(tf.zeros([]))
      sess.run(tf.initialize_variables([x]))
      self.assertEqual([], tdc._init_uninitialized(sess))
model.py (project: uai2017_learning_to_acquire_information, author: evanthebouncy)
def __init__(self, name):
    with tf.variable_scope('inv') as scope:
      self.true_label = tf.placeholder(tf.float32, [N_BATCH, X_L], name="true_label_"+name)
      self.observations = tf.placeholder(tf.float32, [N_BATCH, L, L, 2], name="obs_"+name)

      self.n_hidden = 1200

      W_inv1 = weight_variable([L*L*2, self.n_hidden], name="W_inv1_"+name)
      b_inv1 = bias_variable([self.n_hidden], name="b_inv1_"+name)

      W_inv2 = weight_variable([self.n_hidden,X_L], name="W_inv2_"+name)
      b_inv2 = bias_variable([X_L], name="b_inv2_"+name)

      self.VARS = [W_inv1, b_inv1, W_inv2, b_inv2]

      reshape_ob = tf.reshape(self.observations, [N_BATCH, L*L*2])
      blah = tf.nn.relu(tf.matmul(reshape_ob, W_inv1) + b_inv1)
      epsilon1 = tf.constant(1e-10, shape=[N_BATCH, X_L])
      self.pred = tf.nn.softmax(tf.matmul(blah, W_inv2) + b_inv2) + epsilon1
      self.cost = -tf.reduce_sum(self.true_label * tf.log(self.pred))

      optimizer = tf.train.RMSPropOptimizer(0.001)

      inv_gvs = optimizer.compute_gradients(self.cost)
      self.train_inv = optimizer.apply_gradients(inv_gvs)

      all_var_var = tf.get_collection(tf.GraphKeys.VARIABLES, scope='inv')
      self.init = tf.initialize_variables(all_var_var)
      self.saver = tf.train.Saver(self.VARS)

  # train on a particular data batch
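
Design note: tf.get_collection(tf.GraphKeys.VARIABLES, scope='inv') gathers every variable created under the 'inv' scope (including the RMSProp slot variables created inside it), so self.init initializes this model's state without touching the rest of the graph; GraphKeys.VARIABLES is the pre-1.0 alias of GraphKeys.GLOBAL_VARIABLES.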
model_hypothesis.py (project: uai2017_learning_to_acquire_information, author: evanthebouncy)
def __init__(self, name):
    with tf.variable_scope('inv') as scope:
      self.true_label = tf.placeholder(tf.float32, [N_BATCH, X_L], name="true_label_"+name)
      self.observations = tf.placeholder(tf.float32, [N_BATCH, L, L, 2], name="obs_"+name)

      self.n_hidden = 1200

      W_inv1 = weight_variable([L*L*2, self.n_hidden], name="W_inv1_"+name)
      b_inv1 = bias_variable([self.n_hidden], name="b_inv1_"+name)

      W_inv2 = weight_variable([self.n_hidden,X_L], name="W_inv2_"+name)
      b_inv2 = bias_variable([X_L], name="b_inv2_"+name)

      self.VARS = [W_inv1, b_inv1, W_inv2, b_inv2]

      reshape_ob = tf.reshape(self.observations, [N_BATCH, L*L*2])
      blah = tf.nn.relu(tf.matmul(reshape_ob, W_inv1) + b_inv1)
      epsilon1 = tf.constant(1e-10, shape=[N_BATCH, X_L])
      self.pred = tf.nn.softmax(tf.matmul(blah, W_inv2) + b_inv2) + epsilon1
      self.cost = -tf.reduce_sum(self.true_label * tf.log(self.pred))

      optimizer = tf.train.RMSPropOptimizer(0.001)

      inv_gvs = optimizer.compute_gradients(self.cost)
      self.train_inv = optimizer.apply_gradients(inv_gvs)

      all_var_var = tf.get_collection(tf.GraphKeys.VARIABLES, scope='inv')
      self.init = tf.initialize_variables(all_var_var)
      self.saver = tf.train.Saver(self.VARS)

  # train on a particular data batch
parameterized.py (project: third_person_im, author: bstadie)
def __setstate__(self, d):
        Serializable.__setstate__(self, d)
        global load_params
        if load_params:
            tf.get_default_session().run(tf.initialize_variables(self.get_params()))
            self.set_param_values(d["params"])
crbm.py (project: CDBN-for-Tensorflow, author: shygiants)
def build_init_ops(self):
        if self.model_exists:
            self.init_ops = tf.initialize_local_variables()
        else:
            self.init_ops = [tf.initialize_local_variables(), tf.initialize_variables(self.variables)]
        self.saver = tf.train.Saver(self.variables)
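
Design note: when a checkpoint already exists, only the local variables are initialized and the model variables are left for Saver.restore to fill in; otherwise both sets are initialized from scratch.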
rnn-speed.py (project: LSTM-TensorSpark, author: EmanuelOverflow)
def fit_next(self, data, s, last_state=True, train=True):  # set choose optimizer
        with tf.name_scope('optimizer'):
            input_data_T = tf.transpose([data], name="input_data_T")

            if not self.ht:
                # Init h_t
                self.ht = tf.Variable(tf.random_normal([self.shape[0], 1]), trainable=False, name="ht_%d" % self.node_id)
                # Init C_t
                self.Ct = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="Ct_%d" % self.node_id)
                #self.Cta = tf.Variable(tf.ones([self.shape[0] + input_length, self.shape[0]]), trainable=False, name="Cat_%d" % self.node_id)

                # Init layers variables
                self.ft = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="ft_%d" % self.node_id)
                self.it = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="it_%d" % self.node_id)
                self.Cta = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="Cta_%d" % self.node_id)

                s.run(tf.initialize_variables([self.ht, self.Ct, self.ft, self.it, self.Cta]))

            with tf.name_scope('train_layer'):
                self.train_layer(input_data_T, s)
                if train:
                    self.state.append((self.ht, self.Ct)) # store the state of each step
                    ret = self.state[-1] if last_state else self.state
                else:
                    ret = (self.ht, self.Ct)
                    self.restore_state()
        return ret
rnn-speed.py (project: LSTM-TensorSpark, author: EmanuelOverflow)
def minimize(self, data, t_label, s, optimizer):
        label = tf.Variable(t_label, name="label", trainable=False, dtype=tf.float32)
        s.run(tf.initialize_variables([label]))
        with tf.name_scope('cost_function'):
            cost = self.output_layer.compute_loss(tf.transpose(self.ht), label)
        with tf.name_scope('minimization'):
            #optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
            return optimizer.minimize(cost)
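
Note: creating a fresh label Variable and initializer op on every minimize call keeps growing the graph; feeding labels through a tf.placeholder would keep the graph static. The same pattern appears in the rnn-no-spark.py and rnn.py variants below.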
rnn-no-spark.py (project: LSTM-TensorSpark, author: EmanuelOverflow)
def fit_next(self, data, s, last_state=True, train=True):  # set choose optimizer
        with tf.name_scope('optimizer'):
            input_data_T = tf.transpose([data], name="input_data_T")

            if not self.ht:
                # Init h_t
                self.ht = tf.Variable(tf.random_normal([self.shape[0], 1]), trainable=False, name="ht_%d" % self.node_id)
                # Init C_t
                self.Ct = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="Ct_%d" % self.node_id)
                #self.Cta = tf.Variable(tf.ones([self.shape[0] + input_length, self.shape[0]]), trainable=False, name="Cat_%d" % self.node_id)

                # Init layers variables
                self.ft = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="ft_%d" % self.node_id)
                self.it = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="it_%d" % self.node_id)
                self.Cta = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="Cta_%d" % self.node_id)

                s.run(tf.initialize_variables([self.ht, self.Ct, self.ft, self.it, self.Cta]))

            with tf.name_scope('train_layer'):
                self.train_layer(input_data_T, s)
                if train:
                    self.state.append((self.ht, self.Ct)) # store the state of each step
                    ret = self.state[-1] if last_state else self.state
                else:
                    ret = (self.ht, self.Ct)
                    self.restore_state()
        return ret
rnn-no-spark.py (project: LSTM-TensorSpark, author: EmanuelOverflow)
def minimize(self, data, t_label, s, optimizer):
        label = tf.Variable(t_label, name="label", trainable=False, dtype=tf.float32)
        s.run(tf.initialize_variables([label]))
        with tf.name_scope('cost_function'):
            cost = self.output_layer.compute_loss(tf.transpose(self.ht), label)
        with tf.name_scope('minimization'):
            #optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
            return optimizer.minimize(cost, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
rnn.py (project: LSTM-TensorSpark, author: EmanuelOverflow)
def fit_next(self, data, s, last_state=True, train=True):  # set choose optimizer
        with tf.name_scope('optimizer'):
            input_data_T = tf.transpose([data], name="input_data_T")

            if not self.ht:
                # Init h_t
                self.ht = tf.Variable(tf.random_normal([self.shape[0], 1]), trainable=False, name="ht_%d" % self.node_id)
                # Init C_t
                self.Ct = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="Ct_%d" % self.node_id)

                # Init layers variables
                self.ft = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="ft_%d" % self.node_id)
                self.it = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="it_%d" % self.node_id)
                self.Cta = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="Cta_%d" % self.node_id)

                s.run(tf.initialize_variables([self.ht, self.Ct, self.ft, self.it, self.Cta]))

            with tf.name_scope('train_layer'):
                self.train_layer(input_data_T, s)
                if train:
                    self.state.append((self.ht, self.Ct)) # store the state of each step
                    ret = self.state[-1] if last_state else self.state
                else:
                    ret = (self.ht, self.Ct)
                    self.restore_state()
        return ret
rnn.py (project: LSTM-TensorSpark, author: EmanuelOverflow)
def minimize(self, data, t_label, s, optimizer):
        label = tf.Variable(t_label, name="label", trainable=False, dtype=tf.float32)
        s.run(tf.initialize_variables([label]))
        with tf.name_scope('cost_function'):
            cost = self.output_layer.compute_loss(tf.transpose(self.ht), label)
        with tf.name_scope('minimization'):
            #optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
            return optimizer.minimize(cost, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
rnn.py (project: LSTM-TensorSpark, author: EmanuelOverflow)
def evaluate(self, t_data, t_label, s):
        state = self.fit_next(t_data, s, train=False)
        label = tf.Variable(t_label, name="label", trainable=False, dtype=tf.float32)
        s.run(tf.initialize_variables([label]))
        with tf.name_scope('evaluate'):
            return self.output_layer.evaluate(tf.transpose(state[0]), label)
tuple_rect_SNR.py (project: failures_of_DL, author: shakedshammah)
def Affine(name_scope,input_tensor,out_channels, relu=True, init_sess=None):
    input_shape = input_tensor.get_shape().as_list()
    input_channels = input_shape[-1]
    with tf.name_scope(name_scope):
        weights = tf.Variable(
            tf.truncated_normal([input_channels, out_channels],
                                stddev=1.0 / math.sqrt(float(input_channels))),name='weights')
        biases = tf.Variable(tf.zeros([out_channels]),name='biases')
        if init_sess is not None: init_sess.run(tf.initialize_variables([weights,biases]))
        if relu: return tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
        else: return tf.matmul(input_tensor, weights) + biases
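
A hypothetical usage of the Affine helper above (TF 1.x assumed; names are illustrative), stacking two layers and initializing each layer's variables eagerly through init_sess:

import tensorflow as tf

sess = tf.Session()
x = tf.placeholder(tf.float32, [None, 8])
hidden = Affine('hidden', x, 16, relu=True, init_sess=sess)
output = Affine('output', hidden, 1, relu=False, init_sess=sess)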

