Python placeholder() usage examples (source code)

metrics_test.py (project: seq2seq, author: google)
def _test_metric_spec(self, metric_spec, hyps, refs, expected_scores):
    """Tests a MetricSpec"""
    predictions = {"predicted_tokens": tf.placeholder(dtype=tf.string)}
    labels = {"target_tokens": tf.placeholder(dtype=tf.string)}

    value, update_op = metric_spec.create_metric_ops(None, labels, predictions)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())

      scores = []
      for hyp, ref in zip(hyps, refs):
        hyp = hyp.split(" ")
        ref = ref.split(" ")
        sess.run(update_op, {
            predictions["predicted_tokens"]: [hyp],
            labels["target_tokens"]: [ref]
        })
        scores.append(sess.run(value))

      for score, expected in zip(scores, expected_scores):
        np.testing.assert_almost_equal(score, expected, decimal=2)
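
The helper above feeds whole token lists into unshaped string placeholders. A minimal, self-contained sketch of that feeding pattern (not from the seq2seq project; the tokens are illustrative):

import tensorflow as tf

# A string placeholder with no declared shape accepts nested Python lists of
# tokens at feed time; the rank is inferred from the fed value.
predicted = tf.placeholder(dtype=tf.string)

with tf.Session() as sess:
    shape = sess.run(tf.shape(predicted),
                     feed_dict={predicted: [["the", "cat", "sat"]]})
    print(shape)  # [1 3]: one hypothesis of three tokens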
bidirectional.py (project: deep-summarization, author: harpribot)
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            # review input - Both original and reversed
            self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend some "GO" token and drop the final
            # token of the encoder input
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] + self.labels[:-1])
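
The per-time-step placeholders above are typically fed by transposing a [batch, time] array into seq_length separate vectors. A minimal sketch under assumed sizes (seq_length and the batch values are illustrative, not the project's):

import numpy as np
import tensorflow as tf

seq_length, batch_size = 5, 2
enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
           for t in range(seq_length)]

# One placeholder per time step: column t of the batch feeds enc_inp[t].
batch = np.random.randint(0, 100, size=(batch_size, seq_length)).astype(np.int32)
feed = {enc_inp[t]: batch[:, t] for t in range(seq_length)}

with tf.Session() as sess:
    print(sess.run(enc_inp[0], feed_dict=feed))  # first time step of every example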
stacked_simple.py (project: deep-summarization, author: harpribot)
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            self.enc_inp = [tf.placeholder(tf.int32, shape=(None,),
                                           name="input%i" % t)
                            for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,),
                                          name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend some "GO" token and drop the final
            # token of the encoder input
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")]
                            + self.labels[:-1])
simple.py (project: deep-summarization, author: harpribot)
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            self.enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend some "GO" token and drop the final
            # token of the encoder input
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] + self.labels[:-1])
stacked_bidirectional.py (project: deep-summarization, author: harpribot)
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            # review input - Both original and reversed
            self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend some "GO" token and drop the final
            # token of the encoder input
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] + self.labels[:-1])
nasm.py (project: variational-text-tensorflow, author: carpedm20)
def build_model(self):
    self.q = tf.placeholder(tf.float32, [self.reader.vocab_size], name="question")
    self.a = tf.placeholder(tf.float32, [self.reader.vocab_size], name="answer")

    self.build_encoder()
    self.build_decoder()

    # Kullback Leibler divergence
    self.e_loss = -0.5 * tf.reduce_sum(1 + self.log_sigma_sq - tf.square(self.mu) - tf.exp(self.log_sigma_sq))

    # Log likelihood
    self.g_loss = tf.reduce_sum(tf.log(self.p_x_i))

    self.loss = tf.reduce_mean(self.e_loss + self.g_loss)
    self.optim = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(-self.loss)

    _ = tf.scalar_summary("encoder loss", self.e_loss)
    _ = tf.scalar_summary("decoder loss", self.g_loss)
    _ = tf.scalar_summary("loss", self.loss)
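
The e_loss above is the standard KL divergence between a diagonal Gaussian N(mu, sigma^2) and N(0, I), with log_sigma_sq = log(sigma^2). A small self-contained sketch of just that term (placeholder shapes are illustrative):

import numpy as np
import tensorflow as tf

mu = tf.placeholder(tf.float32, [None])
log_sigma_sq = tf.placeholder(tf.float32, [None])
# KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
kl = -0.5 * tf.reduce_sum(1 + log_sigma_sq - tf.square(mu) - tf.exp(log_sigma_sq))

with tf.Session() as sess:
    # The divergence vanishes when mu = 0 and sigma = 1 (log_sigma_sq = 0).
    print(sess.run(kl, {mu: np.zeros(3, np.float32),
                        log_sigma_sq: np.zeros(3, np.float32)}))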
data_set_helpers.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def __init__(self, files_list, thread_count, batch_size, numcep, numcontext, next_index=lambda x: x + 1):
        self._coord = None
        self._numcep = numcep
        self._x = tf.placeholder(tf.float32, [None, numcep + (2 * numcep * numcontext)])
        self._x_length = tf.placeholder(tf.int32, [])
        self._y = tf.placeholder(tf.int32, [None,])
        self._y_length = tf.placeholder(tf.int32, [])
        self.example_queue = tf.PaddingFIFOQueue(shapes=[[None, numcep + (2 * numcep * numcontext)], [], [None,], []],
                                                  dtypes=[tf.float32, tf.int32, tf.int32, tf.int32],
                                                  capacity=2 * self._get_device_count() * batch_size)
        self._enqueue_op = self.example_queue.enqueue([self._x, self._x_length, self._y, self._y_length])
        self._close_op = self.example_queue.close(cancel_pending_enqueues=True)
        self.batch_size = batch_size
        self._numcontext = numcontext
        self._thread_count = thread_count
        self._files_list = self._create_files_list(files_list)
        self._next_index = next_index
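
On the consuming side, a PaddingFIFOQueue pads the variable-length components of each example up to the longest element in the dequeued batch. A minimal sketch of that behavior with a single [None]-shaped component (toy values, not the class above):

import tensorflow as tf

queue = tf.PaddingFIFOQueue(capacity=10, dtypes=[tf.int32], shapes=[[None]])
enqueue_a = queue.enqueue([[1, 2, 3]])
enqueue_b = queue.enqueue([[4]])
batch = queue.dequeue_many(2)  # shorter elements are zero-padded

with tf.Session() as sess:
    sess.run(enqueue_a)
    sess.run(enqueue_b)
    print(sess.run(batch))  # [[1 2 3] [4 0 0]]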
data_set_helpers_RHL.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def __init__(self, files_list, thread_count, batch_size, numcep, numcontext, next_index=lambda x: x + 1):
        self._coord = None
        self._numcep = numcep
        self._x = tf.placeholder(tf.float32, [None, numcep + (2 * numcep * numcontext)])
        self._x_length = tf.placeholder(tf.int32, [])
        self._y = tf.placeholder(tf.int32, [None,])
        self._y_length = tf.placeholder(tf.int32, [])
        self.example_queue = tf.PaddingFIFOQueue(shapes=[[None, numcep + (2 * numcep * numcontext)], [], [None,], []],
                                                  dtypes=[tf.float32, tf.int32, tf.int32, tf.int32],
                                                  capacity=2 * self._get_device_count() * batch_size)
        self._enqueue_op = self.example_queue.enqueue([self._x, self._x_length, self._y, self._y_length])
        self._close_op = self.example_queue.close(cancel_pending_enqueues=True)
        self.batch_size = batch_size
        self._numcontext = numcontext
        self._thread_count = thread_count
        self._files_list = self._create_files_list(files_list)
        self._next_index = next_index
data_set_helpers_RHL_AVSR.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def __init__(self, files_list, thread_count, batch_size, numcep, numcontext, next_index=lambda x: x + 1):
        self._coord = None
        self._numcep = numcep
        self._x = tf.placeholder(tf.float32, [None, numcep + (2 * numcep * numcontext)])
        self._x_length = tf.placeholder(tf.int32, [])
        self._y = tf.placeholder(tf.int32, [None,])
        self._y_length = tf.placeholder(tf.int32, [])
        self.example_queue = tf.PaddingFIFOQueue(shapes=[[None, numcep + (2 * numcep * numcontext)], [], [None,], []],
                                                  dtypes=[tf.float32, tf.int32, tf.int32, tf.int32],
                                                  capacity=2 * self._get_device_count() * batch_size)
        self._enqueue_op = self.example_queue.enqueue([self._x, self._x_length, self._y, self._y_length])
        self._close_op = self.example_queue.close(cancel_pending_enqueues=True)
        self.batch_size = batch_size
        self._numcontext = numcontext
        self._thread_count = thread_count
        self._files_list = self._create_files_list(files_list)
        self._next_index = next_index
train-with-predictions.py (project: youtube-8m, author: wangheda)
def get_video_weights(video_id_batch):
  video_id_to_index = tf.contrib.lookup.string_to_index_table_from_file(
                          vocabulary_file=FLAGS.sample_vocab_file, default_value=0)
  indexes = video_id_to_index.lookup(video_id_batch)
  weights, length = get_video_weights_array()
  weights_input = tf.placeholder(tf.float32, shape=[length], name="sample_weights_input")
  weights_tensor = tf.get_variable("sample_weights",
                               shape=[length],
                               trainable=False,
                               dtype=tf.float32,
                               initializer=tf.constant_initializer(weights))
  weights_assignment = tf.assign(weights_tensor, weights_input)

  tf.add_to_collection("weights_input", weights_input)
  tf.add_to_collection("weights_assignment", weights_assignment)

  video_weight_batch = tf.nn.embedding_lookup(weights_tensor, indexes)
  return video_weight_batch
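
The placeholder/assign pair above exists so the non-trainable weights variable can be refreshed from Python after the graph is built. A stripped-down sketch of that update pattern (sizes and values are illustrative):

import numpy as np
import tensorflow as tf

length = 4
weights_input = tf.placeholder(tf.float32, shape=[length], name="sample_weights_input")
weights_tensor = tf.get_variable("sample_weights", shape=[length], trainable=False,
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.0))
weights_assignment = tf.assign(weights_tensor, weights_input)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(weights_assignment,
             feed_dict={weights_input: np.array([0.5, 1.0, 1.0, 2.0], np.float32)})
    print(sess.run(weights_tensor))  # the variable now holds the refreshed weights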
train.py (project: youtube-8m, author: wangheda)
def get_video_weights(video_id_batch):
  video_id_to_index = tf.contrib.lookup.string_to_index_table_from_file(
                          vocabulary_file=FLAGS.sample_vocab_file, default_value=0)
  indexes = video_id_to_index.lookup(video_id_batch)
  weights, length = get_video_weights_array()
  weights_input = tf.placeholder(tf.float32, shape=[length], name="sample_weights_input")
  weights_tensor = tf.get_variable("sample_weights",
                               shape=[length],
                               trainable=False,
                               dtype=tf.float32,
                               initializer=tf.constant_initializer(weights))
  weights_assignment = tf.assign(weights_tensor, weights_input)

  tf.add_to_collection("weights_input", weights_input)
  tf.add_to_collection("weights_assignment", weights_assignment)

  video_weight_batch = tf.nn.embedding_lookup(weights_tensor, indexes)
  return video_weight_batch
train-with-rebuild.py (project: youtube-8m, author: wangheda)
def get_video_weights(video_id_batch):
  video_id_to_index = tf.contrib.lookup.string_to_index_table_from_file(
                          vocabulary_file=FLAGS.sample_vocab_file, default_value=0)
  indexes = video_id_to_index.lookup(video_id_batch)
  weights, length = get_video_weights_array()
  weights_input = tf.placeholder(tf.float32, shape=[length], name="sample_weights_input")
  weights_tensor = tf.get_variable("sample_weights",
                               shape=[length],
                               trainable=False,
                               dtype=tf.float32,
                               initializer=tf.constant_initializer(weights))
  weights_assignment = tf.assign(weights_tensor, weights_input)

  tf.add_to_collection("weights_input", weights_input)
  tf.add_to_collection("weights_assignment", weights_assignment)

  video_weight_batch = tf.nn.embedding_lookup(weights_tensor, indexes)
  return video_weight_batch
train.py (project: youtube-8m, author: wangheda)
def get_video_weights(video_id_batch):
  video_id_to_index = tf.contrib.lookup.string_to_index_table_from_file(
                          vocabulary_file=FLAGS.sample_vocab_file, default_value=0)
  indexes = video_id_to_index.lookup(video_id_batch)
  weights, length = get_video_weights_array()
  weights_input = tf.placeholder(tf.float32, shape=[length], name="sample_weights_input")
  weights_tensor = tf.get_variable("sample_weights",
                               shape=[length],
                               trainable=False,
                               dtype=tf.float32,
                               initializer=tf.constant_initializer(weights))
  weights_assignment = tf.assign(weights_tensor, weights_input)

  tf.add_to_collection("weights_input", weights_input)
  tf.add_to_collection("weights_assignment", weights_assignment)

  video_weight_batch = tf.nn.embedding_lookup(weights_tensor, indexes)
  return video_weight_batch
model.py (project: human-rl, author: gsastry)
def __init__(self, ob_space, ac_space, layers=[256], **kwargs):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        rank = len(ob_space)

        if rank == 3: # pixel input
            for i in range(4):
                x = tf.nn.elu(conv2d(x, 32, "c{}".format(i + 1), [3, 3], [2, 2]))
        elif rank == 1: # plain features
            #x = tf.nn.elu(linear(x, 256, "l1", normalized_columns_initializer(0.01)))
            pass
        else:
            raise TypeError("observation space must have rank 1 or 3, got %d" % rank)

        x = flatten(x)

        for i, layer in enumerate(layers):
            x = tf.nn.elu(linear(x, layer, "l{}".format(i + 1), tf.contrib.layers.xavier_initializer()))

        self.logits = linear(x, ac_space, "action", tf.contrib.layers.xavier_initializer())
        self.vf = tf.reshape(linear(x, 1, "value", tf.contrib.layers.xavier_initializer()), [-1])
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
        self.state_in = []
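
The policy above feeds observation batches through self.x, shaped [None] + ob_space, and samples one action per row. A generic, self-contained sketch of that feed-and-sample pattern, where tf.layers.dense and tf.multinomial stand in for the project's linear() and categorical_sample() helpers:

import numpy as np
import tensorflow as tf

ob_space = (4,)  # illustrative rank-1 observation space
x = tf.placeholder(tf.float32, [None] + list(ob_space))
logits = tf.layers.dense(x, 3)                      # e.g. 3 discrete actions
sample = tf.squeeze(tf.multinomial(logits, 1), axis=1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    obs = np.random.rand(5, 4).astype(np.float32)
    print(sess.run(sample, {x: obs}))               # one sampled action per observation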
model.py (project: human-rl, author: gsastry)
def __init__(self, ob_space, ac_space, layers=[256], **kwargs):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        rank = len(ob_space)

        if rank == 3: # pixel input
            for i in range(4):
                x = tf.nn.elu(conv2d(x, 32, "c{}".format(i + 1), [3, 3], [2, 2]))
        elif rank == 1: # plain features
            #x = tf.nn.elu(linear(x, 256, "l1", normalized_columns_initializer(0.01)))
            pass
        else:
            raise TypeError("observation space must have rank 1 or 3, got %d" % rank)

        x = flatten(x)

        for i, layer in enumerate(layers):
            x = tf.nn.elu(linear(x, layer, "l{}".format(i + 1), tf.contrib.layers.xavier_initializer()))

        self.logits = linear(x, ac_space, "action", tf.contrib.layers.xavier_initializer())
        self.vf = tf.reshape(linear(x, 1, "value", tf.contrib.layers.xavier_initializer()), [-1])
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
        self.state_in = []
model.py (project: human-rl, author: gsastry)
def __init__(self, ob_space, ac_space, size=256, **kwargs):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        for i in range(4):
            x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
        # introduce a "fake" batch dimension of 1 after flatten so that we can do GRU over time dim
        x = tf.expand_dims(flatten(x), 1)

        gru = rnn.GRUCell(size)

        h_init = np.zeros((1, size), np.float32)
        self.state_init = [h_init]
        h_in = tf.placeholder(tf.float32, [1, size])
        self.state_in = [h_in]

        gru_outputs, gru_state = tf.nn.dynamic_rnn(
            gru, x, initial_state=h_in, sequence_length=[size], time_major=True)
        x = tf.reshape(gru_outputs, [-1, size])
        self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
        self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
        self.state_out = [gru_state[:1]]
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
tf_util.py (project: distributional_perspective_on_RL, author: Kiwoo)
def __init__(self, shape, name=None):
        """Takes input in uint8 format which is cast to float32 and divided by 255
        before passing it to the model.

        On GPU this ensures lower data transfer times.

        Parameters
        ----------
        shape: [int]
            shape of the tensor.
        name: str
            name of the underlying placeholder
        """

        super().__init__(tf.placeholder(tf.uint8, [None] + list(shape), name=name))
        self._shape = shape
        self._output = tf.cast(super().get(), tf.float32) / 255.0
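
The point of keeping the placeholder in uint8 is that frames cross the host-to-device boundary at one byte per pixel and are only cast and scaled on the graph side. A minimal sketch without the wrapper class (the 84x84x4 Atari-style shape is an assumption):

import numpy as np
import tensorflow as tf

frames_u8 = tf.placeholder(tf.uint8, [None, 84, 84, 4], name="frames")
frames_f32 = tf.cast(frames_u8, tf.float32) / 255.0   # cast and rescale on-device

with tf.Session() as sess:
    batch = np.zeros((2, 84, 84, 4), dtype=np.uint8)
    print(sess.run(frames_f32, {frames_u8: batch}).dtype)  # float32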
test_tf_qrnn_forward.py (project: tensorflow_qrnn, author: icoxfog417)
def test_qrnn_linear_forward(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_linear:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=1)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape)
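
create_test_data is not shown in this listing; judging from how its result is fed into the [batch, time, features] placeholder, a stand-in like the following would satisfy the test (a guess, not the project's helper):

import numpy as np

def create_test_data(batch_size, sentence_length, word_size):
    # Random float32 batch shaped to match the placeholder in the tests above.
    return np.random.rand(batch_size, sentence_length, word_size).astype(np.float32)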
test_tf_qrnn_forward.py (project: tensorflow_qrnn, author: icoxfog417)
def test_qrnn_with_previous(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_with_previous:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=2)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape)
test_tf_qrnn_forward.py (project: tensorflow_qrnn, author: icoxfog417)
def test_qrnn_convolution(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_conv:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=3)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape)
inference.py (project: dl-classification, author: matthieuo)
def __init__(self, check_):
        self.img_feed = tf.placeholder(tf.float32)

        self.output_logits = tf.nn.softmax(
            models.foodv_test(
                self.img_feed,
                reg_val=0.0,
                is_train=False,
                dropout_p=1.0))

        self.sess = tf.Session()

        self.checkpoint_name = check_

        saver = tf.train.Saver()
        print("loading model...")

        saver.restore(self.sess, self.checkpoint_name)

        print("Model loaded !")
test-embedding.py (project: deep-learning, author: ljanyst)
def __init__(self, embedding):
        self.sess         = tf.Session()
        self.inputs       = tf.placeholder(tf.float32,
                                           [None, embedding.shape[1]],
                                           name='inputs')
        self.test_vec     = tf.placeholder(tf.float32, [1, embedding.shape[1]],
                                           name='test_vec')
        self.cos_distance = tf.matmul(self.inputs, tf.transpose(self.test_vec))

        #-----------------------------------------------------------------------
        # Compute normalized embedding matrix
        #-----------------------------------------------------------------------
        row_sum    = tf.reduce_sum(tf.square(self.inputs), axis=1,
                                   keep_dims=True)
        norm       = tf.sqrt(row_sum)
        self.normalized = self.inputs / norm
        self.embedding = self.sess.run(self.normalized,
                                       feed_dict={self.inputs: embedding})

    #---------------------------------------------------------------------------
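
With the embedding rows normalized to unit length, the matmul against a single query row is exactly cosine similarity. A small self-contained sketch of that computation (3-dimensional toy vectors):

import numpy as np
import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 3], name='inputs')
test_vec = tf.placeholder(tf.float32, [1, 3], name='test_vec')
cos_distance = tf.matmul(inputs, tf.transpose(test_vec))

emb = np.array([[1., 0., 0.], [0., 1., 0.]], dtype=np.float32)
emb /= np.linalg.norm(emb, axis=1, keepdims=True)   # rows are already unit length here

with tf.Session() as sess:
    print(sess.run(cos_distance, {inputs: emb, test_vec: emb[:1]}))  # [[1.] [0.]]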
mnist_gan.py (project: deep-learning, author: ljanyst)
def __init__(self, rnd_vec_dim, hidden_units, output_dim, alpha):
        #-----------------------------------------------------------------------
        # Inputs
        #-----------------------------------------------------------------------
        self.inputs_rnd  = tf.placeholder(tf.float32, (None, rnd_vec_dim),
                                          name='inputs_rnd')

        #-----------------------------------------------------------------------
        # The generator
        #-----------------------------------------------------------------------
        self.alpha = alpha
        with tf.variable_scope('generator'):
            h1 = tf.layers.dense(self.inputs_rnd, hidden_units, activation=None)
            h1 = LeakyReLU(h1, self.alpha)

            self.gen_logits = tf.layers.dense(h1, output_dim, activation=None)
            self.gen_out    = tf.tanh(self.gen_logits)

    #---------------------------------------------------------------------------
dagger.py (project: Imitation-Learning-Dagger-Torcs, author: zsdonghao)
def __init__(self, name='model', sess=None):
        assert sess != None
        self.name = name
        self.sess = sess

        self.x = tf.placeholder(tf.float32, [None, img_dim[0], img_dim[1], img_dim[2]], name='Observaion')
        self.y = tf.placeholder(tf.float32, [None, n_action], name='Steer')

        self._build_net(True, False)
        self._build_net(False, True)
        self._define_train_ops()

        tl.layers.initialize_global_variables(self.sess)

        print()
        self.n_test.print_layers()
        print()
        self.n_test.print_params(False)
        print()
        # exit()
model.py (project: Renewables_Scenario_Gen_GAN, author: chennnnnyize)
def build_model(self):

        Z = tf.placeholder(tf.float32, [self.batch_size, self.dim_z])
        Y = tf.placeholder(tf.float32, [self.batch_size, self.dim_y])

        image_real = tf.placeholder(tf.float32, [self.batch_size]+self.image_shape)
        h4 = self.generate(Z,Y)
        #image_gen comes from sigmoid output of generator
        image_gen = tf.nn.sigmoid(h4)

        raw_real2 = self.discriminate(image_real, Y)
        #p_real = tf.nn.sigmoid(raw_real)
        p_real=tf.reduce_mean(raw_real2)

        raw_gen2 = self.discriminate(image_gen, Y)
        #p_gen = tf.nn.sigmoid(raw_gen)
        p_gen = tf.reduce_mean(raw_gen2)

        discrim_cost = tf.reduce_sum(raw_real2) - tf.reduce_sum(raw_gen2)
        gen_cost = -tf.reduce_mean(raw_gen2)

        return Z, Y, image_real, discrim_cost, gen_cost, p_real, p_gen
model.py (project: Renewables_Scenario_Gen_GAN, author: chennnnnyize)
def samples_generator(self, batch_size):
        Z = tf.placeholder(tf.float32, [batch_size, self.dim_z])
        Y = tf.placeholder(tf.float32, [batch_size, self.dim_y])

        yb = tf.reshape(Y, [batch_size, 1, 1, self.dim_y])
        Z_ = tf.concat([Z,Y], 1)
        h1 = tf.nn.relu(batchnormalize(tf.matmul(Z_, self.gen_W1)))
        h1 = tf.concat([h1, Y], 1)
        h2 = tf.nn.relu(batchnormalize(tf.matmul(h1, self.gen_W2)))
        h2 = tf.reshape(h2, [batch_size,6,6,self.dim_W2])
        h2 = tf.concat([h2, yb*tf.ones([batch_size, 6,6, self.dim_y])], 3)

        output_shape_l3 = [batch_size,12,12,self.dim_W3]
        h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3, strides=[1,2,2,1])
        h3 = tf.nn.relu( batchnormalize(h3) )
        h3 = tf.concat([h3, yb*tf.ones([batch_size, 12,12,self.dim_y])], 3)

        output_shape_l4 = [batch_size,24,24,self.dim_channel]
        h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4, strides=[1,2,2,1])
        x = tf.nn.sigmoid(h4)
        return Z, Y, x
unet.py (project: lung-cancer-detector, author: YichenGong)
def __init__(self, channels=3, n_class=2, cost="cross_entropy", cost_kwargs={}, **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)

        self.x = tf.placeholder("float", shape=[None, None, None, channels])
        self.y = tf.placeholder("float", shape=[None, None, None, n_class])
        self.keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)

        logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, **kwargs)

        self.cost = self._get_cost(logits, cost, cost_kwargs)

        self.gradients_node = tf.gradients(self.cost, self.variables)

        self.cross_entropy = tf.reduce_mean(cross_entropy(tf.reshape(self.y, [-1, n_class]),
                                                          tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))

        self.predicter = pixel_wise_softmax_2(logits)
        self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
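
keep_prob is a placeholder precisely so the same graph can run with dropout during training and without it at evaluation time. A minimal sketch of feeding it two different values (toy tensor, not the U-Net):

import tensorflow as tf

keep_prob = tf.placeholder(tf.float32)
x = tf.ones([4, 8])
dropped = tf.nn.dropout(x, keep_prob)

with tf.Session() as sess:
    train_out = sess.run(dropped, {keep_prob: 0.5})  # random units zeroed, rest scaled by 2
    eval_out = sess.run(dropped, {keep_prob: 1.0})   # pass-through at evaluation
    print(train_out.mean(), eval_out.mean())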
nn.py (project: snake, author: rhinech)
def __init__(self,
                 name,
                 size_input__layer,
                 size_hidden_layer,
                 size_output_layer,
                 l2_coeff,
                 keep_prob,
                 optimizer='SGD'):
        """Make new tensors and connect them."""

        self.size_input__layer = size_input__layer
        self.size_hidden_layer = size_hidden_layer
        self.size_output_layer = size_output_layer
        self.keep_prob = keep_prob
        self.input__placeholder = tf.placeholder(tf.float32, shape=(None, size_input__layer))
        self.answer_placeholder = tf.placeholder(tf.float32, shape=(None, size_output_layer))
        self.learning_rate = tf.placeholder(tf.float32)
        self.inference_proc, l2_proc = self.inference(self.input__placeholder, name)
        self.loss_proc = NN.loss(self.inference_proc, l2_proc, l2_coeff, self.answer_placeholder)
        self.training_proc = NN.training(self.loss_proc, self.learning_rate, optimizer)
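
Making learning_rate a placeholder lets the caller change the step size from Python on every training step, e.g. to apply a decay schedule. A minimal sketch of that pattern (the optimizer and schedule are illustrative):

import tensorflow as tf

learning_rate = tf.placeholder(tf.float32)   # scalar hyper-parameter fed per step
w = tf.get_variable("w", initializer=1.0)
loss = tf.square(w)
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for lr in [0.1, 0.1, 0.01]:              # e.g. a decayed schedule
        sess.run(train_op, feed_dict={learning_rate: lr})
    print(sess.run(w))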
train.py (project: DeepWorks, author: daigo0927)
def _build_graph(self, image_size):

        self.image_size = image_size
        self.images = tf.placeholder(tf.float32,
                                     shape = (None, image_size, image_size, 3))
        images_mini = tf.image.resize_images(self.images,
                                             size = (int(image_size/4),
                                                     int(image_size/4)))
        self.images_blur = tf.image.resize_images(images_mini,
                                                  size = (image_size, image_size))

        self.net = U_Net(output_ch = 3, block_fn = 'origin')
        self.images_reconst = self.net(self.images_blur, reuse = False)
        # self.image_reconst can be [-inf +inf], so need to clip its value if visualize them as images.
        self.loss = tf.reduce_mean((self.images_reconst - self.images)**2)
        self.opt = tf.train.AdamOptimizer()\
                           .minimize(self.loss, var_list = self.net.vars)

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())
BaseUnet.py (project: kaggle-review, author: daxiongshu)
def predictPL(self):
        B = self.flags.batch_size
        W,H,C = self.flags.width, self.flags.height, self.flags.color
        inputs = tf.placeholder(dtype=tf.float32,shape=[None,H,W,C])

        #with open(self.flags.pred_path,'w') as f:
        #    pass

        self._build(inputs,resize=False)
        counter = 0
        with tf.Session() as sess:
            self.sess = sess
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            for imgs,imgnames in self.DATA.test_generator():
                pred = sess.run(self.logit,feed_dict={inputs:imgs})
                np.save("%s/%d.npy"%(self.flags.pred_path,counter),{"pred":pred,"name":imgnames})
                counter+=len(imgs)
                if counter/B%10 ==0:
                    print_mem_time("%d images predicted"%counter)

    # train with placeholders

