Python placeholder() usage examples (source code)

nn.py (project: kaggle-review, author: daxiongshu)
def _build(self):
        V = self.V
        M = self.flags.embedding_size # 64
        H = self.flags.num_units 
        C = self.flags.classes
        D = self.flags.d2v_size # embedding for d2v

        netname = "D2V"
        with tf.variable_scope(netname):
            self.inputs = tf.placeholder(dtype=tf.int32,shape=[None]) #[B]
            layer_name = "{}/embedding".format(netname)
            x = self._get_embedding(layer_name, self.inputs, V, D, reuse=False) # [B, D]

        netname = "NN"
        cell_name = self.flags.cell
        H1,H2 = 32,16
        with tf.variable_scope(netname):
            net = self._fc(x, fan_in=D, fan_out=H1, layer_name="%s/fc1"%netname, activation='relu')
            net = self._dropout(net)
            net = self._fc(net, fan_in=H1, fan_out=H2, layer_name="%s/fc2"%netname, activation='relu')
            net = self._dropout(net)
            net = self._fc(net, fan_in=H2, fan_out=C, layer_name="%s/fc3"%netname, activation=None)
            self.logit = net
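
For reference, a minimal sketch of what the embedding lookup above amounts to, assuming `_get_embedding` wraps `tf.get_variable` plus `tf.nn.embedding_lookup` (an assumption, since its implementation is not shown here); the sizes are made-up stand-ins for `self.V` and `self.flags.d2v_size`:

import numpy as np
import tensorflow as tf

V, D = 1000, 16                                         # stand-ins for vocabulary size and d2v size
inputs = tf.placeholder(dtype=tf.int32, shape=[None])   # [B], same contract as self.inputs
embedding = tf.get_variable("doc_embedding", shape=[V, D])
x = tf.nn.embedding_lookup(embedding, inputs)           # [B, D]
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(x, feed_dict={inputs: np.array([0, 7, 42])}).shape)  # (3, 16)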
embedding.py (project: kaggle-review, author: daxiongshu)
def _build(self):
        netname = "CBOW"
        W = self.flags.window_size
        M = self.flags.embedding_size
        V = self.V # vocabulary size, should be passed from DB
        H = 128

        # the real window is W*2 + 1
        with tf.variable_scope(netname):
            self.inputs = tf.placeholder(tf.int32, shape=(None,W*2+1)) # [B, W*2+1]

            layer_name = "{}/embedding".format(netname)
            x = self._get_embedding(layer_name, self.inputs, V, M, reuse=False) # [B, W*2+1, M]
            x = tf.reshape(x,[tf.shape(x)[0],tf.shape(x)[1]*tf.shape(x)[2]]) # [B,(W*2+1)*M]

            layer_name = "{}/fc1".format(netname)
            net = self._fc(x, fan_in=M*(W*2+1), fan_out=H, layer_name=layer_name, 
                    activation='relu') # [B, H]

            layer_name = "{}/fc2".format(netname)
            net = self._fc(net, fan_in=H, fan_out=2, layer_name=layer_name, 
                    activation=None) # [B, 2]

            self.logit = net
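
A hedged sketch of the input contract for this CBOW placeholder: each row carries the indices of the W*2+1 tokens in one window, and a batch of such windows is fed at run time (the sizes below are illustrative assumptions):

import numpy as np
import tensorflow as tf

W, V = 2, 1000                                                       # assumed window_size and vocabulary size
window_inputs = tf.placeholder(tf.int32, shape=(None, W * 2 + 1))    # [B, W*2+1]
batch = np.random.randint(0, V, size=(8, W * 2 + 1))                 # 8 windows of token ids
with tf.Session() as sess:
    print(sess.run(tf.shape(window_inputs), feed_dict={window_inputs: batch}))  # [8 5]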
train.py (project: comprehend, author: Fenugreek)
def rnn_extend(sess, coder, inputs, skips=None, length=1):
    """
    inputs is batch_size x n_consecutive_seqs x n_visible x seq_length.
    """

    shape = inputs.shape
    n_seqs =  shape[1]
    batch = tf.placeholder(coder.dtype, name='batch_seq',
                           shape=(shape[0], shape[2], shape[3]))

    coder.reset_state()
    output = coder.recode(batch, store=True, skips=skips)

    outputs = []
    for index in range(n_seqs):
        batch_seq = inputs[:, index, :, :]
        o, s = sess.run([output, coder.get_state()], feed_dict={batch: batch_seq})
        outputs.append(o)


    outputs.append(coder.predict_sequence(None,
                                          s['hidden'],
                                          length=length*shape[3]).eval())

    return np.array(outputs)
networks.py (project: comprehend, author: Fenugreek)
def __init__(self, sigma=0.1, beta_sampling=True, **kwargs):
        """
        sigma:
        Standard deviation of input data, for use in sampling.

        beta_sampling:
        Use beta distribution for sampling, instead of Gaussian.
        """

        RBM.__init__(self, **kwargs)
        if not kwargs.get('fromfile'):
            self.sigma = sigma
            self.beta_sampling = beta_sampling
        if self.sigma is None: raise AssertionError('Need to supply sigma param.')

        self.hidden = tf.placeholder(self.dtype, name='hidden',
                                     shape=[None, self.n_hidden])
        self.mean_v = tf.sigmoid(tf.matmul(self.hidden, self.params['W'],
                                           transpose_b=True) +
                                 self.params['bvis'])
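
A minimal, self-contained sketch of the visible-mean computation above, with random stand-ins for `self.params['W']` and `self.params['bvis']` (only the shapes matter here; the real values are learned model parameters):

import numpy as np
import tensorflow as tf

n_visible, n_hidden = 6, 4                               # assumed sizes
hidden = tf.placeholder(tf.float32, shape=[None, n_hidden], name='hidden_demo')
W = tf.constant(np.random.randn(n_visible, n_hidden).astype(np.float32))
bvis = tf.constant(np.zeros(n_visible, dtype=np.float32))
mean_v = tf.sigmoid(tf.matmul(hidden, W, transpose_b=True) + bvis)
with tf.Session() as sess:
    print(sess.run(mean_v, feed_dict={hidden: np.random.rand(2, n_hidden)}).shape)  # (2, 6)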
a2_encoder.py (project: text_classification, author: brightmart)
def init():
    #1. assign value to fields
    vocab_size=1000
    d_model = 512
    d_k = 64
    d_v = 64
    sequence_length = 5*10
    h = 8
    batch_size=4*32
    initializer = tf.random_normal_initializer(stddev=0.1)
    # 2.set values for Q,K,V
    vocab_size=1000
    embed_size=d_model
    Embedding = tf.get_variable("Embedding_E", shape=[vocab_size, embed_size],initializer=initializer)
    input_x = tf.placeholder(tf.int32, [batch_size,sequence_length], name="input_x") # [128, 50]
    print("input_x:",input_x)
    embedded_words = tf.nn.embedding_lookup(Embedding, input_x) # [batch_size, sequence_length, embed_size]
    Q = embedded_words  # [batch_size, sequence_length, embed_size]
    K_s = embedded_words  # [batch_size, sequence_length, embed_size]
    num_layer=6
    mask = get_mask(batch_size, sequence_length)
    #3. get class object
    encoder_class=Encoder(d_model,d_k,d_v,sequence_length,h,batch_size,num_layer,Q,K_s,mask=mask) #Q,K_s,embedded_words
    return encoder_class,Q,K_s
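
Note the actual shapes here: with batch_size=128 and sequence_length=50, `input_x` is [128, 50] and the lookup yields [128, 50, 512]. A short shape-check sketch (variable names chosen so they do not clash with the graph built above):

import tensorflow as tf

demo_E = tf.get_variable("demo_Embedding", shape=[1000, 512],
                         initializer=tf.random_normal_initializer(stddev=0.1))
demo_ids = tf.placeholder(tf.int32, [128, 50], name="demo_input_x")
demo_words = tf.nn.embedding_lookup(demo_E, demo_ids)
print(demo_words.shape)  # (128, 50, 512)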
critic_net.py (project: ddpg-aigym, author: stevenpjg)
def create_critic_net(self, num_states=4, num_actions=1):
        N_HIDDEN_1 = 400
        N_HIDDEN_2 = 300
        critic_state_in = tf.placeholder("float",[None,num_states])
        critic_action_in = tf.placeholder("float",[None,num_actions])    

        W1_c = tf.Variable(tf.random_uniform([num_states,N_HIDDEN_1],-1/math.sqrt(num_states),1/math.sqrt(num_states)))
        B1_c = tf.Variable(tf.random_uniform([N_HIDDEN_1],-1/math.sqrt(num_states),1/math.sqrt(num_states)))
        W2_c = tf.Variable(tf.random_uniform([N_HIDDEN_1,N_HIDDEN_2],-1/math.sqrt(N_HIDDEN_1+num_actions),1/math.sqrt(N_HIDDEN_1+num_actions)))    
        W2_action_c = tf.Variable(tf.random_uniform([num_actions,N_HIDDEN_2],-1/math.sqrt(N_HIDDEN_1+num_actions),1/math.sqrt(N_HIDDEN_1+num_actions)))    
        B2_c= tf.Variable(tf.random_uniform([N_HIDDEN_2],-1/math.sqrt(N_HIDDEN_1+num_actions),1/math.sqrt(N_HIDDEN_1+num_actions))) 
        W3_c= tf.Variable(tf.random_uniform([N_HIDDEN_2,1],-0.003,0.003))
        B3_c= tf.Variable(tf.random_uniform([1],-0.003,0.003))

        H1_c=tf.nn.softplus(tf.matmul(critic_state_in,W1_c)+B1_c)
        H2_c=tf.nn.tanh(tf.matmul(H1_c,W2_c)+tf.matmul(critic_action_in,W2_action_c)+B2_c)

        critic_q_model=tf.matmul(H2_c,W3_c)+B3_c


        return W1_c, B1_c, W2_c, W2_action_c, B2_c, W3_c, B3_c, critic_q_model, critic_state_in, critic_action_in
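
A hedged usage sketch: `critic` below is a hypothetical instance of the class defining this method, the random arrays stand in for real state-action batches, and the `numpy`/`tensorflow` imports (plus `math` used by the method itself) are assumed:

import numpy as np
import tensorflow as tf

states = np.random.rand(8, 4).astype(np.float32)    # batch of 8 states, num_states=4
actions = np.random.rand(8, 1).astype(np.float32)   # matching actions, num_actions=1
(W1_c, B1_c, W2_c, W2_action_c, B2_c, W3_c, B3_c,
 q_model, state_in, action_in) = critic.create_critic_net(num_states=4, num_actions=1)  # critic: hypothetical instance
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    q = sess.run(q_model, feed_dict={state_in: states, action_in: actions})  # shape (8, 1)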
actor_net.py (project: ddpg-aigym, author: stevenpjg)
def create_actor_net(self, num_states=4, num_actions=1):
        """ Network that takes states and return action """
        N_HIDDEN_1 = 400
        N_HIDDEN_2 = 300
        actor_state_in = tf.placeholder("float",[None,num_states])    
        W1_a=tf.Variable(tf.random_uniform([num_states,N_HIDDEN_1],-1/math.sqrt(num_states),1/math.sqrt(num_states)))
        B1_a=tf.Variable(tf.random_uniform([N_HIDDEN_1],-1/math.sqrt(num_states),1/math.sqrt(num_states)))
        W2_a=tf.Variable(tf.random_uniform([N_HIDDEN_1,N_HIDDEN_2],-1/math.sqrt(N_HIDDEN_1),1/math.sqrt(N_HIDDEN_1)))
        B2_a=tf.Variable(tf.random_uniform([N_HIDDEN_2],-1/math.sqrt(N_HIDDEN_1),1/math.sqrt(N_HIDDEN_1)))
        W3_a=tf.Variable(tf.random_uniform([N_HIDDEN_2,num_actions],-0.003,0.003))
        B3_a=tf.Variable(tf.random_uniform([num_actions],-0.003,0.003))

        H1_a=tf.nn.softplus(tf.matmul(actor_state_in,W1_a)+B1_a)
        H2_a=tf.nn.tanh(tf.matmul(H1_a,W2_a)+B2_a)
        actor_model=tf.matmul(H2_a,W3_a) + B3_a
        return W1_a, B1_a, W2_a, B2_a, W3_a, B3_a, actor_state_in, actor_model
train_c3d_ucf101.py (project: C3D-tensorflow, author: hx173149)
def placeholder_inputs(batch_size):
  """Generate placeholder variables to represent the input tensors.

  These placeholders are used as inputs by the rest of the model building
  code and will be fed from the downloaded data in the .run() loop, below.

  Args:
    batch_size: The batch size will be baked into both placeholders.

  Returns:
    images_placeholder: Images placeholder.
    labels_placeholder: Labels placeholder.
  """
  # Note that the shapes of the placeholders match the shapes of the full
  # image and label tensors, except the first dimension is now batch_size
  # rather than the full size of the train or test data sets.
  images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
                                                         c3d_model.NUM_FRAMES_PER_CLIP,
                                                         c3d_model.CROP_SIZE,
                                                         c3d_model.CROP_SIZE,
                                                         c3d_model.CHANNELS))
  labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size,))
  return images_placeholder, labels_placeholder
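
A hedged sketch of feeding these placeholders; the numeric constants below stand in for `c3d_model.NUM_FRAMES_PER_CLIP`, `CROP_SIZE`, and `CHANNELS` (16, 112, and 3 in the usual C3D setup, which is an assumption here), and the zero arrays are dummy data:

import numpy as np
import tensorflow as tf

frames, crop, channels, batch = 16, 112, 3, 4          # assumed c3d_model constants
images_ph = tf.placeholder(tf.float32, shape=(batch, frames, crop, crop, channels))
labels_ph = tf.placeholder(tf.int64, shape=(batch,))
clips = np.zeros((batch, frames, crop, crop, channels), dtype=np.float32)  # dummy video clips
labels = np.zeros((batch,), dtype=np.int64)                                # dummy labels
with tf.Session() as sess:
    imgs, lbls = sess.run([images_ph, labels_ph],
                          feed_dict={images_ph: clips, labels_ph: labels})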
predict_c3d_ucf101.py (project: C3D-tensorflow, author: hx173149)
def placeholder_inputs(batch_size):
  """Generate placeholder variables to represent the input tensors.
  These placeholders are used as inputs by the rest of the model building
  code and will be fed from the downloaded data in the .run() loop, below.
  Args:
    batch_size: The batch size will be baked into both placeholders.
  Returns:
    images_placeholder: Images placeholder.
    labels_placeholder: Labels placeholder.
  """
  # Note that the shapes of the placeholders match the shapes of the full
  # image and label tensors, except the first dimension is now batch_size
  # rather than the full size of the train or test data sets.
  images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
                                                         c3d_model.NUM_FRAMES_PER_CLIP,
                                                         c3d_model.CROP_SIZE,
                                                         c3d_model.CROP_SIZE,
                                                         c3d_model.CHANNELS))
  labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size,))
  return images_placeholder, labels_placeholder
test_feature.py (project: tf-image-interpreter, author: ThoughtWorksInc)
def test_vgg():
  vgg = Vgg16()
  image_tensor = tf.placeholder(tf.float32)
  with tf.Session() as sess:
    vgg.build(image_tensor)
    init = tf.initialize_all_variables()
    sess.run(init)

    load_feature_layer_params('/Users/dtong/code/data/tf-image-interpreter/pretrain/vgg16_weights.npz', sess)

    for v in tf.get_collection(tf.GraphKeys.VARIABLES):
      print_op = tf.Print(v, [v], message=v.name, first_n=10)
      sess.run(print_op)

    roidb = RoiDb('val.txt', 2007)
    batch_gen = BatchGenerator(roidb)

    for i in range(10):
      image, scale, bboxes = batch_gen.next_batch()

      print(sess.run(vgg.conv5_3, feed_dict={image_tensor: image}))
test_minibatch.py (project: tf-image-interpreter, author: ThoughtWorksInc)
def main():
  roidb = RoiDb('val.txt', 2007)
  batch_gen = BatchGenerator(roidb)

  image_tensor = tf.placeholder(dtype=tf.float32)
  scale_tensor = tf.placeholder(dtype=tf.float32)
  bboxes_tensor = tf.placeholder(dtype=tf.float32)
  p_op = tf.Print(image_tensor, [tf.shape(image_tensor), scale_tensor, bboxes_tensor])

  sess = tf.Session()
  init = tf.initialize_all_variables()
  sess.run(init)

  coord = tf.train.Coordinator()
  queue_threads = queue_runner.start_queue_runners(sess, coord=coord)

  for i in range(10):
    if coord.should_stop():
      break
    image, scale, bboxes = batch_gen.next_batch()

    sess.run([p_op], feed_dict={image_tensor: image, scale_tensor: scale, bboxes_tensor:bboxes})

  coord.request_stop()
  coord.join(queue_threads)
test_rpn.py (project: tf-image-interpreter, author: ThoughtWorksInc)
def test_rpn():
  vgg = Vgg16()
  rpn = RpnNet()
  image_tensor = tf.placeholder(tf.float32)
  with tf.Session() as sess:
    vgg.build(image_tensor)
    rpn.build(vgg.conv5_3, None)
    init = tf.initialize_all_variables()
    sess.run(init)

    load_feature_layer_params('/Users/dtong/code/data/tf-image-interpreter/pretrain/vgg16_weights.npz', sess)

    roidb = RoiDb('val.txt', 2007)
    batch_gen = BatchGenerator(roidb)

    for i in range(10):
      image, scale, bboxes = batch_gen.next_batch()
      feature_shape = tf.shape(rpn.rpn_cls_score_reshape)
      print_feat_shape = tf.Print(feature_shape, [feature_shape], summarize=5)
      sess.run(print_feat_shape, feed_dict={image_tensor: image})

      # print(sess.run(vgg.conv5_3, feed_dict={image_tensor: image}))
dataset_utils.py (project: X-ray-classification, author: bendidi)
def __init__(self):
    # Initializes function that decodes RGB png data.
    self._decode_png_data = tf.placeholder(dtype=tf.string)
    self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)
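
A minimal usage sketch of this decode pattern, assuming a PNG file on disk ('image.png' is a stand-in path): the raw bytes are fed through the string placeholder and the decode op returns an HxWx3 uint8 array:

import tensorflow as tf

decode_png_data = tf.placeholder(dtype=tf.string)
decode_png = tf.image.decode_png(decode_png_data, channels=3)
with tf.Session() as sess:
    with tf.gfile.GFile('image.png', 'rb') as f:   # stand-in path
        rgb = sess.run(decode_png, feed_dict={decode_png_data: f.read()})
print(rgb.shape)  # (height, width, 3)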
TensorFlowInterface.py (project: IntroToDeepLearning, author: robb-brown)
def initialize(self):
        with tf.variable_scope(self.name):
            self.keepProb = tf.placeholder('float')         # Placeholder for the dropout keep probability
gen_embeddings.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def make_skipgram_softmax_loss(embeddings_matrix, vocabulary_size, vector_size):
    vectors = tf.get_variable('vectors', (vocabulary_size, vector_size), dtype=tf.float32, initializer=tf.constant_initializer(embeddings_matrix))
    minibatch = tf.placeholder(shape=(None, 2), dtype=tf.int32)

    center_word_vector = tf.nn.embedding_lookup(vectors, minibatch[:,0])
    yhat = tf.matmul(center_word_vector, vectors, transpose_b=True)

    predict_word = minibatch[:,1]
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=predict_word, logits=yhat)
    loss = tf.reduce_mean(loss)
    return vectors, minibatch, loss
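
A hedged usage sketch of this helper: feed (center, context) index pairs through the returned `minibatch` placeholder. The random embedding matrix and the sizes here are illustrative assumptions, and the `numpy`/`tensorflow` imports of the module are assumed:

import numpy as np
import tensorflow as tf

vocab, dim = 100, 8                                        # assumed vocabulary and vector sizes
emb = np.random.randn(vocab, dim).astype(np.float32)       # stand-in pretrained embeddings
vectors, minibatch, loss = make_skipgram_softmax_loss(emb, vocab, dim)
pairs = np.random.randint(0, vocab, size=(32, 2))          # (center, context) index pairs
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(loss, feed_dict={minibatch: pairs}))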
threepart_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def add_output_placeholders(self):
        self.top_placeholder = tf.placeholder(tf.int32, shape=(None,))
        self.special_label_placeholder = tf.placeholder(tf.int32, shape=(None, MAX_SPECIAL_LENGTH))
        self.part_function_placeholders = dict()
        self.part_sequence_placeholders = dict()
        self.part_sequence_length_placeholders = dict()
        for part in ('trigger', 'query', 'action'):
            self.part_function_placeholders[part] = tf.placeholder(tf.int32, shape=(None,))
            self.part_sequence_placeholders[part] = tf.placeholder(tf.int32, shape=(None, MAX_PRIMITIVE_LENGTH))
            self.part_sequence_length_placeholders[part] = tf.placeholder(tf.int32, shape=(None,))
base_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def add_input_placeholders(self):
        self.input_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.max_length))
        self.input_length_placeholder = tf.placeholder(tf.int32, shape=(None,))
        self.constituency_parse_placeholder = tf.placeholder(tf.bool, shape=(None, 2*self.config.max_length-1))
base_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def add_output_placeholders(self):
        self.output_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.max_length))
        self.output_length_placeholder = tf.placeholder(tf.int32, shape=(None,))
base_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def add_extra_placeholders(self):
        self.batch_number_placeholder = tf.placeholder(tf.int32, shape=())
        self.dropout_placeholder = tf.placeholder(tf.float32, shape=())
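
Scalar placeholders like these (shape=()) are fed with plain Python numbers. A minimal sketch, with names chosen only for illustration:

import tensorflow as tf

batch_number = tf.placeholder(tf.int32, shape=())
dropout = tf.placeholder(tf.float32, shape=())
scaled = tf.cast(batch_number, tf.float32) * dropout
with tf.Session() as sess:
    print(sess.run(scaled, feed_dict={batch_number: 3, dropout: 0.5}))  # 1.5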

