Python code examples of tf.uniform_unit_scaling_initializer()
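
All snippets below are excerpts from TensorFlow 1.x projects and assume "import tensorflow as tf"; this initializer was deprecated during the 1.x series and removed in TensorFlow 2.x in favor of variance-scaling initializers. As a minimal sketch of what it computes (the variable name and shapes are illustrative, not taken from any project below): it samples from U(-a, a) with a = factor * sqrt(3 / input_dim), which gives a linear layer roughly unit output variance.

import tensorflow as tf

init = tf.uniform_unit_scaling_initializer(factor=1.0)
w = tf.get_variable("w_demo", shape=[256, 10], initializer=init)  # input_dim = 256

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Var(U(-a, a)) = a^2 / 3 = factor^2 / input_dim,
    # so the empirical std should be close to sqrt(1/256) = 0.0625.
    print(sess.run(w).std())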

net.py (project: l3, author: jacobandreas)
def _linear(t_in, n_out):
    v_w = tf.get_variable(
            "w",
            shape=(t_in.get_shape()[-1], n_out),
            initializer=tf.uniform_unit_scaling_initializer(
                factor=INIT_SCALE))
    v_b = tf.get_variable(
            "b",
            shape=n_out,
            initializer=tf.constant_initializer(0))
    if len(t_in.get_shape()) == 2:
        return tf.einsum("ij,jk->ik", t_in, v_w) + v_b
    elif len(t_in.get_shape()) == 3:
        return tf.einsum("ijk,kl->ijl", t_in, v_w) + v_b
    else:
        raise ValueError("_linear expects a rank-2 or rank-3 input")
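A hedged usage sketch (the shapes and INIT_SCALE value are illustrative, not from the l3 repo): the rank-3 branch projects the last axis and broadcasts the bias.

INIT_SCALE = 1.0
t = tf.placeholder(tf.float32, [16, 7, 32])  # batch x time x features
with tf.variable_scope("proj"):
    out = _linear(t, 64)  # rank-3 branch: einsum("ijk,kl->ijl") -> [16, 7, 64]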
model_cifar.py (project: deep_separation_contraction, author: edouardoyallon)
def net(x, n_layer_per_block, n_classes, phase_train, alpha, number_channel, scope='deep_net'):
  with tf.variable_scope(scope):
    n1 = n2 = n3 = n4 = number_channel

    y = conv2d(x, 3, n1, 3, 1, 'SAME', False, phase_train, scope='conv_init')
    y = batch_norm(y, n1, phase_train, scope='bn_init')
    y = tf.nn.relu(y, name='relu_init')

    y = group(y, n1, n2, n_layer_per_block, False, alpha, phase_train, scope='group_1')
    y = group(y, n2, n3, n_layer_per_block, True, alpha, phase_train, scope='group_2')
    y = group(y, n3, n4, n_layer_per_block, True, alpha, phase_train, scope='group_3')

    y = tf.nn.avg_pool(y, [1, 8, 8, 1], [1, 1, 1, 1], 'VALID', name='avg_pool')
    y = tf.squeeze(y, axis=[1, 2])

    w = tf.get_variable('DW', [n4, n_classes],
                        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    tf.add_to_collection('weights', w)
    bias = tf.get_variable('bias', [n_classes], initializer=tf.constant_initializer(0.0))
    y = tf.nn.xw_plus_b(y, w, bias)
  return y
model_cifar_contract.py (project: deep_separation_contraction, author: edouardoyallon)
def net(x, n_layer_per_block, n_classes, phase_train, number_channel, scope='deep_net'):
  with tf.variable_scope(scope):
    n1 = n2 = n3 = n4 = number_channel

    y = conv2d(x, 3, n1, 3, 1, 'SAME', False, phase_train, scope='conv_init')
    y = batch_norm(y, n1, phase_train, scope='bn_init')
    y = tf.nn.relu(y, name='relu_init')

    y = group(y, n1, n2, n_layer_per_block, False, phase_train, scope='group_1')
    y = group(y, n2, n3, n_layer_per_block, True, phase_train, scope='group_2')
    y = group(y, n3, n4, n_layer_per_block, True, phase_train, scope='group_3')

    y = tf.nn.avg_pool(y, [1, 8, 8, 1], [1, 1, 1, 1], 'VALID', name='avg_pool')
    y = tf.squeeze(y, axis=[1, 2])

    w = tf.get_variable('DW', [n4, n_classes],
                        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    tf.add_to_collection('weights', w)
    bias = tf.get_variable('bias', [n_classes], initializer=tf.constant_initializer(0.0))
    y = tf.nn.xw_plus_b(y, w, bias)
  return y
RestNet.py (project: ResNet-deeplabV3, author: Harvey1973)
def output_layer(input_layer, num_labels):
    '''
    :param input_layer: flattened 2D tensor
    :param num_labels: number of classes
    :return: the output of the FC layer, y = Wx + b
    '''
    input_dim = input_layer.get_shape().as_list()[-1]
    fc_w = create_variables(name='fc_weight', shape=[input_dim, num_labels], is_fc_layer=True,
                            initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    fc_b = create_variables(name='fc_bias', shape=[num_labels], is_fc_layer=False,
                            initializer=tf.zeros_initializer())
    output = tf.matmul(input_layer, fc_w) + fc_b
    return output
models.py (project: CausalGAN, author: mkocaoglu)
def linear(input_, output_dim, scope=None, stddev=.7):
    unif = tf.uniform_unit_scaling_initializer()
    norm = tf.random_normal_initializer(stddev=stddev)
    const = tf.constant_initializer(0.0)
    with tf.variable_scope(scope or 'linear'):
        #w = tf.get_variable('w', [input_.get_shape()[1], output_dim], initializer=unif)
        w = tf.get_variable('w', [input_.get_shape()[1], output_dim], initializer=norm)
        b = tf.get_variable('b', [output_dim], initializer=const)
        return tf.matmul(input_, w) + b
neural.py (project: attend_infer_repeat, author: akosiorek)
def activation_based_init(nonlinearity):
    """Returns initialiaation based on a nonlinearlity"""

    init = tf.uniform_unit_scaling_initializer()
    if nonlinearity == tf.nn.relu:
        init = tf.contrib.layers.xavier_initializer()
    elif nonlinearity == tf.nn.elu:
        init = tf.contrib.layers.variance_scaling_initializer()
    elif nonlinearity == selu:
        init = tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_IN')

    return init
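
A hypothetical call (the variable name is illustrative; selu is assumed to be defined elsewhere in the module):

init = activation_based_init(tf.nn.relu)  # -> tf.contrib.layers.xavier_initializer()
w = tf.get_variable("w_hidden", [128, 64], initializer=init)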
pose_model.py (project: Face-Pose-Net, author: fengju514)
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
pose_model.py (project: Face-Pose-Net, author: fengju514)
def _fully_connected_ST(self, x, out_dim):
    """FullyConnected layer for the final output of the localization network in the spatial transformer."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW2', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    # Start the localization network at the identity transform: the bias is the
    # flattened 2x3 identity affine matrix (so out_dim is expected to be 6).
    initial = np.array([[1., 0, 0], [0, 1., 0]], dtype='float32').flatten()
    b = tf.get_variable('biases2', [out_dim],
                        initializer=tf.constant_initializer(initial))
    return tf.nn.xw_plus_b(x, w, b)
model_utils.py (project: lm, author: rafaljozefowicz)
def sharded_variable(name, shape, num_shards, dtype=tf.float32, transposed=False):
    # The final size of the sharded variable may be larger than requested.
    # This should be fine for embeddings.
    shard_size = int((shape[0] + num_shards - 1) / num_shards)
    if transposed:
        # full_shape tells the initializer the logical (unsharded) shape to
        # scale by; this keyword existed only in pre-1.0 TensorFlow.
        initializer = tf.uniform_unit_scaling_initializer(dtype=dtype, full_shape=[shape[1], shape[0]])
    else:
        initializer = tf.uniform_unit_scaling_initializer(dtype=dtype, full_shape=shape)
    return [tf.get_variable(name + "_%d" % i, [shard_size, shape[1]], initializer=initializer, dtype=dtype)
            for i in range(num_shards)]


# XXX(rafal): Code below copied from rnn_cell.py
iclr_2017_benchmark.py (project: fold, author: tensorflow)
def tree_fc(self, left, right):
    # A simple tree RNN with a single fully connected layer.
    if self._weights is None:
      with tf.variable_scope(self._vscope):
        self._weights = tf.get_variable(
            "weights", [FLAGS.vector_size*2, FLAGS.vector_size],
            # factor 1.43 is the value the old initializer docs suggested for ReLU
            initializer=tf.uniform_unit_scaling_initializer(1.43))
        self._bias = tf.get_variable("bias", [FLAGS.vector_size],
                                     initializer=tf.zeros_initializer())
    x = tf.concat([left, right], 1)
    result = tf.add(tf.matmul(x, self._weights), self._bias)
    return tf.nn.relu(result)
iclr_2017_benchmark.py (project: fold, author: tensorflow)
def tree_lstm(self, left, right):
    # A variation on the tree LSTM -- we add an extra hidden layer.
    if self._weights is None:
      with tf.variable_scope(self._vscope):
        self._weights_0 = tf.get_variable(
            "weights_0", [FLAGS.vector_size*2, FLAGS.vector_size],
            initializer=tf.uniform_unit_scaling_initializer(1.43))
        self._bias_0 = tf.get_variable("bias_0", [FLAGS.vector_size],
                                       initializer=tf.zeros_initializer())
        self._weights = tf.get_variable(
            "weights", [FLAGS.vector_size, FLAGS.vector_size*4],
            initializer=tf.uniform_unit_scaling_initializer(1.0))
        self._bias = tf.get_variable("bias", [FLAGS.vector_size*4],
                                     initializer=tf.zeros_initializer())
    # One hidden layer
    x = tf.concat([left, right], 1)
    h0 = tf.nn.relu(tf.add(tf.matmul(x, self._weights_0), self._bias_0))

    # Do a single matrix multiply to compute all gates
    h1 = tf.add(tf.matmul(h0, self._weights), self._bias)
    (hfl, hfr, hi, hg) = tf.split(h1, 4, axis=1)

    fl = tf.nn.sigmoid(hfl)  # forget left
    fr = tf.nn.sigmoid(hfr)  # forget right
    i = tf.nn.sigmoid(hi)    # input gate
    g = tf.nn.tanh(hg)       # computation

    ylr = tf.add(tf.multiply(fl, left), tf.multiply(fr, right))
    ygi = tf.multiply(i, g)
    y = tf.add(ylr, ygi)

    return y
model.py (project: the-wavenet-pianist, author: 821760408-sp)
def _create_conv_layer(filter_width, in_channels, out_channels):
        kernel_shape = [filter_width,
                        in_channels,
                        out_channels]
        biases_shape = [out_channels]
        return {
            'weights': tf.get_variable(
                'weights',
                kernel_shape,
                initializer=tf.uniform_unit_scaling_initializer(1.0)),
            'biases': tf.get_variable(
                'biases',
                biases_shape,
                initializer=tf.constant_initializer(0.0))
        }
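
A hypothetical use of the returned dict (the placeholder and scope names are illustrative): the kernel shape [filter_width, in_channels, out_channels] is exactly what tf.nn.conv1d expects for its filters argument.

with tf.variable_scope("conv_demo"):
    layer = _create_conv_layer(2, 32, 64)  # filter_width=2
audio = tf.placeholder(tf.float32, [None, 1000, 32])  # batch x time x channels
y = tf.nn.conv1d(audio, layer['weights'], stride=1, padding='SAME') + layer['biases']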
resnet_model.py (project: deep_learning_study, author: jowettcz)
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
tagger.py (project: convseg, author: chqiwang)
def build_graph(self):
        parameters = self.parameters
        with tf.variable_scope(name_or_scope=self.scope, initializer=tf.uniform_unit_scaling_initializer()):
            seq_ids_pl, seq_other_ids_pls, inputs = self.build_input_graph(vocab_size=parameters['vocab_size'],
                                                                           emb_size=parameters['emb_size'],
                                                                           word_window_size=parameters['word_window_size'],
                                                                           word_vocab_size=parameters['word_vocab_size'],
                                                                           word_emb_size=parameters['word_emb_size'])
            stag_ids_pl, seq_lengths_pl, is_train_pl, cost_op, train_cost_op, scores_op, summary_op = \
                self.build_tagging_graph(inputs=inputs,
                                         num_tags=parameters['num_tags'],
                                         use_crf=parameters['use_crf'],
                                         lamd=parameters['lamd'],
                                         dropout_emb=parameters['dropout_emb'],
                                         dropout_hidden=parameters['dropout_hidden'],
                                         hidden_layers=parameters['hidden_layers'],
                                         channels=parameters['channels'],
                                         kernel_size=parameters['kernel_size'],
                                         use_bn=parameters['use_bn'],
                                         use_wn=parameters['use_wn'],
                                         active_type=parameters['active_type'])
        self.seq_ids_pl = seq_ids_pl
        self.seq_other_ids_pls = seq_other_ids_pls
        self.stag_ids_pl = stag_ids_pl
        self.seq_lengths_pl = seq_lengths_pl
        self.is_train_pl = is_train_pl
        self.cost_op = cost_op
        self.train_cost_op = train_cost_op
        self.scores_op = scores_op
        self.summary_op = summary_op
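
Note that the initializer here is passed to the enclosing variable scope rather than to an individual variable: every get_variable call inside the scope that does not specify its own initializer inherits uniform unit scaling. A minimal sketch of that mechanism (names are illustrative):

with tf.variable_scope("demo_scope", initializer=tf.uniform_unit_scaling_initializer()):
    w = tf.get_variable("w", [100, 50])  # inherits uniform unit scaling
    b = tf.get_variable("b", [50], initializer=tf.zeros_initializer())  # overrides it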
model_utils.py (project: f-lm, author: okuchaiev)
def sharded_variable(name, shape, num_shards, dtype=tf.float32, transposed=False):
    # The final size of the sharded variable may be larger than requested.
    # This should be fine for embeddings.
    shard_size = int((shape[0] + num_shards - 1) / num_shards)
    # Both branches were identical in TF 1.x: the pre-1.0 full_shape argument
    # that distinguished them (see the lm version above) is gone.
    initializer = tf.uniform_unit_scaling_initializer(dtype=dtype)
    return [tf.get_variable(name + "_" + str(i), [shard_size, shape[1]],
                            initializer=initializer, dtype=dtype) for i in range(num_shards)]
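
A hedged usage sketch for the function above (shapes are illustrative): the shard list can be passed directly to tf.nn.embedding_lookup, whose default 'mod' partition strategy maps ids across the shards.

emb_shards = sharded_variable("emb", [100000, 512], num_shards=4)  # 4 shards of 25000 rows
ids = tf.placeholder(tf.int32, [None])
vectors = tf.nn.embedding_lookup(emb_shards, ids)  # partition_strategy='mod' by default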


# XXX(rafal): Code below copied from rnn_cell.py
resnet_model.py (project: YellowFin, author: JianGoForIt)
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
net.py (project: l3, author: jacobandreas)
def _embed(t_in, n_embeddings, n_out):
    v = tf.get_variable(
            "embed", shape=(n_embeddings, n_out),
            initializer=tf.uniform_unit_scaling_initializer())
    t_embed = tf.nn.embedding_lookup(v, t_in)
    return t_embed
net.py (project: l3, author: jacobandreas)
def _linear(t_in, n_out):
    assert len(t_in.get_shape()) == 2
    v_w = tf.get_variable(
            "w",
            shape=(t_in.get_shape()[1], n_out),
            initializer=tf.uniform_unit_scaling_initializer(
                factor=INIT_SCALE))
    v_b = tf.get_variable(
            "b",
            shape=n_out,
            initializer=tf.constant_initializer(0))
    return tf.einsum("ij,jk->ik", t_in, v_w) + v_b
net.py (project: l3, author: jacobandreas)
def _embed(t_in, n_embeddings, n_out):
    v = tf.get_variable(
            "embed", shape=(n_embeddings, n_out),
            initializer=tf.uniform_unit_scaling_initializer())
    t_embed = tf.nn.embedding_lookup(v, t_in)
    return t_embed
resnet_model.py (project: AM-GAN, author: ZhimingZhou)
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
qacnn.py (project: InsuranceQA_zh, author: l11x0m7)
def add_embeddings(self):
        with tf.variable_scope('embedding'):
            embeddings = tf.get_variable('embeddings', shape=[self.config.vocab_size, self.config.embedding_size],
                                         initializer=tf.uniform_unit_scaling_initializer())
            q_embed = tf.nn.embedding_lookup(embeddings, self.q)
            aplus_embed = tf.nn.embedding_lookup(embeddings, self.aplus)
            aminus_embed = tf.nn.embedding_lookup(embeddings, self.aminus)
            return q_embed, aplus_embed, aminus_embed

    # Hidden Layer
qacnn.py (project: InsuranceQA_zh, author: l11x0m7)
def add_hl(self, q_embed, aplus_embed, aminus_embed):
        with tf.variable_scope('HL'):
            W = tf.get_variable('weights', shape=[self.config.embedding_size, self.config.hidden_size],
                                initializer=tf.uniform_unit_scaling_initializer())
            b = tf.get_variable('biases', initializer=tf.constant(0.1, shape=[self.config.hidden_size]))
            h_q = tf.reshape(tf.nn.tanh(tf.matmul(tf.reshape(q_embed, [-1, self.config.embedding_size]), W) + b),
                             [-1, self.config.sequence_length, self.config.hidden_size])
            h_ap = tf.reshape(tf.nn.tanh(tf.matmul(tf.reshape(aplus_embed, [-1, self.config.embedding_size]), W) + b),
                              [-1, self.config.sequence_length, self.config.hidden_size])
            h_am = tf.reshape(tf.nn.tanh(tf.matmul(tf.reshape(aminus_embed, [-1, self.config.embedding_size]), W) + b),
                              [-1, self.config.sequence_length, self.config.hidden_size])
            tf.add_to_collection('total_loss', 0.5 * self.config.l2_reg_lambda * tf.nn.l2_loss(W))
            return h_q, h_ap, h_am

    # CNN?
nn.py (project: hart, author: akosiorek)
def _build(self):
        n_inpt_channels = self.inpt.get_shape().as_list()[-1]
        n_dfn_filter_params = n_inpt_channels * self.n_channels * np.prod(self.ksize)

        filter_inpt = self.filter_inpt
        for i in range(1, self.n_param_layers):
            filter_inpt = AffineLayer(filter_inpt, filter_inpt.get_shape().as_list()[-1],
                                      transfer=tf.nn.elu, name='param_layer_{}'.format(i))

        dfn_weight_init = tf.uniform_unit_scaling_initializer(self.dfn_weight_factor)
        self.dynamic_weights = AffineLayer(filter_inpt, n_dfn_filter_params, transfer=None,
                                           weight_init=dfn_weight_init, bias_init=dfn_weight_init, name='dynamic_weights')

        dfn_weights = tf.reshape(self.dynamic_weights, (-1, 1, 1, n_dfn_filter_params))
        dfn = DynamicFilterConvLayer(self.inpt, dfn_weights, self.ksize, name='dfn')

        if self.adaptive_bias:
            dfn_bias_init = tf.uniform_unit_scaling_initializer(self.dfn_bias_factor)
            self.dynamic_bias = AffineLayer(filter_inpt, self.n_channels, transfer=None,
                                            weight_init=dfn_bias_init, bias_init=dfn_bias_init,
                                            name='dynamic_bias')

            dfn_adaptive_bias = tf.reshape(self.dynamic_bias, (-1, 1, 1, self.n_channels))
            dfn += dfn_adaptive_bias

        if self.bias:
            self.bias = tf.get_variable('dfn_bias', (1, 1, 1, self.n_channels))
            dfn += self.bias

        self.features = self.transfer(dfn)
deeplab_model.py (project: TF-deeplab, author: chenxi116)
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.batch_size, -1])
    w = tf.get_variable(
        'DW', [self.filters[-1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
deeplab_model.py (project: TF-deeplab, author: chenxi116)
def _fully_convolutional(self, x, out_dim):
    """FullyConvolutional layer for final output."""
    w = tf.get_variable(
        'DW', [1, 1, self.filters[-1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())  
    return tf.nn.conv2d(x, w, self._stride_arr(1), padding='SAME') + b
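
The two DeepLab variants above are equivalent up to reshaping: a 1x1 convolution applies the same W and b at every spatial position, which lets the fully-convolutional version produce dense per-location logits instead of a single vector per image.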
Resnet.py (project: DeepLab, author: 2prime)
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
model_utils.py (project: ran, author: kentonl)
def sharded_variable(name, shape, num_shards, dtype=tf.float32, transposed=False):
    # The final size of the sharded variable may be larger than requested.
    # This should be fine for embeddings.
    shard_size = int((shape[0] + num_shards - 1) / num_shards)
    # Both branches were identical: the pre-1.0 full_shape argument that once
    # distinguished the transposed case is gone, so one initializer suffices.
    initializer = tf.uniform_unit_scaling_initializer(dtype=dtype)
    return [tf.get_variable(name + "_%d" % i, [shard_size, shape[1]], initializer=initializer, dtype=dtype)
            for i in range(num_shards)]


# XXX(rafal): Code below copied from rnn_cell.py
asr_model.py (project: TF-Speech-Recognition, author: ZhishengWang)
def _fully_connected(self, x, out_dim):
        """FullyConnected layer for final output."""
        x = tf.reshape(x, [self.hps.batch_size, -1])
        w = tf.get_variable(
            'DW', [x.get_shape()[1], out_dim],
            initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
        b = tf.get_variable('biases', [out_dim],
                            initializer=tf.constant_initializer())

        return tf.nn.xw_plus_b(x, w, b)
asr_model.py (project: TF-Speech-Recognition, author: ZhishengWang)
def _fully_connected_v2(self, x, name, out_dim):
        """FullyConnected layer for final output."""
        #x = tf.reshape(x, [self.hps.batch_size, -1])
        with tf.variable_scope(name):
            w = tf.get_variable(
            'DW', [x.get_shape()[1], out_dim],
            initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
            b = tf.get_variable('biases', [out_dim],
                            initializer=tf.constant_initializer())

        return tf.nn.xw_plus_b(x, w, b)
qacnn.py (project: InsuranceQA, author: l11x0m7)
def add_embeddings(self):
        with tf.variable_scope('embedding'):
            embeddings = tf.get_variable('embeddings', shape=[self.config.vocab_size, self.config.embedding_size],
                                         initializer=tf.uniform_unit_scaling_initializer())
            q_embed = tf.nn.embedding_lookup(embeddings, self.q)
            aplus_embed = tf.nn.embedding_lookup(embeddings, self.aplus)
            aminus_embed = tf.nn.embedding_lookup(embeddings, self.aminus)
            return q_embed, aplus_embed, aminus_embed

    # Hidden Layer

