Python sigmoid() example source code

birdCLEF_train.py (project: BirdCLEF2017, author: kahst)
import theano.tensor as T
from lasagne import objectives

def calc_loss_multi(prediction, targets):

    # clip predictions to avoid log(0) when computing the log-loss
    prediction = T.clip(prediction, 0.0000001, 0.9999999)

    # binary cross-entropy is the right choice for a multi-label (per-class sigmoid) output
    loss = T.mean(objectives.binary_crossentropy(prediction, targets))

    return loss
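A minimal usage sketch, assuming Theano and Lasagne are installed (variable names are illustrative):

import numpy as np
import theano
import theano.tensor as T

prediction = T.matrix('prediction')   # sigmoid outputs, one column per class
targets = T.matrix('targets')         # 0/1 labels of the same shape
loss_fn = theano.function([prediction, targets],
                          calc_loss_multi(prediction, targets))

p = np.array([[0.9, 0.1], [0.2, 0.8]], dtype=theano.config.floatX)
t = np.array([[1.0, 0.0], [0.0, 1.0]], dtype=theano.config.floatX)
print(loss_fn(p, t))  # mean binary cross-entropy over the batch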

network.py (project: rllabplusplus, author: shaneshixiang)
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the candidate hidden state (a GRU has no cell gate)
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
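This constructor only registers the GRU parameters; the recurrence itself lives elsewhere in the layer. As a hedged illustration (not the repository's actual step function), a single GRU step built from these weights would look like:

import theano.tensor as T

def gru_step(x_t, h_prev, layer):
    # reset and update gates use the gate nonlinearity (sigmoid by default)
    r = layer.gate_nonlinearity(T.dot(x_t, layer.W_xr) + T.dot(h_prev, layer.W_hr) + layer.b_r)
    u = layer.gate_nonlinearity(T.dot(x_t, layer.W_xu) + T.dot(h_prev, layer.W_hu) + layer.b_u)
    # the candidate hidden state sees the reset-gated previous state
    c = layer.nonlinearity(T.dot(x_t, layer.W_xc) + T.dot(r * h_prev, layer.W_hc) + layer.b_c)
    # blend old state and candidate with the update gate
    return (1. - u) * h_prev + u * c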
LSTM_Eac_fusion.py (project: EAC-Net, author: wiibrew)
from lasagne.layers import InputLayer, LSTMLayer, DenseLayer
from lasagne.nonlinearities import sigmoid

def build_tempral_model():
    net = {}
    net['input'] = InputLayer((None, 24, 2048))
    net['lstm1'] = LSTMLayer(net['input'], 256)
    net['fc'] = DenseLayer(net['lstm1'], num_units=12, nonlinearity=sigmoid)

    return net
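A hedged usage sketch: compile the 12-unit sigmoid output for a batch of 24-step, 2048-dimensional sequences (names are illustrative):

import numpy as np
import theano
import theano.tensor as T
import lasagne

net = build_tempral_model()
X = T.tensor3('X')  # (batch, 24, 2048)
predict = theano.function([X], lasagne.layers.get_output(net['fc'], X))
scores = predict(np.zeros((2, 24, 2048), dtype=theano.config.floatX))  # (2, 12), values in (0, 1)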
BP4D_EAC_dlib_train.py (project: EAC-Net, author: wiibrew)
from lasagne.layers import (InputLayer, SliceLayer, Upscale2DLayer,
                            DenseLayer, DropoutLayer, concat)
from lasagne.layers import Conv2DLayer as ConvLayer  # assumed stand-in for the project's ConvLayer
from lasagne.nonlinearities import sigmoid

def build_model():
    net = {}
    net['input'] = InputLayer((None, 512*20, 3, 3))

    au_fc_layers = []
    for i in range(20):
        # slice out the 512 channels belonging to AU region i
        net['roi_AU_N_'+str(i)] = SliceLayer(net['input'], indices=slice(i*512, (i+1)*512), axis=1)

        # upsample each ROI to leave room for another convolution
        net['Roi_upsample_'+str(i)] = Upscale2DLayer(net['roi_AU_N_'+str(i)], scale_factor=2)

        net['conv_roi_'+str(i)] = ConvLayer(net['Roi_upsample_'+str(i)], 512, 3)

        net['au_fc_'+str(i)] = DenseLayer(net['conv_roi_'+str(i)], num_units=150)

        au_fc_layers += [net['au_fc_'+str(i)]]

    net['local_fc'] = concat(au_fc_layers)
    net['local_fc2'] = DenseLayer(net['local_fc'], num_units=2048)

    net['local_fc_dp'] = DropoutLayer(net['local_fc2'], p=0.5)

    # net['fc_comb'] = concat([net['au_fc_layer'], net['local_fc_dp']])
    # net['fc_dense'] = DenseLayer(net['fc_comb'], num_units=1024)
    # net['fc_dense_dp'] = DropoutLayer(net['fc_dense'], p=0.3)

    net['real_out'] = DenseLayer(net['local_fc_dp'], num_units=12, nonlinearity=sigmoid)

    # net['final'] = concat([net['pred_pos_layer'], net['output_layer']])

    return net
lsgan.py (project: Theano-MPI, author: uoguelph-mlrg)
def build_generator(input_var=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    layer = batch_norm(DenseLayer(layer, 128*7*7))
    layer = ReshapeLayer(layer, ([0], 128, 7, 7))
    # two fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 64, 5, stride=2, crop='same',
                                     output_size=14))
    layer = Deconv2DLayer(layer, 1, 5, stride=2, crop='same', output_size=28,
                          nonlinearity=sigmoid)
    print("Generator output:", layer.output_shape)
    return layer
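A hedged sampling sketch for this generator (names are illustrative; deterministic=True puts the batch-norm layers in inference mode):

import numpy as np
import theano
import theano.tensor as T
import lasagne

noise = T.matrix('noise')
generator = build_generator(noise)
gen_fn = theano.function(
    [noise], lasagne.layers.get_output(generator, deterministic=True))
samples = gen_fn(np.random.rand(16, 100).astype(theano.config.floatX))
print(samples.shape)  # (16, 1, 28, 28), values in (0, 1) thanks to the sigmoid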
lsgan_cifar10.py (project: Theano-MPI, author: uoguelph-mlrg)
def build_generator(input_var=None, verbose=False):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # # fully-connected layer
    # layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    layer = batch_norm(DenseLayer(layer, 1024*4*4))
    layer = ReshapeLayer(layer, ([0], 1024, 4, 4))
    # three fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 512, 5, stride=2, crop='same',
                                     output_size=8))
    layer = batch_norm(Deconv2DLayer(layer, 256, 5, stride=2, crop='same',
                                     output_size=16))
    layer = Deconv2DLayer(layer, 3, 5, stride=2, crop='same', output_size=32,
                          nonlinearity=sigmoid)
    if verbose: print("Generator output:", layer.output_shape)
    return layer
wgan.py (project: Theano-MPI, author: uoguelph-mlrg)
def build_generator(input_var=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    layer = batch_norm(DenseLayer(layer, 128*7*7))
    layer = ReshapeLayer(layer, ([0], 128, 7, 7))
    # two fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 64, 5, stride=2, crop='same',
                                     output_size=14))
    layer = Deconv2DLayer(layer, 1, 5, stride=2, crop='same', output_size=28,
                          nonlinearity=sigmoid)
    print("Generator output:", layer.output_shape)
    return layer
memory.py (project: MEM_DGM, author: thu-ml)
def get_output_for(self, input, **kwargs):
        assert input.ndim == 2
        # addressing logits: compare the input against the key matrix C
        activation = T.dot(input, self.C)
        if self.b is not None:
            activation = activation + self.b.dimshuffle('x', 0)
        # sigmoid addressing weights over the slots, then read from memory M
        return self.nonlinearity_final(nonlinearities.sigmoid(activation).dot(self.M))
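This layer reads from a fixed memory matrix M using non-normalized sigmoid addressing. A minimal NumPy sketch of the same computation, with assumed shapes (C holds one key column per memory slot, M one row per slot):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

x = np.random.randn(4, 32)       # batch of 4 inputs, 32-d (assumed)
C = np.random.randn(32, 10)      # keys: one per memory slot (assumed)
M = np.random.randn(10, 64)      # memory: one 64-d row per slot (assumed)
read = sigmoid(x.dot(C)).dot(M)  # (4, 64): soft, non-normalized read over the 10 slots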
memory.py (project: MEM_DGM, author: thu-ml)
def __init__(self, u_net, z_net,
                 nonlinearity=nonlinearities.sigmoid,
                 nonlinearity_final=nonlinearities.identity, **kwargs):
        super(LadderCompositionLayer, self).__init__([u_net, z_net], **kwargs)

        u_shp, z_shp = self.input_shapes

        if not u_shp[-1] == z_shp[-1]:
            raise ValueError("last dimension of u and z must be equal:"
                             " u was %s, z was %s" % (str(u_shp), str(z_shp)))
        self.num_inputs = z_shp[-1]
        self.nonlinearity = nonlinearity
        self.nonlinearity_final = nonlinearity_final
        constant = init.Constant
        self.a1 = self.add_param(constant(0.), (self.num_inputs,), name="a1")
        self.a2 = self.add_param(constant(1.), (self.num_inputs,), name="a2")
        self.a3 = self.add_param(constant(0.), (self.num_inputs,), name="a3")
        self.a4 = self.add_param(constant(0.), (self.num_inputs,), name="a4")

        self.c1 = self.add_param(constant(0.), (self.num_inputs,), name="c1")
        self.c2 = self.add_param(constant(1.), (self.num_inputs,), name="c2")
        self.c3 = self.add_param(constant(0.), (self.num_inputs,), name="c3")
        self.c4 = self.add_param(constant(0.), (self.num_inputs,), name="c4")

        self.b1 = self.add_param(constant(0.), (self.num_inputs,),
                                 name="b1", regularizable=False)
analysis_memory.py (project: MEM_DGM, author: thu-ml)
def get_output_for(self, input, **kwargs):
        # addressing logits against the key matrix C, returned as sigmoid weights
        activation = T.dot(input, self.C)
        if self.b is not None:
            activation = activation + self.b.dimshuffle('x', 0)
        return nonlinearities.sigmoid(activation)
analysis_memory.py (project: MEM_DGM, author: thu-ml)
def __init__(self, u_net, z_net,
                 nonlinearity=nonlinearities.sigmoid,
                 nonlinearity_final=nonlinearities.identity, **kwargs):
        super(LadderCompositionLayer, self).__init__([u_net, z_net], **kwargs)

        u_shp, z_shp = self.input_shapes

        if not u_shp[-1] == z_shp[-1]:
            raise ValueError("last dimension of u and z must be equal:"
                             " u was %s, z was %s" % (str(u_shp), str(z_shp)))
        self.num_inputs = z_shp[-1]
        self.nonlinearity = nonlinearity
        self.nonlinearity_final = nonlinearity_final
        constant = init.Constant
        self.a1 = self.add_param(constant(0.), (self.num_inputs,), name="a1")
        self.a2 = self.add_param(constant(1.), (self.num_inputs,), name="a2")
        self.a3 = self.add_param(constant(0.), (self.num_inputs,), name="a3")
        self.a4 = self.add_param(constant(0.), (self.num_inputs,), name="a4")

        self.c1 = self.add_param(constant(0.), (self.num_inputs,), name="c1")
        self.c2 = self.add_param(constant(1.), (self.num_inputs,), name="c2")
        self.c3 = self.add_param(constant(0.), (self.num_inputs,), name="c3")
        self.c4 = self.add_param(constant(0.), (self.num_inputs,), name="c4")

        self.b1 = self.add_param(constant(0.), (self.num_inputs,),
                                 name="b1", regularizable=False)
DMN.py (project: DynamicMemoryNetworks, author: swstarlab)
def __init__(self, W_in=Normal(0.1), W_hid=Normal(0.1),
                 b=Constant(0.), nonlinearity=nonlin.sigmoid):
        self.W_in  = W_in
        self.W_hid = W_hid
        self.b     = b
        if nonlinearity is None:
            self.nonlinearity = nonlin.identity
        else:
            self.nonlinearity = nonlinearity
AttentionNet.py (project: AttentionNet, author: sayvazov)
def get_output_for(self, arguments, **kwargs):
        input, hprev, Cprev = arguments
        # standard LSTM gate equations; note the source uses element-wise
        # products (W * x) where a conventional LSTM would use T.dot
        i = nl.sigmoid(self.Wi * input + self.Ui * hprev + self.bi)    # input gate
        cand = nl.tanh(self.Wc * input + self.Uc * hprev + self.bc)   # candidate cell state
        f = nl.sigmoid(self.Wf * input + self.Uf * hprev + self.bf)   # forget gate
        C = i * cand + f * Cprev                                      # new cell state
        o = nl.sigmoid(self.Wo * input + self.Uo * hprev + self.Vo * C + self.bo)  # output gate with peephole Vo*C
        h = o * nl.tanh(C)
        return h, C
tmp_model.py (project: StockPredictor, author: wallsbreaker)
def build_combination(input_var, output_nodes, input_size, stocks, period, feature_types):
    # Input layer
    input_layer = InputLayer(shape=(None, 1, input_size), input_var=input_var)
    assert input_size == stocks * period * feature_types
    input_layer = ReshapeLayer(input_layer, (([0], stocks, period, feature_types)))

    # slice the input into per-stock, per-feature-type partitions
    stock_feature_type_layers = []
    for ix in range(stocks):
        stock_layer = SliceLayer(input_layer, indices=ix, axis=1)
        this_stock_feature_type_layers = []
        for rx in range(feature_types):
            this_stock_feature_type_layers.append(SliceLayer(stock_layer, indices=rx, axis=1))
        stock_feature_type_layers.append(this_stock_feature_type_layers)

    stock_networks = []
    for this_stock_feature_type_layers in stock_feature_type_layers:
        this_stock_networks = []
        for feature_type_layer in this_stock_feature_type_layers:
            tmp = DenseLayer(dropout(feature_type_layer, p=.2),
                num_units=10, nonlinearity=tanh)
            tmp = DenseLayer(dropout(tmp, p=.5), num_units=1, nonlinearity=tanh)
            this_stock_networks.append(tmp)

        this_stock_network = ConcatLayer(this_stock_networks)

        stock_network = DenseLayer(dropout(this_stock_network, p=.5),
                num_units=1, nonlinearity=tanh)

        stock_networks.append(stock_network)

    network = ConcatLayer(stock_networks)
    network = DenseLayer(dropout(network, p=.5),
                num_units=output_nodes, nonlinearity=sigmoid)

    return network, stock_networks
unimodal.py (project: ip-avsr, author: lzuwei)
def create_pretrained_encoder(weights, biases, incoming):
    l_1 = DenseLayer(incoming, 2000, W=weights[0], b=biases[0], nonlinearity=sigmoid, name='fc1')
    l_2 = DenseLayer(l_1, 1000, W=weights[1], b=biases[1], nonlinearity=sigmoid, name='fc2')
    l_3 = DenseLayer(l_2, 500, W=weights[2], b=biases[2], nonlinearity=sigmoid, name='fc3')
    l_4 = DenseLayer(l_3, 50, W=weights[3], b=biases[3], nonlinearity=linear, name='encoder')
    return l_4
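The weights and biases arguments are lists of pretrained arrays, one per layer. A hedged sketch of wiring this up from a pretrained .mat file, mirroring the dbn.py loader further below (the path is hypothetical):

import scipy.io as sio
from lasagne.layers import InputLayer

nn = sio.loadmat('pretrain_nn.mat')  # hypothetical path
weights = [nn['w1'], nn['w2'], nn['w3'], nn['w4']]
biases = [nn['b1'][0], nn['b2'][0], nn['b3'][0], nn['b4'][0]]
encoder = create_pretrained_encoder(weights, biases, InputLayer((None, 1200)))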
leave_one_out.py (project: ip-avsr, author: lzuwei)
def create_pretrained_encoder(weights, biases, incoming):
    l_1 = DenseLayer(incoming, 2000, W=weights[0], b=biases[0], nonlinearity=sigmoid, name='fc1')
    l_2 = DenseLayer(l_1, 1000, W=weights[1], b=biases[1], nonlinearity=sigmoid, name='fc2')
    l_3 = DenseLayer(l_2, 500, W=weights[2], b=biases[2], nonlinearity=sigmoid, name='fc3')
    l_4 = DenseLayer(l_3, 50, W=weights[3], b=biases[3], nonlinearity=linear, name='encoder')
    return l_4
bimodal.py (project: ip-avsr, author: lzuwei)
def create_pretrained_encoder(weights, biases, incoming):
    l_1 = DenseLayer(incoming, 2000, W=weights[0], b=biases[0], nonlinearity=sigmoid, name='fc1')
    l_2 = DenseLayer(l_1, 1000, W=weights[1], b=biases[1], nonlinearity=sigmoid, name='fc2')
    l_3 = DenseLayer(l_2, 500, W=weights[2], b=biases[2], nonlinearity=sigmoid, name='fc3')
    l_4 = DenseLayer(l_3, 50, W=weights[3], b=biases[3], nonlinearity=linear, name='encoder')
    return l_4
demo.py (project: ip-avsr, author: lzuwei)
def create_pretrained_encoder(weights, biases, incoming):
    l_1 = DenseLayer(incoming, 2000, W=weights[0], b=biases[0], nonlinearity=sigmoid, name='fc1')
    l_2 = DenseLayer(l_1, 1000, W=weights[1], b=biases[1], nonlinearity=sigmoid, name='fc2')
    l_3 = DenseLayer(l_2, 500, W=weights[2], b=biases[2], nonlinearity=sigmoid, name='fc3')
    l_4 = DenseLayer(l_3, 50, W=weights[3], b=biases[3], nonlinearity=linear, name='encoder')
    return l_4
unimodal_nodelta.py (project: ip-avsr, author: lzuwei)
def load_finetuned_dbn(path):
    """
    Load a fine-tuned Deep Belief Net from file
    :param path: path to deep belief net parameters
    :return: deep belief net
    """
    dbn = NeuralNet(
        layers=[
            ('input', las.layers.InputLayer),
            ('l1', las.layers.DenseLayer),
            ('l2', las.layers.DenseLayer),
            ('l3', las.layers.DenseLayer),
            ('l4', las.layers.DenseLayer),
            ('l5', las.layers.DenseLayer),
            ('l6', las.layers.DenseLayer),
            ('l7', las.layers.DenseLayer),
            ('output', las.layers.DenseLayer)
        ],
        input_shape=(None, 1200),
        l1_num_units=2000, l1_nonlinearity=sigmoid,
        l2_num_units=1000, l2_nonlinearity=sigmoid,
        l3_num_units=500, l3_nonlinearity=sigmoid,
        l4_num_units=50, l4_nonlinearity=linear,
        l5_num_units=500, l5_nonlinearity=sigmoid,
        l6_num_units=1000, l6_nonlinearity=sigmoid,
        l7_num_units=2000, l7_nonlinearity=sigmoid,
        output_num_units=1200, output_nonlinearity=linear,
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.5,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    with open(path, 'rb') as f:
        pretrained_nn = pickle.load(f)
    if pretrained_nn is not None:
        dbn.load_params_from(path)
    return dbn
dbn.py (project: ip-avsr, author: lzuwei)
def load_encoder(path):
    """
        load a pretrained dbn from path
        :param path: path to the .mat dbn
        :return: pretrained unrolled encoder
        """
    # create the network using weights from pretrain_nn.mat
    nn = sio.loadmat(path)
    w1 = nn['w1']
    w2 = nn['w2']
    w3 = nn['w3']
    w4 = nn['w4']
    b1 = nn['b1'][0]
    b2 = nn['b2'][0]
    b3 = nn['b3'][0]
    b4 = nn['b4'][0]

    encoder = NeuralNet(
        layers=[
            (InputLayer, {'name': 'input', 'shape': (None, 1200)}),
            (DenseLayer, {'name': 'l1', 'num_units': 2000, 'nonlinearity': sigmoid,
                          'W': w1, 'b': b1}),
            (DenseLayer, {'name': 'l2', 'num_units': 1000, 'nonlinearity': sigmoid,
                          'W': w2, 'b': b2}),
            (DenseLayer, {'name': 'l3', 'num_units': 500, 'nonlinearity': sigmoid,
                          'W': w3, 'b': b3}),
            (DenseLayer, {'name': 'l4', 'num_units': 50, 'nonlinearity': linear,
                          'W': w4, 'b': b4}),
        ],
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.5,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    encoder.initialize()
    return encoder
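Once initialized, the encoder can be used directly for feature extraction; a brief hedged usage sketch (path and input are hypothetical):

import numpy as np

encoder = load_encoder('pretrain_nn.mat')      # hypothetical path
X = np.random.rand(8, 1200).astype('float32')  # hypothetical input batch
codes = encoder.predict(X)                     # (8, 50) bottleneck features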

