Python examples of the ConcatLayer() class (source code)
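
Before the project examples, here is a minimal, self-contained sketch of the basic ConcatLayer API (the shapes and variable names are illustrative and not taken from any project below): ConcatLayer accepts a list of incoming layers and concatenates their outputs along the given axis (axis=1, the channel axis, by default).

import numpy as np
import theano
import theano.tensor as T
from lasagne.layers import InputLayer, ConcatLayer, get_output

# Two hypothetical feature maps with the same spatial size but different channel counts.
x_var = T.tensor4('x')
y_var = T.tensor4('y')
l_x = InputLayer((None, 3, 32, 32), x_var)
l_y = InputLayer((None, 5, 32, 32), y_var)

# Concatenate along the channel axis: 3 + 5 = 8 output channels.
l_cat = ConcatLayer([l_x, l_y], axis=1)

fn = theano.function([x_var, y_var], get_output(l_cat))
out = fn(np.zeros((2, 3, 32, 32), dtype=theano.config.floatX),
         np.zeros((2, 5, 32, 32), dtype=theano.config.floatX))
print(out.shape)  # (2, 8, 32, 32)

The project snippets below apply the same pattern to merge inception branches, fused audio-visual streams, FlowNet refinement features, and auxiliary game-state inputs.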

inception_v3.py (project: no_fuss_dml, author: brotherofken)
def inceptionB(input_layer, nfilt):
    # Corresponds to a modified version of figure 10 in the paper
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=3, stride=2)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=3, pad=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][2], filter_size=3, stride=2)

    l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)

    return ConcatLayer([l1, l2, l3])
inception_v3.py (project: no_fuss_dml, author: brotherofken)
def inceptionD(input_layer, nfilt):
    # Corresponds to a modified version of figure 10 in the paper
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    l1 = bn_conv(l1, num_filters=nfilt[0][1], filter_size=3, stride=2)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
    l2 = bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))
    l2 = bn_conv(l2, num_filters=nfilt[1][3], filter_size=3, stride=2)

    l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)

    return ConcatLayer([l1, l2, l3])
tmp_model.py (project: StockPredictor, author: wallsbreaker)
def build_combination(input_var, output_nodes, input_size, stocks, period, feature_types):
    # Input layer
    input_layer = InputLayer(shape=(None, 1, input_size), input_var=input_var)
    assert input_size == stocks * period * feature_types
    input_layer = ReshapeLayer(input_layer, ([0], stocks, period, feature_types))

    #slice for partition
    stock_feature_type_layers = []
    for ix in range(stocks):
        stock_layer = SliceLayer(input_layer, indices=ix, axis=1)
        this_stock_feature_type_layers = []
        for rx in range(feature_types):
            this_stock_feature_type_layers.append(SliceLayer(stock_layer, indices=rx, axis=1))
        stock_feature_type_layers.append(this_stock_feature_type_layers)

    stock_networks = []
    for this_stock_feature_type_layers in stock_feature_type_layers:
        this_stock_networks = []
        for feature_type_layer in this_stock_feature_type_layers:
            tmp = DenseLayer(dropout(feature_type_layer, p=.2),
                num_units=10, nonlinearity=tanh)
            tmp = DenseLayer(dropout(tmp, p=.5), num_units=1, nonlinearity=tanh)
            this_stock_networks.append(tmp)

        this_stock_network = ConcatLayer(this_stock_networks)

        stock_network = DenseLayer(dropout(this_stock_network, p=.5),
                num_units=1, nonlinearity=tanh)

        stock_networks.append(stock_network)

    network = ConcatLayer(stock_networks)
    network = DenseLayer(dropout(network, p=.5),
                num_units=output_nodes, nonlinearity=sigmoid)

    return network, stock_networks
unimodal_nodelta.py (project: ip-avsr, author: lzuwei)
def test_concatlayer():
    a = np.array([
        [
            [1, 2, 3, 4],
            [1, 2, 3, 4],
            [1, 2, 3, 4]
        ],
        [
            [1, 2, 3, 4],
            [1, 2, 3, 4],
            [1, 2, 3, 4]
        ]
    ], dtype=np.int32)
    b = np.array([
        [
            [5, 6, 7],
            [5, 6, 7],
            [5, 6, 7]
        ],
        [
            [5, 6, 7],
            [5, 6, 7],
            [5, 6, 7]
        ]
    ], dtype=np.int32)

    input_var = T.tensor3('input', dtype='int32')
    dct_var = T.tensor3('dct', dtype='int32')
    l_in = InputLayer((None, None, 4), input_var, name='input')
    l_dct = InputLayer((None, None, 3), dct_var, name='dct')
    l_merge = ConcatLayer([l_in, l_dct], axis=2, name='merge')
    network = las.layers.get_all_layers(l_merge)
    print_network(network)
    output = las.layers.get_output(l_merge)
    merge_fn = theano.function([input_var, dct_var], output, allow_input_downcast=True)
    res = merge_fn(a, b)
    assert res.shape == (2, 3, 7)
avnet.py (project: ip-avsr, author: lzuwei)
def create_model(substreams, mask_shape, mask_var, lstm_size=250, output_classes=26,
                 fusiontype='concat', w_init_fn=las.init.Orthogonal(), use_peepholes=True):

    gate_parameters = Gate(
        W_in=w_init_fn, W_hid=w_init_fn,
        b=las.init.Constant(0.))
    cell_parameters = Gate(
        W_in=w_init_fn, W_hid=w_init_fn,
        # Setting W_cell to None denotes that no cell connection will be used.
        W_cell=None, b=las.init.Constant(0.),
        # By convention, the cell nonlinearity is tanh in an LSTM.
        nonlinearity=tanh)

    l_mask = InputLayer(mask_shape, mask_var, 'mask')
    symbolic_seqlen_raw = l_mask.input_var.shape[1]

    # We'll combine the forward and backward layer output by summing.
    # Merge layers take in lists of layers to merge as input.
    if fusiontype == 'adasum':
        l_fuse = AdaptiveElemwiseSumLayer(substreams, name='adasum1')
    elif fusiontype == 'sum':
        l_fuse = ElemwiseSumLayer(substreams, name='sum1')
    elif fusiontype == 'concat':
        l_fuse = ConcatLayer(substreams, axis=-1, name='concat')

    f_lstm_agg, b_lstm_agg = create_blstm(l_fuse, l_mask, lstm_size, cell_parameters, gate_parameters, 'lstm_agg')
    l_sum2 = ElemwiseSumLayer([f_lstm_agg, b_lstm_agg], name='sum2')

    # reshape to (num_examples * seq_len, lstm_size)
    l_reshape3 = ReshapeLayer(l_sum2, (-1, lstm_size), name='reshape3')

    # Now, we can apply feed-forward layers as usual.
    # We want the network to predict a classification for the sequence,
    # so we'll use a dense layer with one unit per class.
    l_softmax = DenseLayer(
        l_reshape3, num_units=output_classes,
        nonlinearity=las.nonlinearities.softmax, name='softmax')

    l_out = ReshapeLayer(l_softmax, (-1, symbolic_seqlen_raw, output_classes), name='output')

    return l_out, l_fuse
googlenet.py (project: kaggle-dsg-qualification, author: Ignotus)
def build_inception_module(self, name, input_layer, nfilters):
        # nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
        net = dict()
        net['pool'] = PoolLayerDNN(input_layer, pool_size=3, stride=1, pad=1)
        net['pool_proj'] = ConvLayer(
            net['pool'], nfilters[0], 1, flip_filters=False)

        net['1x1'] = ConvLayer(input_layer, nfilters[1], 1, flip_filters=False)

        net['3x3_reduce'] = ConvLayer(
            input_layer, nfilters[2], 1, flip_filters=False)
        net['3x3'] = ConvLayer(
            net['3x3_reduce'], nfilters[3], 3, pad=1, flip_filters=False)

        net['5x5_reduce'] = ConvLayer(
            input_layer, nfilters[4], 1, flip_filters=False)
        net['5x5'] = ConvLayer(
            net['5x5_reduce'], nfilters[5], 5, pad=2, flip_filters=False)

        net['output'] = ConcatLayer([
            net['1x1'],
            net['3x3'],
            net['5x5'],
            net['pool_proj'],
            ])

        return {'{}/{}'.format(name, k): v for k, v in net.items()}
googlenet.py (project: kaggle-dsg-qualification, author: Ignotus)
def inceptionB(self, input_layer, nfilt):
        # Corresponds to a modified version of figure 10 in the paper
        l1 = self.bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=3, stride=2)

        l2 = self.bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
        l2 = self.bn_conv(l2, num_filters=nfilt[1][1], filter_size=3, pad=1)
        l2 = self.bn_conv(l2, num_filters=nfilt[1][2], filter_size=3, stride=2)

        l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)

        return ConcatLayer([l1, l2, l3])
googlenet.py (project: kaggle-dsg-qualification, author: Ignotus)
def inceptionD(self, input_layer, nfilt):
        # Corresponds to a modified version of figure 10 in the paper
        l1 = self.bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
        l1 = self.bn_conv(l1, num_filters=nfilt[0][1], filter_size=3, stride=2)

        l2 = self.bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
        l2 = self.bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
        l2 = self.bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))
        l2 = self.bn_conv(l2, num_filters=nfilt[1][3], filter_size=3, stride=2)

        l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)

        return ConcatLayer([l1, l2, l3])
FlowNetS.py (project: theano-flownet, author: Ignotus)
def build_model(weights):
    net = dict()

    # T.nnet.abstract_conv.bilinear_upsampling does not work properly unless a
    # batch size is specified
    batch_size = 1

    net['input_1'] = InputLayer([batch_size, 3, 384, 512])

    net['input_2'] = InputLayer([batch_size, 3, 384, 512])

    net['input'] = ConcatLayer([net['input_1'], net['input_2']])

    net['conv1'] = leaky_conv(net['input'], num_filters=64, filter_size=7, stride=2)
    net['conv2'] = leaky_conv(net['conv1'], num_filters=128, filter_size=5, stride=2)

    net['conv3'] = leaky_conv(net['conv2'], num_filters=256, filter_size=5, stride=2)
    net['conv3_1'] = leaky_conv(net['conv3'], num_filters=256, filter_size=3, stride=1)

    net['conv4'] = leaky_conv(net['conv3_1'], num_filters=512, filter_size=3, stride=2)
    net['conv4_1'] = leaky_conv(net['conv4'], num_filters=512, filter_size=3, stride=1)

    net['conv5'] = leaky_conv(net['conv4_1'], num_filters=512, filter_size=3, stride=2)
    net['conv5_1'] = leaky_conv(net['conv5'], num_filters=512, filter_size=3, stride=1)

    net['conv6'] = leaky_conv(net['conv5_1'], num_filters=1024, filter_size=3, stride=2)
    net['conv6_1'] = leaky_conv(net['conv6'], num_filters=1024, filter_size=3, stride=1)

    for layer_id in ['1', '2', '3', '3_1', '4', '4_1', '5', '5_1', '6', '6_1']:
        layer_name = 'conv' + layer_id
        print(layer_name, net[layer_name].W.shape.eval(), weights[layer_name][0].shape)
        print(layer_name, net[layer_name].b.shape.eval(), weights[layer_name][1].shape)
        net[layer_name].W.set_value(weights[layer_name][0])
        net[layer_name].b.set_value(weights[layer_name][1])

    refine_flow(net, weights)

    return net
layers.py (project: Neural-Photo-Editor, author: ajbrock)
def InceptionLayer(incoming,param_dict,block_name):
    branch = [0]*len(param_dict)
    # Loop across branches
    for i,dict in enumerate(param_dict):
        for j,style in enumerate(dict['style']): # Loop up branch
            branch[i] = C2D(
                incoming = branch[i] if j else incoming,
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                pad =  dict['pad'][j] if 'pad' in dict else None,
                stride = dict['stride'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional'\
            else NL(lasagne.layers.dnn.Pool2DDNNLayer(
                incoming=incoming if j == 0 else branch[i],
                pool_size = dict['filter_size'][j],
                mode = dict['mode'][j],
                stride = dict['stride'][j],
                pad = dict['pad'][j],
                name = block_name+'_'+str(i)+'_'+str(j)),
                nonlinearity = dict['nonlinearity'][j]) if style=='pool'\
            else lasagne.layers.DilatedConv2DLayer(
                incoming = lasagne.layers.PadLayer(incoming = incoming if j==0 else branch[i],width = dict['pad'][j]) if 'pad' in dict else incoming if j==0 else branch[i],
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                dilation = dict['dilation'][j],
                # pad = dict['pad'][j] if 'pad' in dict else None,
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j))  if style== 'dilation'\
            else DL(
                    incoming = incoming if j==0 else branch[i],
                    num_units = dict['num_filters'][j],
                    W = initmethod('relu'),
                    b = None,
                    nonlinearity = dict['nonlinearity'][j],
                    name = block_name+'_'+str(i)+'_'+str(j))   
                # Apply Batchnorm    
            branch[i] = BN(branch[i],name = block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]
        # Concatenate Sublayers        

    return CL(incomings=branch,name=block_name)

# Convenience function to define an inception-style block with upscaling
doodle.py (project: neural-doodle, author: alexjc)
def setup_model(self, input=None):
        """Use lasagne to create a network of convolution layers, first using VGG19 as the framework
        and then adding augmentations for Semantic Style Transfer.
        """
        net, self.channels = {}, {}

        # Primary network for the main image. These are convolution only, and stop at layer 4_2 (rest unused).
        net['img']     = input or InputLayer((None, 3, None, None))
        net['conv1_1'] = ConvLayer(net['img'],     64, 3, pad=1)
        net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
        net['pool1']   = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')
        net['conv2_1'] = ConvLayer(net['pool1'],   128, 3, pad=1)
        net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
        net['pool2']   = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')
        net['conv3_1'] = ConvLayer(net['pool2'],   256, 3, pad=1)
        net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
        net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
        net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1)
        net['pool3']   = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')
        net['conv4_1'] = ConvLayer(net['pool3'],   512, 3, pad=1)
        net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
        net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
        net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1)
        net['pool4']   = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')
        net['conv5_1'] = ConvLayer(net['pool4'],   512, 3, pad=1)
        net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
        net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
        net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1)
        net['main']    = net['conv5_4']

        # Auxiliary network for the semantic layers, and the nearest neighbors calculations.
        net['map'] = InputLayer((1, 1, None, None))
        for j, i in itertools.product(range(5), range(4)):
            if j < 2 and i > 1: continue
            suffix = '%i_%i' % (j+1, i+1)

            if i == 0:
                net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad')
            self.channels[suffix] = net['conv'+suffix].num_filters

            if args.semantic_weight > 0.0:
                net['sem'+suffix] = ConcatLayer([net['conv'+suffix], net['map%i'%(j+1)]])
            else:
                net['sem'+suffix] = net['conv'+suffix]

            net['dup'+suffix] = InputLayer(net['sem'+suffix].output_shape)
            net['nn'+suffix] = ConvLayer(net['dup'+suffix], 1, 3, b=None, pad=0, flip_filters=False)

        self.network = net
train_mixgan.py (project: MIX-plus-GAN, author: yz-ignescent)
def get_generator(self, meanx, z0, y_1hot):
        ''' specify generator G0, gen_x = G0(z0, h1) '''
        """
        #z0 = theano_rng.uniform(size=(self.args.batch_size, 16)) # uniform noise
        gen0_layers = [LL.InputLayer(shape=(self.args.batch_size, 50), input_var=z0)] # Input layer for z0
        gen0_layers.append(nn.batch_norm(LL.DenseLayer(nn.batch_norm(LL.DenseLayer(gen0_layers[0], num_units=128, W=Normal(0.02), nonlinearity=nn.relu)),
                          num_units=128, W=Normal(0.02), nonlinearity=nn.relu))) # embedding, 50 -> 128
        gen0_layer_z_embed = gen0_layers[-1] 

        #gen0_layers.append(LL.InputLayer(shape=(self.args.batch_size, 256), input_var=real_fc3)) # Input layer for real_fc3 in independent training, gen_fc3 in joint training
        gen0_layers.append(LL.InputLayer(shape=(self.args.batch_size, 10), input_var=y_1hot)) # Input layer for real_fc3 in independent training, gen_fc3 in joint training
        gen0_layer_fc3 = gen0_layers[-1]

        gen0_layers.append(LL.ConcatLayer([gen0_layer_fc3,gen0_layer_z_embed], axis=1)) # concatenate noise and fc3 features
        gen0_layers.append(LL.ReshapeLayer(nn.batch_norm(LL.DenseLayer(gen0_layers[-1], num_units=256*5*5, W=Normal(0.02), nonlinearity=T.nnet.relu)),
                         (self.args.batch_size,256,5,5))) # fc
        gen0_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen0_layers[-1], (self.args.batch_size,256,10,10), (5,5), stride=(2, 2), padding = 'half',
                         W=Normal(0.02),  nonlinearity=nn.relu))) # deconv
        gen0_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen0_layers[-1], (self.args.batch_size,128,14,14), (5,5), stride=(1, 1), padding = 'valid',
                         W=Normal(0.02),  nonlinearity=nn.relu))) # deconv

        gen0_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen0_layers[-1], (self.args.batch_size,128,28,28), (5,5), stride=(2, 2), padding = 'half',
                         W=Normal(0.02),  nonlinearity=nn.relu))) # deconv
        gen0_layers.append(nn.Deconv2DLayer(gen0_layers[-1], (self.args.batch_size,3,32,32), (5,5), stride=(1, 1), padding = 'valid',
                         W=Normal(0.02),  nonlinearity=T.nnet.sigmoid)) # deconv

        gen_x_pre = LL.get_output(gen0_layers[-1], deterministic=False)
        gen_x = gen_x_pre - meanx
        # gen_x_joint = LL.get_output(gen0_layers[-1], {gen0_layer_fc3: gen_fc3}, deterministic=False) - meanx

        return gen0_layers, gen_x 
        """
        gen_x_layer_z = LL.InputLayer(shape=(self.args.batch_size, self.args.z0dim), input_var=z0) # z, 20
        # gen_x_layer_z_embed = nn.batch_norm(LL.DenseLayer(gen_x_layer_z, num_units=128), g=None) # 20 -> 64

        gen_x_layer_y = LL.InputLayer(shape=(self.args.batch_size, 10), input_var=y_1hot) # conditioned on real fc3 activations
        gen_x_layer_y_z = LL.ConcatLayer([gen_x_layer_y,gen_x_layer_z],axis=1) #512+256 = 768
        gen_x_layer_pool2 = LL.ReshapeLayer(nn.batch_norm(LL.DenseLayer(gen_x_layer_y_z, num_units=256*5*5)), (self.args.batch_size,256,5,5))
        gen_x_layer_dconv2_1 = nn.batch_norm(nn.Deconv2DLayer(gen_x_layer_pool2, (self.args.batch_size,256,10,10), (5,5), stride=(2, 2), padding = 'half',
                         W=Normal(0.02),  nonlinearity=nn.relu))
        gen_x_layer_dconv2_2 = nn.batch_norm(nn.Deconv2DLayer(gen_x_layer_dconv2_1, (self.args.batch_size,128,14,14), (5,5), stride=(1, 1), padding = 'valid',
                         W=Normal(0.02),  nonlinearity=nn.relu))

        gen_x_layer_dconv1_1 = nn.batch_norm(nn.Deconv2DLayer(gen_x_layer_dconv2_2, (self.args.batch_size,128,28,28), (5,5), stride=(2, 2), padding = 'half',
                         W=Normal(0.02),  nonlinearity=nn.relu))
        gen_x_layer_x = nn.Deconv2DLayer(gen_x_layer_dconv1_1, (self.args.batch_size,3,32,32), (5,5), stride=(1, 1), padding = 'valid',
                         W=Normal(0.02),  nonlinearity=T.nnet.sigmoid)
        # gen_x_layer_x = dnn.Conv2DDNNLayer(gen_x_layer_dconv1_2, 3, (1,1), pad=0, stride=1, 
        #                  W=Normal(0.02), nonlinearity=T.nnet.sigmoid)

        gen_x_layers = [gen_x_layer_z, gen_x_layer_y, gen_x_layer_y_z, gen_x_layer_pool2, gen_x_layer_dconv2_1, 
            gen_x_layer_dconv2_2, gen_x_layer_dconv1_1, gen_x_layer_x]

        gen_x_pre = LL.get_output(gen_x_layer_x, deterministic=False)
        gen_x = gen_x_pre - meanx

        return gen_x_layers, gen_x
train_dcgan_baseline.py (project: MIX-plus-GAN, author: yz-ignescent)
def load_data():
    xs = []
    ys = []
    for j in range(5):
      d = unpickle('data/cifar-10-python/cifar-10-batches-py/data_batch_' + str(j + 1))
      x = d['data']
      y = d['labels']
      xs.append(x)
      ys.append(y)

    d = unpickle('data/cifar-10-python/cifar-10-batches-py/test_batch')
    xs.append(d['data'])
    ys.append(d['labels'])

    x = np.concatenate(xs)/np.float32(255)
    y = np.concatenate(ys)
    x = np.dstack((x[:, :1024], x[:, 1024:2048], x[:, 2048:]))
    x = x.reshape((x.shape[0], 32, 32, 3)).transpose(0,3,1,2)

    # subtract per-pixel mean
    pixel_mean = np.mean(x[0:50000],axis=0)
    #pickle.dump(pixel_mean, open("cifar10-pixel_mean.pkl","wb"))
    x -= pixel_mean

    # create mirrored images
    X_train = x[0:50000,:,:,:]
    Y_train = y[0:50000]
    # X_train_flip = X_train[:,:,:,::-1]
    # Y_train_flip = Y_train
    # X_train = np.concatenate((X_train,X_train_flip),axis=0)
    # Y_train = np.concatenate((Y_train,Y_train_flip),axis=0)

    X_test = x[50000:,:,:,:]
    Y_test = y[50000:]

    return pixel_mean, dict(
        X_train=lasagne.utils.floatX(X_train),
        Y_train=Y_train.astype('int32'),
        X_test = lasagne.utils.floatX(X_test),
        Y_test = Y_test.astype('int32'),)

## specify generator, gen_pool5 = G(z, y_1hot)
#z = theano_rng.uniform(size=(args.batch_size, 100)) # uniform noise
#y_1hot = T.matrix()
#gen_pool5_layer_z = LL.InputLayer(shape=(args.batch_size, 100), input_var=z) # z, 100
#gen_pool5_layer_z_embed = nn.batch_norm(LL.DenseLayer(gen_pool5_layer_z, num_units=256, W=Normal(0.02), nonlinearity=T.nnet.relu), g=None) # 100 -> 256
#gen_pool5_layer_y = LL.InputLayer(shape=(args.batch_size, 10), input_var=y_1hot) # y, 10
#gen_pool5_layer_y_embed = nn.batch_norm(LL.DenseLayer(gen_pool5_layer_y, num_units=512, W=Normal(0.02), nonlinearity=T.nnet.relu), g=None) # 10 -> 512
#gen_pool5_layer_fc4 = LL.ConcatLayer([gen_pool5_layer_z_embed,gen_pool5_layer_y_embed],axis=1) #512+256 = 768
##gen_pool5_layer_fc4 = nn.batch_norm(LL.DenseLayer(gen_pool5_layer_fc5, num_units=512, nonlinearity=T.nnet.relu))#, g=None) 
#gen_pool5_layer_fc3 = nn.batch_norm(LL.DenseLayer(gen_pool5_layer_fc4, num_units=512, W=Normal(0.02), nonlinearity=T.nnet.relu), g=None) 
#gen_pool5_layer_pool5_flat = LL.DenseLayer(gen_pool5_layer_fc3, num_units=4*4*32, nonlinearity=T.nnet.relu) # NO batch normalization at output layer
##gen_pool5_layer_pool5_flat = nn.batch_norm(LL.DenseLayer(gen_pool5_layer_fc3, num_units=4*4*32, W=Normal(0.02), nonlinearity=T.nnet.relu), g=None) # no batch-norm at output layer
#gen_pool5_layer_pool5 = LL.ReshapeLayer(gen_pool5_layer_pool5_flat, (args.batch_size,32,4,4))
#gen_pool5_layers = [gen_pool5_layer_z, gen_pool5_layer_z_embed, gen_pool5_layer_y, gen_pool5_layer_y_embed, #gen_pool5_layer_fc5,
# gen_pool5_layer_fc4, gen_pool5_layer_fc3, gen_pool5_layer_pool5_flat, gen_pool5_layer_pool5]
#gen_pool5 = LL.get_output(gen_pool5_layer_pool5, deterministic=False)
approximators.py (project: dqn_vizdoom_theano, author: mihahauke)
def _initialize_network(self, img_input_shape, misc_len, output_size, img_input, misc_input=None, **kwargs):
        input_layers = []
        inputs = [img_input]
        # weights_init = lasagne.init.GlorotUniform("relu")
        weights_init = lasagne.init.HeNormal("relu")

        network = ls.InputLayer(shape=img_input_shape, input_var=img_input)
        input_layers.append(network)
        network = ls.Conv2DLayer(network, num_filters=32, filter_size=8, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=4)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=4, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=2)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=3, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=1)
        network = ls.FlattenLayer(network)

        if self.misc_state_included:
            health_inputs = 4
            units_per_health_input = 100
            layers_for_merge = []
            for i in range(health_inputs):
                health_input_layer = ls.InputLayer(shape=(None, 1), input_var=misc_input[:, i:i + 1])

                health_layer = ls.DenseLayer(health_input_layer, units_per_health_input, nonlinearity=rectify,
                                             W=weights_init, b=lasagne.init.Constant(0.1))
                health_layer = ls.DenseLayer(health_layer, units_per_health_input, nonlinearity=rectify,
                                             W=weights_init, b=lasagne.init.Constant(0.1))

                inputs.append(misc_input[:, i:i + 1])
                input_layers.append(health_input_layer)
                layers_for_merge.append(health_layer)

            misc_input_layer = ls.InputLayer(shape=(None, misc_len - health_inputs),
                                             input_var=misc_input[:, health_inputs:])
            input_layers.append(misc_input_layer)
            layers_for_merge.append(misc_input_layer)
            inputs.append(misc_input[:, health_inputs:])

            layers_for_merge.append(network)
            network = ls.ConcatLayer(layers_for_merge)

        network = ls.DenseLayer(network, 512, nonlinearity=rectify,
                                W=weights_init, b=lasagne.init.Constant(0.1))

        network = ls.DenseLayer(network, output_size, nonlinearity=None, b=lasagne.init.Constant(.1))
        return network, input_layers, inputs
approximators.py (project: dqn_vizdoom_theano, author: mihahauke)
def _initialize_network(self, img_input_shape, misc_len, output_size, img_input, misc_input=None, **kwargs):
        input_layers = []
        inputs = [img_input]
        # weights_init = lasagne.init.GlorotUniform("relu")
        weights_init = lasagne.init.HeNormal("relu")

        network = ls.InputLayer(shape=img_input_shape, input_var=img_input)
        input_layers.append(network)
        network = ls.Conv2DLayer(network, num_filters=32, filter_size=8, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=4)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=4, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=2)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=3, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=1)
        network = ls.FlattenLayer(network)

        if self.misc_state_included:
            layers_for_merge = []

            health_inputs = 4
            units_per_health_input = 100
            for i in range(health_inputs):
                oh_input = lasagne.utils.one_hot(misc_input[:, i] - 1, units_per_health_input)
                health_input_layer = ls.InputLayer(shape=(None, units_per_health_input), input_var=oh_input)
                inputs.append(oh_input)
                input_layers.append(health_input_layer)
                layers_for_merge.append(health_input_layer)

            time_inputs = 4
            # TODO set this somewhere else cause it depends on skiprate and timeout ....
            units_pertime_input = 525
            for i in range(health_inputs,health_inputs+time_inputs):
                oh_input = lasagne.utils.one_hot(misc_input[:, i] - 1, units_pertime_input)
                time_input_layer = ls.InputLayer(shape=(None, units_pertime_input), input_var=oh_input)
                inputs.append(oh_input)
                input_layers.append(time_input_layer)
                layers_for_merge.append(time_input_layer)

            other_misc_input = misc_input[:, health_inputs+time_inputs:]
            other_misc_shape = (None, misc_len - health_inputs-time_inputs)
            other_misc_input_layer = ls.InputLayer(shape=other_misc_shape,
                                             input_var=other_misc_input)
            input_layers.append(other_misc_input_layer)
            layers_for_merge.append(other_misc_input_layer)
            inputs.append(other_misc_input)

            layers_for_merge.append(network)
            network = ls.ConcatLayer(layers_for_merge)

        network = ls.DenseLayer(network, 512, nonlinearity=rectify,
                                W=weights_init, b=lasagne.init.Constant(0.1))

        network = ls.DenseLayer(network, output_size, nonlinearity=None, b=lasagne.init.Constant(.1))
        return network, input_layers, inputs
doodle.py (project: neural-doodle, author: GeekLiB)
def setup_model(self, input=None):
        """Use lasagne to create a network of convolution layers, first using VGG19 as the framework
        and then adding augmentations for Semantic Style Transfer.
        """
        net, self.channels = {}, {}

        # Primary network for the main image. These are convolution only, and stop at layer 4_2 (rest unused).
        net['img']     = input or InputLayer((None, 3, None, None))
        net['conv1_1'] = ConvLayer(net['img'],     64, 3, pad=1)
        net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
        net['pool1']   = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')
        net['conv2_1'] = ConvLayer(net['pool1'],   128, 3, pad=1)
        net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
        net['pool2']   = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')
        net['conv3_1'] = ConvLayer(net['pool2'],   256, 3, pad=1)
        net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
        net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
        net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1)
        net['pool3']   = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')
        net['conv4_1'] = ConvLayer(net['pool3'],   512, 3, pad=1)
        net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
        net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
        net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1)
        net['pool4']   = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')
        net['conv5_1'] = ConvLayer(net['pool4'],   512, 3, pad=1)
        net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
        net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
        net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1)
        net['main']    = net['conv5_4']

        # Auxiliary network for the semantic layers, and the nearest neighbors calculations.
        net['map'] = InputLayer((1, 1, None, None))
        for j, i in itertools.product(range(5), range(4)):
            if j < 2 and i > 1: continue
            suffix = '%i_%i' % (j+1, i+1)

            if i == 0:
                net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad')
            self.channels[suffix] = net['conv'+suffix].num_filters

            if args.semantic_weight > 0.0:
                net['sem'+suffix] = ConcatLayer([net['conv'+suffix], net['map%i'%(j+1)]])
            else:
                net['sem'+suffix] = net['conv'+suffix]

            net['dup'+suffix] = InputLayer(net['sem'+suffix].output_shape)
            net['nn'+suffix] = ConvLayer(net['dup'+suffix], 1, 3, b=None, pad=0, flip_filters=False)

        self.network = net
FlowNetCommon.py (project: theano-flownet, author: Ignotus)
def refine_flow(net, weights):
    net['flow6'] = flow(net['conv6_1'])
    net['flow6_up'] = upsample(net['flow6'])
    net['deconv5'] = leaky_deconv(net['conv6_1'], num_filters=512)

    net['concat5'] = ConcatLayer([net['conv5_1'], net['deconv5'], net['flow6_up']])
    net['flow5'] = flow(net['concat5'])
    net['flow5_up'] = upsample(net['flow5'])
    net['deconv4'] = leaky_deconv(net['concat5'], num_filters=256)

    net['concat4'] = ConcatLayer([net['conv4_1'], net['deconv4'], net['flow5_up']])
    net['flow4'] = flow(net['concat4'])
    net['flow4_up'] = upsample(net['flow4'])
    net['deconv3'] = leaky_deconv(net['concat4'], num_filters=128)

    net['concat3'] = ConcatLayer([net['conv3_1'], net['deconv3'], net['flow4_up']])
    net['flow3'] = flow(net['concat3'])
    net['flow3_up'] = upsample(net['flow3'])
    net['deconv2'] = leaky_deconv(net['concat3'], num_filters=64)

    net['concat2'] = ConcatLayer([net['conv2'], net['deconv2'], net['flow3_up']])
    net['flow2'] = flow(net['concat2'])

    # TODO: What does this magic number mean? We only reduced the image size by a
    # factor of 4, didn't we?
    # https://github.com/liruoteng/FlowNet/blob/master/models/flownet/model_simple/deploy.tpl.prototxt#L869
    net['eltwise4'] = ExpressionLayer(net['flow2'], lambda x: x * 20)

    # Should be upsampled before 'flow1' to 384x512
    net['resample4'] = BilinearUpscaleLayer(net['eltwise4'], 4)

    net['flow1'] = flow(net['resample4'], filter_size=1, pad=0)

    for layer_name in ['deconv5', 'deconv4', 'deconv3', 'deconv2']:
        net[layer_name].W.set_value(weights[layer_name][0])

    upsample_map = {
        'flow6_up': 'upsample_flow6to5',
        'flow5_up': 'upsample_flow5to4',
        'flow4_up': 'upsample_flow4to3',
        'flow3_up': 'upsample_flow3to2'
    }

    for layer_name in ['flow6_up', 'flow5_up', 'flow4_up', 'flow3_up']:
        net[layer_name].W.set_value(weights[upsample_map[layer_name]][0])

    flow_map = {
        'flow6': 'Convolution1',
        'flow5': 'Convolution2',
        'flow4': 'Convolution3',
        'flow3': 'Convolution4',
        'flow2': 'Convolution5',
        'flow1': 'Convolution6'
    }

    for layer_name in ['flow6', 'flow5', 'flow4', 'flow3', 'flow2', 'flow1']:
        net[layer_name].W.set_value(weights[flow_map[layer_name]][0])
        net[layer_name].b.set_value(weights[flow_map[layer_name]][1])
FlowNetC.py (project: theano-flownet, author: Ignotus)
def build_model(weights):
    net = dict()

    # T.nnet.abstract_conv.bilinear_upsampling does not work properly unless a
    # batch size is specified
    batch_size = 1

    net['input_1'] = InputLayer([batch_size, 3, 384, 512])
    net['input_2'] = InputLayer([batch_size, 3, 384, 512])

    net['conv1'] = leaky_conv(
        net['input_1'], num_filters=64, filter_size=7, stride=2)
    net['conv1b'] = leaky_conv(
        net['input_2'], num_filters=64, filter_size=7, stride=2,
        W=net['conv1'].W, b=net['conv1'].b)

    net['conv2'] = leaky_conv(
        net['conv1'], num_filters=128, filter_size=5, stride=2)
    net['conv2b'] = leaky_conv(
        net['conv1b'], num_filters=128, filter_size=5, stride=2,
        W=net['conv2'].W, b=net['conv2'].b)

    net['conv3'] = leaky_conv(
        net['conv2'], num_filters=256, filter_size=5, stride=2)
    net['conv3b'] = leaky_conv(
        net['conv2b'], num_filters=256, filter_size=5, stride=2,
        W=net['conv3'].W, b=net['conv3'].b)

    net['corr'] = CorrelationLayer(net['conv3'], net['conv3b'])
    net['corr'] = ExpressionLayer(net['corr'], leaky_rectify)

    net['conv_redir'] = leaky_conv(
        net['conv3'], num_filters=32, filter_size=1, stride=1, pad=0)

    net['concat'] = ConcatLayer([net['conv_redir'], net['corr']])

    net['conv3_1'] = leaky_conv(net['concat'], num_filters=256, filter_size=3, stride=1)

    net['conv4'] = leaky_conv(net['conv3_1'], num_filters=512, filter_size=3, stride=2)
    net['conv4_1'] = leaky_conv(net['conv4'], num_filters=512, filter_size=3, stride=1)

    net['conv5'] = leaky_conv(net['conv4_1'], num_filters=512, filter_size=3, stride=2)
    net['conv5_1'] = leaky_conv(net['conv5'], num_filters=512, filter_size=3, stride=1)

    net['conv6'] = leaky_conv(net['conv5_1'], num_filters=1024, filter_size=3, stride=2)
    net['conv6_1'] = leaky_conv(net['conv6'], num_filters=1024, filter_size=3, stride=1)

    for layer_id in ['1', '2', '3', '_redir', '3_1', '4', '4_1', '5', '5_1', '6', '6_1']:
        layer_name = 'conv' + layer_id
        print(layer_name, net[layer_name].W.shape.eval(), weights[layer_name][0].shape)
        print(layer_name, net[layer_name].b.shape.eval(), weights[layer_name][1].shape)
        net[layer_name].W.set_value(weights[layer_name][0])
        net[layer_name].b.set_value(weights[layer_name][1])

    refine_flow(net, weights)

    return net

