Python conv2d(): example source code from open-source projects
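The snippets below are collected from open-source Theano projects and revolve around theano.tensor.nnet.conv2d, either called directly or through thin wrappers. As a reference point, here is a minimal, self-contained sketch of a direct call; the shapes and variable names are illustrative only and are not taken from any of the projects listed below.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d

# Input is a 4D tensor (batch, channels, height, width); filters are
# (n_filters, channels, filter_height, filter_width).
X = T.tensor4('X')
W = theano.shared(np.random.randn(8, 1, 3, 3).astype(theano.config.floatX), name='W')

# border_mode='half' pads so that odd-sized filters preserve the spatial size;
# subsample is the convolution stride.
Y = conv2d(input=X, filters=W, border_mode='half', subsample=(1, 1))
f = theano.function([X], Y)

out = f(np.random.randn(2, 1, 28, 28).astype(theano.config.floatX))
print(out.shape)  # (2, 8, 28, 28)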

helper_dataset.py (project: reseg, author: fvisin)
def local_mean_subtraction(input, kernel_size=5):

    input_shape = (input.shape[0], 1, input.shape[1], input.shape[2])
    input = input.reshape(input_shape).astype(floatX)

    X = T.tensor4(dtype=floatX)
    filter_shape = (1, 1, kernel_size, kernel_size)
    filters = mean_filter(kernel_size).reshape(filter_shape)
    filters = shared(_asarray(filters, dtype=floatX), borrow=True)

    mean = conv2d(input=X,
                  filters=filters,
                  input_shape=input.shape,
                  filter_shape=filter_shape,
                  border_mode='half')
    new_X = X - mean
    f = function([X], new_X)
    return f(input)
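A possible call, assuming the module-level imports of the original helper_dataset.py (numpy, Theano's floatX, and the mean_filter helper); the image stack below is purely illustrative:

# Hypothetical input: a stack of 10 grayscale 32x32 images.
imgs = np.random.rand(10, 32, 32).astype('float32')
centered = local_mean_subtraction(imgs, kernel_size=5)
# centered has shape (10, 1, 32, 32): each pixel minus its local 5x5 mean.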
network.py (project: python-machine-learning, author: sho-87)
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        # Reshape the input to the 4D image shape expected by conv2d
        self.inpt = inpt.reshape(self.image_shape)

        # Do convolution
        self.conv_out = conv2d(
            input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
            input_shape=self.image_shape, border_mode=self.border_mode,
            subsample=self.stride)

        # Get the feature maps for this layer
        self.feature_maps = theano.function([self.inpt], self.conv_out)

        # Apply bias and activation and set as output
        self.output = self.activation_fn(
            self.conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output_dropout = self.output # no dropout in convolutional layers
network.py (project: python-machine-learning, author: sho-87)
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        # Reshape the input to the 4D image shape expected by conv2d
        self.inpt = inpt.reshape(self.image_shape)

        # Do convolution
        self.conv_out = conv2d(
            input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
            input_shape=self.image_shape, border_mode=self.border_mode,
            subsample=self.stride)

        # Get the feature maps for this layer
        self.feature_maps = theano.function([self.inpt], self.conv_out)

        # Max pooling
        pooled_out = pool.pool_2d(input=self.conv_out, ds=self.poolsize,
                                  ignore_border=True, mode='max')

        # Apply bias and activation and set as output
        self.output = self.activation_fn(
            pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output_dropout = self.output # no dropout in convolutional layers
cnn_theano_plot_filters.py (project: lazyprogrammer, author: inhwane)
def convpool(X, W, b, poolsize=(2, 2)):
    conv_out = conv2d(input=X, filters=W)

    # downsample each feature map individually, using maxpooling
    pooled_out = downsample.max_pool_2d(
        input=conv_out,
        ds=poolsize,
        ignore_border=True
    )

    # add the bias term. Since the bias is a vector (1D array), we first
    # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
    # thus be broadcasted across mini-batches and feature map
    # width & height
    # return T.tanh(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))
    return relu(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))
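The b.dimshuffle('x', 0, 'x', 'x') pattern used throughout these snippets turns the length-n_filters bias vector into a (1, n_filters, 1, 1) tensor so it broadcasts across the batch and the spatial dimensions. A minimal sketch, assuming the usual numpy/theano imports; the shapes are illustrative:

b = theano.shared(np.zeros(8, dtype=theano.config.floatX), name='b')
b4 = b.dimshuffle('x', 0, 'x', 'x')   # symbolic shape (1, 8, 1, 1)
# b4 broadcasts against a (batch, 8, height, width) convolution output.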
cnn_theano.py (project: lazyprogrammer, author: inhwane)
def convpool(X, W, b, poolsize=(2, 2)):
    conv_out = conv2d(input=X, filters=W)

    # downsample each feature map individually, using maxpooling
    pooled_out = downsample.max_pool_2d(
        input=conv_out,
        ds=poolsize,
        ignore_border=True
    )

    # add the bias term. Since the bias is a vector (1D array), we first
    # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
    # thus be broadcasted across mini-batches and feature map
    # width & height
    # return T.tanh(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))
    return relu(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))
layers.py (project: IQA_BIECON_release, author: jongyookim)
def get_output(self, input, **kwargs):
        var_shape = kwargs.get('var_shape', False)
        if var_shape:
            input_shape = None
        else:
            input_shape = self.input_shape
        lin_output = conv2d(
            input=input,
            filters=self.W,
            filter_shape=self.filter_shape,
            border_mode=self.mode,
            subsample=self.subsample,
            input_shape=input_shape
        )

        if self.batch_norm:
            lin_output = self.bn_layer.get_output(lin_output)
        elif not self.no_bias:
            lin_output += self.b.dimshuffle('x', 0, 'x', 'x')

        return self.activation(lin_output)
Deploy_Depth.py (project: theano-mc-cnn, author: epiception)
def model(X, w1, w2, w3, w4):

    l1 = relu((conv2d(X,w1, border_mode='full')))
    l2 = relu((conv2d(l1,w2, border_mode='valid')))
    l3 = relu((conv2d(l2,w3,border_mode='full')))
    l4 = conv2d(l3,w4,border_mode='valid')

    output = l2_norm_layer(l4)

    return output
MC_CNN_Fast.py (project: theano-mc-cnn, author: epiception)
def model(X, w1, w2, w3, w4):

    l1 = relu((conv2d(X,w1, border_mode='full')))
    l2 = relu((conv2d(l1,w2, border_mode='valid')))
    l3 = relu((conv2d(l2,w3,border_mode='full')))
    l4 = conv2d(l3,w4,border_mode='valid')

    output = l2_norm_layer(l4)

    return output
functions.py (project: vaegan, author: anitan0925)
def conv( x, w, b=None ):
    s = int(np.floor(w.get_value().shape[-1]/2.))
    z = conv2d(x, w, border_mode='full')[:, :, s:-s, s:-s]
    if b is not None:
        z += b.dimshuffle('x', 0, 'x', 'x')

    return z
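This helper emulates 'same' padding: a 'full' convolution with a k-wide kernel grows each spatial dimension by k-1, and slicing s:-s with s = floor(k/2) trims it back to the input size when k is odd. A hedged usage sketch, assuming the usual numpy/Theano imports and an odd, square kernel:

x = T.tensor4('x')
w = theano.shared(np.random.randn(16, 3, 5, 5).astype(theano.config.floatX))
b = theano.shared(np.zeros(16, dtype=theano.config.floatX))
y = conv(x, w, b)   # same spatial size as x: 5x5 kernel, s = 2, crop [2:-2]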
MNIST_COVNET.py (project: LeNet5, author: LukaszObara)
def __init__(self, input, filter_shape, image_shape, padding=(0, 0), 
                 stride=(1, 1), activation_fn=None, seed=3235):

        assert image_shape[1] == filter_shape[1]

        # rng = np.random.RandomState(seed)

        self.input = input
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.activation_fn = activation_fn

        fan_in = np.prod(filter_shape[1:])
        fan_out = filter_shape[0]*np.prod(filter_shape[2:]) // 2
        W_bound = np.sqrt(6/(fan_in+fan_out))
        w = np.random.uniform(low=-W_bound, high=W_bound, size=filter_shape)
        b_vals = np.random.uniform(size=filter_shape[0])

        # Initialize weights with random values
        self.W = theano.shared(name='weights',
                               value=w.astype(theano.config.floatX),
                               borrow=True)
        self.b = theano.shared(name='bias',
                               value=b_vals.astype(theano.config.floatX), 
                               borrow=True)

        conv_out = conv2d(input=input, filters=self.W, border_mode=padding,
                          subsample=stride, filter_shape=filter_shape, 
                          input_shape=image_shape)

        l_output = conv_out + self.b.dimshuffle(('x', 0, 'x', 'x'))
        self.output = (l_output if activation_fn is None 
                       else activation_fn(l_output))

        # Parameters of the model
        self.params = [self.W, self.b]
ConvLayers.py (project: LeNet5, author: LukaszObara)
def __init__(self, input, filter_shape, image_shape, padding=(0, 0), 
                 stride=(1, 1), activation_fn=None, seed=3235):

        assert image_shape[1] == filter_shape[1]

        # rng = np.random.RandomState(seed)

        self.input = input
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.activation_fn = activation_fn

        fan_in = np.prod(filter_shape[1:])
        fan_out = filter_shape[0]*np.prod(filter_shape[2:]) // 2
        W_bound = np.sqrt(6/(fan_in+fan_out))
        w = np.random.uniform(low=-W_bound, high=W_bound, size=filter_shape)
        b_vals = np.random.uniform(size=filter_shape[0])

        # Initialize weights with random values
        self.W = theano.shared(name='weights',
                               value=w.astype(theano.config.floatX),
                               borrow=True)
        self.b = theano.shared(name='bias',
                               value=b_vals.astype(theano.config.floatX), 
                               borrow=True)

        conv_out = conv2d(input=input, filters=self.W, border_mode=padding,
                          subsample=stride, filter_shape=filter_shape, 
                          input_shape=image_shape)

        l_output = conv_out + self.b.dimshuffle(('x', 0, 'x', 'x'))
        self.output = (l_output if activation_fn is None 
                       else activation_fn(l_output))

        # Parameters of the model
        self.params = [self.W, self.b]
qa_cnn.py (project: DBQA, author: nanfeng1101)
def __call__(self, q_input, a_input, *args, **kwargs):
        # convolve input feature maps with filters
        q_conv_out = conv2d(
            input=q_input,
            filters=self.W,
            filter_shape=self.filter_shape
        )
        a_conv_out = conv2d(
            input=a_input,
            filters=self.W,
            filter_shape=self.filter_shape
        )
        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        if self.non_linear == "tanh":
            q_conv_out_tanh = Tanh(q_conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            a_conv_out_tanh = Tanh(a_conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            q_output = pool.pool_2d(input=q_conv_out_tanh, ws=self.pool_size, ignore_border=True) # max
            a_output = pool.pool_2d(input=a_conv_out_tanh, ws=self.pool_size, ignore_border=True)
        elif self.non_linear == "relu":
            q_conv_out_relu = ReLU(q_conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            a_conv_out_relu = ReLU(a_conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            q_output = pool.pool_2d(input=q_conv_out_relu, ws=self.pool_size, ignore_border=True)
            a_output = pool.pool_2d(input=a_conv_out_relu, ws=self.pool_size, ignore_border=True)
        else:
            q_output = pool.pool_2d(input=q_conv_out, ws=self.pool_size, ignore_border=True)
            a_output = pool.pool_2d(input=a_conv_out, ws=self.pool_size, ignore_border=True)

        return q_output, a_output
insqa_lstm.py (project: insuranceQA-cnn-lstm, author: cszhz)
def _cnn_net(self, tparams, cnn_input, batch_size, sequence_len, num_filters, filter_sizes, proj_size):
    outputs = []
    for filter_size in filter_sizes:
        filter_shape = (num_filters, 1, filter_size, proj_size)
        image_shape = (batch_size, 1, sequence_len, proj_size)
        W = tparams['cnn_W_' + str(filter_size)]
        b = tparams['cnn_b_' + str(filter_size)]
        conv_out = conv2d(input=cnn_input, filters=W, filter_shape=filter_shape, input_shape=image_shape)
        pooled_out = pool.pool_2d(input=conv_out, ds=(sequence_len - filter_size + 1, 1), ignore_border=True, mode='max')
        pooled_active = T.tanh(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))
        outputs.append(pooled_active)
    num_filters_total = num_filters * len(filter_sizes)
    output_tensor = T.reshape(T.concatenate(outputs, axis=1), [batch_size, num_filters_total])
    return output_tensor
util.py (project: sca-cnn, author: zjuchenlong)
def res5b_2_res5c_branch2a(res5b_res5b_relu_0_split_0, conv_params, bn_params):
    assert res5b_res5b_relu_0_split_0.ndim == 4
    res5c_branch2a = conv2d(input=res5b_res5b_relu_0_split_0, filters=conv_params['res5c_branch2a_0'], border_mode='valid', filter_flip=False)
    bn5c_branch2a = (res5c_branch2a - bn_params['bn5c_branch2a_0'].dimshuffle('x', 0, 'x', 'x')) / tensor.sqrt(bn_params['bn5c_branch2a_1'].dimshuffle('x', 0, 'x', 'x') + numpy.float32(1e-5))
    scale5c_branch2a = bn5c_branch2a * bn_params['scale5c_branch2a_0'].dimshuffle('x', 0, 'x', 'x') + bn_params['scale5c_branch2a_1'].dimshuffle('x', 0, 'x', 'x')
    res5c_branch2a_relu = tensor.nnet.relu(scale5c_branch2a, alpha=0.0)

    return res5c_branch2a_relu
util.py (project: sca-cnn, author: zjuchenlong)
def res5c_branch2a_2_res5c_branch2b(res5c_branch2a_relu, conv_params, bn_params):
    res5c_branch2b = conv2d(input=res5c_branch2a_relu, filters=conv_params['res5c_branch2b_0'], border_mode='half', filter_flip=False)
    bn5c_branch2b = (res5c_branch2b - bn_params['bn5c_branch2b_0'].dimshuffle('x', 0, 'x', 'x')) / tensor.sqrt(bn_params['bn5c_branch2b_1'].dimshuffle('x', 0, 'x', 'x') + numpy.float32(1e-5))
    scale5c_branch2b = bn5c_branch2b * bn_params['scale5c_branch2b_0'].dimshuffle('x', 0, 'x', 'x') + bn_params['scale5c_branch2b_1'].dimshuffle('x', 0, 'x', 'x')
    res5c_branch2b_relu = tensor.nnet.relu(scale5c_branch2b, alpha=0.0)

    return res5c_branch2b_relu
util.py (project: sca-cnn, author: zjuchenlong)
def res5c_branch2b_2_res5c_branch2c(res5c_branch2b_relu, conv_params, bn_params):
    res5c_branch2c = conv2d(input=res5c_branch2b_relu, filters=conv_params['res5c_branch2c_0'], border_mode='valid', filter_flip=False)
    bn5c_branch2c = (res5c_branch2c - bn_params['bn5c_branch2c_0'].dimshuffle('x', 0, 'x', 'x')) / tensor.sqrt(bn_params['bn5c_branch2c_1'].dimshuffle('x', 0, 'x', 'x') + numpy.float32(1e-5))
    scale5c_branch2c = bn5c_branch2c * bn_params['scale5c_branch2c_0'].dimshuffle('x', 0, 'x', 'x') + bn_params['scale5c_branch2c_1'].dimshuffle('x', 0, 'x', 'x')

    return scale5c_branch2c
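The sca-cnn helpers above apply batch normalization in its inference form: subtract the stored mean, divide by the square root of the stored variance plus 1e-5, then apply the learned scale and shift. A minimal numpy sketch of the same per-channel arithmetic (assuming numpy imported as np; the names are illustrative):

def bn_inference(x, mean, var, gamma, beta, eps=1e-5):
    # x: (batch, channels, H, W); mean, var, gamma, beta: (channels,)
    x_hat = (x - mean[None, :, None, None]) / np.sqrt(var[None, :, None, None] + eps)
    return gamma[None, :, None, None] * x_hat + beta[None, :, None, None]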
kdllib.py (project: crikey, author: kastnerkyle)
def conv2d(input, filters, biases=None, border_mode=0, stride=(1, 1)):
    """
    Light wrapper around conv2d - optionally handle biases
    """
    r = nnet.conv2d(
            input=input,
            filters=filters,
            border_mode=border_mode,
            subsample=stride,
            filter_flip=True)
    if biases is None:
        return r
    else:
        return r + biases.dimshuffle('x', 0, 'x', 'x')
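A hedged usage sketch of this wrapper, assuming the module's numpy/Theano imports: border_mode=0 is the integer form of 'valid' padding accepted by Theano's conv2d, and passing a bias adds it with the usual broadcast.

x = T.tensor4('x')
w = theano.shared(np.random.randn(32, 3, 3, 3).astype(theano.config.floatX))
b = theano.shared(np.zeros(32, dtype=theano.config.floatX))
y = conv2d(x, w, biases=b, border_mode=0, stride=(1, 1))  # the wrapper above, not nnet.conv2d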
mixer.py (project: dl4mt-c2c, author: nyu-dl)
def conv_encoder(tparams, state_below, options, prefix='conv_enc',
          one_step=False, init_state=None, width=None, nkernels=None, pool_window=None, pool_stride=None, **kwargs):
    # state_below : maxlen X n_samples X dim_word_src
    # mask : maxlen X n_samples
    # data = (n_samples, dim, maxlen, 1)
    # kernel = (nkernels, dim, width, 1)

    maxlen = state_below.shape[0]
    n_samples = state_below.shape[1]
    dim = state_below.shape[2]

    data = state_below.dimshuffle(1,2,0,'x')
    # data : n_samples X dim X maxlen X 1

    W = tparams[_p(prefix, 'convW')]
    b = tparams[_p(prefix, 'convB')]

    #conv_out = dnn_conv(data, W, border_mode='valid', subsample=(stride,1), precision='float32')
    output = dnn_conv(data, W, border_mode='half', precision='float32')
    #conv_out = conv2d(data, W, border_mode='valid')
    #conv_out = conv2d(data, W, input_shape=(8, 256, 450, 1), filter_shape=(64, 1, 4, 1), border_mode='valid')

    if width % 2 == 0:
        output = output[:,:,:-1,:]

    output = tensor.nnet.relu(output + b.dimshuffle('x',0,'x','x'))

    output = dnn_pool(output, (pool_window, 1), stride=(pool_stride, 1), mode='max', pad=(0, 0))

    #output = tensor.nnet.sigmoid(conv_out)
    # output : n_samples X nkernels X (maxlen-width+1) X 1

    #output = output.dimshuffle(2,0,1,3).squeeze()
    output = output.dimshuffle(2,0,1,3)[:,:,:,0]
    # NOTE : when we pass 1 or 2 instead of 0, get IndexError: index out of bounds
    # not sure why squeeze wouldn't work though

    # output : (maxlen-width+1) X n_samples X nkernels 

    return output
    # emb : maxlen X n_samples X dim_word_src
mixer.py (project: dl4mt-c2c, author: nyu-dl)
def multi_scale_conv_encoder(tparams, state_below, options, prefix='conv_enc',
              one_step=False, init_state=None, width=None, nkernels=None, pool_window=None, pool_stride=None, **kwargs):
    # state_below.shape = (maxlen_x_pad + 2*pool_stride, n_samples, dim_word_src)
    # mask.shape = (maxlen_x_pad/pool_stride, n_samples)
    assert len(width) == len(nkernels)

    data = state_below.dimshuffle(1,2,0,'x')
    # data.shape = (n_samples, dim_word_src, maxlen_x_pad + 2*pool_stride, 1)

    W = [tparams[_p(prefix, 'convW')+str(idx)] for idx in range(len(width))]
    b = [tparams[_p(prefix, 'convB')+str(idx)] for idx in range(len(width))]

    output = []

    for idx in range(len(width)):
        curr_width = width[idx]

        output.append(dnn_conv(data, W[idx], border_mode='half', precision='float32'))
        # output[idx].shape = (n_samples, nkernels[idx], (maxlen_x_pad + 2*pool_stride), 1)

        if curr_width % 2 == 0:
            output[idx] = (output[idx])[:,:,:-1,:] # for filters with an even numbered width, half convolution yields an output whose length is 1 longer than the input, hence discarding the last one here. For more detail, consult http://deeplearning.net/software/theano/library/tensor/nnet/conv.html#theano.tensor.nnet.conv2d

        output[idx] = tensor.nnet.relu(output[idx] + b[idx].dimshuffle('x',0,'x','x'))

    result = tensor.concatenate(output, axis=1)
    # result.shape = (n_samples, sum(nkernels), (maxlen_x_pad + 2*pool_stride), 1)

    result = dnn_pool(result, (pool_window, 1), stride=(pool_stride, 1), mode='max', pad=(0, 0))
    # result.shape = (n_samples, sum(nkernels), (maxlen_x_pad/pool_stride + 2), 1)

    result = result.dimshuffle(2,0,1,3)[1:-1,:,:,0]
    # We get rid of the first and the last result and shuffle.
    # result.shape = (maxlen_x_pad/pool_stride, n_samples, sum(nkernels))

    return result
layers.py (project: structured-output-ae, author: sbelharbi)
def __init__(self, rng, input, filter_shape, image_shape, activation,
                 padding, W=None, b=None, b_v=0., stride=(1, 1)):
        """Implement a convolution layer. No pooling."""
        assert image_shape[1] == filter_shape[1]
        self.input = input
        self.x = input
        print filter_shape, "***********"
        fan_in = numpy.prod(filter_shape[1:])
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]))
        # initialize weights with random weights
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        if rng is None:
            rng = numpy.random.RandomState(23455)
        if W is None:
            W = theano.shared(
                numpy.asarray(
                    rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                    dtype=theano.config.floatX
                ),
                name="w_conv",
                borrow=True
            )
        if b is None:
            b_v = (
                numpy.ones(
                    (filter_shape[0],)) * b_v).astype(theano.config.floatX)
            b = theano.shared(value=b_v, name="b_conv", borrow=True)

        self.W = W
        self.b = b
        conv_out = conv2d(
            input=self.x,
            filters=self.W,
            input_shape=image_shape,
            filter_shape=filter_shape,
            border_mode=padding,
            subsample=stride
        )
        linear = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
        if activation is not None:
            self.output = activation(linear)
        else:
            self.output = linear
        self.params = [self.W, self.b]

