Python pool_2d() usage examples from open-source projects
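Below is a collection of real-world usages of pool_2d() gathered from open-source projects. For orientation: pool_2d lives in theano.tensor.signal.pool and downsamples the last two dimensions of a 4D tensor. Newer Theano releases name its keyword arguments ws/stride/pad, while older releases used ds/st/padding; both spellings appear in the snippets below. A minimal, hedged sketch (assuming a Theano version that accepts the newer names):

import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import pool

x = T.tensor4('x')                       # (batch, channels, height, width)
pooled = pool.pool_2d(x, ws=(2, 2), ignore_border=True,
                      stride=(2, 2), pad=(0, 0), mode='max')
f = theano.function([x], pooled)

sample = numpy.random.rand(1, 3, 8, 8).astype(theano.config.floatX)
print(f(sample).shape)                   # -> (1, 3, 4, 4)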

network.py (project: python-machine-learning, author: sho-87)
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        # Reshape the input to 2D
        self.inpt = inpt.reshape(self.image_shape)

        # Do convolution
        self.conv_out = conv2d(
            input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
            input_shape=self.image_shape, border_mode=self.border_mode,
            subsample=self.stride)

        # Get the feature maps for this layer
        self.feature_maps = theano.function([self.inpt], self.conv_out)

        # Max pooling
        pooled_out = pool.pool_2d(input=self.conv_out, ds=self.poolsize,
                                  ignore_border=True, mode='max')

        # Apply bias and activation and set as output
        self.output = self.activation_fn(
            pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output_dropout = self.output # no dropout in convolutional layers
Theano.py (project: NNBuilder, author: aeloyq)
def pool(self, input, window, mode, stride, pad, autopad):
            if mode == 'max':
                mode = 'max'
            elif mode == 'sum':
                mode = 'sum'
            elif mode == 'avg':
                mode = 'average_exc_pad'
            elif mode == 'avgpad':
                mode = 'average_inc_pad'
            else:
                mode = 'sum'
            if input.ndim == 4:
                return P.pool_2d(input=input, ws=window, ignore_border=not autopad, stride=stride, pad=pad, mode=mode)
            elif input.ndim == 5:
                return P.pool_3d(input=input, ws=window, ignore_border=not autopad, stride=stride, pad=pad, mode=mode)
            else:
                basic.defaultreturn()
pooling.py (project: pyextremelm, author: tobifinn)
def _generate_conv(self):
        input = T.tensor4(name='input')
        if self.pooling == 'squareroot':
            conv_out = Pool.pool_2d(
                T.power(input,2),
                ds=(self.spatial[0], self.spatial[1]),
                ignore_border=self.ignore_border,
                mode='sum',
                padding=self.pad,
                st=None if self.stride is None else (self.stride, self.stride))
            conv_out = T.sqrt(conv_out)
        else:
            conv_out = Pool.pool_2d(
                input,
                ds=(self.spatial[0], self.spatial[1]),
                ignore_border=self.ignore_border,
                mode=self.pooling,
                padding=self.pad,
                st=None if self.stride is None else (self.stride, self.stride))
        if self.activation_fct is None:
            output = conv_out
        else:
            output = self.activation_fct(conv_out)
        self.conv = theano.function([input], output)
pool.py (project: lemontree, author: khshim)
def get_output(self, input_):
        """
        This function overrides the parents' one.
        Creates symbolic function to compute output from an input.

        Parameters
        ----------
        input_: TensorVariable

        Returns
        -------
        TensorVariable
        """
        return pool_2d(input_,
                       ws=self.kernel_shape,
                       ignore_border=True,  # if you don't want to ignore border, use padding
                       stride=self.stride,
                       pad=self.padding,
                       mode=self.pool_mode)
pool.py (project: lemontree, author: khshim)
def get_output(self, input_):
        """
        This function overrides the parents' one.
        Creates symbolic function to compute output from an input.

        Parameters
        ----------
        input_: TensorVariable

        Returns
        -------
        TensorVariable
        """
        result = pool_2d(input_,
                         ws=self.input_shape[1:],
                         ignore_border=True,
                         stride=self.input_shape[1:],
                         pad=self.padding,
                         mode='average_exc_pad')  # result is still a 4D tensor, (batch size, output channel, 1, 1)
        return T.reshape(result, (input_.shape[0], input_.shape[1]))  # flatten to 2D matrix
cnn_model.py (project: dsde-deep-learning, author: broadinstitute)
def cnn_model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hidden):
    l1a = rectify(T.nnet.conv2d(X, w, border_mode='full'))
    l1 = pool.pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    l2a = rectify(T.nnet.conv2d(l1, w2))
    l2 = pool.pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)

    l3a = rectify(T.nnet.conv2d(l2, w3))
    l3b = pool.pool_2d(l3a, (2, 2))
    l3 = T.flatten(l3b, outdim=2)
    l3 = dropout(l3, p_drop_conv)

    l4 = rectify(T.dot(l3, w4))
    l4 = dropout(l4, p_drop_hidden)

    pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, l4, pyx
mlp_utils.py (project: amdtk, author: amdtkdev)
def __init__(self, inputs, maxpool_height, maxpool_width):
        if inputs is None:
            self.inputs = T.tensor4(dtype=theano.config.floatX)
        else:
            self.inputs = inputs.flatten(4)
        self.maxpool_height = maxpool_height
        self.maxpool_width = maxpool_width
        self.outputs = pool.pool_2d(
            self.inputs,
            (maxpool_height, maxpool_width),
            ignore_border=True
        )

        # Pooling layer has no learnable parameters.
        self.params = []


# Possible layer types.
layers.py (project: structured-output-ae, author: sbelharbi)
def __init__(self, input, poolsize, ignore_border, mode="max"):
        """Implement a pooling layer."""
        self.input = input
        self.x = input
        self.output = pool.pool_2d(
            input=self.x,
            ws=poolsize,
            ignore_border=ignore_border,
            mode=mode)
        self.params = []
pooling.py (project: sesame-paste-noodle, author: aissehust)
def forward(self, inputtensor):
        inputactivation = inputtensor[0]
        return (pool_2d(inputactivation
                        , self.size
                        , ignore_border=True
                        , stride=self.stride
                        , pad=self.pad
                        , mode=self.mode),)
layers.py (project: 3D-R2N2, author: chrischoy)
def set_output(self):
        pooled_out = pool.pool_2d(
            input=self._prev_layer.output,
            ds=self._pool_size,
            ignore_border=True,
            padding=self._padding)
        self._output = pooled_out
layers.py (project: yadll, author: pchavanne)
def pool(self, input, ws):
        return pool.pool_2d(input=input, ws=ws, st=self.stride, ignore_border=self.ignore_border,
                            pad=self.pad, mode=self.mode)
functions.py (project: vaegan, author: anitan0925)
def max_pool( x, size, ignore_border=False ):
    return pool_2d( x, size, ignore_border=ignore_border )
model.py (project: deeplearning, author: wangzhics)
def __init__(self, rng, input, input_shape, filter_shape, pool_shape=(2, 2)):
        """
        Build a convolution + max-pooling layer: convolve the input with this layer's filters, pool the result, then apply a tanh activation.
        :param input: the input image tensor
        :param input_shape: shape of the input, (batch_size, image_channel, image_width, image_height)
        :param filter_shape: shape of the filters, (filter_count, filter_channel, filter_width, filter_height)
        :param pool_shape: shape of the pooling window
        :return:
        """
        #
        assert input_shape[1] == filter_shape[1]
        self.input = input
        self.input_shape = input_shape
        self.filter_shape = filter_shape
        self.pool_shape = pool_shape
        # compute fan-in / fan-out and the uniform bound used to initialize the weights
        n_in = numpy.prod(input_shape[1:])
        n_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) // numpy.prod(pool_shape))
        weight_max = numpy.sqrt(6. / (n_in + n_out))
        self.w = theano.shared(
            numpy.asarray(
                rng.uniform(low=-weight_max, high=weight_max, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )
        self.b = theano.shared(numpy.zeros((filter_shape[0],), dtype=theano.config.floatX), borrow=True)
        self.params = [self.w, self.b]
        # calculate the output
        self.conv_out = conv2d(
            input=self.input,
            filters=self.w,
            filter_shape=self.filter_shape,
            image_shape=self.input_shape
        )
        self.pool_out = pool_2d(
            input=self.conv_out,
            ds=pool_shape,
            ignore_border=True
        )
        self.output = T.tanh(self.pool_out + self.b.dimshuffle('x', 0, 'x', 'x'))
MNIST_COVNET.py (project: LeNet5, author: LukaszObara)
def __init__(self, input, pool_shape=(2, 2), ignore_border=True,
                 activation_fn=None):
        self.input = input
        self.pool_shape = pool_shape
        self.ignore_border = ignore_border

        l_output = pool.pool_2d(input=input, ds=pool_shape, 
                                ignore_border=ignore_border)

        self.output = (l_output if activation_fn is None 
                       else activation_fn(l_output))
ConvLayers.py (project: LeNet5, author: LukaszObara)
def __init__(self, input, pool_shape=(2, 2), ignore_border=True,
                 activation_fn=None):
        self.input = input
        self.pool_shape = pool_shape
        self.ignore_border = ignore_border

        l_output = pool.pool_2d(input=input, ds=pool_shape, 
                                ignore_border=ignore_border)

        self.output = (l_output if activation_fn is None 
                       else activation_fn(l_output))
basic.py (project: CNNbasedMedicalSegmentation, author: BRML)
def max_pool_3d(inpt, inpt_shape, ds, ignore_border=True):
    # Downsize 'into the depth' by downsizing twice.
    inpt_shape_4d = (
        inpt_shape[0] * inpt_shape[1],
        inpt_shape[2],
        inpt_shape[3],
        inpt_shape[4]
    )

    inpt_as_tensor4 = T.reshape(inpt, inpt_shape_4d, ndim=4)

    # The first pooling only downsizes the height and the width.
    pool_out1 = pool.pool_2d(inpt_as_tensor4, (ds[1], ds[2]),
                                       ignore_border=True)
    out_shape1 = T.join(0, inpt_shape[:-2], pool_out1.shape[-2:])

    inpt_pooled_once = T.reshape(pool_out1, out_shape1, ndim=5)

    # Shuffle dimensions so the depth is the last dimension.
    inpt_shuffled = inpt_pooled_once.dimshuffle(0, 4, 2, 3, 1)

    shuffled_shape = inpt_shuffled.shape
    # Reshape input to be 4 dimensional.
    shuffle_shape_4d = (
        shuffled_shape[0] * shuffled_shape[1],
        shuffled_shape[2],
        shuffled_shape[3],
        shuffled_shape[4]
    )

    inpt_shuffled_4d = T.reshape(inpt_shuffled, shuffle_shape_4d, ndim=4)

    pool_out2 = pool.pool_2d(inpt_shuffled_4d, (1, ds[0]),
                                       ignore_border=True)
    out_shape2 = T.join(0, shuffled_shape[:-2], pool_out2.shape[-2:])

    inpt_pooled_twice = T.reshape(pool_out2, out_shape2, ndim=5)
    pool_output_fin = inpt_pooled_twice.dimshuffle(0, 4, 2, 3, 1)

    return pool_output_fin
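A hedged usage sketch of the helper above. The 5D layout (batch, depth, channels, height, width) and the ds ordering (depth factor, height factor, width factor) are assumptions read off the reshapes, not something the project documents:

import numpy
import theano
import theano.tensor as T

x = T.TensorType(theano.config.floatX, (False,) * 5)('x')
pooled = max_pool_3d(x, x.shape, ds=(2, 2, 2))
f = theano.function([x], pooled)
out = f(numpy.random.rand(2, 6, 3, 16, 16).astype(theano.config.floatX))
print(out.shape)                         # -> (2, 3, 3, 8, 8)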
Convolution.py (project: DeepFashion, author: DevilC)
def __init__(self, Layer_num, layer_input,
                 mini_batch_size, input_feature_num, img_shp,
                 this_layer_feature_num, last_layer_feature_num, filter_shp,
                 pool_shp
                 ):
        self.num = Layer_num
        # conv2d(input, W): ip_shp = input.shape, w_shp = W.shape
        self.ip_shp = (mini_batch_size, input_feature_num, img_shp[0], img_shp[1])
        self.w_shp = (this_layer_feature_num, last_layer_feature_num, filter_shp[0], filter_shp[1])

        #input is a tensor4, input's shape = ip_shp
        self.input = layer_input

        #weight is a shared variable that stores the filters; its shape = w_shp
        self.W = theano.shared(init_weight(Layer_num, self.w_shp), 'Layer_'+str(Layer_num)+'_weight')

        #bias of this layer; its size depends on the number of this layer's feature maps
        b_shp = (this_layer_feature_num, )
        self.B = theano.shared(init_B(Layer_num, b_shp), 'Layer_'+str(Layer_num)+'_bias')

        self.conv_result = T.nnet.conv2d(self.input, self.W)

        self.pool_result = pool.pool_2d(self.conv_result, ds = pool_shp, ignore_border=False)#shape(batch_size, layer0_feature_num, 14, 14)

        #output of this layer
        self.output = T.nnet.relu(self.pool_result + self.B.dimshuffle('x', 0, 'x', 'x'))

        #parameters
        self.para = [self.W, self.B]
layers.py (project: DeepRepICCV2015, author: tomrunia)
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):

        assert image_shape[1] == filter_shape[1]
        self.input = input

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        # initialize weights with random weights
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(numpy.asarray(
            rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
            dtype=theano.config.floatX),
                               borrow=True)

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        # convolve input feature maps with filters
        conv_out = conv.conv2d(input=input, filters=self.W,
                filter_shape=filter_shape, image_shape=image_shape)

        # downsample each feature map individually, using maxpooling
        pooled_out = pool.pool_2d(input=conv_out, ds=poolsize, ignore_border=True)

        self.output = T.maximum(0.0, pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]
theano_backend.py (project: reading-text-in-the-wild, author: mathDR)
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    if border_mode == 'same':
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        padding = (w_pad, h_pad)
    elif border_mode == 'valid':
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))

    if pool_mode == 'max':
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True,
                                padding=padding,
                                mode='max')
    elif pool_mode == 'avg':
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True,
                                padding=padding,
                                mode='average_exc_pad')
    else:
        raise Exception('Invalid pooling mode: ' + str(pool_mode))

    if border_mode == 'same':
        expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
        expected_height = (x.shape[3] + strides[1] - 1) // strides[1]

        pool_out = pool_out[:, :,
                            : expected_width,
                            : expected_height]

    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
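For reference, a hedged usage sketch of the wrapper above. The tensor x is illustrative, and the wrapper itself relies on the older ds/st/padding keyword names, so it assumes a correspondingly old Theano release:

import theano.tensor as T

x = T.tensor4('x')                       # 'th' ordering: (batch, channels, rows, cols)
y = pool2d(x, pool_size=(2, 2), strides=(2, 2),
           border_mode='valid', dim_ordering='th', pool_mode='max')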
Layer.py (project: Theano-NN_Starter, author: nightinwhite)
def get_output(self):
        return pool.pool_2d(self.input, self.pool_size, ignore_border=True)
qa_cnn.py (project: DBQA, author: nanfeng1101)
def __call__(self, q_input, a_input, *args, **kwargs):
        # convolve input feature maps with filters
        q_conv_out = conv2d(
            input=q_input,
            filters=self.W,
            filter_shape=self.filter_shape
        )
        a_conv_out = conv2d(
            input=a_input,
            filters=self.W,
            filter_shape=self.filter_shape
        )
        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        if self.non_linear == "tanh":
            q_conv_out_tanh = Tanh(q_conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            a_conv_out_tanh = Tanh(a_conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            q_output = pool.pool_2d(input=q_conv_out_tanh, ws=self.pool_size, ignore_border=True) # max
            a_output = pool.pool_2d(input=a_conv_out_tanh, ws=self.pool_size, ignore_border=True)
        elif self.non_linear == "relu":
            q_conv_out_relu = ReLU(q_conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            a_conv_out_relu = ReLU(a_conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            q_output = pool.pool_2d(input=q_conv_out_relu, ws=self.pool_size, ignore_border=True)
            a_output = pool.pool_2d(input=a_conv_out_relu, ws=self.pool_size, ignore_border=True)
        else:
            q_output = pool.pool_2d(input=q_conv_out, ws=self.pool_size, ignore_border=True)
            a_output = pool.pool_2d(input=a_conv_out, ws=self.pool_size, ignore_border=True)

        return q_output, a_output
cnn_layers.py (project: sentence_classification, author: zhegan27)
def encoder(tparams, layer0_input, filter_shape, pool_size,
                      prefix='cnn_encoder'):

    """ filter_shape: (number of filters, num input feature maps, filter height,
                        filter width)
        image_shape: (batch_size, num input feature maps, image height, image width)
    """

    conv_out = conv.conv2d(input=layer0_input, filters=tparams[_p(prefix,'W')], 
                            filter_shape=filter_shape)

    conv_out_tanh = tensor.tanh(conv_out + tparams[_p(prefix,'b')].dimshuffle('x', 0, 'x', 'x'))
    output = pool.pool_2d(input=conv_out_tanh, ds=pool_size, ignore_border=True)

    return output.flatten(2)
poollayer.py (project: ObjRecPoseEst, author: paroj)
def __init__(self, rng, inputVar, cfgParams, copyLayer=None, layerNum=None):    
        """
        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type inputVar: theano.tensor.dtensor4
        :param inputVar: symbolic image tensor, of shape image_shape

        :type cfgParams: ConvLayerParams
        """
        self.cfgParams = cfgParams

        poolsize  = cfgParams.poolsize
        poolType  = cfgParams.poolType

        self.inputVar = inputVar

        if poolType == 0:
            pooled_out = pool.pool_2d(input = inputVar,
                                      ds = poolsize, ignore_border=True)

        self.output = pooled_out

        # store parameters of this layer; has none
        self.params = []
        self.weights = []
layers.py (project: rnn-theano, author: wangxggc)
def get_output(self, input):
        """
        Convolve the input with this layer's filters, then pool the result.

        :param input: A 4-D tensor with shape(batch_size, channels, sen_len, embedding_size),
                      usually, embedding_size == filter_width
        :return: A 4-D tensor with shape(batch_size, filter_size, sen_len-filter_height+1, embedding_size-filter_width+1)
        """
        # usually output is a 4-D tensor with shape(batch_size, filters, sen_len-filter_height+1, 1)
        output = T.nnet.conv2d(input=input,
                      filters=self.params[self.id + "conv_w"],
                      input_shape=self.input_shape,
                      filter_shape=self.filter_shape,
                      border_mode="valid")
        #  output = output.reshape([self.batch_size, self.filter_size, self.pooling_shape[0], self.pooling_shape[1]])
        # add a bias to each filter
        output += self.params[self.id + "conv_b"].dimshuffle("x", 0, "x", "x")

        if self.pooling_mode != "average": #self.pooling_mode == "max":
            output = pool.pool_2d(input=output,
                                 ignore_border=True,
                                 ds=self.pooling_shape,
                                 st=self.pooling_shape,
                                 padding=(0, 0),    # padding shape
                                 mode="max")
            # output = theano.printing.Print("Conv Pool Out")(output)
            return output.flatten().reshape([self.batch_size, self.filter_size])
        elif self.pooling_mode == "average":
            output = pool.pool_2d(input=output,
                                 ignore_border=True,
                                 ds=self.pooling_shape,
                                 st=self.pooling_shape,
                                 padding=(0, 0),    # padding shape
                                 mode="average_inc_pad")

            return output.flatten().reshape([self.batch_size, self.filter_size])
test_dnn.py (project: Theano-Deep-learning, author: GeekLiB)
def test_dnn_tag():
    """
    Test that if cuDNN isn't available we raise an error, and that if it is available, we use it.
    """
    x = T.ftensor4()
    old = theano.config.on_opt_error
    theano.config.on_opt_error = "raise"

    sio = StringIO()
    handler = logging.StreamHandler(sio)
    logging.getLogger('theano.compile.tests.test_dnn').addHandler(handler)
    # Silence original handler when intentionally generating warning messages
    logging.getLogger('theano').removeHandler(theano.logging_default_handler)
    raised = False
    try:
        f = theano.function(
            [x],
            pool_2d(x, ds=(2, 2), ignore_border=True),
            mode=mode_with_gpu.including("cudnn"))
    except (AssertionError, RuntimeError):
        assert not dnn.dnn_available(test_ctx_name)
        raised = True
    finally:
        theano.config.on_opt_error = old
        logging.getLogger(
            'theano.compile.tests.test_dnn').removeHandler(handler)
        logging.getLogger('theano').addHandler(theano.logging_default_handler)

    if not raised:
        assert dnn.dnn_available(test_ctx_name)
        assert any([isinstance(n.op, dnn.GpuDnnPool)
                    for n in f.maker.fgraph.toposort()])
test_dnn.py (project: Theano-Deep-learning, author: GeekLiB)
def test_dnn_tag():
    """
    Test that if cuDNN isn't available we raise an error, and that if it is available, we use it.
    """
    x = T.ftensor4()
    old = theano.config.on_opt_error
    theano.config.on_opt_error = "raise"

    sio = StringIO()
    handler = logging.StreamHandler(sio)
    logging.getLogger('theano.compile.tests.test_dnn').addHandler(handler)
    # Silence original handler when intentionally generating warning messages
    logging.getLogger('theano').removeHandler(theano.logging_default_handler)
    raised = False
    try:
        f = theano.function(
            [x],
            pool_2d(x, ds=(2, 2), ignore_border=True),
            mode=mode_with_gpu.including("cudnn"))
    except (AssertionError, RuntimeError):
        assert not cuda.dnn.dnn_available()
        raised = True
    finally:
        theano.config.on_opt_error = old
        logging.getLogger(
            'theano.compile.tests.test_dnn').removeHandler(handler)
        logging.getLogger('theano').addHandler(theano.logging_default_handler)

    if not raised:
        assert cuda.dnn.dnn_available()
        assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
                    for n in f.maker.fgraph.toposort()])
theano_backend.py (project: Hat, author: qiuqiangkong)
def pool2d(input, ds, ignore_border=True, st=None, padding=(0, 0), mode='max'):
    if mode=='avg': mode='average_exc_pad'
    return pool.pool_2d(input, ds, ignore_border, st, padding, mode)
insqa_lstm.py (project: insuranceQA-cnn-lstm, author: cszhz)
def _cnn_net(self, tparams, cnn_input, batch_size, sequence_len, num_filters, filter_sizes, proj_size):
    outputs = []
    for filter_size in filter_sizes:
        filter_shape = (num_filters, 1, filter_size, proj_size)
        image_shape = (batch_size, 1, sequence_len, proj_size)
        W = tparams['cnn_W_' + str(filter_size)]
        b = tparams['cnn_b_' + str(filter_size)]
        conv_out = conv2d(input=cnn_input, filters=W, filter_shape=filter_shape, input_shape=image_shape)
        pooled_out = pool.pool_2d(input=conv_out, ds=(sequence_len - filter_size + 1, 1), ignore_border=True, mode='max')
        pooled_active = T.tanh(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))
        outputs.append(pooled_active)
    num_filters_total = num_filters * len(filter_sizes)
    output_tensor = T.reshape(T.concatenate(outputs, axis=1), [batch_size, num_filters_total])
    return output_tensor
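The pooling window (sequence_len - filter_size + 1, 1) collapses the whole time axis for each filter, which is the usual max-over-time pooling of sentence CNNs. A minimal standalone sketch of the same idea with concrete, illustrative sizes (using the newer ws keyword):

import theano.tensor as T
from theano.tensor.signal import pool

# e.g. sequence_len = 20 and filter_size = 3 give feature maps of shape (batch, num_filters, 18, 1)
conv_maps = T.tensor4('conv_maps')
over_time = pool.pool_2d(conv_maps, ws=(18, 1), ignore_border=True, mode='max')
# over_time has shape (batch, num_filters, 1, 1); a reshape then yields (batch, num_filters)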
network3.py (project: DeepLearningPython35, author: MichalDanielDobrzanski)
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape(self.image_shape)
        conv_out = conv.conv2d(
            input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
            image_shape=self.image_shape)
        pooled_out = pool_2d(
            input=conv_out, ws=self.poolsize, ignore_border=True)
        self.output = self.activation_fn(
            pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output_dropout = self.output # no dropout in the convolutional layers

