Example source code for Python max_pool_2d()
def convpool(X, W, b, poolsize=(2, 2)):
conv_out = conv2d(input=X, filters=W)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
# return T.tanh(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))
return relu(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))
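This fragment uses Theano's legacy pooling module; theano.tensor.signal.downsample.max_pool_2d and its ds= argument were deprecated in later Theano releases in favour of theano.tensor.signal.pool.pool_2d. A minimal sketch of the same layer against the newer API follows; treat the exact keyword names as an assumption, since the pooling window argument was called ds in older releases and ws in more recent ones.

# Sketch only: the same convpool written against theano.tensor.signal.pool.
import theano.tensor as T
from theano.tensor.nnet import conv2d
from theano.tensor.signal import pool

def convpool_new(X, W, b, poolsize=(2, 2)):
    conv_out = conv2d(input=X, filters=W)
    # 'ws' is 'ds' in older Theano releases; mode='max' reproduces max-pooling
    pooled_out = pool.pool_2d(conv_out, ws=poolsize, ignore_border=True, mode='max')
    # broadcast the per-filter bias over batch, height and width
    return T.nnet.relu(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))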
def predict(self, new_data, batch_size):
    """
    predict for new data
    """
    img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_act = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_act, ds=self.poolsize, ignore_border=True)
    elif self.non_linear == "relu":
        conv_out_act = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_act, ds=self.poolsize, ignore_border=True)
    else:
        # no nonlinearity: pool first, then add the bias
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
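For context, a hedged usage sketch for the predict method above; the layer object conv_layer, its constructor, and the batch size of 50 are assumptions, not shown in the snippet, and only the call pattern is illustrated.

# Hedged usage sketch: compile predict() into a callable Theano function.
import theano
import theano.tensor as T

x = T.tensor4('x')                          # (batch, 1, height, width)
out = conv_layer.predict(x, batch_size=50)  # conv_layer: hypothetical layer instance
predict_fn = theano.function([x], out)      # batches fed at run time must match batch_size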
def get_output(self, train):
X = self.get_input(train)
output = downsample.max_pool_2d(X, ds=self.poolsize, st=self.stride, ignore_border=self.ignore_border,
mode=globals.pooling_mode)
return output
# class AveragePooling2D(MaxPooling2D):
# def __init__(self, poolsize=(2, 2), stride=None, ignore_border=True):
# super(AveragePooling2D, self).__init__()
# self.input = T.tensor4()
# self.poolsize = tuple(poolsize)
# self.stride = stride
# self.ignore_border = ignore_border
# def get_output(self, train):
# X = self.get_input(train)
# sums = images2neibs(X, neib_shape=(globals.s_size, 1)).sum(axis=-1)
# counts = T.neq(images2neibs(X, neib_shape=(globals.s_size, 1)), 0).sum(axis=-1)
# average = (sums/counts).reshape((X.shape[0], X.shape[1], 2, 1))
# return average
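The commented-out AveragePooling2D above computes the average through images2neibs; in Theano this can also be expressed directly with the pooling op's mode argument. A minimal sketch, assuming a recent Theano release where the module is theano.tensor.signal.pool and the window keyword is ws.

# Sketch: average pooling via pool_2d instead of images2neibs.
from theano.tensor.signal import pool

def average_pool(X, poolsize=(2, 2)):
    # 'average_exc_pad' averages only over real (non-padded) positions
    return pool.pool_2d(X, ws=poolsize, ignore_border=True, mode='average_exc_pad')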
def __init__(self, input, params_W, params_b, filter_shape, image_shape, poolsize=(2, 2)):
    assert image_shape[1] == filter_shape[1]
    self.input = input
    self.W = params_W
    self.b = params_b
    # convolve the input feature maps with the filters
    conv_out = conv.conv2d(
        input=input,
        filters=self.W,
        filter_shape=filter_shape,
        image_shape=image_shape
    )
    # downsample each feature map with max-pooling
    pooled_out = downsample.max_pool_2d(
        input=conv_out,
        ds=poolsize,
        ignore_border=True
    )
    self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    self.params = [self.W, self.b]
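With a 'valid' convolution followed by ignore_border=True pooling, the spatial size of self.output can be computed up front. A small helper sketch; the function name and example shapes are illustrative only.

# Sketch: expected output shape of the conv + max-pool layer above.
def convpool_output_shape(image_shape, filter_shape, poolsize=(2, 2)):
    batch, _, img_h, img_w = image_shape
    n_filters, _, filt_h, filt_w = filter_shape
    conv_h, conv_w = img_h - filt_h + 1, img_w - filt_w + 1   # 'valid' convolution
    return (batch, n_filters, conv_h // poolsize[0], conv_w // poolsize[1])

# e.g. convpool_output_shape((500, 1, 28, 28), (20, 1, 5, 5)) -> (500, 20, 12, 12)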
Source: T6SB_conv_net_classes.py, project: SE16-Task6-Stance-Detection, author: nestle1993 (same predict() implementation as shown above).
Source: conv_net_classes.py, project: SE16-Task6-Stance-Detection, author: nestle1993 (same predict() implementation as shown above).
def max_pooling(matrix, pool_size):
    """
    Applies max-pooling to the given matrix with the specified pool_size.
    Only the maximum value within each pooling window is kept in the result.
    :param matrix: input matrix
    :param pool_size: pooling window size
    :return: max-pooled output
    """
    t_input = tensor.dmatrix('input')
    pool_out = ds.max_pool_2d(t_input, pool_size, ignore_border=True)
    pool_f = theano.function([t_input], pool_out)
    return pool_f(matrix)
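A quick usage check for max_pooling, assuming the theano, tensor and ds aliases used above are already imported in that file.

# Hedged example: exercising max_pooling on a small NumPy matrix.
import numpy as np

m = np.arange(16.0).reshape(4, 4)
print(max_pooling(m, (2, 2)))
# expected: [[ 5.  7.]
#            [13. 15.]]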
def encoder(tparams, layer0_input, filter_shape, pool_size, options, prefix='cnn_d'):
    """ filter_shape: (number of filters, num input feature maps, filter height,
                       filter width)
        image_shape: (batch_size, num input feature maps, image height, image width)
    """
    conv_out = conv.conv2d(input=layer0_input, filters=tparams[_p(prefix, 'W')], filter_shape=filter_shape)
    if options['cnn_activation'] == 'tanh':
        conv_out_tanh = tensor.tanh(conv_out + tparams[_p(prefix, 'b')].dimshuffle('x', 0, 'x', 'x'))
        # ignore_border=False keeps the partial pooling regions at the edges
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=pool_size, ignore_border=False)
    elif options['cnn_activation'] == 'linear':
        conv_out2 = conv_out + tparams[_p(prefix, 'b')].dimshuffle('x', 0, 'x', 'x')
        output = downsample.max_pool_2d(input=conv_out2, ds=pool_size, ignore_border=False)
    else:
        raise ValueError('Wrong specification of activation function in CNN: %s' % options['cnn_activation'])
    return output.flatten(2)
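encoder() reads its weights from a tparams dictionary keyed through _p. A hedged initialization sketch follows; the key-joining behaviour of _p is an assumption based on common Theano code, and the filter shape is illustrative only.

# Hedged sketch: building the tparams dict that encoder() expects.
import numpy as np
import theano

def _p(prefix, name):
    # assumed helper: joins a layer prefix and a parameter name
    return '%s_%s' % (prefix, name)

filter_shape = (100, 1, 3, 300)   # e.g. 100 filters over 3-word windows of 300-d embeddings
tparams = {
    _p('cnn_d', 'W'): theano.shared(
        0.01 * np.random.randn(*filter_shape).astype(theano.config.floatX)),
    _p('cnn_d', 'b'): theano.shared(
        np.zeros((filter_shape[0],), dtype=theano.config.floatX)),
}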
def predict(self, new_data, batch_size):
    """
    predict for new data
    """
    img_shape = None  # (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_act = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_act, ds=self.poolsize, ignore_border=True)
    elif self.non_linear == "relu":
        conv_out_act = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_act, ds=self.poolsize, ignore_border=True)
    else:
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
Source: conv_net_classes.py, project: coling2016-claim-classification, author: UKPLab (same predict() implementation as shown above).
def predict_maxpool(self, new_data, batch_size):
    """
    predict for new data
    """
    img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_act = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_act, ds=self.poolsize, ignore_border=True)
    elif self.non_linear == "relu":
        # note: the relu branch returns the activation map before pooling
        conv_out_act = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        return conv_out_act
    else:
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
def output(self, x, a):
    # pool the input argument x; maxpool_shape is assumed to be defined in the enclosing scope
    return downsample.max_pool_2d(x, maxpool_shape, ignore_border=True)
def __init__(self, rng, input, image_shape, filter_shape, poolsize=(2, 2)):
    # the number of input feature maps must match between image_shape and filter_shape
    assert image_shape[1] == filter_shape[1]
    fan_in = np.prod(filter_shape[1:])
    fan_out = filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize)
    W_bound = np.sqrt(6.0 / (fan_in + fan_out))
    self.W = theano.shared(
        np.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                   dtype=theano.config.floatX),  # @UndefinedVariable
        borrow=True)
    b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)  # @UndefinedVariable
    self.b = theano.shared(value=b_values, borrow=True)
    # convolve the input feature maps with the filters
    conv_out = conv.conv2d(
        input=input,
        filters=self.W,
        filter_shape=filter_shape,
        image_shape=image_shape)
    # downsample each feature map with max-pooling
    pooled_out = downsample.max_pool_2d(
        input=conv_out,
        ds=poolsize,
        ignore_border=True)
    # add the bias (broadcast over batch, height and width) and apply tanh
    self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    self.params = [self.W, self.b]
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
self.inpt = inpt.reshape(self.image_shape)
conv_out = conv.conv2d(
input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
image_shape=self.image_shape)
pooled_out = downsample.max_pool_2d(
input=conv_out, ds=self.poolsize, ignore_border=True)
self.output = self.activation_fn(
pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
self.output_dropout = self.output # no dropout in the convolutional layers
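For reference, a hedged example of shapes that keep set_inpt consistent; the MNIST-style sizes below are assumptions, chosen only to show the bookkeeping between image_shape, filter_shape and poolsize.

# Hedged example: shape bookkeeping for the layer above (MNIST-sized input).
mini_batch_size = 10
image_shape  = (mini_batch_size, 1, 28, 28)   # (batch, input maps, height, width)
filter_shape = (20, 1, 5, 5)                  # (filters, input maps, filt h, filt w)
poolsize     = (2, 2)
# 'valid' convolution: 28 - 5 + 1 = 24; pooled: 24 // 2 = 12
# so self.output has shape (10, 20, 12, 12)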
def conv_and_pool(input_expr, w, convs_mult, p_drop_conv):
conv_w = w
if convs_mult == 2:
conv_w = T.concatenate([w, w[:,:,::-1,::-1]], axis=0)
elif convs_mult == 4:
conv_w = T.concatenate([w, w[:,:,::-1], w[:,:,:,::-1], w[:,:,::-1,::-1]], axis=0)
e1 = rectify(conv2d(input_expr, conv_w))
e2 = max_pool_2d(e1, (2, 2), ignore_border=False)
return dropout(e2, p_drop_conv)
def _decorate_fprop(self, layer):
layer_fprop = layer.fprop
def decorated_fprop(instance, input, return_output_preactivation=False):
if return_output_preactivation:
output, pre_output = layer_fprop(input, return_output_preactivation)
pooled_output = downsample.max_pool_2d(output, self.pool_shape, ignore_border=self.ignore_border)
pooled_pre_output = downsample.max_pool_2d(pre_output, self.pool_shape, ignore_border=self.ignore_border)
return pooled_output, pooled_pre_output
output = layer_fprop(input, return_output_preactivation)
pooled_output = downsample.max_pool_2d(output, self.pool_shape, ignore_border=self.ignore_border)
return pooled_output
layer.fprop = MethodType(decorated_fprop, layer)
def get_output(self, train):
X = self.get_input(train)
X = theano.tensor.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 1, 3, 2)
output = downsample.max_pool_2d(X, ds=self.poolsize, st=self.st, ignore_border=self.ignore_border)
output = output.dimshuffle(0, 1, 3, 2)
return theano.tensor.reshape(output, (output.shape[0], output.shape[1], output.shape[2]))
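This get_output pools a 3-D tensor along its last axis by temporarily lifting it to 4-D. A hedged equivalent written directly as a function; it assumes the same legacy downsample module used elsewhere on this page.

# Hedged sketch: 1-D max-pooling over the last axis of a (n, a, b) tensor,
# expressed the same way as the layer above.
from theano.tensor.signal import downsample

def max_pool_1d(X, pool_length):
    X4 = X.dimshuffle(0, 1, 'x', 2)                                    # (n, a, 1, b)
    pooled = downsample.max_pool_2d(X4, ds=(1, pool_length), ignore_border=True)
    return pooled[:, :, 0, :]                                          # (n, a, b // pool_length)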
def get_output(self, train):
X = self.get_input(train)
output = downsample.max_pool_2d(X, ds=self.poolsize, st=self.stride, ignore_border=self.ignore_border)
return output
Source: 2d_convolutional_net.py, project: GT-Deep-Learning-for-Sign-Language-Recognition, author: payamsiyari.
def model(X, w, w2, w3, w35, w4, p_drop_conv, p_drop_hidden):
    # note: w_o (the softmax weights used at the end) is taken from the enclosing scope
    l1a = rectify(conv2d(X, w, border_mode='full'))
    l1 = max_pool_2d(l1a, (2, 2))
    #l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    #l2 = dropout(l2, p_drop_conv)

    l3 = rectify(conv2d(l2, w3))
    #l3 = max_pool_2d(l3a, (1, 1))
    #l3 = dropout(l3, p_drop_conv)

    l35a = rectify(conv2d(l3, w35))
    l35b = max_pool_2d(l35a, (2, 2))
    l35 = T.flatten(l35b, outdim=2)
    #l35 = dropout(l35, p_drop_conv)

    l4 = rectify(T.dot(l35, w4))
    #l4 = dropout(l4, p_drop_hidden)

    pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, l35, l4, pyx
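A hedged sketch of how a model function like this is usually wrapped for prediction; the weight variables (w, w2, w3, w35, w4 and w_o) are assumed to be theano.shared arrays created elsewhere with compatible shapes, and only the call pattern is shown.

# Hedged usage sketch: compile the final softmax output into a prediction function.
import theano
import theano.tensor as T

X = T.ftensor4('X')
l1, l2, l3, l35, l4, py_x = model(X, w, w2, w3, w35, w4, 0.0, 0.0)  # dropout calls are commented out above
y_pred = T.argmax(py_x, axis=1)
predict = theano.function([X], y_pred, allow_input_downcast=True)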