import numpy as np

import lasagne
from lasagne.layers import Conv2DLayer, DenseLayer, InputLayer, Pool2DLayer


def build_cnn(input_var=None):
    # Input layer: batches of single-channel 64x512 images.
    l_in = InputLayer(shape=(None, 1, 64, 512), input_var=input_var)
    # Convolution with 8 3x3 filters; 'same' padding keeps the spatial size.
    l_conv1 = Conv2DLayer(l_in, num_filters=8, filter_size=(3, 3),
                          stride=1, pad='same',
                          W=lasagne.init.Normal(std=0.02),
                          nonlinearity=lasagne.nonlinearities.very_leaky_rectify)
    # 2x2 max-pooling halves each spatial dimension.
    l_pool1 = Pool2DLayer(l_conv1, pool_size=2, stride=2)
    # A fully-connected layer of 512 rectified linear units.
    l_fc = DenseLayer(l_pool1, num_units=512,
                      nonlinearity=lasagne.nonlinearities.rectify)
    # Softmax output layer over the two classes.
    l_out = DenseLayer(l_fc, num_units=2,
                       nonlinearity=lasagne.nonlinearities.softmax)
    return l_out
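
# For reference, a minimal sketch of how the output layer returned by
# `build_cnn` can be compiled into a Theano prediction function. The helper
# name `compile_prediction_fn` is illustrative and not part of the original
# listing.
def compile_prediction_fn():
    import theano
    import theano.tensor as T
    # Symbolic 4D input (batch, channel, height, width).
    input_var = T.tensor4('inputs')
    network = build_cnn(input_var)
    # Symbolic softmax class probabilities for the given inputs.
    prediction = lasagne.layers.get_output(network)
    return theano.function([input_var], prediction)
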
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.