# 5_eeg_mw_search_kernel.py
import numpy as np

import lasagne
from lasagne.layers import InputLayer, Conv2DLayer, Pool2DLayer


def build_cnn(k_height, k_width, input_var=None):
    # Input layer: batches of single-channel 64x512 inputs.
    l_in = InputLayer(shape=(None, 1, 64, 512), input_var=input_var)
    # Convolution with 8 filters of size (k_height, k_width); 'same' padding
    # preserves the spatial dimensions.
    l_conv1 = Conv2DLayer(incoming=l_in, num_filters=8,
                          filter_size=(k_height, k_width),
                          stride=1, pad='same',
                          W=lasagne.init.Normal(std=0.02),
                          nonlinearity=lasagne.nonlinearities.very_leaky_rectify)
    # 2x2 max-pooling with stride 2 halves each spatial dimension.
    l_pool1 = Pool2DLayer(incoming=l_conv1, pool_size=2, stride=2)
    l_drop1 = lasagne.layers.dropout(l_pool1, p=0.75)
    # Fully connected layer of 50 rectified linear units.
    l_fc = lasagne.layers.DenseLayer(
        l_drop1,
        num_units=50,
        nonlinearity=lasagne.nonlinearities.rectify)
    l_drop2 = lasagne.layers.dropout(l_fc, p=0.75)
    # Two-unit softmax output layer for binary classification.
    l_out = lasagne.layers.DenseLayer(
        l_drop2,
        num_units=2,
        nonlinearity=lasagne.nonlinearities.softmax)
    return l_out
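
# A minimal usage sketch showing how the network above could be compiled into
# a Theano training function, as in the standard Lasagne examples. The helper
# name `compile_train_fn`, the 3x3 kernel defaults, and the Adam optimizer are
# illustrative assumptions, not part of the original file.
def compile_train_fn(k_height=3, k_width=3):
    import theano
    import theano.tensor as T

    input_var = T.tensor4('inputs')    # symbolic (batch, channel, height, width)
    target_var = T.ivector('targets')  # symbolic integer class labels
    network = build_cnn(k_height, k_width, input_var=input_var)
    # Training loss: mean categorical cross-entropy over the softmax output.
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction,
                                                       target_var).mean()
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.adam(loss, params)
    return theano.function([input_var, target_var], loss, updates=updates)
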
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.