def archiEncoder(convoList, fullList, lmdb, batch_size):
    # convoList holds the filter counts for the two convolution layers;
    # fullList is accepted but not used by this builder.
    n = caffe.NetSpec()
    n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
                             transform_param=dict(scale=1./255), ntop=2)
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=convoList[0], weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.conv1, in_place=True)
    n.pool1 = L.Pooling(n.relu1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=convoList[1], weight_filler=dict(type='xavier'))
    n.relu2 = L.ReLU(n.conv2, in_place=True)
    n.pool2 = L.Pooling(n.relu2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.ip2 = L.InnerProduct(n.pool2, num_output=10, weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()
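# A hedged usage sketch for archiEncoder: the filter counts, LMDB path, and
# batch size below are illustrative assumptions, not values from the source.
with open('encoder_train.prototxt', 'w') as f:
    f.write(str(archiEncoder([20, 50], [500], 'mnist_train_lmdb', 64)))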
# Example source code for the Python caffe.NetSpec() class
def make_frontend_vgg(options, is_training):
    batch_size = options.train_batch if is_training else options.test_batch
    image_path = options.train_image if is_training else options.test_image
    label_path = options.train_label if is_training else options.test_label
    net = caffe.NetSpec()
    net.data, net.label = network.make_image_label_data(
        image_path, label_path, batch_size,
        is_training, options.crop_size, options.mean)
    last = network.build_frontend_vgg(
        net, net.data, options.classes)[0]
    if options.up:
        net.upsample = network.make_upsample(last, options.classes)
        last = net.upsample
    net.loss = network.make_softmax_loss(last, net.label)
    if not is_training:
        net.accuracy = network.make_accuracy(last, net.label)
    return net.to_proto()
def make_context(options, is_training):
    batch_size = options.train_batch if is_training else options.test_batch
    image_path = options.train_image if is_training else options.test_image
    label_path = options.train_label if is_training else options.test_label
    net = caffe.NetSpec()
    net.data, net.label = network.make_bin_label_data(
        image_path, label_path, batch_size,
        options.label_shape, options.label_stride)
    last = network.build_context(
        net, net.data, options.classes, options.layers)[0]
    if options.up:
        net.upsample = network.make_upsample(last, options.classes)
        last = net.upsample
    net.loss = network.make_softmax_loss(last, net.label)
    if not is_training:
        net.accuracy = network.make_accuracy(last, net.label)
    return net.to_proto()
def make_joint(options, is_training):
    batch_size = options.train_batch if is_training else options.test_batch
    image_path = options.train_image if is_training else options.test_image
    label_path = options.train_label if is_training else options.test_label
    net = caffe.NetSpec()
    net.data, net.label = network.make_image_label_data(
        image_path, label_path, batch_size,
        is_training, options.crop_size, options.mean)
    last = network.build_frontend_vgg(
        net, net.data, options.classes)[0]
    last = network.build_context(
        net, last, options.classes, options.layers)[0]
    if options.up:
        net.upsample = network.make_upsample(last, options.classes)
        last = net.upsample
    net.loss = network.make_softmax_loss(last, net.label)
    if not is_training:
        net.accuracy = network.make_accuracy(last, net.label)
    return net.to_proto()
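# A hedged usage sketch, shown for make_frontend_vgg (make_context and
# make_joint take the same options object, plus label_shape/label_stride for
# the context builder). The field values below are illustrative assumptions;
# the real options come from the project's argument parser, and the project's
# `network` module must be importable.
from argparse import Namespace

options = Namespace(train_batch=8, test_batch=1,
                    train_image='train_images.txt', test_image='test_images.txt',
                    train_label='train_labels.txt', test_label='test_labels.txt',
                    crop_size=500, mean=(102.93, 111.36, 116.52),
                    classes=19, up=True)
for name, is_training in (('train', True), ('test', False)):
    with open('frontend_vgg_%s.prototxt' % name, 'w') as f:
        f.write(str(make_frontend_vgg(options, is_training)))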
def fcn(split, tops):
    n = caffe.NetSpec()
    n.color, n.hha, n.label = L.Python(module='nyud_layers',
            layer='NYUDSegDataLayer', ntop=3,
            param_str=str(dict(nyud_dir='../data/nyud', split=split,
                tops=tops, seed=1337)))
    n = modality_fcn(n, 'color', 'color')
    n = modality_fcn(n, 'hha', 'hha')
    n.score_fused = L.Eltwise(n.score_frcolor, n.score_frhha,
            operation=P.Eltwise.SUM, coeff=[0.5, 0.5])
    n.upscore = L.Deconvolution(n.score_fused,
        convolution_param=dict(num_output=40, kernel_size=64, stride=32,
            bias_term=False),
        param=[dict(lr_mult=0)])
    n.score = crop(n.upscore, n.color)
    n.loss = L.SoftmaxWithLoss(n.score, n.label,
            loss_param=dict(normalize=False, ignore_label=255))
    return n.to_proto()
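# A hedged usage sketch for fcn(); the split names and the tops list follow
# the FCN NYUDv2 convention, and the output file names are assumptions.
for split in ('train', 'val'):
    with open('%s.prototxt' % split, 'w') as f:
        f.write(str(fcn(split, ['color', 'hha', 'label'])))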
def _new_model(self):
    self.netspec = caffe.NetSpec()
    if self.cnn == 'AlexNet':
        from cnns import AlexNet
        self.base_cnn = AlexNet(netspec=self.netspec)
        self.feat_layer = 'fc7'
        self.feat_dim = 4096
    elif self.cnn == 'GoogLeNet':
        from cnns import InceptionV1
        self.base_cnn = InceptionV1(netspec=self.netspec)
        self.feat_layer = 'pool5'
        self.feat_dim = 1024
    elif self.cnn == 'VGG19':
        from cnns import VGG19
        self.base_cnn = VGG19(netspec=self.netspec)
        self.feat_layer = 'fc7'
        self.feat_dim = 4096
    return self.netspec
def lenet(lmdb, batch_size):
    # define LeNet
    n = caffe.NetSpec()
    n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
                             transform_param=dict(scale=1./255), ntop=2)
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.fc1 = L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.fc1, in_place=True)
    n.score = L.InnerProduct(n.relu1, num_output=10, weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.score, n.label)
    return n.to_proto()
def LeNet(lmdb, batch_size):
    # our version of LeNet: a series of linear and simple nonlinear transformations
    n = caffe.NetSpec()
    n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
                             transform_param=dict(scale=1./255), ntop=2)
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.fc1 = L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.fc1, in_place=True)
    n.score = L.InnerProduct(n.relu1, num_output=10, weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.score, n.label)
    proto = n.to_proto()
    proto.name = 'LeNet'
    return proto
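# A minimal usage sketch (the LMDB paths and batch sizes are assumptions):
# the returned NetParameter serializes straight into train/test prototxts.
with open('lenet_train.prototxt', 'w') as f:
    f.write(str(LeNet('mnist_train_lmdb', 64)))
with open('lenet_test.prototxt', 'w') as f:
    f.write(str(LeNet('mnist_test_lmdb', 100)))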
def __create_network_prototxt(self, deploy):
    n = caffe.NetSpec()
    top = self.__create_data_layer(n, deploy=deploy)
    top = self.__create_first_convolutional_layer(n, top)
    # Create Residual Units
    for i in range(len(self.__num_residual_units)):
        stride = 1 if i == 0 else 2
        top = self.__layer(n, top, 'res%d' % (2 + i), self.__wide_basic, self.__num_feature_maps[i],
                           self.__num_feature_maps[i + 1], self.__num_residual_units[i], stride, deploy)
    top = self.__create_fully_connected_layer(n, top, deploy=deploy)
    self.__create_output_layer(n, top, deploy=deploy)
    # Return the prototxt as a string, stripping the '__test' markers
    return str(n.to_proto()).replace('__test', '')
def construct_net():
    # VGG-19-style stack: convolution blocks of 2, 2, 4, 4, 4 layers.
    net = caffe.NetSpec()
    net.data = L.Input(shape=dict(dim=[10, 3, 224, 224]))
    block_1 = _block_crp('1', 2, net, net.data, 64)
    block_2 = _block_crp('2', 2, net, block_1, 128)
    block_3 = _block_crp('3', 4, net, block_2, 256)
    block_4 = _block_crp('4', 4, net, block_3, 512)
    block_5 = _block_crp('5', 4, net, block_4, 512)
    block_6 = _block_frd('6', net, block_5, 4096)
    block_7 = _block_frd('7', net, block_6, 4096)
    net.fc8 = L.InnerProduct(block_7, num_output=1000)
    net.prob = L.Softmax(net.fc8)
    return net.to_proto()
def construct_net():
    # VGG-16-style stack: convolution blocks of 2, 2, 3, 3, 3 layers.
    net = caffe.NetSpec()
    net.data = L.Input(shape=dict(dim=[10, 3, 224, 224]))
    block_1 = _block_crp('1', 2, net, net.data, 64)
    block_2 = _block_crp('2', 2, net, block_1, 128)
    block_3 = _block_crp('3', 3, net, block_2, 256)
    block_4 = _block_crp('4', 3, net, block_3, 512)
    block_5 = _block_crp('5', 3, net, block_4, 512)
    block_6 = _block_frd('6', net, block_5, 4096)
    block_7 = _block_frd('7', net, block_6, 4096)
    net.fc8 = L.InnerProduct(block_7, num_output=1000)
    net.prob = L.Softmax(net.fc8)
    return net.to_proto()
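# A hedged usage sketch: serialize the builder to a deploy prototxt (the
# file name is an assumption). Note that in this listing the second,
# VGG-16-style definition of construct_net shadows the first.
with open('vgg_deploy.prototxt', 'w') as f:
    f.write(str(construct_net()))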
def overall_net(batch_size, channels, height, width, action_size, net_type):
    # param = learned_param
    n = caffe.NetSpec()
    # action
    n.frames = L.Input(shape=dict(dim=[batch_size, channels, height, width]))
    # Image feature
    if net_type == 'action':
        param = learned_param
    else:
        param = frozen_param
    n.conv1, n.relu1 = conv_relu(n.frames, 8, 32, stride=4, param=param)
    n.conv2, n.relu2 = conv_relu(n.relu1, 4, 64, stride=2, param=param)
    n.conv3, n.relu3 = conv_relu(n.relu2, 3, 64, stride=1, param=param)
    n.fc4, n.relu4 = fc_relu(n.relu3, 512, param=param)
    n.value_q = L.InnerProduct(n.relu4, num_output=action_size, param=param,
                               weight_filler=dict(type='gaussian', std=0.005),
                               bias_filler=dict(type='constant', value=1))
    if net_type == 'test':
        return n.to_proto()
    n.filter = L.Input(shape=dict(dim=[batch_size, action_size]))
    # element-wise product masks out the Q-values of non-selected actions
    n.filtered_value_q = L.Eltwise(n.value_q, n.filter, operation=P.Eltwise.PROD)
    n.target = L.Input(shape=dict(dim=[batch_size, action_size]))
    n.loss = L.EuclideanLoss(n.filtered_value_q, n.target)
    return n.to_proto()
### define solver
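# The header above announces a solver definition that is not included in
# this listing. A minimal sketch of one common way to build it with the
# protobuf API; every value below is an assumption for illustration.
from caffe.proto import caffe_pb2

def make_solver(train_net_path, snapshot_prefix):
    s = caffe_pb2.SolverParameter()
    s.train_net = train_net_path
    s.base_lr = 0.01      # starting learning rate
    s.momentum = 0.9
    s.lr_policy = 'step'  # drop the rate by gamma every stepsize iterations
    s.gamma = 0.1
    s.stepsize = 10000
    s.max_iter = 100000
    s.snapshot = 5000
    s.snapshot_prefix = snapshot_prefix
    return s              # write str(make_solver(...)) to a solver.prototxt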
def __init__(self):
    self.net = caffe.NetSpec()
    self.testnet = caffe.NetSpec()
def make_frontend_vgg(options):
    deploy_net = caffe.NetSpec()
    deploy_net.data = network.make_input_data(options.input_size)
    last, final_name = network.build_frontend_vgg(
        deploy_net, deploy_net.data, options.classes)
    if options.up:
        deploy_net.upsample = network.make_upsample(last, options.classes)
        last = deploy_net.upsample
    deploy_net.prob = network.make_prob(last)
    deploy_net = deploy_net.to_proto()
    return deploy_net, final_name
def make_context(options):
    deploy_net = caffe.NetSpec()
    deploy_net.data = network.make_input_data(
        options.input_size, options.classes)
    last, final_name = network.build_context(
        deploy_net, deploy_net.data, options.classes, options.layers)
    if options.up:
        deploy_net.upsample = network.make_upsample(last, options.classes)
        last = deploy_net.upsample
    deploy_net.prob = network.make_prob(last)
    deploy_net = deploy_net.to_proto()
    return deploy_net, final_name
# Source: proto_file.py, project Sensor-Specific-Hyperspectral-Image-Feature-Learning (author: MeiShaohui)
def deploy_net(conf, batch_size, class_num):
    '''
    :param conf: the data_set_config information, defined in data_info_set.item;
        conf.channels is the number of hyperspectral channels (e.g. 224, 448 or 103, 206)
        and conf.kernel_size is the convolution kernel size, often 1/9 of the channels
    :param batch_size: the batch size of the prototxt
    :param class_num: the number of classes in the data set
    :return: the deploy NetParameter message
    '''
    n = caffe.NetSpec()
    if conf.use_CK is True:
        n.data, n.label = L.DummyData(shape={'dim': [batch_size, 1, conf.CK_channels, 1]}, ntop=2)
        n.conv1 = L.Convolution(n.data, kernel_h=conf.CK_kernel_size, kernel_w=1, num_output=20,
                                weight_filler=dict(type='gaussian', std=0.05),
                                bias_filler=dict(type='constant', value=0.1))
    else:
        n.data, n.label = L.DummyData(shape={'dim': [batch_size, 1, conf.channels, 1]}, ntop=2)
        n.conv1 = L.Convolution(n.data, kernel_h=conf.kernel_size, kernel_w=1, num_output=20,
                                weight_filler=dict(type='gaussian', std=0.05),
                                bias_filler=dict(type='constant', value=0.1))
    n.bn1 = L.BatchNorm(n.conv1, use_global_stats=1, in_place=True)
    n.relu1 = L.PReLU(n.bn1, in_place=True)
    n.ip1 = L.InnerProduct(n.relu1, num_output=100, weight_filler=dict(type='gaussian', std=0.05),
                           bias_filler=dict(type='constant', value=0.1))
    n.drop1 = L.Dropout(n.ip1, dropout_ratio=0.1, in_place=True)
    n.relu2 = L.PReLU(n.drop1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu2, num_output=class_num, weight_filler=dict(type='gaussian', std=0.05),
                           bias_filler=dict(type='constant', value=0.1))
    return n.to_proto()
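# A hedged usage sketch for deploy_net(); the conf fields and values below
# are illustrative assumptions standing in for data_info_set.item.
from argparse import Namespace

conf = Namespace(use_CK=False, channels=224, kernel_size=24,
                 CK_channels=448, CK_kernel_size=48)
with open('hyperspectral_deploy.prototxt', 'w') as f:
    f.write(str(deploy_net(conf, batch_size=100, class_num=16)))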
def lenet_proto(self, batch_size, phase='TRAIN'):
    n = caffe.NetSpec()
    if phase == 'TRAIN':
        source_data = self.train_data
        mirror = False
    else:
        source_data = self.test_data
        mirror = False
    n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                             transform_param=dict(scale=0.00390625, mirror=mirror))
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, stride=1,
                            weight_filler=dict(type='xavier'),
                            bias_filler=dict(type='constant'))
    n.pool1 = L.Pooling(n.conv1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, stride=1,
                            weight_filler=dict(type='xavier'),
                            bias_filler=dict(type='constant'))
    n.pool2 = L.Pooling(n.conv2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    n.ip1 = L.InnerProduct(n.pool2, num_output=500,
                           weight_filler=dict(type='xavier'),
                           bias_filler=dict(type='constant'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1, num_output=self.classifier_num,
                           weight_filler=dict(type='xavier'),
                           bias_filler=dict(type='constant'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    if phase != 'TRAIN':
        # include phase=1 (TEST) so the accuracy layer only runs at test time
        n.accuracy = L.Accuracy(n.ip2, n.label, include=dict(phase=1))
    return n.to_proto()
def lenet_bn_proto(self, batch_size, phase='TRAIN'):
    n = caffe.NetSpec()
    if phase == 'TRAIN':
        source_data = self.train_data
        mirror = False
    else:
        source_data = self.test_data
        mirror = False
    n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                             transform_param=dict(scale=0.00390625, mirror=mirror))
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, stride=1,
                            weight_filler=dict(type='xavier'),
                            bias_filler=dict(type='constant'))
    n.bn1 = L.BatchNorm(n.conv1, use_global_stats=False)
    n.pool1 = L.Pooling(n.bn1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, stride=1,
                            weight_filler=dict(type='xavier'),
                            bias_filler=dict(type='constant'))
    n.bn2 = L.BatchNorm(n.conv2, use_global_stats=False)
    n.pool2 = L.Pooling(n.bn2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    n.ip1 = L.InnerProduct(n.pool2, num_output=500,
                           weight_filler=dict(type='xavier'),
                           bias_filler=dict(type='constant'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1, num_output=self.classifier_num,
                           weight_filler=dict(type='xavier'),
                           bias_filler=dict(type='constant'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    if phase != 'TRAIN':
        # include phase=1 (TEST) so the accuracy layer only runs at test time
        n.accuracy = L.Accuracy(n.ip2, n.label, include=dict(phase=1))
    return n.to_proto()
def alexnet_proto(self, batch_size, phase='TRAIN'):
    n = caffe.NetSpec()
    if phase == 'TRAIN':
        source_data = self.train_data
        mirror = True
    else:
        source_data = self.test_data
        mirror = False
    n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                             transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=mirror))
    n.conv1, n.relu1 = conv_relu(n.data, num_output=96, kernel_size=11, stride=4)  # 96x55x55
    n.norm1 = L.LRN(n.conv1, local_size=5, alpha=0.0001, beta=0.75)
    n.pool1 = L.Pooling(n.norm1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 96x27x27
    n.conv2, n.relu2 = conv_relu(n.pool1, num_output=256, kernel_size=5, pad=2, group=2)  # 256x27x27
    n.norm2 = L.LRN(n.conv2, local_size=5, alpha=0.0001, beta=0.75)
    n.pool2 = L.Pooling(n.norm2, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 256x13x13
    n.conv3, n.relu3 = conv_relu(n.pool2, num_output=384, kernel_size=3, pad=1)  # 384x13x13
    n.conv4, n.relu4 = conv_relu(n.conv3, num_output=384, kernel_size=3, pad=1, group=2)  # 384x13x13
    n.conv5, n.relu5 = conv_relu(n.conv4, num_output=256, kernel_size=3, pad=1, group=2)  # 256x13x13
    n.pool5 = L.Pooling(n.conv5, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 256x6x6
    n.fc6, n.relu6, n.drop6 = fc_relu_drop(n.pool5, num_output=4096)  # 4096x1x1
    n.fc7, n.relu7, n.drop7 = fc_relu_drop(n.fc6, num_output=4096)  # 4096x1x1
    n.fc8 = L.InnerProduct(n.fc7, num_output=self.classifier_num,
                           param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                           weight_filler=dict(type='gaussian', std=0.01),
                           bias_filler=dict(type='constant', value=0))
    n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
    if phase != 'TRAIN':
        n.accuracy_top1, n.accuracy_top5 = accuracy_top1_top5(n.fc8, n.label)
    return n.to_proto()
def net(hdf5, batch_size):
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    n.ip1 = L.InnerProduct(n.data, num_output=1024, weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1, num_output=1024, weight_filler=dict(type='xavier'))
    n.relu2 = L.ReLU(n.ip2, in_place=True)
    n.ip3 = L.InnerProduct(n.relu2, num_output=2, weight_filler=dict(type='xavier'))
    n.loss = L.SigmoidCrossEntropyLoss(n.ip3, n.label)
    return n.to_proto()
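# A hedged usage sketch: HDF5Data's source is a text file listing .h5 files,
# one per line (the path and batch size here are assumptions).
with open('hdf5_train.prototxt', 'w') as f:
    f.write(str(net('train_h5_list.txt', 32)))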
def test_train2deploy(self):
    """
    Test train to deploy conversion.
    """
    def network(lmdb_path, batch_size):
        net = caffe.NetSpec()
        net.data, net.labels = caffe.layers.Data(batch_size=batch_size,
                                                 backend=caffe.params.Data.LMDB,
                                                 source=lmdb_path,
                                                 transform_param=dict(scale=1./255),
                                                 ntop=2)
        net.conv1 = caffe.layers.Convolution(net.data, kernel_size=5, num_output=20,
                                             weight_filler=dict(type='xavier'))
        net.pool1 = caffe.layers.Pooling(net.conv1, kernel_size=2, stride=2,
                                         pool=caffe.params.Pooling.MAX)
        net.conv2 = caffe.layers.Convolution(net.pool1, kernel_size=5, num_output=50,
                                             weight_filler=dict(type='xavier'))
        net.pool2 = caffe.layers.Pooling(net.conv2, kernel_size=2, stride=2,
                                         pool=caffe.params.Pooling.MAX)
        net.fc1 = caffe.layers.InnerProduct(net.pool2, num_output=500,
                                            weight_filler=dict(type='xavier'))
        net.relu1 = caffe.layers.ReLU(net.fc1, in_place=True)
        net.score = caffe.layers.InnerProduct(net.relu1, num_output=10,
                                              weight_filler=dict(type='xavier'))
        net.loss = caffe.layers.SoftmaxWithLoss(net.score, net.labels)
        return net.to_proto()
    train_prototxt_path = 'tests/train.prototxt'
    deploy_prototxt_path = 'tests/deploy.prototxt'
    with open(train_prototxt_path, 'w') as f:
        f.write(str(network('tests/train_lmdb', 128)))
    tools.prototxt.train2deploy(train_prototxt_path, (128, 3, 28, 28), deploy_prototxt_path)
def _update_deploy_net(self):
    """ Update deploy net. """
    self._deploy_net = caffe.NetSpec()
    data = self._add_input_layers(self._deploy_net, deploy=True)
    bottom_dict = self._add_intermediate_layers(self._deploy_net, data, deploy=True)
    self._add_output_layers(self._deploy_net, bottom_dict, deploy=True)
    # add input definition strings.
    self._deploy_str = 'input: {}\ninput_dim: {}\ninput_dim: {}\ninput_dim: {}\ninput_dim: {}'.\
        format('"' + self.io.data_name + '"', 1, 3, 224, 224)
def _update_trainval_net(self):
    """ Update trainval net. """
    self._train_net = caffe.NetSpec()
    self._val_net = caffe.NetSpec()
    in_nets = {'train': self._train_net, 'val': self._val_net}
    data = self._add_input_layers(in_nets, deploy=False)
    bottom_dict = self._add_intermediate_layers(self._train_net, data, deploy=False)
    self._add_output_layers(self._train_net, bottom_dict, deploy=False)
def caffenet(data, label=None, train=True, num_classes=1000,
             classifier_name='fc8', learn_all=False):
    """Returns a NetSpec specifying CaffeNet, following the original proto text
    specification (./models/bvlc_reference_caffenet/train_val.prototxt)."""
    n = caffe.NetSpec()
    n.data = data
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)
    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 4096, param=param)
    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7
    # always learn fc8 (param=learned_param)
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)
    # write the net to a fixed prototxt path and return that filename
    with open('/home/ldy/workspace/caffe/models/finetune_UCMerced_LandUse/deploy.prototxt', 'w') as f:
        f.write(str(n.to_proto()))
    return f.name
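# A hedged usage sketch for caffenet(): drive it with a dummy input layer to
# emit a deploy-style net. The shape is the usual 227x227 crop, and the call
# writes the prototxt to the hard-coded path above (which must exist).
dummy_data = L.DummyData(shape=dict(dim=[1, 3, 227, 227]))
deploy_path = caffenet(data=dummy_data, train=False, learn_all=False)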
def customNet(lmdb, batch_size):
    # define a custom LeNet-style net with batch norm; strided convolutions
    # replace the pooling layers (left commented out below)
    n = caffe.NetSpec()
    n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
                             transform_param=dict(scale=1./255), ntop=2)
    n.conv1 = L.Convolution(n.data, kernel_size=3, num_output=20, weight_filler=dict(type='xavier'))
    n.bn1 = L.BatchNorm(n.conv1, in_place=True)
    n.scale1 = L.Scale(n.bn1, bias_term=True, bias_filler=dict(value=0), filler=dict(value=1))
    n.conv2 = L.Convolution(n.scale1, kernel_size=3, stride=2, num_output=20, weight_filler=dict(type='xavier'))
    n.bn2 = L.BatchNorm(n.conv2, in_place=True)
    n.scale2 = L.Scale(n.bn2, bias_term=True, bias_filler=dict(value=0), filler=dict(value=1))
    # n.pool1 = L.Pooling(n.scale2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv3 = L.Convolution(n.scale2, kernel_size=3, num_output=50, weight_filler=dict(type='xavier'))
    n.bn3 = L.BatchNorm(n.conv3, in_place=True)
    n.scale3 = L.Scale(n.bn3, bias_term=True, bias_filler=dict(value=0), filler=dict(value=1))
    n.conv4 = L.Convolution(n.scale3, kernel_size=3, stride=2, num_output=50, weight_filler=dict(type='xavier'))
    n.bn4 = L.BatchNorm(n.conv4, in_place=True)
    n.scale4 = L.Scale(n.bn4, bias_term=True, bias_filler=dict(value=0), filler=dict(value=1))
    # n.pool2 = L.Pooling(n.scale4, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.fc1 = L.InnerProduct(n.scale4, num_output=500, weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.fc1, in_place=True)
    n.fc2 = L.InnerProduct(n.relu1, num_output=100, weight_filler=dict(type='xavier'))
    n.relu2 = L.ReLU(n.fc2, in_place=True)
    n.score = L.InnerProduct(n.relu2, num_output=10, weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.score, n.label)
    return n.to_proto()