def siamese_pos(out_dim=100):
    """Builds one branch (the '_p' tower) of the SketchTriplet network in
    deploy form, with a single 1x1x225x225 input blob."""
    n = caffe.NetSpec()
    n.data_p = L.Input(name='data',
                       shape=dict(dim=[1, 1, 225, 225]))
    n.conv1_p, n.relu1_p = conv_relu_triplet_dep(n.data_p, 15, 64, stride=3)
    n.pool1_p = pooling(n.relu1_p, 3, stride=2)
    n.conv2_p, n.relu2_p = conv_relu_triplet_dep(n.pool1_p, 5, 128)
    n.pool2_p = pooling(n.relu2_p, 3, stride=2)
    n.conv3_p, n.relu3_p = conv_relu_triplet_dep(n.pool2_p, 3, 256)
    n.conv4_p, n.relu4_p = conv_relu_triplet_dep(n.relu3_p, 3, 256)
    n.conv5_p, n.relu5_p = conv_relu_triplet_dep(n.relu4_p, 3, 256)
    n.pool5_p = pooling(n.relu5_p, 3, stride=2)
    n.fc6_p, n.relu6_p = fc_relu_triplet_dep(n.pool5_p, 512)
    n.fc7_p, n.relu7_p = fc_relu_triplet_dep(n.relu6_p, 512)
    #n.fc8_p, n.feat_p = fc_norm_triplet_dep(n.relu7_p, out_dim)
    n.feat_p = fc_triplet_dep(n.relu7_p, out_dim)
    #n.norm_p = L.Normalize(n.feat_p, in_place=True)
    proto = n.to_proto()
    proto.name = 'SketchTriplet'
    return proto
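For reference, a minimal sketch of turning the returned NetParameter into a deploy prototxt; the output file name here is illustrative:

with open('sketch_triplet_deploy.prototxt', 'w') as f:  # illustrative name
    f.write(str(siamese_pos(out_dim=100)))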
def readProtoSolverFile(filepath):
    # Use the protobuf module from an installed Caffe when available,
    # otherwise fall back to the locally compiled caffe_pb2.
    if caffe_flag:
        solver_config = caffe.proto.caffe_pb2.NetParameter()
    else:
        solver_config = caffe_parse.caffe_pb2.NetParameter()
    return readProtoFile(filepath, solver_config)
def read_proto_solver_file(file_path):
    if caffe_flag:
        solver_config = caffe.proto.caffe_pb2.NetParameter()
    else:
        solver_config = caffe_parse.caffe_pb2.NetParameter()
    return read_proto_file(file_path, solver_config)
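Neither snippet shows the readProtoFile / read_proto_file helper it calls. In the converter scripts these come from, it plausibly parses a text-format prototxt into the supplied message, roughly like this sketch:

from google.protobuf import text_format

def read_proto_file(file_path, parser_object):
    # Merge the text-format protobuf into the supplied (empty) message.
    with open(file_path, 'r') as f:
        text_format.Merge(f.read(), parser_object)
    return parser_object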
classify-samples.py (from the have-fun-with-machine-learning project, author humphd):
import os
import sys

def setup_caffe(caffe_root):
    # Load Caffe's Python interface from the specified path
    sys.path.insert(0, os.path.join(caffe_root, 'python'))
    global caffe
    global caffe_pb2
    import caffe
    from caffe.proto import caffe_pb2
    # Set Caffe to CPU mode so this works on as many machines as possible.
    caffe.set_mode_cpu()
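A hypothetical call, with an illustrative checkout path; once it returns, the caffe and caffe_pb2 globals are usable:

setup_caffe(os.path.expanduser('~/caffe'))  # illustrative path
net_param = caffe_pb2.NetParameter()        # protobuf module is now loaded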
def get_caffe_pb():
    """Compile (if needed) and load caffe_pb2 without a full Caffe install."""
    dir = get_dataset_path('caffe')
    caffe_pb_file = os.path.join(dir, 'caffe_pb2.py')
    if not os.path.isfile(caffe_pb_file):
        # Download caffe.proto and compile it with protoc.
        proto_path = download(CAFFE_PROTO_URL, dir)
        ret = os.system('cd {} && protoc caffe.proto --python_out .'.format(dir))
        assert ret == 0, \
            "caffe proto compilation failed! Did you install protoc?"
    import imp
    return imp.load_source('caffepb', caffe_pb_file)
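A sketch of using the returned module to inspect a pretrained model; the file name is illustrative, and ParseFromString is the standard protobuf binary parser:

caffepb = get_caffe_pb()
params = caffepb.NetParameter()
with open('model.caffemodel', 'rb') as f:  # illustrative file name
    params.ParseFromString(f.read())
print([l.name for l in params.layer])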
def _score_proto(self, xFeat, source_net=False, target_net=False, mult=1., deploy=False):
    from caffe.proto import caffe_pb2
    ns = self.netspec
    w_params = {'lr_mult': mult, 'decay_mult': mult}

    # Compute semantic space
    name = 'SCoRe/sem/fc1'
    layer_params = dict(weight_filler=FC_W_INIT, param=[w_params]) if not deploy else {}
    x = ns[name] = L.InnerProduct(xFeat, name=name, num_output=sum(self.code_dim),
                                  bias_term=False, **layer_params)

    # Note: in the case of completely binary semantics (attributes), the two
    # layers codewords+selector are compressed into 'SCoRe/obj/fc'. Otherwise,
    # semantic state scores are first computed in SCoRe/sem/fc2 and then grouped
    # into class scores using a selector in SCoRe/obj/fc. The selector is always
    # kept fixed, and the codewords are learned whenever code_coeff < inf.
    xSem = 'SCoRe/sem/fc1' if self.semantics == ATTRIBUTES else 'SCoRe/sem/fc2'
    xObj = 'SCoRe/obj/fc'
    lCW = xObj + '/params' if self.semantics == ATTRIBUTES else xSem + '/params'

    if self.semantics != ATTRIBUTES:
        w_params = {'name': xSem + '/params',
                    'share_mode': caffe_pb2.ParamSpec.STRICT,
                    # Lock the weights if code_coeff is inf
                    'lr_mult': mult if self.code_coeff < np.inf else 0.0,
                    'decay_mult': mult if self.code_coeff < np.inf else 0.0}
        layer_params = dict(weight_filler=FC_W_INIT, param=[w_params]) if not deploy else {}
        ns[xSem] = L.InnerProduct(x, name=xSem, num_output=sum(self.num_states),
                                  bias_term=False, **layer_params)

    # Compute object scores
    if source_net:
        w_params = {'name': xObj + '/params',
                    'share_mode': caffe_pb2.ParamSpec.STRICT,
                    # With attributes, the codewords live in this layer;
                    # lock the weights if code_coeff is inf.
                    'lr_mult': mult if self.code_coeff < np.inf and self.semantics == ATTRIBUTES else 0.0,
                    'decay_mult': mult if self.code_coeff < np.inf and self.semantics == ATTRIBUTES else 0.0}
        layer_params = dict(weight_filler=FC_W_INIT, param=[w_params],
                            include=dict(not_stage='TestZeroShot')) if not deploy else {}
        ns[xObj] = L.InnerProduct(ns[xSem], name=xObj, num_output=len(self.train_classes),
                                  bias_term=False, **layer_params)
    if target_net:
        name = xObj + '_target'
        w_params = {'name': name + '/params', 'share_mode': caffe_pb2.ParamSpec.STRICT,
                    'lr_mult': 0.0, 'decay_mult': 0.0}
        layer_params = dict(weight_filler=FC_W_INIT, param=[w_params],
                            include=dict(phase=caffe.TEST, stage='TestZeroShot')) if not deploy else {}
        # NetSpec cannot handle two layers with the same top blob defined for
        # different phases/stages. Workaround: set in_place=True with no inputs,
        # then define the bottom and top fields manually.
        ns[name] = L.InnerProduct(name=name, bottom=[xSem], ntop=1, top=[xObj], in_place=True,
                                  num_output=len(self.test_classes), bias_term=False, **layer_params)
    return xObj, xSem, lCW
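The workaround in the target branch generalizes to any pair of layers that must share a top blob across phases. A minimal standalone sketch of the same trick, assuming pycaffe is importable; the layer names here are illustrative:

import caffe
from caffe import layers as L

ns = caffe.NetSpec()
ns.data = L.Input(shape=dict(dim=[1, 256]))
# First definition of top 'fc', active only in the TRAIN phase.
ns.fc = L.InnerProduct(ns.data, num_output=10,
                       include=dict(phase=caffe.TRAIN))
# Second definition of the same top for TEST: pass no inputs so NetSpec does
# not re-register the blob, and write bottom/top into the layer proto by hand.
ns.fc_test = L.InnerProduct(name='fc', bottom=['data'], top=['fc'], ntop=1,
                            in_place=True, num_output=10,
                            include=dict(phase=caffe.TEST))
print(ns.to_proto())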
def caffenet(data, label=None, train=True, num_classes=1000,
             classifier_name='fc8', learn_all=False):
    """Returns a NetSpec specifying CaffeNet, following the original proto text
    specification (./models/bvlc_reference_caffenet/train_val.prototxt)."""
    n = caffe.NetSpec()
    n.data = data
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)
    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 4096, param=param)
    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7
    # always learn fc8 (param=learned_param)
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)
    # write the net to a temporary file and return its filename
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:  # text mode so str() writes on Python 3
        f.write(str(n.to_proto()))
        return f.name
# Define a function style_net which calls caffenet on data from the Flickr style dataset.
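A sketch of such a style_net, following the Caffe fine-tuning example the comment above comes from; caffe_root and NUM_STYLE_LABELS are assumed to be defined as in that notebook:

def style_net(train=True, learn_all=False, subset=None):
    # Feed Flickr Style images through caffenet with a Flickr-specific head.
    if subset is None:
        subset = 'train' if train else 'test'
    source = caffe_root + 'data/flickr_style/%s.txt' % subset
    transform_param = dict(mirror=train, crop_size=227,
                           mean_file=caffe_root + 'data/ilsvrc12/imagenet_mean.binaryproto')
    style_data, style_label = L.ImageData(
        transform_param=transform_param, source=source,
        batch_size=50, new_height=256, new_width=256, ntop=2)
    return caffenet(data=style_data, label=style_label, train=train,
                    num_classes=NUM_STYLE_LABELS,
                    classifier_name='fc8_flickr',
                    learn_all=learn_all)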
def SketchANet(data_params, num_class=20, val_mode=0):
    """ our version of Sketch-A-Net
    data_params: batch_size, source, shape, scale, rot
    val_mode: 0 if this is train net, 1 if test net, 2 if deploy net
    """
    n = caffe.NetSpec()
    if val_mode == 2:
        n.data = L.Input(name='data',
                         shape=dict(dim=[1, 1, 225, 225]))
    else:
        n.data, n.label = L.Python(module='data_layer', layer='DataLayer',
                                   ntop=2, phase=val_mode,
                                   param_str=str(data_params))
    #==========================================================================
    # n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB,
    #                          source=lmdb, transform_param=dict(scale=1./255),
    #                          ntop=2)
    #==========================================================================
    n.conv1, n.relu1 = conv_relu(n.data, 15, 64, stride=3)
    n.pool1 = pooling(n.relu1, 3, stride=2)
    n.conv2, n.relu2 = conv_relu(n.pool1, 5, 128)
    n.pool2 = pooling(n.relu2, 3, stride=2)
    n.conv3, n.relu3 = conv_relu(n.pool2, 3, 256, pad=1)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 256, 1, 1)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1)
    n.pool5 = pooling(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 512)
    if val_mode != 2:
        n.drop6 = L.Dropout(n.relu6, dropout_ratio=0.55, in_place=True)
        n.fc7, n.relu7 = fc_relu(n.drop6, 512)
        n.drop7 = L.Dropout(n.relu7, dropout_ratio=0.55, in_place=True)
        n.fc8 = fullconnect(n.drop7, num_class)
        n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
    else:  # deploy mode
        n.fc7, n.relu7 = fc_relu(n.relu6, 512)
        n.fc8 = fullconnect(n.relu7, num_class)
    if val_mode == 1:
        n.accuracy = L.Accuracy(n.fc8, n.label, phase=val_mode)
    proto = n.to_proto()
    proto.name = 'SketchANet'
    return proto
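A sketch of emitting all three variants; the data_params values and output file names are illustrative:

data_params = dict(batch_size=64, source='sketch_train.txt',
                   shape=(225, 225), scale=1.0, rot=0)  # illustrative values
for mode, fname in [(0, 'train.prototxt'), (1, 'test.prototxt'),
                    (2, 'deploy.prototxt')]:
    with open(fname, 'w') as f:
        f.write(str(SketchANet(data_params, num_class=20, val_mode=mode)))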
def pretrain_sketch(data_params, num_class=20, mode='train', learn_all=True):
    """ our version of Sketch-A-Net, for pretraining the sketch branch
    data_params: batch_size, source, shape, scale, rot
    mode: 'train', 'test' (validation), or 'deploy'
    """
    param = learned_param if learn_all else frozen_param
    n = caffe.NetSpec()
    if mode == 'deploy':
        n.data = L.Input(name='data',
                         shape=dict(dim=[1, 1, 225, 225]))
    else:
        n.data, n.label = L.Python(module='data_layer', layer='DataLayer',
                                   ntop=2, phase=Train_Mode[mode],
                                   param_str=str(data_params))
    n.conv1_a, n.relu1_a = conv_relu(n.data, 15, 64, 3, param=param, name_prefix='conv1_a')
    n.pool1_a = pooling(n.relu1_a, 3, 2)
    n.conv2_a, n.relu2_a = conv_relu(n.pool1_a, 5, 128, param=param, name_prefix='conv2_a')
    n.pool2_a = pooling(n.relu2_a, 3, 2)
    n.conv3_a, n.relu3_a = conv_relu(n.pool2_a, 3, 256, param=param, name_prefix='conv3_a')
    n.conv4_s, n.relu4_s = conv_relu(n.relu3_a, 3, 256, param=param, name_prefix='conv4_s')
    n.conv5_s, n.relu5_s = conv_relu(n.relu4_s, 3, 256, param=param, name_prefix='conv5_s')
    n.pool5_s = pooling(n.relu5_s, 3, 2)
    n.fc6_s, n.relu6_s = fc_relu(n.pool5_s, 512, param=param, name_prefix='fc6_s')
    if mode == 'train':
        n.drop6_s = fc7input = L.Dropout(n.relu6_s, dropout_ratio=0.55, in_place=True)
    else:
        fc7input = n.relu6_s
    n.fc7_s, n.relu7_s = fc_relu(fc7input, 512, param=param, name_prefix='fc7_s')
    if mode == 'train':
        n.drop7_s = fc8input = L.Dropout(n.relu7_s, dropout_ratio=0.55, in_place=True)
    else:
        fc8input = n.relu7_s
    #n.feat8_r_s = fullconnect(fc8input, 100, param=learned_param, name_prefix='fc8_r_s')
    n.feat8_s = fullconnect(fc8input, num_class, param=learned_param, name_prefix='fc8_s')
    if mode != 'deploy':
        n.loss = L.SoftmaxWithLoss(n.feat8_s, n.label)
    if mode == 'test':  # validation
        n.accuracy = L.Accuracy(n.feat8_s, n.label, phase=Train_Mode[mode])
    proto = n.to_proto()
    proto.name = 'SketchANet'
    return proto
def pretrain_image(data_params, num_class=20, mode='train', learn_all=True):
    """ our version of Sketch-A-Net, for pretraining the image branch
    data_params: batch_size, source, shape, scale, rot
    mode: 'train', 'test' (validation), or 'deploy'
    """
    param = learned_param if learn_all else frozen_param
    n = caffe.NetSpec()
    if mode == 'deploy':
        n.data = L.Input(name='data',
                         shape=dict(dim=[1, 1, 225, 225]))
    else:
        n.data, n.label = L.Python(module='data_layer', layer='DataLayer',
                                   ntop=2, phase=Train_Mode[mode],
                                   param_str=str(data_params))
    n.conv1_p, n.relu1_p = conv_relu(n.data, 15, 64, 3, param=param, name_prefix='conv1_p')
    n.pool1_p = pooling(n.relu1_p, 3, 2)
    n.conv2_p, n.relu2_p = conv_relu(n.pool1_p, 5, 128, param=param, name_prefix='conv2_p')
    n.pool2_p = pooling(n.relu2_p, 3, 2)
    n.conv3_p, n.relu3_p = conv_relu(n.pool2_p, 3, 256, param=param, name_prefix='conv3_p')
    n.conv4, n.relu4 = conv_relu(n.relu3_p, 3, 256, param=param, name_prefix='conv4')
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, param=param, name_prefix='conv5')
    n.pool5 = pooling(n.relu5, 3, 2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 512, param=param, name_prefix='fc6')
    if mode == 'train':
        n.drop6 = fc7input = L.Dropout(n.relu6, dropout_ratio=0.55, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 512, param=param, name_prefix='fc7')
    if mode == 'train':
        n.drop7 = fc8input = L.Dropout(n.relu7, dropout_ratio=0.55, in_place=True)
    else:
        fc8input = n.relu7
    #n.feat8_r = fullconnect(fc8input, 100, param=learned_param, name_prefix='fc8_r')
    n.feat8 = fullconnect(fc8input, num_class, param=learned_param, name_prefix='fc8')
    if mode != 'deploy':
        n.loss = L.SoftmaxWithLoss(n.feat8, n.label)
    if mode == 'test':  # validation
        n.accuracy = L.Accuracy(n.feat8, n.label, phase=Train_Mode[mode])
    proto = n.to_proto()
    proto.name = 'SketchANet'
    return proto
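As with SketchANet above, a sketch of writing out the two pretraining nets, again with illustrative data_params and file names:

data_params = dict(batch_size=64, source='train_list.txt',
                   shape=(225, 225), scale=1.0, rot=0)  # illustrative values
with open('pretrain_sketch.prototxt', 'w') as f:
    f.write(str(pretrain_sketch(data_params, num_class=20, mode='train')))
with open('pretrain_image.prototxt', 'w') as f:
    f.write(str(pretrain_image(data_params, num_class=20, mode='train')))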