def get_net(self):
    #caffe.set_mode_cpu()
    net = caffe.Net(self.deploy, self.model, caffe.TEST)
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2,0,1))      # HxWxC -> CxHxW
    transformer.set_mean('data', np.load(self.mean).mean(1).mean(1))  # per-channel mean subtraction
    transformer.set_raw_scale('data', 255)          # [0, 1] -> [0, 255]
    transformer.set_channel_swap('data', (2,1,0))   # RGB -> BGR
    return net, transformer
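# A minimal usage sketch for get_net(); `obj` and 'image.jpg' are hypothetical
# placeholders for an instance of this class and an input file.
net, transformer = obj.get_net()
img = caffe.io.load_image('image.jpg')                 # HxWxC float in [0, 1]
net.blobs['data'].data[0] = transformer.preprocess('data', img)
out = net.forward()
print(out[net.outputs[0]][0].argmax())                 # index of the top class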
def __init__(self, hyperparams):
self._hyperparams = hyperparams
if self._hyperparams['use_gpu']:
caffe.set_device(self._hyperparams['gpu_id'])
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
self.solver = caffe.get_solver(self._hyperparams['solver'])
self.X = None
self.caffe_iter = 0
def __init__(self, hyperparams, dO, dU):
config = copy.deepcopy(POLICY_OPT_CAFFE)
config.update(hyperparams)
PolicyOpt.__init__(self, config, dO, dU)
self.batch_size = self._hyperparams['batch_size']
if self._hyperparams['use_gpu']:
caffe.set_device(self._hyperparams['gpu_id'])
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
self.init_solver()
# Load parameters from caffemodel file
if 'init_net' in self._hyperparams:
self.solver.net.copy_from(self._hyperparams['init_net'])
self.caffe_iter = 0
self.var = self._hyperparams['init_var'] * np.ones(dU)
self.policy = CaffePolicy(self.solver.test_nets[0],
self.solver.test_nets[1],
self.var)
self.policy.bias = None
self.policy.scale = None
    if 'init_normalization' in self._hyperparams:
        with open(self._hyperparams['init_normalization'], 'rb') as fin:
            normalization_data = pickle.load(fin)
        self.policy.bias = normalization_data['bias']
        self.policy.scale = normalization_data['scale']
def initialize():
    print('initialize ... ')
    sys.path.insert(0, caffe_root + 'python')
    caffe.set_mode_cpu()
    net = caffe.Net(deployPrototxt, modelFile, caffe.TEST)
    return net
def caffe_set_device(device_type, device_id=0):
    # Select CPU or GPU mode globally; device_id only matters for the GPU.
    if device_type.lower() == "gpu":
        caffe.set_device(device_id)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
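# Usage sketch for the helper above:
caffe_set_device("gpu", 0)   # run on GPU 0; use caffe_set_device("cpu") to fall back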
def __init__(self):
    if pa.GPU:
        caffe.set_device(pa.device)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    self.solver = caffe.SGDSolver(pa.solver)
    if pa.pretrain != "":
        self.solver.net.copy_from(pa.pretrain)
    # Parse the solver prototxt (assumes `import google.protobuf as pb2`)
    self.solver_param = caffe_pb2.SolverParameter()
    with open(pa.solver, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)
    #self.output_dir=pa.output_dir
    self.solver.net.layers[0].set_queue()
def load_weigths_from_caffe(self, protofile, caffemodel):
caffe.set_mode_cpu()
net = caffe.Net(protofile, caffemodel, caffe.TEST)
for name, layer in self.models.items():
if isinstance(layer, nn.Conv2d):
caffe_weight = net.params[name][0].data
layer.weight.data = torch.from_numpy(caffe_weight)
if len(net.params[name]) > 1:
caffe_bias = net.params[name][1].data
layer.bias.data = torch.from_numpy(caffe_bias)
continue
if isinstance(layer, nn.BatchNorm2d):
caffe_means = net.params[name][0].data
caffe_var = net.params[name][1].data
layer.running_mean = torch.from_numpy(caffe_means)
layer.running_var = torch.from_numpy(caffe_var)
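            # Note (assumption): Caffe BatchNorm layers usually store a third
            # blob, a moving-average scale factor; strictly, the running stats
            # should be divided by net.params[name][2].data[0], as the
            # get_caffe_variables() snippet further down does. This loader
            # skips that step.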
# find the scale layer
top_name_of_bn = self.layer_map_to_top[name][0]
scale_name = ''
for caffe_layer in self.net_info['layers']:
if caffe_layer['type'] == 'Scale' and caffe_layer['bottom'][0] == top_name_of_bn:
scale_name = caffe_layer['name']
break
if scale_name != '':
caffe_weight = net.params[scale_name][0].data
layer.weight.data = torch.from_numpy(caffe_weight)
                if len(net.params[scale_name]) > 1:
caffe_bias = net.params[scale_name][1].data
layer.bias.data = torch.from_numpy(caffe_bias)
def __init__(self,
             model_def,
             model_weights,
             y_tag_json_path,
             is_mode_cpu=True,
             width=32,
             height=32):
    if is_mode_cpu:
        caffe.set_mode_cpu()  # select the mode before constructing the net
    self.net = caffe.Net(model_def,
                         model_weights,
                         caffe.TEST)
    self.y_tag_json = json.load(open(y_tag_json_path, "r"))
    self.width = width
    self.height = height
def __init__(self,
             model_def,
             model_weights,
             y_tag_json_path,
             is_mode_cpu=True,
             width=64,
             height=64):
    if is_mode_cpu:
        caffe.set_mode_cpu()  # select the mode before constructing the net
    self.net = caffe.Net(model_def,
                         model_weights,
                         caffe.TEST)
    self.y_tag_json = json.load(open(y_tag_json_path, "r"))
    self.width = width
    self.height = height
def get_symbol_classifications(symbols):
    # Environment variables are strings, so test the value rather than mere presence.
    if os.environ.get("IS_GPU", "").lower() in ("1", "true"):
        caffe.set_device(0)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
classifier = caffe.Classifier(os.path.join(os.environ["JAR_NOJAR_MODELS_DIR"], "deploy.prototxt"),
os.path.join(os.environ["JAR_NOJAR_MODELS_DIR"], "weights.caffemodel"),
image_dims=[64, 64],
raw_scale=255.0)
LOGGER.info("Classifying " + str(len(symbols)) + " inputs.")
predictions = classifier.predict([s[1] for s in symbols])
symbol_sequence = list()
classes = np.array([0, 1])
for i, prediction in enumerate(predictions):
idx = list((-prediction).argsort())
prediction = classes[np.array(idx)]
if prediction[0] == 1:
symbol_sequence.append([symbols[i], "jar"])
elif prediction[0] == 0:
symbol_sequence.append([symbols[i], "no-jar"])
return symbol_sequence
def get_depth(imagename):
caffe.set_mode_cpu()
netFile = 'model/net_deploy.prototxt'
modelFile = 'model/model_norm_abs_100k.caffemodel'
net = caffe.Net(netFile, modelFile, caffe.TEST)
input_image = cv2.imread(imagename)
res_input=cv2.resize(input_image,(420,320),interpolation=cv2.INTER_CUBIC)
    input = loadImage(imagename, 3, WIDTH, HEIGHT)
    input *= 255    # [0, 1] -> [0, 255]
    input -= 127    # roughly zero-center the input
output = testNet(net, input)
outWidth = OUT_WIDTH
outHeight = OUT_HEIGHT
scaleW = float(GT_WIDTH) / float(OUT_WIDTH)
scaleH = float(GT_HEIGHT) / float(OUT_HEIGHT)
output = scipy.ndimage.zoom(output, (1,1,scaleH,scaleW), order=3)
outWidth *= scaleW
outHeight *= scaleH
#input += 127
#input = input / 255.0
#input = np.transpose(input, (0,2,3,1))
#input = input[:,:,:,(2,1,0)]
output = ProcessToOutput(output)
path1 = DIR+'img.png'
path2 = DIR+'depth.png'
cv2.imwrite(path1, res_input)
printImage(output, path2, 1, int(outWidth), int(outHeight))
def load_and_fill_biases(src_model, src_weights, dst_model, dst_weights):
with open(src_model) as f:
model = caffe.proto.caffe_pb2.NetParameter()
pb.text_format.Merge(f.read(), model)
for i, layer in enumerate(model.layer):
if layer.type == 'Convolution': # or layer.type == 'Scale':
# Add bias layer if needed
            if not layer.convolution_param.bias_term:
layer.convolution_param.bias_term = True
layer.convolution_param.bias_filler.type = 'constant'
layer.convolution_param.bias_filler.value = 0.0
with open(dst_model, 'w') as f:
f.write(pb.text_format.MessageToString(model))
caffe.set_mode_cpu()
net_src = caffe.Net(src_model, src_weights, caffe.TEST)
net_dst = caffe.Net(dst_model, caffe.TEST)
for key in net_src.params.keys():
for i in range(len(net_src.params[key])):
net_dst.params[key][i].data[:] = net_src.params[key][i].data[:]
    if dst_weights is not None:
        # Store params
        net_dst.save(dst_weights)
    return net_dst
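# A hedged usage sketch of load_and_fill_biases(); all four file names are
# hypothetical placeholders.
net = load_and_fill_biases('deploy.prototxt', 'weights.caffemodel',
                           'deploy_bias.prototxt', 'weights_bias.caffemodel')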
def InitCaffe():
    # Specify which hardware to use for classification
    #caffe.set_mode_cpu()   # on the CPU
    caffe.set_mode_gpu()    # on the GPU
    model_def = 'deploy.prototxt'
    model_weights = 'bvlc_reference_caffenet.caffemodel'  # ImageNet model file
    global net
    net = caffe.Net(model_def,      # defines the structure of the model
                    model_weights,  # contains the trained weights
                    caffe.TEST)     # we will use the net in test mode
    # Load the mean ImageNet image for mean subtraction.
    mu = np.load('ilsvrc_2012_mean.npy')
    mu = mu.mean(1).mean(1)
    # Create the transformer for the input blob called 'data'
    global transformer
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2,0,1))     # move image channels to the outermost dimension
    transformer.set_mean('data', mu)               # subtract the dataset mean from each channel
    transformer.set_raw_scale('data', 255)         # rescale from [0, 1] to [0, 255]
    transformer.set_channel_swap('data', (2,1,0))  # convert the color space from RGB to BGR
    # Set the size of the input.
    # Leave the default for now; it can be changed later for different batch sizes.
    net.blobs['data'].reshape(50,        # batch size
                              3,         # 3-channel (BGR) images
                              227, 227)  # images will be resized to 227x227
# The image passed as a parameter will be analyzed
def __init__(self, modelFile, pretrainedFile):
    caffe.set_mode_cpu()  # select the mode before initializing the net
    caffe.Net.__init__(self, modelFile, pretrainedFile, caffe.TEST)
def load_caffe(model_desc, model_file):
"""
return a dict of params
"""
import caffe
caffe.set_mode_cpu()
net = caffe.Net(model_desc, model_file, caffe.TEST)
param_dict = CaffeLayerProcessor(net).process()
return param_dict
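# Hypothetical usage of load_caffe(); the file names are placeholders.
params = load_caffe('deploy.prototxt', 'model.caffemodel')
print(sorted(params.keys()))   # inspect which layers were extracted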
def set_caffe_mode(gpu):
''' Set whether caffe runs in gpu or not, input is boolean '''
if gpu:
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
def get_caffe_variables(self, net_proto, net_model=None, bn_name=''):
    """Collect Caffe parameters into a dict keyed by TensorFlow variable names."""
    caffe.set_mode_cpu()
    self.blob_dict = {}
    if net_model is not None:
        self.net_caffe = caffe.Net(net_proto, net_model, caffe.TEST)
    else:
        self.net_caffe = caffe.Net(net_proto, caffe.TEST)
    # Caffe net params map layer_name -> [w, b]
    # bn_name: a Caffe BN layer's name must contain bn_name
    # Note: TensorFlow variable names must match the Caffe param names,
    # so we rewrite the Caffe param names and store them in blob_dict
for layer_name,param in self.net_caffe.params.items():
param_len = len(param)
        # a batch-normalization layer's name must contain 'bn_name';
        # you can modify this test
if param_len == 3 and layer_name.find(bn_name) >= 0:
scale_factor = 1.0 / param[2].data[0]
mean = param[0].data * scale_factor
variance = param[1].data *scale_factor
name = str(layer_name) + "/weights:0"
self.blob_dict[name] = mean
name = str(layer_name) + "/biases:0"
self.blob_dict[name] = variance
elif param_len == 2:
name = str(layer_name) + "/weights:0"
self.blob_dict[name] = param[0].data
name = str(layer_name) + "/biases:0"
self.blob_dict[name] = param[1].data
elif param_len == 1:
name = str(layer_name) + "/weights:0"
self.blob_dict[name] = param[0].data
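# A minimal sketch of pushing blob_dict into a TensorFlow 1.x graph; the
# `converter` instance and the matching variable names are assumptions.
import tensorflow as tf

with tf.Session() as sess:
    for var in tf.global_variables():
        if var.name in converter.blob_dict:
            sess.run(var.assign(converter.blob_dict[var.name]))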