def __init__(self, net_proto, net_weights, device_id, input_size=None):
caffe.set_mode_gpu()
caffe.set_device(device_id)
self._net = caffe.Net(net_proto, net_weights, caffe.TEST)
input_shape = self._net.blobs['data'].data.shape
if input_size is not None:
input_shape = input_shape[:2] + input_size
transformer = caffe.io.Transformer({'data': input_shape})
if self._net.blobs['data'].data.shape[1] == 3:
transformer.set_transpose('data', (2, 0, 1)) # move image channels to outermost dimension
transformer.set_mean('data', np.array([104, 117, 123])) # subtract the dataset-mean value in each channel
else:
pass # non-RGB data need not use the transformer
self._transformer = transformer
self._sample_shape = self._net.blobs['data'].data.shape
def __init__(self, videoThread):
threading.Thread.__init__(self)
print "Initializing recognition thread..."
self.videoThread = videoThread
#caffe.set_mode_cpu()
caffe.set_mode_gpu()
caffe.set_device(0)
# Model file and parameters are written by trainDnn.py
# Take the most recent parameter set
genderPath = "./dcnn_gender"
genderParamFiles = glob.glob(genderPath + os.sep + "*.caffemodel")
genderParamFiles = sorted(genderParamFiles, key=lambda x:os.path.getctime(x))
MODEL_FILE_GENDER = genderPath + os.sep + "deploy_gender.prototxt"
PRETRAINED_GENDER = genderParamFiles[-1]
MEAN_FILE_GENDER = genderPath + os.sep + "mean.binaryproto"
proto_data = open(MEAN_FILE_GENDER, 'rb').read()
a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
mean = caffe.io.blobproto_to_array(a)[0]
# Initialize net
self.gender_net = caffe.Classifier(MODEL_FILE_GENDER, PRETRAINED_GENDER, image_dims=(227,227),)
def __init__(self, videoThread):
threading.Thread.__init__(self)
print "Initializing age recognition thread..."
self.videoThread = videoThread
#caffe.set_mode_cpu()
caffe.set_mode_gpu()
# Model file and parameters are written by trainDnn.py
# Take the most recent parameter set
dcnnPath = "./dcnn_age"
paramFiles = glob.glob(dcnnPath + os.sep + "*.caffemodel")
paramFiles = sorted(paramFiles, key=lambda x:os.path.getctime(x))
MODEL_FILE = dcnnPath + os.sep + "deploy.prototxt"
PRETRAINED = paramFiles[-1]
MEAN_FILE = dcnnPath + os.sep + "mean.binaryproto"
blob = caffe.proto.caffe_pb2.BlobProto()
with open(MEAN_FILE, 'rb') as f:
data = f.read()
blob.ParseFromString(data)
# mean = np.array( caffe.io.blobproto_to_array(blob) ) [0]
# Added simple mean
mean = np.array([93.5940, 104.7624, 129.1863])
# Initialize net
self.net = caffe.Classifier(MODEL_FILE, PRETRAINED, image_dims=(224,224), mean=mean)
def __init__(self, solver_prototxt, output_dir,
pretrained_model=None):
"""Initialize the SolverWrapper."""
self.output_dir = output_dir
caffe.set_mode_gpu()
caffe.set_device(0)
self.solver = caffe.SGDSolver(solver_prototxt)
if pretrained_model is not None:
print('Loading pretrained model weights from {:s}'.format(pretrained_model))
self.solver.net.copy_from(pretrained_model)
self.solver_param = caffe_pb2.SolverParameter()
with open(solver_prototxt, 'rt') as f:
pb2.text_format.Merge(f.read(), self.solver_param)
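# A minimal usage sketch for the wrapper above (paths are placeholders and the
# enclosing class is assumed to be named SolverWrapper, as its docstring suggests;
# the real project presumably drives training through its own train method):
if __name__ == '__main__':
    sw = SolverWrapper('models/solver.prototxt', 'output',
                       pretrained_model='models/init.caffemodel')
    # run the full schedule defined in the solver prototxt
    sw.solver.step(sw.solver_param.max_iter)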
def layer_features(layers, model_file, deploy_file, imagemean_file,
image_files, gpu=True, gpu_id=0, show_pred=False):
"""extract features from various layers"""
if gpu:
caffe.set_device(gpu_id)
caffe.set_mode_gpu()
net = feed_net(model_file, deploy_file, imagemean_file, image_files,
show_pred)
#if type(layers) == str:
#return net.blobs[layers].data
for layer in layers:
if layer not in net.blobs:
raise TypeError('Invalid layer name: ' + layer)
yield (layer, net.blobs[layer].data)
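# Example of consuming the generator above; the model, deploy, mean and image
# paths are hypothetical placeholders, and the layer names must exist in the net.
if __name__ == '__main__':
    feats = layer_features(['conv5', 'fc7'],
                           'bvlc_reference_caffenet.caffemodel',
                           'deploy.prototxt',
                           'imagenet_mean.npy',
                           ['cat.jpg', 'dog.jpg'],
                           gpu=True, gpu_id=0)
    for name, data in feats:
        print(name, data.shape)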
def train_dir(nets, optim, optim2, dataloader, args):
global image_size, it, image_sizes
caffe.set_mode_gpu()
if args.debug:
image_sizes = [[416, 416]]
while True:
if it % 500 == 0:
image_size = image_sizes[random.randint(0, len(image_sizes) - 1)]
print(image_size)
#im = cv2.imread('/home/busta/data/90kDICT32px/background/n03085781_3427.jpg')
#try:
process_batch(nets, optim, optim2, image_size, args)
if it % valid_interval == 0:
validate(nets, dataloader, image_size = [416, 416], split_words=False)
#except:
# continue
def __init__(self, model_def_file, pretrained_model_file,
class_labels_file, gpu_mode):
logging.info('Loading net and associated files...')
if gpu_mode:
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
self.net = caffe.Classifier(
model_def_file, pretrained_model_file,
image_dims=(400, 400), raw_scale=400,
mean=np.load('{}/mean.npy'.format(REPO_DIRNAME)).mean(1).mean(1), channel_swap=(2, 1, 0)
)
with open(class_labels_file) as f:
labels_df = pd.DataFrame([
{
'synset_id': l.strip().split(' ')[0],
'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
}
for l in f.readlines()
])
self.labels = labels_df.sort_values('synset_id')['name'].values
def __init__(self, solver, output_dir, pretrained_model=None, gpu_id=0, data=None):
"""Initialize the SolverWrapper."""
self.output_dir = output_dir
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
self.solver = caffe.SGDSolver(solver)
if pretrained_model is not None:
print(('Loading pretrained model '
'weights from {:s}').format(pretrained_model))
self.solver.net.copy_from(pretrained_model)
self.solver_param = caffe_pb2.SolverParameter()
with open(solver, 'rt') as f:
pb2.text_format.Merge(f.read(), self.solver_param)
self.solver.net.layers[0].set_data(data)
def __init__(self, use_gpu=True, model=[]):
'''
Init net.
:param model: Network definition.
'''
if model == []:
raise("model should not be empty!")
print("Init NetTester: Use gpu: {}").format(use_gpu)
print("Network: {}").format(model)
if use_gpu:
caffe.set_device(0)
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
self.__net = caffe.Net(model, caffe.TRAIN)
def __init__(self,
minsize = 20,
threshold = [0.6, 0.7, 0.7],
factor = 0.709,
fastresize = False,
gpuid = 0):
self.minsize = minsize
self.threshold = threshold
self.factor = factor
self.fastresize = fastresize
model_P = './model/det1.prototxt'
weights_P = './model/det1.caffemodel'
model_R = './model/det2.prototxt'
weights_R = './model/det2.caffemodel'
model_O = './model/det3.prototxt'
weights_O = './model/det3.caffemodel'
caffe.set_mode_gpu()
caffe.set_device(gpuid)
self.PNet = caffe.Net(model_P, weights_P, caffe.TEST)
self.RNet = caffe.Net(model_R, weights_R, caffe.TEST)
self.ONet = caffe.Net(model_O, weights_O, caffe.TEST)
def gen_net():
caffe.set_device(1)
caffe.set_mode_gpu()
filename = '2007_000032.jpg'
im = Image.open(filename)
m = np.asarray(im, dtype=np.float32)
m = m[:,:,::-1]
m -= np.array((104.00698793,116.66876762,122.67891434))
m = m.transpose((2, 0, 1))
net = caffe.Net(
"deploy.prototxt",
#"train_iter_" + str(num) + ".caffemodel",
#"/data/VGG16/caffemodel",
"good.caffemodel",
caffe.TRAIN)
net.blobs["data"].reshape(1, *m.shape)
net.blobs["data"].data[...] = m
net.forward()
return net
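# A hypothetical follow-up to gen_net(): FCN-style deploy nets typically end in a
# blob such as 'score' (name assumed here); the channel-wise argmax of that blob
# gives a per-pixel class-label map for the input image.
if __name__ == '__main__':
    net = gen_net()
    score = net.blobs['score'].data[0]              # (num_classes, H, W)
    label_map = score.argmax(axis=0).astype(np.uint8)
    print(label_map.shape, np.unique(label_map))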
def gen_net(num):
caffe.set_device(0)
caffe.set_mode_gpu()
filename = '2007_000032.jpg'
im = Image.open(filename)
m = np.asarray(im, dtype=np.float32)
m = m[:,:,::-1]
m -= np.array((104.00698793,116.66876762,122.67891434))
m = m.transpose((2, 0, 1))
net = caffe.Net(
"train_val.prototxt",
"train_iter_" + str(num) + ".caffemodel",
# "/data/VGG16/caffemodel",
# "../fcn-32s/good.caffemodel",
caffe.TRAIN)
net.blobs["data"].reshape(1, *m.shape)
net.blobs["data"].data[...] = m
net.forward()
return net
def load_nets(args, cur_gpu):
# initialize solver and feature net,
# RNN should be initialized before CNN, because CNN cudnn conv layers
# may assume using all available memory
caffe.set_mode_gpu()
caffe.set_device(cur_gpu)
solver = caffe.SGDSolver(args.solver)
if args.snapshot:
print "Restoring history from {}".format(args.snapshot)
solver.restore(args.snapshot)
net = solver.net
if args.weights:
print "Copying weights from {}".format(args.weights)
net.copy_from(args.weights)
return solver, net
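# Hypothetical command-line wiring for load_nets() above; the attribute names
# (solver, snapshot, weights) mirror how the function reads them from args.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--solver', required=True)
    parser.add_argument('--snapshot', default=None)
    parser.add_argument('--weights', default=None)
    parser.add_argument('--gpu', type=int, default=0)
    cli_args = parser.parse_args()
    solver, net = load_nets(cli_args, cur_gpu=cli_args.gpu)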
def load_nets(args, cur_gpu):
# initialize solver and feature net,
# RNN should be initialized before CNN, because CNN cudnn conv layers
# may assume using all available memory
caffe.set_mode_gpu()
caffe.set_device(cur_gpu)
solver = caffe.SGDSolver(args.solver)
if args.snapshot:
print "Restoring history from {}".format(args.snapshot)
solver.restore(args.snapshot)
rnn = solver.net
if args.weights:
rnn.copy_from(args.weights)
feature_net = caffe.Net(args.feature_net, args.feature_param, caffe.TEST)
# fold the bbox regression target normalization (means/stds) back into the net weights
with open(args.bbox_mean, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_std, 'rb') as f:
bbox_stds = cPickle.load(f)
feature_net.params['bbox_pred_vid'][0].data[...] = \
feature_net.params['bbox_pred_vid'][0].data * bbox_stds[:, np.newaxis]
feature_net.params['bbox_pred_vid'][1].data[...] = \
feature_net.params['bbox_pred_vid'][1].data * bbox_stds + bbox_means
return solver, feature_net, rnn, bbox_means, bbox_stds
def load_models(args):
# load rnn model
caffe.set_mode_gpu()
if args.gpus is None:
caffe.set_device(args.job_id - 1)
else:
assert args.job_id <= len(args.gpus)
caffe.set_device(args.gpus[args.job_id-1])
if args.lstm_param != '':
rnn_net = caffe.Net(args.lstm_def, args.lstm_param, caffe.TEST)
print('Loaded RNN network from {:s}.'.format(args.lstm_def))
else:
rnn_net = caffe.Net(args.lstm_def, caffe.TEST)
print('WARNING: dummy RNN network created.')
# load feature model
feature_net = caffe.Net(args.def_file, args.param, caffe.TEST)
print('Loaded feature network from {:s}.'.format(args.def_file))
return feature_net, rnn_net
def __init__(self,params):
self.dimension = params['dimension']
self.dataset = params['dataset']
self.pooling = params['pooling']
# Read image lists
with open(params['query_list'],'r') as f:
self.query_names = f.read().splitlines()
with open(params['frame_list'],'r') as f:
self.database_list = f.read().splitlines()
# Parameters needed
self.layer = params['layer']
self.save_db_feats = params['database_feats']
# Init network
if params['gpu']:
caffe.set_mode_gpu()
caffe.set_device(0)
else:
caffe.set_mode_cpu()
print "Extracting from:", params['net_proto']
cfg.TEST.HAS_RPN = True
self.net = caffe.Net(params['net_proto'], params['net'], caffe.TEST)
def __init__(self, model_file, pretrained_file, mean_value=None,
layer=['pool5'], input_size = None ):
caffe.set_mode_gpu()
caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
# get name input layer
self.list_layers = layer
self.mean_value = mean_value
# set transformer object
self.transformer = caffe.io.Transformer({'data': self.blobs['data'].data.shape})
self.transformer.set_transpose( 'data', (2,0,1) )
if mean_value is not None:
self.transformer.set_mean('data', mean_value)
self.transformer.set_raw_scale('data', 255)
self.transformer.set_channel_swap('data', (2,1,0))
if input_size is not None:
#reshape the input
print "New input! {}".format(input_size)
self.reshape_input( input_size[0], input_size[1], input_size[2], input_size[3] )
def solve(proto, snapshot, gpus, timing, uid, rank):
caffe.set_mode_gpu()
caffe.set_device(gpus[rank])
caffe.set_solver_count(len(gpus))
caffe.set_solver_rank(rank)
caffe.set_multiprocess(True)
solver = caffe.SGDSolver(proto)
if snapshot and len(snapshot) != 0:
solver.restore(snapshot)
nccl = caffe.NCCL(solver, uid)
nccl.bcast()
if timing and rank == 0:
time(solver, nccl)
else:
solver.add_callback(nccl)
if solver.param.layer_wise_reduce:
solver.net.after_backward(nccl)
solver.step(solver.param.max_iter)
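# A minimal multi-GPU launcher sketch for solve() above, following the standard
# Caffe NCCL pattern (one worker process per GPU sharing a single NCCL uid);
# requires a Caffe build with NCCL support.
from multiprocessing import Process

def train(proto, snapshot, gpus, timing=False):
    uid = caffe.NCCL.new_uid()
    caffe.init_log()
    procs = []
    for rank in range(len(gpus)):
        p = Process(target=solve,
                    args=(proto, snapshot, gpus, timing, uid, rank))
        p.daemon = True
        p.start()
        procs.append(p)
    for p in procs:
        p.join()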
def __init__(self, hyperparams, dO, dU):
config = copy.deepcopy(POLICY_OPT_CAFFE)
config.update(hyperparams)
PolicyOpt.__init__(self, config, dO, dU)
self.batch_size = self._hyperparams['batch_size']
if self._hyperparams['use_gpu']:
caffe.set_device(self._hyperparams['gpu_id'])
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
self.init_solver()
self.caffe_iter = 0
self.var = self._hyperparams['init_var'] * np.ones(dU)
self.policy = CaffePolicy(self.solver.test_nets[0],
self.solver.test_nets[1],
self.var)
def run(self):
caffe.set_mode_gpu()
while not self.videoThread.isTerminated():
while not self.videoThread.isTerminated() and self.videoThread.getEventReady():
time.sleep(0.1)
print("Gender recognition sleep")
#print "Detecting..."
crop = None
while crop is None:
crop, rectangle = self.videoThread.getCropEx(1)
time.sleep(0.05)
if crop is None: # No crops available yet
time.sleep(0.1)
crop = crop.astype(np.float32)
probabilities = self.gender_net.predict([crop], oversample=False).ravel() # [Male, Female]
self.videoThread.setGender(probabilities)
def run(self):
caffe.set_mode_gpu()
while not self.videoThread.isTerminated():
while not self.videoThread.isTerminated() and self.videoThread.getEventReady():
time.sleep(0.1)
print("Age recognition sleep")
#print "Detecting..."
crop = None
while crop is None:
crop, rectangle = self.videoThread.getCropEx(0)
time.sleep(0.05)
if crop is None: # No crops available yet
time.sleep(0.1)
crop = crop.astype(np.float32)
out = self.net.predict([crop], oversample=False).ravel()
age = np.dot(out, range(101))
self.videoThread.setAge(age)
def get_net(caffemodel, deploy_file, use_gpu=True):
"""
Returns an instance of caffe.Net
Arguments:
caffemodel -- path to a .caffemodel file
deploy_file -- path to a .prototxt file
Keyword arguments:
use_gpu -- accepted for compatibility; this variant forces CPU mode below
"""
#if use_gpu:
# caffe.set_mode_gpu()
caffe.set_mode_cpu()
# load a new model
return caffe.Net(deploy_file, caffemodel, caffe.TEST)
# Transformer function to perform image transformation
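# The transformer function referred to above is not included in this excerpt; a
# minimal sketch, assuming an ImageNet-style mean image stored as a .npy file:
def get_transformer(net, mean_file=None):
    shape = net.blobs['data'].data.shape            # e.g. (1, 3, 227, 227)
    t = caffe.io.Transformer({'data': shape})
    t.set_transpose('data', (2, 0, 1))              # H x W x C -> C x H x W
    if mean_file is not None:
        t.set_mean('data', np.load(mean_file).mean(1).mean(1))
    t.set_raw_scale('data', 255)                    # [0, 1] floats -> [0, 255]
    t.set_channel_swap('data', (2, 1, 0))           # RGB -> BGR
    return t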
def get_net(caffemodel, deploy_file, use_gpu=True):
"""
Returns an instance of caffe.Net
Arguments:
caffemodel -- path to a .caffemodel file
deploy_file -- path to a .prototxt file
Keyword arguments:
use_gpu -- if True, use the GPU for inference
"""
if use_gpu:
caffe.set_mode_gpu()
# load a new model
return caffe.Net(deploy_file, caffemodel, caffe.TEST)
def get_aligner(caffe_model_path, use_more_stage=False):
caffe.set_mode_gpu()
# PNet = caffe.Net(caffe_model_path + "/det1.prototxt",
# caffe_model_path + "/det1.caffemodel", caffe.TEST)
if use_more_stage:
RNet = caffe.Net(caffe_model_path + "/det2.prototxt",
caffe_model_path + "/det2.caffemodel", caffe.TEST)
else:
RNet = None
ONet = caffe.Net(caffe_model_path + "/det3.prototxt",
caffe_model_path + "/det3.caffemodel", caffe.TEST)
LNet = caffe.Net(caffe_model_path + "/det4.prototxt",
caffe_model_path + "/det4.caffemodel", caffe.TEST)
# return (PNet, RNet, ONet)
return (RNet, ONet, LNet)
# return (RNet, ONet, None)
def main(args):
caffe.set_mode_gpu()
param_pairs = [('fc6', 'fc6-conv'),
('fc7', 'fc7-conv'),
('fc8', 'fc8-conv')]
make_fully_conv('/home/pierre/tmpModels/VGG/VGG_ILSVRC_16_layers_deploy.prototxt',
'/home/pierre/tmpModels/VGG/VGG_ILSVRC_16_layers.caffemodel',
'/home/pierre/tmpModels/VGG/VGG_ILSVRC_16_layers_fcn_deploy.prototxt',
param_pairs,
'/home/pierre/tmpModels/VGG/VGG_ILSVRC_16_layers_conv.caffemodel',
)
return 0
def main(argv):
sport = 'long_jump'
model = 'snap_iter_50000.caffemodel'
#---
weights = model_root + 'fcn/' + sport + '/' + model
netf = './fcn/' + sport + '/deploy.prototxt'
gpu = 0
caffe.set_device(gpu)
caffe.set_mode_gpu()
net = caffe.Net(netf, weights, caffe.TEST)
im_head = '/export/home/mfrank/data/OlympicSports/clips/'
im_head = '/export/home/mfrank/data/OlympicSports/patches/'
test_path_file = 'fcn/' + sport + '/test.txt'
train_path_file = 'fcn/' + sport + '/train.txt'
inferfile(net, train_path_file, im_head)
ifp_morris.apply_overlayfcn(train_path_file, factor=4)
inferfile(net, test_path_file, im_head)
ifp_morris.apply_overlayfcn(test_path_file, factor=4)