def __init__(self, args):
super().__init__(args, with_video_output=False)
if self.vgg_model_path is None:
self.vgg_model_path = "/media/" + getpass.getuser() + "/Data/AMBR_data/ml"
self.vgg_model_filename = os.path.join(self.vgg_model_path, self.vgg_model_filename)
self.vgg_pretrained_filename = os.path.join(self.vgg_model_path, self.vgg_pretrained_filename)
if self.output_datafile is None:
self.output_datafile = "{:s}_features.npz".format(self.in_video[:-4])
self.prev_frame_centroid = None
if self.caffe_cpu:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
self.extractor = None
self.blank_features = None
if not self.no_vgg:
self.extractor = VGGFeatureExtractor(model_file=self.vgg_model_filename,
pretrained_file=self.vgg_pretrained_filename)
self.blank_features = self.extractor.extract_single(np.zeros((256, 256, 3), dtype=np.uint8), blobs=['fc7'])[
'fc7']
self.features = []
self.present_flags = []
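A hedged sketch of how a per-frame hook for this class might consume the extractor set up above; the process_frame name, the cv2 resize, and the foreground-mask test are assumptions, not the original method.

import cv2
import numpy as np

def process_frame(self, frame, foreground_mask):
    # Hypothetical per-frame hook: extract fc7 features when the subject is present,
    # otherwise fall back to the precomputed blank_features.
    present = bool(np.any(foreground_mask))
    if present and self.extractor is not None:
        crop = cv2.resize(frame, (256, 256))  # match the blank-feature input size above
        feats = self.extractor.extract_single(crop, blobs=['fc7'])['fc7']
    else:
        feats = self.blank_features
    self.features.append(feats)
    self.present_flags.append(present)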
def get_predictions(region_crops):
if os.environ.get("IS_GPU"):  # environment values are strings; any non-empty value selects GPU mode
caffe.set_device(0)
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
classifier = caffe.Classifier(os.path.join(os.environ["TEXT_NOTEXT_MODELS_DIR"], "deploy.prototxt"),
os.path.join(os.environ["TEXT_NOTEXT_MODELS_DIR"], "weights.caffemodel"),
mean=np.array([104, 117, 123], dtype='f4'),
image_dims=[224, 224],
raw_scale=255.0,
channel_swap=[2, 1, 0])
LOGGER.info("Classifying " + str(len(region_crops)) + " inputs.")
predictions = classifier.predict(region_crops)
return predictions
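A minimal calling sketch for get_predictions(); the environment variables and image paths below are placeholders, and the meaning of each output column depends on how the text/no-text model was trained.

import os
import numpy as np
import caffe

os.environ["IS_GPU"] = "1"
os.environ["TEXT_NOTEXT_MODELS_DIR"] = "/path/to/text_notext_models"

# caffe.io.load_image returns an H x W x 3 float image in [0, 1], RGB order;
# raw_scale and channel_swap in the Classifier convert it to BGR in [0, 255].
crops = [caffe.io.load_image(p) for p in ["crop_0.jpg", "crop_1.jpg"]]
probabilities = get_predictions(crops)
predicted_classes = np.argmax(probabilities, axis=1)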
def run(self, _, app_context):
"""run the action"""
import caffe
# init CPU/GPU mode
cpu_mode = app_context.get_config('caffe.cpu_mode')
if cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(0)
# load test model
test_model_file = "models/" + app_context.get_config('caffe.test_model')
trained_data_file = "cache/data/" + app_context.get_config('caffe.trained_data')
test_net = caffe.Net(test_model_file, trained_data_file, caffe.TEST)
app_context.params['test_net'] = test_net
logging.getLogger(__name__).info('Loaded neural network: ' + trained_data_file)
def _loadModel(self, model_dirs, id):
print('loading model from {}'.format(model_dirs))
model_file = osp.join(model_dirs, 'vgg16.prototxt')
model_weights = osp.join(model_dirs, 'vgg16.caffemodel')
mean_file = osp.join(model_dirs, 'vgg16_mean.npy')
if id == -1:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(id)
net = caffe.Net(model_file, model_weights, caffe.TEST)
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_mean('data', np.load(mean_file).mean(1).mean(1))
transformer.set_channel_swap('data', (2, 1, 0))
transformer.set_transpose('data', (2, 0, 1))
#transformer.set_raw_scale('data', 255)
self.net = net
self.transformer = transformer
self.style_layers = VGG16_STYLES
self.content_layers = VGG16_CONTENTS
self.layers = VGG16_LAYERS
print('model loading done')
def get_caffe_model(caffe_dir, caffe_model, gpu=True,
image_dims=(256, 256),
mean_file='default',
raw_scale=255.0,
channel_swap=(2,1,0),
input_scale=None):
if mean_file == 'default':
mean_file = os.path.join(caffe_dir, 'python', 'caffe', 'imagenet', 'ilsvrc_2012_mean.npy')
model_path = os.path.join(caffe_dir, 'models', caffe_model, '%s.caffemodel'%caffe_model)
model_def = os.path.join(caffe_dir, 'models', caffe_model, 'deploy.prototxt')
print('Loading mean file %s' % mean_file)
mean = np.load(mean_file).mean(1).mean(1)
if gpu:
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
net = caffe.Classifier(model_def, model_path,
image_dims=image_dims, mean=mean,
input_scale=input_scale, raw_scale=raw_scale,
channel_swap=channel_swap)
return net
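A hedged usage example for get_caffe_model(); the Caffe root and model name are placeholders and assume the stock BVLC directory layout.

import caffe

net = get_caffe_model('/opt/caffe', 'bvlc_reference_caffenet', gpu=False)
image = caffe.io.load_image('example.jpg')        # H x W x 3, RGB, float in [0, 1]
probs = net.predict([image], oversample=False)    # one softmax vector per input image
print(probs[0].argmax())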
def __init__(self, model_path, deploy_path, mean, crop = 227, layer = 'fc7'):
self.net = caffe.Net(deploy_path, model_path, caffe.TEST)
self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
self.transformer.set_mean('data', mean)
self.transformer.set_transpose('data', (2, 0, 1))
self.transformer.set_channel_swap('data', (2, 1, 0))
self.transformer.set_raw_scale('data', 255.0)
self.crop = crop
self.image = []
self.layer = layer
caffe.set_mode_gpu()
print "Mean:", mean
def __init__(self, deploy, pretrained, mean, labels, gpu = False):
if gpu:
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()  # on Windows, only CPU mode is supported
self.__labels = self.load_labels(labels)
mean_ar = self.convert(mean)
if True:
self.__net = caffe.Classifier(deploy, pretrained,
mean = mean_ar.mean(1).mean(1),
channel_swap = (2, 1, 0),
raw_scale = 255,
image_dims = (256, 256))
else:
self.__net = caffe.Net(deploy, pretrained, caffe.TEST)
print(self.__net.blobs['data'].data.shape)
self.__transformer = caffe.io.Transformer({'data': self.__net.blobs['data'].data.shape})
self.__transformer.set_transpose('data', (2,0,1)) # height*width*channel -> channel*height*width
self.__transformer.set_mean('data', mean_ar)
self.__transformer.set_raw_scale('data', 255)
self.__transformer.set_channel_swap('data', (2,1,0)) # RGB -> BGR
def __init__(self, net_proto, net_weights, device_id, input_size=None):
caffe.set_mode_gpu()
caffe.set_device(device_id)
self._net = caffe.Net(net_proto, net_weights, caffe.TEST)
input_shape = self._net.blobs['data'].data.shape
if input_size is not None:
input_shape = input_shape[:2] + input_size
transformer = caffe.io.Transformer({'data': input_shape})
if self._net.blobs['data'].data.shape[1] == 3:
transformer.set_transpose('data', (2, 0, 1)) # move image channels to outermost dimension
transformer.set_mean('data', np.array([104, 117, 123])) # subtract the dataset-mean value in each channel
else:
pass # non RGB data need not use transformer
self._transformer = transformer
self._sample_shape = self._net.blobs['data'].data.shape
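A minimal sketch of how a forward pass with this wrapper typically looks; the extract() method name and the default 'fc7' blob are assumptions, not part of the class above.

def extract(self, image, blob='fc7'):
    # Preprocess with the transformer configured above (HWC RGB -> CHW BGR, mean subtracted),
    # reshape the input blob to a single sample, and run a forward pass.
    data = self._transformer.preprocess('data', image)
    self._net.blobs['data'].reshape(1, *data.shape)
    self._net.blobs['data'].data[...] = data
    self._net.forward()
    return self._net.blobs[blob].data[0].copy()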
def __init__(self):
caffe.set_mode_gpu()
#caffe.set_device(0)
model_path = '../models/bvlc_googlenet/' # substitute your path here
net_fn = model_path + 'deploy.prototxt'
param_fn = model_path + 'bvlc_googlenet.caffemodel'
model = caffe.io.caffe_pb2.NetParameter()
text_format.Merge(open(net_fn).read(), model)
model.force_backward = True #backward to input layer
open('tmp.prototxt', 'w').write(str(model))
self.net = caffe.Classifier('tmp.prototxt', param_fn,
mean = np.float32([104.0, 116.0, 122.0]),
channel_swap = (2,1,0))
# for the mode guide, if flag = 1
self.flag = 0
self.epoch = 20
self.end = 'inception_4c/output'
#self.end = 'conv4'
def __init__(self, solver_prototxt, pretrained_model=None):
"""Initialize the SolverWrapper."""
self.solver = caffe.SGDSolver(solver_prototxt)
if pretrained_model is not None:
print('Loading pretrained model weights from {:s}'.format(pretrained_model))
self.solver.net.copy_from(pretrained_model)
self.solver_param = caffe.io.caffe_pb2.SolverParameter()
with open(solver_prototxt, 'rt') as f:
text_format.Merge(f.read(), self.solver_param)
if self.solver_param.solver_mode == 1:
caffe.set_mode_gpu()
caffe.set_device(params.gpu_id)
print('Use GPU', params.gpu_id, 'to train')
else:
print('Use CPU to train')
# initialize the python data layer
self.solver.net.layers[0].set_db()
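A hedged companion train_model() for the SolverWrapper above, mirroring the py-faster-rcnn pattern; the method name is an assumption and snapshotting is left to the solver parameters.

def train_model(self, max_iters):
    # Drive the solver one SGD step at a time so Python-side hooks can run per iteration.
    while self.solver.iter < max_iters:
        self.solver.step(1)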
def init_detection_net(self, gpu_id=0, prototxt=None, caffemodel=None):
"""init extraction network"""
cfg.TEST.HAS_RPN = True # Use RPN for proposals
if prototxt is None:
prototxt = os.path.join(cfg.ROOT_DIR, 'models', NETS['zf'][0],
'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
if caffemodel is None:
caffemodel = os.path.join(cfg.ROOT_DIR, 'output/default/train',
NETS['zf'][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
#np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
self.net_d = caffe.Net(prototxt, caffemodel, caffe.TEST)
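A hedged sketch of a companion detect() method for the network initialized above; im_detect is py-faster-rcnn's fast_rcnn.test.im_detect, and the confidence threshold is an arbitrary placeholder.

def detect(self, im, conf_thresh=0.8):
    # im: BGR uint8 image (H x W x 3), as the Faster R-CNN test pipeline expects.
    from fast_rcnn.test import im_detect
    scores, boxes = im_detect(self.net_d, im)          # scores: (R, K) incl. background class 0
    keep = scores[:, 1:].max(axis=1) >= conf_thresh    # keep proposals confident in any foreground class
    return scores[keep], boxes[keep]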
def __init__(self, solver_prototxt, pretrained_model=None):
"""Initialize the SolverWrapper."""
self.solver = caffe.SGDSolver(solver_prototxt)
if pretrained_model is not None:
print('Loading pretrained model weights from {:s}'.format(pretrained_model))
self.solver.net.copy_from(pretrained_model)
self.solver_param = caffe.io.caffe_pb2.SolverParameter()
with open(solver_prototxt, 'rt') as f:
text_format.Merge(f.read(), self.solver_param)
if self.solver_param.solver_mode == 1:
caffe.set_mode_gpu()
caffe.set_device(params.gpu_id)
print('Use GPU', params.gpu_id, 'to train')
else:
print('Use CPU to train')
# initialize the python data layer
#self.solver.net.layers[0].set_db()
def solve(proto, gpus, uid, rank, max_iter):
caffe.set_mode_gpu()
caffe.set_device(gpus[rank])
caffe.set_solver_count(len(gpus))
caffe.set_solver_rank(rank)
caffe.set_multiprocess(True)
solver = caffe.SGDSolver(proto)
if rank == 0:
# solver.restore(_snapshot)
solver.net.copy_from(_weights)
solver.net.layers[0].get_gpu_id(gpus[rank])
nccl = caffe.NCCL(solver, uid)
nccl.bcast()
solver.add_callback(nccl)
if solver.param.layer_wise_reduce:
solver.net.after_backward(nccl)
for _ in range(max_iter):
solver.step(1)
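A minimal multi-process launcher for solve() above, modeled on Caffe's python/train.py; the solver prototxt path and GPU list are placeholders, and _weights must be set before the workers start.

from multiprocessing import Process

import caffe

def train_multi_gpu(proto, gpus, max_iter):
    # One training process per GPU; the shared NCCL uid lets the per-process
    # NCCL instances rendezvous for gradient all-reduce.
    uid = caffe.NCCL.new_uid()
    caffe.init_log()
    workers = [Process(target=solve, args=(proto, gpus, uid, rank, max_iter))
               for rank in range(len(gpus))]
    for w in workers:
        w.daemon = True
        w.start()
    for w in workers:
        w.join()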
def test_imdb_wiki_model():
# not finished
sample_lst_fn = 'datasets/IMDB-WIKI/Annotations/imdb_wiki_good_test.json'
img_root = 'datasets/IMDB-WIKI/Images'
batch_size = 128
num_batch = 10
gpu_id = 0
fn_model = 'datasets/IMDB-WIKI/caffe_models/age.prototxt'
fn_weight = 'datasets/IMDB-WIKI/caffe_models/dex_imdb_wiki.caffemodel'
imagenet_mean = [[[104, 117, 123]]]
caffe.set_device(gpu_id)
caffe.set_mode_gpu()
model = caffe.Net(fn_model, fn_weight, caffe.TEST)  # caffe.Net loads the deploy prototxt plus weights
def get_net(caffemodel, deploy_file, use_gpu=True):
"""
Returns an instance of caffe.Net
Arguments:
caffemodel -- path to a .caffemodel file
deploy_file -- path to a .prototxt file
Keyword arguments:
use_gpu -- if True, use the GPU for inference
"""
if use_gpu:
caffe.set_mode_gpu()
# load a new model
return caffe.Net(deploy_file, caffemodel, caffe.TEST)
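Hypothetical usage of get_net(); the file names are placeholders.

net = get_net('snapshot_iter_10000.caffemodel', 'deploy.prototxt', use_gpu=False)
print(net.blobs['data'].data.shape)   # expected input shape, e.g. (1, 3, 224, 224)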
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
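A hedged example of where _init_caffe() fits in a training subprocess, loosely following the py-faster-rcnn alt-opt script; the worker name and solver path are placeholders.

import caffe

def train_worker(cfg, solver_path, max_iters):
    # Caffe state (mode, device, seed) is per-process, so it must be
    # initialized inside the child process before the solver is built.
    _init_caffe(cfg)
    solver = caffe.SGDSolver(solver_path)
    while solver.iter < max_iters:
        solver.step(1)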
def setup():
global resnet_mean
global resnet_net
global vqa_net
# data provider
vqa_data_provider_layer.CURRENT_DATA_SHAPE = EXTRACT_LAYER_SIZE
# mean subtraction
blob = caffe.proto.caffe_pb2.BlobProto()
data = open( RESNET_MEAN_PATH , 'rb').read()
blob.ParseFromString(data)
resnet_mean = np.array( caffe.io.blobproto_to_array(blob)).astype(np.float32).reshape(3,224,224)
resnet_mean = np.transpose(cv2.resize(np.transpose(resnet_mean,(1,2,0)), (448,448)),(2,0,1))
# resnet
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
resnet_net = caffe.Net(RESNET_LARGE_PROTOTXT_PATH, RESNET_CAFFEMODEL_PATH, caffe.TEST)
# our net
vqa_net = caffe.Net(VQA_PROTOTXT_PATH, VQA_CAFFEMODEL_PATH, caffe.TEST)
# uploads
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
if not os.path.exists(VIZ_FOLDER):
os.makedirs(VIZ_FOLDER)
print('Finished setup')
From train_faster_rcnn_alt_opt.py in the faster-rcnn-resnet project (author: Eniac-Xie):
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)