def __init__(self, net_proto, net_weights, device_id, input_size=None):
    """Load a Caffe test net on GPU `device_id` and build its input transformer.

    :param net_proto: path to the deploy prototxt
    :param net_weights: path to the .caffemodel weights
    :param device_id: GPU ordinal to bind this net to
    :param input_size: optional spatial dims overriding the prototxt shape
    """
    caffe.set_mode_gpu()
    caffe.set_device(device_id)
    self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

    data_blob = self._net.blobs['data']
    shape = data_blob.data.shape
    if input_size is not None:
        # keep (batch, channels); swap in the requested spatial dims
        shape = shape[:2] + input_size

    xform = caffe.io.Transformer({'data': shape})
    if data_blob.data.shape[1] == 3:
        # RGB input: move image channels to outermost dimension and
        # subtract the dataset-mean value in each channel
        xform.set_transpose('data', (2, 0, 1))
        xform.set_mean('data', np.array([104, 117, 123]))
    # non RGB data need not use transformer

    self._transformer = xform
    self._sample_shape = data_blob.data.shape
# --- scraped page boilerplate (translated from Chinese, commented out) ---
# "Python examples of set_device()"
# Source file: action_caffe.py
# Project: Video-Classification-Action-Recognition  Author: qijiezhao
# (page stats: views 21 / favorites 0 / likes 0 / comments 0)
def __init__(self, net_proto, net_weights, device_id, input_size=None):
    """Initialize a GPU-bound Caffe net plus a matching data transformer.

    :param net_proto: deploy prototxt path
    :param net_weights: caffemodel weights path
    :param device_id: GPU to run on
    :param input_size: optional override for the spatial input dimensions
    """
    caffe.set_mode_gpu()
    caffe.set_device(device_id)
    self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

    proto_shape = self._net.blobs['data'].data.shape
    # default to the prototxt shape; override only the spatial part
    input_shape = proto_shape if input_size is None else proto_shape[:2] + input_size

    transformer = caffe.io.Transformer({'data': input_shape})
    if proto_shape[1] == 3:
        # move image channels to outermost dimension
        transformer.set_transpose('data', (2, 0, 1))
        # subtract the dataset-mean value in each channel
        transformer.set_mean('data', np.array([104, 117, 123]))
    else:
        pass  # non RGB data need not use transformer

    self._transformer = transformer
    self._sample_shape = self._net.blobs['data'].data.shape
def __init__(self, videoThread):
threading.Thread.__init__(self)
print "Initializing recognition thread..."
self.videoThread = videoThread
#caffe.set_mode_cpu()
caffe.set_mode_gpu()
caffe.set_device(0)
# Model file and parameters are written by trainDnn.py
# Take the most recent parameter set
genderPath = "./dcnn_gender"
genderParamFiles = glob.glob(genderPath + os.sep + "*.caffemodel")
genderParamFiles = sorted(genderParamFiles, key=lambda x:os.path.getctime(x))
MODEL_FILE_GENDER = genderPath + os.sep + "deploy_gender.prototxt"
PRETRAINED_GENDER = genderParamFiles[-1]
MEAN_FILE_GENDER = genderPath + os.sep + "mean.binaryproto"
proto_data = open(MEAN_FILE_GENDER, 'rb').read()
a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
mean = caffe.io.blobproto_to_array(a)[0]
# Initialize net
self.gender_net = caffe.Classifier(MODEL_FILE_GENDER, PRETRAINED_GENDER, image_dims=(227,227),)
def __init__(self, solver_prototxt, output_dir,
             pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    # pin training to GPU 0
    caffe.set_mode_gpu()
    caffe.set_device(0)

    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print ('Loading pretrained model '
               'weights from {:s}').format(pretrained_model)
        self.solver.net.copy_from(pretrained_model)

    # parse the solver prototxt into a SolverParameter message
    solver_cfg = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as fh:
        pb2.text_format.Merge(fh.read(), solver_cfg)
    self.solver_param = solver_cfg
def layer_features(layers, model_file, deploy_file, imagemean_file,
                   image_files, gpu=True, gpu_id=0, show_pred=False):
    """extract features from various layers

    Yields (layer_name, blob_data) pairs for each requested layer.
    Raises TypeError for a layer name the net does not contain.
    """
    if gpu:
        caffe.set_device(gpu_id)
        caffe.set_mode_gpu()

    net = feed_net(model_file, deploy_file, imagemean_file, image_files,
                   show_pred)

    for name in layers:
        if name not in net.blobs:
            raise TypeError('Invalid layer name: ' + name)
        yield (name, net.blobs[name].data)
def __init__(self, solver, output_dir, pretrained_model=None, gpu_id=0, data=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)

    self.solver = caffe.SGDSolver(solver)
    if pretrained_model is not None:
        print(('Loading pretrained model '
               'weights from {:s}').format(pretrained_model))
        self.solver.net.copy_from(pretrained_model)

    # read the solver definition back so its parameters are inspectable
    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver, 'rt') as src:
        pb2.text_format.Merge(src.read(), self.solver_param)

    # hand the dataset to the python input layer
    self.solver.net.layers[0].set_data(data)
def __init__(self, use_gpu=True, model=[]):
    '''
    Init net.
    :param use_gpu: run on GPU 0 when True, otherwise CPU.
    :param model: Network definition. Must not be empty.
    :raises ValueError: if no model is given.
    '''
    # NOTE: `model=[]` default kept for interface compatibility; it is
    # never mutated here, so the shared-default pitfall does not bite.
    if model == []:
        # BUG FIX: `raise "..."` (string exception) is illegal in
        # Python >= 2.6 and raised a TypeError instead of this message.
        raise ValueError("model should not be empty!")
    # BUG FIX (clarity): `.format` now applies to the string literal
    # explicitly, instead of relying on Python 2's statement parsing of
    # `print ("...").format(...)`. Output is unchanged.
    print("Init NetTester: Use gpu: {}".format(use_gpu))
    print("Network: {}".format(model))
    if use_gpu:
        caffe.set_device(0)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    self.__net = caffe.Net(model, caffe.TRAIN)
def __init__(self,
             minsize=20,
             threshold=(0.6, 0.7, 0.7),
             factor=0.709,
             fastresize=False,
             gpuid=0):
    """MTCNN face detector: load the P/R/O cascade nets onto one GPU.

    :param minsize: minimum face size in pixels
    :param threshold: per-stage score thresholds (P-Net, R-Net, O-Net)
    :param factor: image pyramid scale factor
    :param fastresize: enable the fast-resize code path
    :param gpuid: GPU ordinal to bind the three nets to
    """
    # FIX: the default was a mutable list ([0.6, 0.7, 0.7]) shared across
    # every instance; an immutable tuple removes the shared-mutable-default
    # hazard and still supports indexing exactly like a list.
    self.minsize = minsize
    self.threshold = threshold
    self.factor = factor
    self.fastresize = fastresize

    model_P = './model/det1.prototxt'
    weights_P = './model/det1.caffemodel'
    model_R = './model/det2.prototxt'
    weights_R = './model/det2.caffemodel'
    model_O = './model/det3.prototxt'
    weights_O = './model/det3.caffemodel'

    caffe.set_mode_gpu()
    caffe.set_device(gpuid)
    self.PNet = caffe.Net(model_P, weights_P, caffe.TEST)
    self.RNet = caffe.Net(model_R, weights_R, caffe.TEST)
    self.ONet = caffe.Net(model_O, weights_O, caffe.TEST)
def gen_net():
    """Forward one hard-coded image through the deploy FCN on GPU 1.

    Returns the net after the forward pass so its blobs can be inspected.
    """
    caffe.set_device(1)
    caffe.set_mode_gpu()

    filename = '2007_000032.jpg'
    image = Image.open(filename)
    arr = np.asarray(image, dtype=np.float32)
    arr = arr[:, :, ::-1]                                     # RGB -> BGR
    arr -= np.array((104.00698793,116.66876762,122.67891434)) # mean subtract
    arr = arr.transpose((2, 0, 1))                            # HWC -> CHW

    net = caffe.Net(
        "deploy.prototxt",
        #"train_iter_" + str(num) + ".caffemodel",
        #"/data/VGG16/caffemodel",
        "good.caffemodel",
        caffe.TRAIN)
    net.blobs["data"].reshape(1, *arr.shape)
    net.blobs["data"].data[...] = arr
    net.forward()
    return net
def gen_net(num):
    """Forward one hard-coded image through snapshot `num` of the train net.

    :param num: training iteration whose .caffemodel snapshot is loaded
    :return: the net after a single forward pass
    """
    caffe.set_device(0)
    caffe.set_mode_gpu()

    filename = '2007_000032.jpg'
    pixels = np.asarray(Image.open(filename), dtype=np.float32)
    pixels = pixels[:, :, ::-1]                                  # RGB -> BGR
    pixels -= np.array((104.00698793,116.66876762,122.67891434)) # mean subtract
    pixels = pixels.transpose((2, 0, 1))                         # HWC -> CHW

    net = caffe.Net(
        "train_val.prototxt",
        "train_iter_" + str(num) + ".caffemodel",
        # "/data/VGG16/caffemodel",
        # "../fcn-32s/good.caffemodel",
        caffe.TRAIN)
    net.blobs["data"].reshape(1, *pixels.shape)
    net.blobs["data"].data[...] = pixels
    net.forward()
    return net
def load_nets(args, cur_gpu):
# initialize solver and feature net,
# RNN should be initialized before CNN, because CNN cudnn conv layers
# may assume using all available memory
caffe.set_mode_gpu()
caffe.set_device(cur_gpu)
solver = caffe.SGDSolver(args.solver)
if args.snapshot:
print "Restoring history from {}".format(args.snapshot)
solver.restore(args.snapshot)
net = solver.net
if args.weights:
print "Copying weights from {}".format(args.weights)
net.copy_from(args.weights)
return solver, net
def load_nets(args, cur_gpu):
# initialize solver and feature net,
# RNN should be initialized before CNN, because CNN cudnn conv layers
# may assume using all available memory
caffe.set_mode_gpu()
caffe.set_device(cur_gpu)
solver = caffe.SGDSolver(args.solver)
if args.snapshot:
print "Restoring history from {}".format(args.snapshot)
solver.restore(args.snapshot)
rnn = solver.net
if args.weights:
rnn.copy_from(args.weights)
feature_net = caffe.Net(args.feature_net, args.feature_param, caffe.TEST)
# apply bbox regression normalization on the net weights
with open(args.bbox_mean, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_std, 'rb') as f:
bbox_stds = cPickle.load(f)
feature_net.params['bbox_pred_vid'][0].data[...] = \
feature_net.params['bbox_pred_vid'][0].data * bbox_stds[:, np.newaxis]
feature_net.params['bbox_pred_vid'][1].data[...] = \
feature_net.params['bbox_pred_vid'][1].data * bbox_stds + bbox_means
return solver, feature_net, rnn, bbox_means, bbox_stds
def load_models(args):
# load rnn model
caffe.set_mode_gpu()
if args.gpus is None:
caffe.set_device(args.job_id - 1)
else:
assert args.job_id <= len(args.gpus)
caffe.set_device(args.gpus[args.job_id-1])
if args.lstm_param is not '':
rnn_net = caffe.Net(args.lstm_def, args.lstm_param, caffe.TEST)
print 'Loaded RNN network from {:s}.'.format(args.lstm_def)
else:
rnn_net = caffe.Net(args.lstm_def, caffe.TEST)
print 'WARNING: dummy RNN network created.'
# load feature model
feature_net = caffe.Net(args.def_file, args.param, caffe.TEST)
print 'Loaded feature network from {:s}.'.format(args.def_file)
return feature_net, rnn_net
def __init__(self,params):
self.dimension = params['dimension']
self.dataset = params['dataset']
self.pooling = params['pooling']
# Read image lists
with open(params['query_list'],'r') as f:
self.query_names = f.read().splitlines()
with open(params['frame_list'],'r') as f:
self.database_list = f.read().splitlines()
# Parameters needed
self.layer = params['layer']
self.save_db_feats = params['database_feats']
# Init network
if params['gpu']:
caffe.set_mode_gpu()
caffe.set_device(0)
else:
caffe.set_mode_cpu()
print "Extracting from:", params['net_proto']
cfg.TEST.HAS_RPN = True
self.net = caffe.Net(params['net_proto'], params['net'], caffe.TEST)
def solve(proto, snapshot, gpus, timing, uid, rank):
    """Per-process worker for multi-GPU NCCL training.

    Rank 0 optionally runs the timing harness; all other ranks register
    the NCCL callback and step the solver to max_iter.
    """
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(proto)
    if snapshot and len(snapshot) != 0:
        solver.restore(snapshot)

    # broadcast initial weights from rank 0 to all workers
    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()

    if timing and rank == 0:
        time(solver, nccl)
    else:
        solver.add_callback(nccl)

    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    solver.step(solver.param.max_iter)
def __init__(self, hyperparams, dO, dU):
    """Caffe policy-optimization wrapper.

    :param hyperparams: overrides merged onto POLICY_OPT_CAFFE defaults
    :param dO: observation dimension
    :param dU: action dimension
    """
    config = copy.deepcopy(POLICY_OPT_CAFFE)
    config.update(hyperparams)
    PolicyOpt.__init__(self, config, dO, dU)

    self.batch_size = self._hyperparams['batch_size']

    # choose device before building the solver
    if self._hyperparams['use_gpu']:
        caffe.set_device(self._hyperparams['gpu_id'])
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()

    self.init_solver()
    self.caffe_iter = 0
    # isotropic initial action variance
    self.var = self._hyperparams['init_var'] * np.ones(dU)
    self.policy = CaffePolicy(self.solver.test_nets[0],
                              self.solver.test_nets[1],
                              self.var)
def __init__(self, solver, output_dir, pretrained_model=None, gpu_id=0, data=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    # bind this solver to the requested GPU
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)

    self.solver = caffe.SGDSolver(solver)
    if pretrained_model is not None:
        print(('Loading pretrained model '
               'weights from {:s}').format(pretrained_model))
        self.solver.net.copy_from(pretrained_model)

    param_msg = caffe_pb2.SolverParameter()
    with open(solver, 'rt') as fh:
        pb2.text_format.Merge(fh.read(), param_msg)
    self.solver_param = param_msg

    # feed the dataset into the python input layer
    self.solver.net.layers[0].set_data(data)
def main(argv):
    """Run FCN inference + overlay generation on one sport's train/test lists."""
    sport = 'long_jump'
    model = 'snap_iter_50000.caffemodel'
    #---
    weights = model_root + 'fcn/' + sport + '/' + model
    netf = './fcn/' + sport + '/deploy.prototxt'

    gpu = 0
    caffe.set_device(gpu)
    caffe.set_mode_gpu()
    net = caffe.Net(netf, weights, caffe.TEST)

    # FIX: an earlier assignment pointing at .../clips/ was dead code —
    # it was overwritten on the very next line; only the patches/ path
    # was ever used.
    im_head = '/export/home/mfrank/data/OlympicSports/patches/'

    test_path_file = 'fcn/' + sport + '/test.txt'
    train_path_file = 'fcn/' + sport + '/train.txt'

    inferfile(net, train_path_file, im_head)
    ifp_morris.apply_overlayfcn(train_path_file, factor=4)
    inferfile(net, test_path_file, im_head)
    ifp_morris.apply_overlayfcn(test_path_file, factor=4)
def get_predictions(region_crops):
    """Classify image crops as text / non-text with the configured Caffe model.

    :param region_crops: list of image arrays to classify
    :return: classifier prediction array
    """
    # FIX: os.environ["IS_GPU"] raised KeyError whenever the variable was
    # unset; .get() treats a missing variable as "use CPU".
    # NOTE(review): any non-empty string (including "0" or "false") still
    # selects GPU, exactly as before — confirm that is the intended contract.
    if os.environ.get("IS_GPU"):
        caffe.set_device(0)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    classifier = caffe.Classifier(os.path.join(os.environ["TEXT_NOTEXT_MODELS_DIR"], "deploy.prototxt"),
                                  os.path.join(os.environ["TEXT_NOTEXT_MODELS_DIR"], "weights.caffemodel"),
                                  mean=np.array([104, 117, 123], dtype='f4'),
                                  image_dims=[224, 224],
                                  raw_scale=255.0,
                                  channel_swap=[2, 1, 0])
    LOGGER.info("Classifying " + str(len(region_crops)) + " inputs.")
    predictions = classifier.predict(region_crops)
    return predictions
def run(self, _, app_context):
    """run the action"""
    import caffe

    # init CPU/GPU mode
    if app_context.get_config('caffe.cpu_mode'):
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(0)

    # load test model
    test_model_file = "models/" + app_context.get_config('caffe.test_model')
    trained_data_file = "cache/data/" + app_context.get_config('caffe.trained_data')
    net = caffe.Net(test_model_file, trained_data_file, caffe.TEST)

    # publish the loaded net for downstream actions
    app_context.params['test_net'] = net
    logging.getLogger(__name__).info('Loaded neural network: ' + trained_data_file)
def _loadModel(self, model_dirs, id):
print 'loading model...from{}'.format(model_dirs)
model_file = osp.join(model_dirs, 'vgg16.prototxt')
model_weights = osp.join(model_dirs, 'vgg16.caffemodel')
mean_file = osp.join(model_dirs, 'vgg16_mean.npy')
if id == -1:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(id)
net = caffe.Net(model_file, model_weights, caffe.TEST)
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_mean('data', np.load(mean_file).mean(1).mean(1))
transformer.set_channel_swap('data', (2, 1, 0))
transformer.set_transpose('data', (2, 0, 1))
#transformer.set_raw_scale('data', 255)
self.net = net
self.transformer = transformer
self.style_layers = VGG16_STYLES
self.content_layers = VGG16_CONTENTS
self.layers = VGG16_LAYERS
print 'model loading done'
def __init__(self, net_proto, net_weights, device_id, input_size=None):
    """Set up a Caffe TEST net on one GPU together with its preprocessing.

    :param net_proto: deploy prototxt
    :param net_weights: caffemodel weights
    :param device_id: GPU to use
    :param input_size: optional spatial size replacing the prototxt one
    """
    caffe.set_mode_gpu()
    caffe.set_device(device_id)
    self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

    blob_shape = self._net.blobs['data'].data.shape
    input_shape = blob_shape[:2] + input_size if input_size is not None else blob_shape

    transformer = caffe.io.Transformer({'data': input_shape})
    is_rgb = self._net.blobs['data'].data.shape[1] == 3
    if is_rgb:
        # move image channels to outermost dimension
        transformer.set_transpose('data', (2, 0, 1))
        # subtract the dataset-mean value in each channel
        transformer.set_mean('data', np.array([104, 117, 123]))
    # non RGB data need not use transformer

    self._transformer = transformer
    self._sample_shape = self._net.blobs['data'].data.shape
def __init__(self):
    """Load GoogLeNet with force_backward enabled (deep-dream style gradients)."""
    caffe.set_mode_gpu()
    #caffe.set_device(0)
    model_path = '../models/bvlc_googlenet/' # substitute your path here
    net_fn = model_path + 'deploy.prototxt'
    param_fn = model_path + 'bvlc_googlenet.caffemodel'

    model = caffe.io.caffe_pb2.NetParameter()
    # FIX: both prototxt handles were opened without ever being closed;
    # context managers release them deterministically.
    with open(net_fn) as fh:
        text_format.Merge(fh.read(), model)
    model.force_backward = True #backward to input layer
    with open('tmp.prototxt', 'w') as fh:
        fh.write(str(model))

    self.net = caffe.Classifier('tmp.prototxt', param_fn,
                                mean=np.float32([104.0, 116.0, 122.0]),
                                channel_swap=(2, 1, 0))
    # for the mode guide, if flag = 1
    self.flag = 0
    self.epoch = 20
    self.end = 'inception_4c/output'
    #self.end = 'conv4'
def __init__(self, solver_prototxt, pretrained_model=None):
"""Initialize the SolverWrapper."""
self.solver = caffe.SGDSolver(solver_prototxt)
if pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(pretrained_model)
self.solver.net.copy_from(pretrained_model)
self.solver_param = caffe.io.caffe_pb2.SolverParameter()
with open(solver_prototxt, 'rt') as f:
text_format.Merge(f.read(), self.solver_param)
if self.solver_param.solver_mode == 1:
caffe.set_mode_gpu()
caffe.set_device(params.gpu_id)
print 'Use GPU', params.gpu_id, 'to train'
else:
print 'Use CPU to train'
#initial python data layer
self.solver.net.layers[0].set_db()
def init_detection_net(self, gpu_id=0, prototxt=None, caffemodel=None):
    """init extraction network

    Falls back to the bundled ZF faster-rcnn files when paths are omitted.
    Raises IOError if the caffemodel is missing on disk.
    """
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    if prototxt is None:
        prototxt = os.path.join(cfg.ROOT_DIR, 'models', NETS['zf'][0],
                                'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    if caffemodel is None:
        caffemodel = os.path.join(cfg.ROOT_DIR, 'output/default/train',
                                  NETS['zf'][1])
    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    #np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    self.net_d = caffe.Net(prototxt, caffemodel, caffe.TEST)
def __init__(self, solver_prototxt, pretrained_model=None):
"""Initialize the SolverWrapper."""
self.solver = caffe.SGDSolver(solver_prototxt)
if pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(pretrained_model)
self.solver.net.copy_from(pretrained_model)
self.solver_param = caffe.io.caffe_pb2.SolverParameter()
with open(solver_prototxt, 'rt') as f:
text_format.Merge(f.read(), self.solver_param)
if self.solver_param.solver_mode == 1:
caffe.set_mode_gpu()
caffe.set_device(params.gpu_id)
print 'Use GPU', params.gpu_id, 'to train'
else:
print 'Use CPU to train'
#initial python data layer
#self.solver.net.layers[0].set_db()
def solve(proto, gpus, uid, rank, max_iter):
    """NCCL multi-GPU worker: step the solver one iteration at a time.

    Rank 0 seeds the weights from the module-level `_weights` before the
    NCCL broadcast distributes them to all ranks.
    """
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(proto)
    if rank == 0:
        # solver.restore(_snapshot)
        solver.net.copy_from(_weights)

    # tell the python data layer which GPU it lives on
    solver.net.layers[0].get_gpu_id(gpus[rank])

    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()
    solver.add_callback(nccl)
    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)

    # stepping one iteration at a time (rather than step(max_iter))
    # keeps per-iteration callback ordering explicit
    for _ in range(max_iter):
        solver.step(1)
def test_imdb_wiki_model():
    """Load the IMDB-WIKI age model on GPU 0.

    NOTE (from original author): not finished — sample_lst_fn, img_root,
    batch_size, num_batch and imagenet_mean are set up but not yet used.
    """
    # not finished
    sample_lst_fn = 'datasets/IMDB-WIKI/Annotations/imdb_wiki_good_test.json'
    img_root = 'datasets/IMDB-WIKI/Images'
    batch_size = 128
    num_batch = 10

    gpu_id = 0
    fn_model = 'datasets/IMDB-WIKI/caffe_models/age.prototxt'
    fn_weight = 'datasets/IMDB-WIKI/caffe_models/dex_imdb_wiki.caffemodel'
    imagenet_mean = [[[104, 117, 123]]]

    caffe.set_device(gpu_id)
    caffe.set_mode_gpu()
    model = caffemodel(fn_model, fn_weight, caffe.TEST)
def _init_caffe(cfg):
    """Initialize pycaffe in a training process.
    """
    import caffe

    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)

    # set up caffe on the configured GPU
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
def setup():
global resnet_mean
global resnet_net
global vqa_net
# data provider
vqa_data_provider_layer.CURRENT_DATA_SHAPE = EXTRACT_LAYER_SIZE
# mean substraction
blob = caffe.proto.caffe_pb2.BlobProto()
data = open( RESNET_MEAN_PATH , 'rb').read()
blob.ParseFromString(data)
resnet_mean = np.array( caffe.io.blobproto_to_array(blob)).astype(np.float32).reshape(3,224,224)
resnet_mean = np.transpose(cv2.resize(np.transpose(resnet_mean,(1,2,0)), (448,448)),(2,0,1))
# resnet
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
resnet_net = caffe.Net(RESNET_LARGE_PROTOTXT_PATH, RESNET_CAFFEMODEL_PATH, caffe.TEST)
# our net
vqa_net = caffe.Net(VQA_PROTOTXT_PATH, VQA_CAFFEMODEL_PATH, caffe.TEST)
# uploads
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
if not os.path.exists(VIZ_FOLDER):
os.makedirs(VIZ_FOLDER)
print 'Finished setup'