import numpy as np
import scipy.io
import scipy.sparse as sp

def load_mat_file(name, path, matname, load_zeros=False, prop_valid_set=.1, prop_test_set=0):
    # Triplets_set and Experiment are project-local classes assumed to be in scope.
    x = scipy.io.loadmat(path + name)[matname]
    if sp.issparse(x):
        if not load_zeros:
            idxs = x.nonzero()
            # Build (row, 0, col) index triplets; list() keeps this valid on Python 3 too.
            indexes = np.array(list(zip(idxs[0], np.zeros_like(idxs[0]), idxs[1])))
            np.random.shuffle(indexes)
            nb = indexes.shape[0]
            i_valid = int(nb - nb * prop_valid_set - nb * prop_test_set)
            i_test = i_valid + int(nb * prop_valid_set)
            train = Triplets_set(indexes[:i_valid, :], np.ones(i_valid))
            valid = Triplets_set(indexes[i_valid:i_test, :], np.ones(i_test - i_valid))
            test = Triplets_set(indexes[i_test:, :], np.ones(nb - i_test))
            return Experiment(name, train, valid, test, positives_only=True, compute_ranking_scores=True)
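# A minimal usage sketch. The file name, directory, and matrix key below are
# hypothetical, and Triplets_set / Experiment must come from the surrounding
# project; the call splits off 10% validation and 5% test triplets.
exp = load_mat_file('umls.mat', './data/', 'Rs',
                    prop_valid_set=0.1, prop_test_set=0.05)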
def saveParams(self, lap, hmodel, SS=None, **kwargs):
    ''' Save current model to disk.
    '''
    if lap in self.SavedIters or self.task_output_path is None:
        return
    ElapsedTimeLogger.startEvent("io", "saveparams")
    self.SavedIters.add(lap)
    prefix = ModelWriter.makePrefixForLap(lap)
    with open(self.mkfile('snapshot_lap.txt'), 'a') as f:
        f.write('%.4f\n' % (lap))
    with open(self.mkfile('snapshot_elapsed_time_sec.txt'), 'a') as f:
        f.write('%.3f\n' % (self.get_elapsed_time()))
    if self.outputParams['doSaveFullModel']:
        ModelWriter.save_model(
            hmodel, self.task_output_path, prefix,
            doSavePriorInfo=np.allclose(lap, 0.0),
            doLinkBest=True,
            doSaveObsModel=self.outputParams['doSaveObsModel'])
    if self.outputParams['doSaveTopicModel']:
        ModelWriter.saveTopicModel(
            hmodel, SS, self.task_output_path, prefix, **kwargs)
    ElapsedTimeLogger.stopEvent("io", "saveparams")
import warnings

def run_motif(type, cell, thresh_mode):
    warnings.filterwarnings("ignore")
    print "cross_validation_training"
    print "motif features used"
    # Read data
    filename = "./pairs_%s%s_motif.mat" % (str(type), str(cell))
    data = scipy.io.loadmat(filename)
    x = np.asmatrix(data['seq_m'])
    y = np.ravel(data['lab_m'])
    y[y < 0] = 0  # map -1 labels to 0
    print "Positive: %d Negative: %d" % (sum(y == 1), sum(y == 0))
    k_fold = 10
    if thresh_mode == 0:
        k_fold1 = 0
    elif thresh_mode == 1:
        k_fold1 = 1
    else:
        k_fold1 = 5
    # Note: the original passed an undefined variable `serial` as a fifth
    # argument; it is dropped here to match the call in run_shuffle below.
    metrics_vec, pred, predicted, features1 = parametered_cv(x, y, k_fold, k_fold1)
    filename1 = "test_%s%s_motiflab.txt" % (str(type), str(cell))
    filename2 = "test_%s%s_motifprob.txt" % (str(type), str(cell))
    filename3 = "test_%s%s_motiffeature.txt" % (str(type), str(cell))
    np.savetxt(filename1, pred, fmt='%d %d %d', delimiter='\t')
    np.savetxt(filename2, predicted, fmt='%f %f', delimiter='\t')
    np.savetxt(filename3, features1, fmt='%d %f', delimiter='\t')
    filename4 = "test_%s%s_motifthresh2.txt" % (str(type), str(cell))
    np.savetxt(filename4, metrics_vec, fmt='%f %f %f %f %f', delimiter='\t')
# Cross validation for PEP-Integrate
import random

def run_shuffle(word, num_features, k, type, cell, sel_num, thresh_mode):
    warnings.filterwarnings("ignore")
    word = int(word)
    num_features = int(num_features)
    k = int(k)
    sel_num = int(sel_num)
    print "Loading motif data"
    filename = "./pairs_%s%s_motif.mat" % (str(type), str(cell))
    data = scipy.io.loadmat(filename)
    x1 = np.asarray(data['seq_m'])
    y = np.ravel(data['lab_m'])
    y[y < 0] = 0  # map -1 labels to 0
    print "Positive: %d Negative: %d" % (sum(y == 1), sum(y == 0))
    serial3 = np.array(range(0, x1.shape[1]))
    print serial3.shape
    print "shuffle features..."
    random.shuffle(serial3)
    x = x1[:, serial3]  # apply the shuffled column order
    filename4 = "test_%s%s_motifidx_shuffle%d.txt" % (str(type), str(cell), sel_num)
    np.savetxt(filename4, np.array((range(0, x1.shape[1]), serial3)).T, fmt='%d %d', delimiter='\t')
    k_fold = 10
    if thresh_mode == 0:
        k_fold1 = 0
    elif thresh_mode == 1:
        k_fold1 = 1
    else:
        k_fold1 = 5
    metrics_vec, pred, predicted, features1 = parametered_cv(x, y, k_fold, k_fold1)
    filename1 = "test_%s%s_motiflab_shuffle%d.txt" % (str(type), str(cell), sel_num)
    filename2 = "test_%s%s_motifprob_shuffle%d.txt" % (str(type), str(cell), sel_num)
    filename3 = "test_%s%s_motiffeature_shuffle%d.txt" % (str(type), str(cell), sel_num)
    np.savetxt(filename1, pred, fmt='%d %d %d', delimiter='\t')
    np.savetxt(filename2, predicted, fmt='%f %f', delimiter='\t')
    np.savetxt(filename3, features1, fmt='%d %f', delimiter='\t')
from PIL import Image

def __init__(self, Brain_image_filename, Electrode_ElectrodeData_filename,
             Electrode_mat_filename, ElectrodeSignals, ElectrodeSignalDataName):
    self.im = Image.open(Brain_image_filename)
    self.syllableUnit = 0
    self.Timestep = 0
    self.ElectrodeSignals = scipy.io.loadmat(ElectrodeSignals)
    self.mat = scipy.io.loadmat(Electrode_mat_filename)
    self.connectivityData = scipy.io.loadmat(Electrode_ElectrodeData_filename)
    # Changes for artificial data
    Data = scipy.io.loadmat(Electrode_ElectrodeData_filename)
    temp = Data['electrode']
    self.ElectrodeIds = temp[0]
    self.ElectodeData = Data['C']
    # Changes for real data
    # self.ElectrodeIds = [i for i in range(len(self.ElectrodeSignals[ElectrodeSignalDataName][0]))]
    # self.ElectodeData = self.connectivityData['conData']
    # The connectivity matrices are square, so N is (harmlessly) assigned twice.
    self.syllable, self.timestep, self.N, self.N = np.shape(self.ElectodeData)
    self.timestep = self.timestep - 1
    """ Variable names in the new connectivity matrices:
        C == correlation matrix
        syllable == 6 syllables
        time == mapping between electrodes
        electrode == 58 electrodes
    """
# From ClassifyWav.py in the CNNs-Speech-Music-Discrimination project (author: MikeMpapa).
import caffe

def initialize_transformer(image_mean):
    # Batch of 10*16 frames, 3-channel 227x227 crops.
    shape = (10*16, 3, 227, 227)
    transformer = caffe.io.Transformer({'data': shape})
    channel_mean = np.zeros((3, 227, 227))
    for channel_index, mean_val in enumerate(image_mean):
        channel_mean[channel_index, ...] = mean_val
    transformer.set_mean('data', channel_mean)
    transformer.set_raw_scale('data', 255)           # rescale [0, 1] images to [0, 255]
    transformer.set_channel_swap('data', (2, 1, 0))  # RGB -> BGR
    transformer.set_transpose('data', (2, 0, 1))     # HxWxC -> CxHxW
    #transformer.set_is_flow('data', is_flow)
    return transformer
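# A minimal usage sketch: the per-channel values below are the commonly used
# ImageNet BGR means, given purely as an example input.
transformer = initialize_transformer(np.array([104.0, 117.0, 123.0]))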
import os

def singleFrame_classify_video(signal, net, transformer, with_smoothing, classNamesCNN):
    batch_size = 1
    input_images = []
    input_im = caffe.io.load_image(signal.replace(".wav", ".png"))
    input_images.append(input_im)
    os.remove(signal.replace(".wav", ".png"))
    # Initialize predictions matrix
    output_predictions = np.zeros((len(input_images), 2))
    output_classes = []
    for i in range(0, len(input_images)):
        # Get every image -- batch_size == 1
        clip_input = input_images[i:min(i + batch_size, len(input_images))]
        clip_input = caffe.io.oversample(clip_input, [227, 227])  # 10 crops of 227x227
        caffe_in = np.zeros(np.array(clip_input.shape)[[0, 3, 1, 2]], dtype=np.float32)  # initialize input matrix
        for ix, inputs in enumerate(clip_input):
            caffe_in[ix] = transformer.preprocess('data', inputs)  # transform input data appropriately
        net.blobs['data'].reshape(caffe_in.shape[0], caffe_in.shape[1], caffe_in.shape[2], caffe_in.shape[3])
        out = net.forward_all(data=caffe_in)  # feed input to the network
        output_predictions[i:i + batch_size] = np.mean(out['probs'].reshape(10, caffe_in.shape[0] / 10, 2), 0)  # average over the 10 crops
        # Store predicted labels without smoothing
        iMAX = output_predictions[i:i + batch_size].argmax(axis=1)[0]
        prediction = classNamesCNN[iMAX]
        output_classes.append(prediction)
    return output_classes, output_predictions
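# How the pieces fit together (a sketch only: the prototxt/caffemodel paths
# and the class-name order are hypothetical placeholders, and a spectrogram
# 'clip.png' matching the .wav file is assumed to exist on disk):
net = caffe.Net('deploy.prototxt', 'speech_music.caffemodel', caffe.TEST)
transformer = initialize_transformer(np.array([104.0, 117.0, 123.0]))
classes, probs = singleFrame_classify_video('clip.wav', net, transformer,
                                            False, ['music', 'speech'])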
def savemat(file_name, mdict, oned_as="column", **kwargs):
    """MATLAB-format output routine that is compatible with SciPy 0.7's.

    SciPy 0.7.2 (or .1?) added the oned_as keyword arg with 'column' as the
    default value. It issues a warning if this is not provided, stating that
    "This will change to 'row' in future versions."
    """
    import scipy.io
    try:
        return scipy.io.savemat(file_name, mdict, oned_as=oned_as, **kwargs)
    except TypeError:
        return scipy.io.savemat(file_name, mdict, **kwargs)
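# Example (the output file name is arbitrary): save a 1-D array through the
# wrapper; on SciPy builds that predate the oned_as keyword, the TypeError
# fallback silently drops it.
savemat('results.mat', {'scores': np.arange(5.0)})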
def _load_sbd_mask_annotations(self, index, gt_roidbs):
    """
    Load gt_masks information from SBD's additional data
    """
    if index % 1000 == 0:
        print '%d / %d' % (index, len(self._image_index))
    image_name = self._image_index[index]
    inst_file_name = os.path.join(self._data_path, 'inst', image_name + '.mat')
    gt_inst_mat = scipy.io.loadmat(inst_file_name)
    gt_inst_data = gt_inst_mat['GTinst']['Segmentation'][0][0]
    unique_inst = np.unique(gt_inst_data)
    background_ind = np.where(unique_inst == 0)[0]
    unique_inst = np.delete(unique_inst, background_ind)
    gt_roidb = gt_roidbs[index]
    cls_file_name = os.path.join(self._data_path, 'cls', image_name + '.mat')
    gt_cls_mat = scipy.io.loadmat(cls_file_name)
    gt_cls_data = gt_cls_mat['GTcls']['Segmentation'][0][0]
    gt_masks = []
    for ind, inst_mask in enumerate(unique_inst):
        box = gt_roidb['boxes'][ind]
        im_mask = (gt_inst_data == inst_mask)
        im_cls_mask = np.multiply(gt_cls_data, im_mask)
        unique_cls_inst = np.unique(im_cls_mask)
        background_ind = np.where(unique_cls_inst == 0)[0]
        unique_cls_inst = np.delete(unique_cls_inst, background_ind)
        assert len(unique_cls_inst) == 1
        assert unique_cls_inst[0] == gt_roidb['gt_classes'][ind]
        # Crop the instance mask to its ground-truth box (inclusive bounds).
        mask = im_mask[box[1]:box[3] + 1, box[0]:box[2] + 1]
        gt_masks.append(mask)
    # Record the maximum dimensions so a fixed-size array can be built when forwarding.
    mask_max_x = max(gt_masks[i].shape[1] for i in xrange(len(gt_masks)))
    mask_max_y = max(gt_masks[i].shape[0] for i in xrange(len(gt_masks)))
    return {
        'gt_masks': gt_masks,
        'mask_max': [mask_max_x, mask_max_y],
        'flipped': False
    }