import numpy as np
from subprocess import call

def makeRotMatrix(motionParams, simDirClusterDirection):
    # Make three rotation matrices (FSL makerot), one per axis,
    # centred on the supplied brain volume
    call(["makerot", "-t", str(motionParams[4]), "-a", "1,0,0", "--cov=" + simDirClusterDirection + "/brain.nii.gz", "-o", "rotx.mat"])
    call(["makerot", "-t", str(motionParams[5]), "-a", "0,1,0", "--cov=" + simDirClusterDirection + "/brain.nii.gz", "-o", "roty.mat"])
    call(["makerot", "-t", str(motionParams[6]), "-a", "0,0,1", "--cov=" + simDirClusterDirection + "/brain.nii.gz", "-o", "rotz.mat"])
    # Concatenate the single-axis rotations into one matrix
    call(["convert_xfm", "-omat", "rotxy.mat", "-concat", "roty.mat", "rotx.mat"])
    call(["convert_xfm", "-omat", "rotxyz.mat", "-concat", "rotz.mat", "rotxy.mat"])
    # Add translations to the fourth column of the affine
    rot = np.loadtxt('rotxyz.mat')
    rot[0, 3] += motionParams[1]
    rot[1, 3] += motionParams[2]
    rot[2, 3] += motionParams[3]
    np.savetxt('trans.mat', rot)
    # Tidy up the intermediate files
    call(["rm", "rotx.mat", "roty.mat", "rotz.mat", "rotxy.mat", "rotxyz.mat"])
plot.py — project: tensorflow_end2end_speech_recognition, author: hirofumi0810
def plot_loss(train_losses, dev_losses, steps, save_path):
    """Save history of training & dev loss as figure.
    Args:
        train_losses (list): train losses
        dev_losses (list): dev losses
        steps (list): steps
        save_path (string): directory to save the csv file and figure
    """
    # Save as csv file
    loss_graph = np.column_stack((steps, train_losses, dev_losses))
    if os.path.isfile(os.path.join(save_path, "loss.csv")):
        os.remove(os.path.join(save_path, "loss.csv"))
    np.savetxt(os.path.join(save_path, "loss.csv"), loss_graph, delimiter=",")
    # TODO: error check for inf loss

    # Plot & save as png file
    plt.clf()
    plt.plot(steps, train_losses, color="blue", label="Train")
    plt.plot(steps, dev_losses, color="orange", label="Dev")
    plt.xlabel('step', fontsize=12)
    plt.ylabel('loss', fontsize=12)
    plt.legend(loc="upper right", fontsize=12)
    if os.path.isfile(os.path.join(save_path, "loss.png")):
        os.remove(os.path.join(save_path, "loss.png"))
    plt.savefig(os.path.join(save_path, "loss.png"), dpi=500)
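A minimal usage sketch for plot_loss; the step and loss values are invented for illustration, and save_path must already exist:

steps = [200, 400, 600, 800]
train_losses = [3.2, 2.1, 1.5, 1.2]
dev_losses = [3.4, 2.5, 1.9, 1.7]
plot_loss(train_losses, dev_losses, steps, save_path='./results')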
def save(weight_vec, out_dir, subject, str_suffix=None):
    "Saves the features to disk."
    # pjoin/pexists are aliases for os.path.join/os.path.exists
    if out_dir is not None:
        # choose the output path based on dist name and all other parameters
        # (outpath returned from hiwenet)
        out_subject_dir = pjoin(out_dir, subject)
        if not pexists(out_subject_dir):
            os.mkdir(out_subject_dir)
        if str_suffix is not None:
            out_file_name = '{}_graynet.csv'.format(str_suffix)
        else:
            out_file_name = 'graynet.csv'
        out_weights_path = pjoin(out_subject_dir, out_file_name)
        try:
            np.savetxt(out_weights_path, weight_vec, fmt='%.5f')
            print('\nSaved the features to \n{}'.format(out_weights_path))
        except Exception:
            print('\nUnable to save features to {}'.format(out_weights_path))
            traceback.print_exc()
    return
def encode(sess, memory, encoder, values, keys, full_batch_host, keys_host, batch_size):
    full_batch_size = full_batch_host.shape[0]
    assert full_batch_size >= batch_size, "full batch size needs to be >= mini-batch size"
    memories_host = np.zeros([memory.num_models, memory.input_size])
    print('full_batch_size =', full_batch_size, 'minibatch_size =', batch_size)
    # walk over the full batch in mini-batch sized windows
    for begin, end in zip(range(0, full_batch_size, batch_size),
                          range(batch_size, full_batch_size + 1, batch_size)):
        feed_dict = {keys: keys_host[begin:end],
                     values: full_batch_host[begin:end]}
        # encode the values with the keys and accumulate across mini-batches
        memories_host += sess.run(encoder, feed_dict=feed_dict)
    # np.savetxt("encoded.csv", memories_host, delimiter=",")
    return memories_host
def create_boc_w2v_train(doc_path, dim, win, freq, num_concept):
    '''
    Creates (word, concept) results for the given dimensions, window, min-freq
    threshold and numbers of concepts, training new w2v models along the way.
    '''
    Params = namedtuple('parameters', 'document_path dimension window_size min_freq num_concept')
    all_param = []
    for edim in dim:
        model = train_w2v(doc_path, edim, win, freq)
        wlist = get_tokens(doc_path, freq)
        wM = get_wordvectors(model, wlist)
        for ecp in num_concept:
            w2c_output = "w2c_d%s_w%s_mf%s_c%s.csv" % (str(edim), str(win), str(freq), str(ecp))
            boc_output = "boc_d%s_w%s_mf%s_c%s.csv" % (str(edim), str(win), str(freq), str(ecp))
            word2concept = create_concepts(wM, wlist, w2c_output, ecp)
            boc = apply_cfidf(doc_path, word2concept, ecp)
            np.savetxt(boc_output, boc, delimiter=",")
            print(".... BOC vectors created in %s" % boc_output)
            all_param.append(Params(doc_path, edim, win, freq, ecp))
    return all_param
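A hypothetical call, showing that dim and num_concept are iterables swept in the nested loops above; the path and values are invented:

all_param = create_boc_w2v_train('data/corpus.txt', dim=[100, 200],
                                 win=5, freq=10, num_concept=[50, 100])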
def create_boc_w2v_load(models, doc_path, win, freq, num_concept, model_path):
    '''
    Creates (word, concept) results for the given window, min-freq threshold and
    numbers of concepts, loading pre-trained w2v models instead of training new ones.
    '''
    Params = namedtuple('parameters', 'document_path dimension window_size min_freq num_concept')
    all_param = []
    for em in models:
        em_name = em.split("/")[-1]
        model = KeyedVectors.load_word2vec_format(em)
        wlist = get_tokens(doc_path, freq)
        wM = get_wordvectors(model, wlist)
        for ecp in num_concept:
            w2c_output = "w2c_d%s_w%s_mf%s_c%s.csv" % (str(em_name), str(win), str(freq), str(ecp))
            boc_output = "boc_d%s_w%s_mf%s_c%s.csv" % (str(em_name), str(win), str(freq), str(ecp))
            word2concept = create_concepts(wM, wlist, w2c_output, ecp)
            boc = apply_cfidf(doc_path, word2concept, ecp)
            np.savetxt(boc_output, boc, delimiter=",")
            print(".... BOC vectors created in %s" % boc_output)
            all_param.append(Params(doc_path, em_name, win, freq, ecp))
    return all_param
def write_preprocessed_data(output_directory, cell_IDs, cell_stages, data, markers):
    processed_data_path = path.join(output_directory, 'processed_data.tsv')
    with open(processed_data_path, 'w') as f:
        f.write('\t'.join(cell_IDs))
        f.write('\n')
        f.write('\t'.join(cell_stages))
        f.write('\n')
        np.savetxt(f, data.T, fmt='%.6f', delimiter='\t')
    # re-read the file and prepend a column of row labels: 'Cell ID', 'Stage',
    # then one marker name per data row
    dataset = np.genfromtxt(processed_data_path, delimiter='\t', dtype=str)
    dataset = np.insert(dataset, 0, np.append(['Cell ID', 'Stage'], markers), axis=1)
    with open(processed_data_path, 'w') as f:
        np.savetxt(f, dataset, fmt='%s', delimiter='\t')
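A minimal usage sketch with toy values, all invented for illustration; data is shaped (n_cells, n_markers), matching the data.T transpose above:

cell_IDs = ['c1', 'c2']
cell_stages = ['t0', 't1']
markers = ['m1', 'm2', 'm3']
data = np.random.rand(2, 3)
write_preprocessed_data('.', cell_IDs, cell_stages, data, markers)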
def prepare_inputs(*inputs, **kwinputs):
    """Prepare the inputs for the simulator.

    The signature follows that given in `elfi.tools.external_operation`. This function
    appends kwinputs with unique and descriptive filenames and writes an input file for
    the bdm executable.
    """
    alpha, delta, tau, N = inputs
    meta = kwinputs['meta']
    # Organize the parameters to an array. The broadcasting works nicely with constant
    # arguments.
    param_array = np.row_stack(np.broadcast(alpha, delta, tau, N))
    # Prepare a unique filename for parallel settings
    filename = '{model_name}_{batch_index}_{submission_index}.txt'.format(**meta)
    np.savetxt(filename, param_array, fmt='%.4f %.4f %.4f %d')
    # Add the filenames to kwinputs
    kwinputs['filename'] = filename
    kwinputs['output_filename'] = filename[:-4] + '_out.txt'
    # Return new inputs that the command will receive
    return inputs, kwinputs
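A hedged sketch of how this hook might be attached to ELFI's external-operation wrapper; the command template is invented for illustration:

import elfi

# '{filename}' and '{output_filename}' are filled from the kwinputs set above
bdm_simulator = elfi.tools.external_operation('./bdm {filename} > {output_filename}',
                                              prepare_inputs=prepare_inputs)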
calculateDistanceInt.py — project: RecursiveHierarchicalClustering, author: xychang
def run(self):
    # pr = cProfile.Profile()
    print('[LOG]: start new thread ' + str(self.threadID))
    curTime = time.time()
    distM = self.matrix[self.sfrom].dot(
        self.matrix[self.sto].T).todense()
    # convert cosine similarities to small integer angular distances, clipped at zero
    distM = np.maximum(
        np.arccos(np.minimum(distM, np.ones(distM.shape))) /
        (PI_VALUE / 200) - 0.01,
        np.zeros(distM.shape)).astype(np.int8)
    # np.savetxt(self.fo, distM, fmt = '%d')
    np.save(self.fo + '.npy', distM)
    print('[LOG]: thread %d finished after %d' %
          (self.threadID, time.time() - curTime))
    # self.pr.disable()
    # # sortby = 'cumulative'
    # # pstats.Stats(pr).strip_dirs().sort_stats(sortby).print_stats()
    # self.pr.print_stats()
def test_format(self):
    a = np.array([(1, 2), (3, 4)])
    c = BytesIO()
    # Sequence of formats
    np.savetxt(c, a, fmt=['%02d', '%3.1f'])
    c.seek(0)
    assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])

    # A single multiformat string
    c = BytesIO()
    np.savetxt(c, a, fmt='%02d : %3.1f')
    c.seek(0)
    lines = c.readlines()
    assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

    # Specify delimiter, should be overridden
    c = BytesIO()
    np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
    c.seek(0)
    lines = c.readlines()
    assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

    # Bad fmt, should raise a ValueError
    c = BytesIO()
    assert_raises(ValueError, np.savetxt, c, a, fmt=99)
def main():
    labels_uni = np.zeros([4716, 1])
    with open(flags.FLAGS.src_path_1, "rt", encoding='utf-8') as csvfile:
        spamreader = csv.reader(csvfile)
        line_num = 0
        for row in spamreader:
            line_num += 1
            print('processing row ' + str(line_num))
            if line_num == 1:  # skip the header row
                continue
            # labels appear to come as alternating (label, value) pairs
            lbs = row[1].split()
            for i in range(0, len(lbs), 2):
                labels_uni[int(lbs[i])] += 1
    np.savetxt('labels_model.out', labels_uni, delimiter=',')
def main():
    rootclass = {}
    with open(flags.FLAGS.src_path_1, "rt", encoding='utf-8') as csvfile:
        spamreader = csv.reader(csvfile)
        line_num = 0
        for row in spamreader:
            line_num += 1
            print('processing row ' + str(line_num))
            if line_num == 1:  # skip the header row
                continue
            # group label indices by their root class (column 5)
            if row[5] in rootclass:
                rootclass[row[5]].append(line_num - 2)
            else:
                rootclass[row[5]] = [line_num - 2]
    labels_ordered = []
    for x in rootclass:
        labels_ordered.extend(rootclass[x])
    labels_ordered = [int(l) for l in labels_ordered]
    reverse_ordered = np.zeros([4716, 1])
    for i in range(len(labels_ordered)):
        reverse_ordered[labels_ordered[i]] = i
    print(len(rootclass))
    print(labels_ordered)
    np.savetxt('labels_ordered.out', reverse_ordered, delimiter=',')
    random.shuffle(labels_ordered)
    reverse_unordered = np.zeros([4716, 1])
    for i in range(len(labels_ordered)):
        reverse_unordered[labels_ordered[i]] = i
    print(labels_ordered)
    np.savetxt('labels_unordered.out', reverse_unordered, delimiter=',')
    labels_class = np.zeros([len(rootclass), 4716])
    flag = 0
    for x in rootclass:
        for i in rootclass[x]:
            labels_class[flag, i] = 1
        flag += 1
    np.savetxt('labels_class.out', labels_class)
def _write_txt(self, stream):
    '''
    Save a PLY element to an ASCII-format PLY file. The element may
    contain list properties.
    '''
    for rec in self.data:
        fields = []
        for prop in self.properties:
            fields.extend(prop._to_fields(rec[prop.name]))
        _np.savetxt(stream, [fields], '%.18g', newline='\r\n')
def save_poses(fn, poses):
    """ Save poses in toon format """
    Rts = [pose.matrix[:3, :] for pose in poses]
    with open(fn, 'w') as outfile:
        for Rt in Rts:
            # write each 3x4 pose matrix space-separated on a single line
            for row in Rt:
                np.savetxt(outfile, row, fmt='%-8.7f', delimiter=' ', newline=' ')
            outfile.write('\n')
        outfile.write('\n')
    return
def write_test_file(self, variable='v', check=False):
    data, metadata = self.build_test_data(variable)
    with open(self.test_file, 'wb') as f:
        for item in sorted(metadata.items()):
            f.write(("# %s = %s\n" % item).encode('utf8'))
        np.savetxt(f, data)
    if check:
        raise NotImplementedError
def _write_file_contents(self, data, metadata):
    with open(self.filename, 'wb') as f:
        for item in sorted(metadata.items()):
            f.write(("# %s = %s\n" % item).encode('utf8'))
        numpy.savetxt(f, data)