def calc_auc(y_pred_proba, labels, exp_run_folder, classifier, fold):
    auc = roc_auc_score(labels, y_pred_proba)
    fpr, tpr, thresholds = roc_curve(labels, y_pred_proba)
    curve_roc = np.array([fpr, tpr])
    datafile_id = open(exp_run_folder+'/data/roc_{}_{}.txt'.format(classifier, fold), 'w+')
    np.savetxt(datafile_id, curve_roc)
    datafile_id.close()
    plt.plot(fpr, tpr, label='ROC curve: AUC={0:0.2f}'.format(auc))
    plt.xlabel('1-Specificity')
    plt.ylabel('Sensitivity')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.grid(True)
    plt.title('ROC Fold {}'.format(fold))
    plt.legend(loc="lower left")
    plt.savefig(exp_run_folder+'/data/roc_{}_{}.pdf'.format(classifier, fold), format='pdf')
    return auc
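
A minimal usage sketch for calc_auc (the run folder and classifier name are hypothetical, and exp_run_folder must already contain a data/ subdirectory):

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, roc_curve

# Hypothetical toy labels and scores.
labels = np.array([0, 0, 1, 1])
y_pred_proba = np.array([0.1, 0.4, 0.35, 0.8])
auc = calc_auc(y_pred_proba, labels, 'runs/exp1', 'logreg', fold=0)
print('AUC = %.2f' % auc)
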
def main():
    files = tf.gfile.Glob(flags.FLAGS.src_path_1)
    labels_uni = np.zeros([4716, 1])
    labels_matrix = np.zeros([4716, 4716])
    for file in files:
        labels_all = get_video_input_feature(file)
        print(len(labels_all[0][2]), len(labels_all[0][3]), len(labels_all[0][4]), len(labels_all[0][5]))
    """
    for labels in labels_all:
        for i in range(len(labels)):
            labels_uni[labels[i]] += 1
            for j in range(len(labels)):
                labels_matrix[labels[i], labels[j]] += 1
    labels_matrix = labels_matrix/labels_uni
    labels_matrix = labels_matrix/(np.sum(labels_matrix, axis=0)-1.0)
    for i in range(4716):
        labels_matrix[i, i] = 1.0
    np.savetxt('labels_uni.out', labels_uni, delimiter=',')
    np.savetxt('labels_matrix.out', labels_matrix, delimiter=',')"""
def _write(self, stream, text, byte_order):
    '''
    Write the data to a PLY file.
    '''
    if self._have_list:
        # There are list properties, so serialization is
        # slightly complicated.
        if text:
            self._write_txt(stream)
        else:
            self._write_bin(stream, byte_order)
    else:
        # no list properties, so serialization is
        # straightforward.
        if text:
            _np.savetxt(stream, self.data, '%.18g', newline='\r\n')
        else:
            data = self.data.astype(self.dtype(byte_order),
                                    copy=False)
            data.tofile(stream)
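
Outside the class, the two branches of the no-list path boil down to the following; a sketch with a hypothetical structured array, where the '<' byte order stands in for self.dtype(byte_order):

import numpy as np

# Hypothetical vertex data with two float32 fields.
data = np.array([(0.0, 1.0), (2.0, 3.0)], dtype=[('x', 'f4'), ('y', 'f4')])
with open('vertices.txt', 'wb') as stream:
    # Text serialization: one '%.18g' column per field, CRLF line endings.
    np.savetxt(stream, data, '%.18g', newline='\r\n')
with open('vertices.bin', 'wb') as stream:
    # Binary serialization: raw little-endian bytes.
    data.astype(data.dtype.newbyteorder('<'), copy=False).tofile(stream)
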
def write_segment(self, segment,
                  delimiter='\t',
                  skiprows=0,
                  writetimecolumn=True,
                  ):
    """
    Write a segment and AnalogSignal in a text file.

    **Arguments**
        delimiter : column delimiter in the file: '\t', one space, two spaces, ',' or ';'
        writetimecolumn : True or False; write the time vector as the first column
    """
    if skiprows:
        raise NotImplementedError('skiprows values other than 0 are not supported')
    l = []
    if writetimecolumn:  # the docstring says True/False, so test truthiness
        l.append(segment.analogsignals[0].times[:, np.newaxis])
    for anaSig in segment.analogsignals:
        l.append(anaSig.magnitude[:, np.newaxis])
    sigs = np.concatenate(l, axis=1)
    np.savetxt(self.filename, sigs, delimiter=delimiter)
def transpose_contig_matrix(args):
    contig = args[0]
    opts = args[1]
    logging.info("  Transposing %s" % contig)
    contig_ipds_fn = os.path.join(opts.tmp, "%s_ipds.tmp" % contig)
    contig_ipds_kmers_fn = os.path.join(opts.tmp, "%s_ipdskmers.tmp" % contig)
    contig_ipds_N_fn = os.path.join(opts.tmp, "%s_ipdsN.tmp" % contig)
    contig_ipds = np.loadtxt(contig_ipds_fn, dtype="float")
    contig_ipds_kmers = np.loadtxt(contig_ipds_kmers_fn, dtype="str")
    contig_ipds_N = np.loadtxt(contig_ipds_N_fn, dtype="int")
    if len(contig_ipds.shape) == 1:
        contig_ipds = contig_ipds.reshape(1, contig_ipds.shape[0])
        contig_ipds_N = contig_ipds_N.reshape(1, contig_ipds_N.shape[0])
    contig_ipds = contig_ipds.T
    contig_ipds_N = contig_ipds_N.T
    np.savetxt(contig_ipds_fn+".trans", contig_ipds, fmt="%.4f", delimiter="\t")
    np.savetxt(contig_ipds_N_fn+".trans", contig_ipds_N, fmt="%s", delimiter="\t")
    return None
def writeToCSV(_imat, _nameOfFile):
    _rows, _columns = _imat.shape
    _array = []
    _index = 0
    # Alternatives: _imat.tofile(_nameOfFile + ".csv", sep=',', format='%10.5f')
    # or np.savetxt(_nameOfFile + ".csv", _imat, delimiter=",")
    for i in range(0, _rows):
        for j in range(0, _columns):
            _array.append([])
            _array[_index].append(i)
            _array[_index].append(j)
            _array[_index].append(_imat[i][j])
            _index = _index + 1
    writeCSVFile(_array, _nameOfFile + ".csv")
    return
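
The commented-out one-liners hint at the simpler route; for the long (row, column, value) format itself, a loop-free sketch (the function name is hypothetical):

import numpy as np

def write_long_format_csv(imat, name):
    # Build (row, col, value) triples without the nested Python loops.
    rows, cols = np.indices(imat.shape)
    triples = np.column_stack([rows.ravel(), cols.ravel(), imat.ravel()])
    np.savetxt(name + '.csv', triples, delimiter=',', fmt=['%d', '%d', '%10.5f'])
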
def save(self, outfile):
    if self.ps1d_normalized:
        ps1d_desc = "normalized power [K^2]"
    else:
        ps1d_desc = "power [K^2 Mpc^3]"
    header = [
        "EoR window definition:",
        "+ FoV: %f [deg]" % self.ps2d.fov,
        "+ e_ConvWidth: %f" % self.ps2d.e,
        "+ k_perp_min: %f [Mpc^-1]" % self.ps2d.k_perp_min,
        "+ k_perp_max: %f [Mpc^-1]" % self.ps2d.k_perp_max,
        "+ k_los_min: %f [Mpc^-1]" % self.ps2d.k_los_min,
        "+ k_los_max: %f [Mpc^-1]" % self.ps2d.k_los_max,
        "",
        "Columns:",
        "1. k: wavenumber [Mpc^-1]",
        "2. ps1d: %s" % ps1d_desc,
        "3. ps1d_err: power errors",
        "",
        "k  ps1d  ps1d_err",
    ]
    np.savetxt(outfile, self.ps1d, header="\n".join(header))
    print("Saved 1D power spectrum to file: %s" % outfile)
def save(self, outfile):
    data = self.psd1d
    header = [
        "pixel: %s [%s]" % self.pixel,
        "frequency: [%s^-1]" % self.pixel[1],
    ]
    if self.meanstd:
        header += [
            "psd1d: *mean* powers of radial averaging annuli",
            "psd1d_err: *standard deviation*",
        ]
    else:
        header += [
            "psd1d: *median* powers of radial averaging annuli",
            "psd1d_err: 1.4826*MAD (median absolute deviation)",
        ]
    header += [
        "n_cells: number of averaging cells",
        "",
        "frequency  psd1d  psd1d_err  n_cells",
    ]
    np.savetxt(outfile, data, header="\n".join(header))
    print("Saved PSD data to: %s" % outfile)
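
Both save methods lean on np.savetxt's header argument, which writes the joined string before the data and prefixes each of its lines with the comment marker ('# ' by default). A minimal sketch:

import numpy as np

data = np.column_stack([np.arange(5), np.arange(5) ** 2])
header = "\n".join(["toy spectrum", "", "k  power"])
np.savetxt('toy_ps.txt', data, header=header)
# Every header line in toy_ps.txt starts with '# '.
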
def dump_all_actions(ae, configs, trans_fn, name="all_actions.csv", repeat=1):
    if 'dump' not in mode:
        return
    l = len(configs)
    batch = 5000
    loop = (l // batch) + 1
    try:
        print(ae.local(name))
        with open(ae.local(name), 'wb') as f:
            for i in range(repeat):
                for begin in range(0, loop*batch, batch):
                    end = begin + batch
                    print((begin, end, len(configs)))
                    transitions = trans_fn(configs[begin:end])
                    orig, dest = transitions[0], transitions[1]
                    orig_b = ae.encode_binary(orig, batch_size=1000).round().astype(int)
                    dest_b = ae.encode_binary(dest, batch_size=1000).round().astype(int)
                    actions = np.concatenate((orig_b, dest_b), axis=1)
                    np.savetxt(f, actions, "%d")
    except AttributeError:
        print("this AE does not support dumping")
    except KeyboardInterrupt:
        print("dump stopped")
def dump_all_states(ae, configs, states_fn, name="all_states.csv", repeat=1):
    if 'dump' not in mode:
        return
    l = len(configs)
    batch = 5000
    loop = (l // batch) + 1
    try:
        print(ae.local(name))
        with open(ae.local(name), 'wb') as f:
            for i in range(repeat):
                for begin in range(0, loop*batch, batch):
                    end = begin + batch
                    print((begin, end, len(configs)))
                    states = states_fn(configs[begin:end])
                    states_b = ae.encode_binary(states, batch_size=1000).round().astype(int)
                    np.savetxt(f, states_b, "%d")
    except AttributeError:
        print("this AE does not support dumping")
    except KeyboardInterrupt:
        print("dump stopped")
def dump_states(ae, states, name="states.csv", repeat=1):
    if 'dump' not in mode:
        return
    try:
        print(ae.local(name))
        with open(ae.local(name), 'wb') as f:
            for i in range(repeat):
                np.savetxt(f, ae.encode_binary(states, batch_size=1000).round().astype(int), "%d")
    except AttributeError:
        print("this AE does not support dumping")
    except KeyboardInterrupt:
        print("dump stopped")
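
All three dump helpers share one pattern: the file handle stays open while np.savetxt is called once per batch, so rows accumulate in a single CSV. A standalone sketch of that pattern:

import numpy as np

with open('batched.csv', 'wb') as f:
    for batch in np.array_split(np.arange(100).reshape(20, 5), 4):
        np.savetxt(f, batch, "%d")  # each call appends after the previous rows
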
import subprocess
################################################################
# note: lightsout has epoch 200
def writeModelUBC(mesh, fileName, model):
    """Writes a model associated with a TensorMesh
    to a UBC-GIF format model file.

    :param string fileName: File to write to
    :param numpy.ndarray model: The model
    """
    # Reshape model to a matrix
    modelMat = mesh.r(model, 'CC', 'CC', 'M')
    # Transpose the axes
    modelMatT = modelMat.transpose((2, 0, 1))
    # Flip z to positive down
    modelMatTR = utils.mkvc(modelMatT[::-1, :, :])
    np.savetxt(fileName, modelMatTR.ravel())
def encoder(args, model):
    latent_dim = args.latent_dim
    data, charset = load_dataset(args.data, split=False)
    if os.path.isfile(args.model):
        model.load(charset, args.model, latent_rep_size=latent_dim)
    else:
        raise ValueError("Model file %s doesn't exist" % args.model)
    x_latent = model.encoder.predict(data)
    if args.save_h5:
        h5f = h5py.File(args.save_h5, 'w')
        h5f.create_dataset('charset', data=charset)
        h5f.create_dataset('latent_vectors', data=x_latent)
        h5f.close()
    else:
        np.savetxt(sys.stdout, x_latent, delimiter='\t')
def main():
    args = get_arguments()
    model = MoleculeVAE()
    data, data_test, charset = load_dataset(args.data)
    if os.path.isfile(args.model):
        model.load(charset, args.model, latent_rep_size=args.latent_dim)
    else:
        raise ValueError("Model file %s doesn't exist" % args.model)
    x_latent = model.encoder.predict(data)
    if not args.visualize:
        if not args.save_h5:
            np.savetxt(sys.stdout, x_latent, delimiter='\t')
        else:
            h5f = h5py.File(args.save_h5, 'w')
            h5f.create_dataset('charset', data=charset)
            h5f.create_dataset('latent_vectors', data=x_latent)
            h5f.close()
    else:
        visualize_latent_rep(args, model, x_latent)
def export_histories(self, path):
    if not os.path.exists(path):
        os.makedirs(path)
    i = np.arange(len(self.loss_history)) + 1
    # list() is needed on Python 3, where zip() returns an iterator
    z = np.array(list(zip(i, i*self.batch_size, self.loss_history)))
    np.savetxt(path + 'loss_history.csv', z, delimiter=',', fmt=[
        '%d', '%d', '%f'], header='iteration, n_images, loss')
    i = np.arange(len(self.train_acc_history), dtype=int)
    z = np.array(list(zip(i, self.train_acc_history)))
    np.savetxt(path + 'train_acc_history.csv', z, delimiter=',', fmt=[
        '%d', '%f'], header='epoch, train_acc')
    z = np.array(list(zip(i, self.val_acc_history)))
    np.savetxt(path + 'val_acc_history.csv', z, delimiter=',', fmt=[
        '%d', '%f'], header='epoch, val_acc')
    np.save(path + 'loss', self.loss_history)
    np.save(path + 'train_acc_history', self.train_acc_history)
    np.save(path + 'val_acc_history', self.val_acc_history)
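
Instead of the list(zip(...)) detour, the columns can also be assembled with np.column_stack; a sketch with hypothetical history values:

import numpy as np

loss_history = [0.9, 0.7, 0.5]   # hypothetical values
batch_size = 32                  # hypothetical
i = np.arange(len(loss_history)) + 1
z = np.column_stack([i, i * batch_size, loss_history])
np.savetxt('loss_history.csv', z, delimiter=',', fmt=['%d', '%d', '%f'],
           header='iteration, n_images, loss')
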
def test_format(self):
    a = np.array([(1, 2), (3, 4)])
    c = BytesIO()
    # Sequence of formats
    np.savetxt(c, a, fmt=['%02d', '%3.1f'])
    c.seek(0)
    assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])

    # A single multiformat string
    c = BytesIO()
    np.savetxt(c, a, fmt='%02d : %3.1f')
    c.seek(0)
    lines = c.readlines()
    assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

    # Specify delimiter, should be overridden
    c = BytesIO()
    np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
    c.seek(0)
    lines = c.readlines()
    assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

    # Bad fmt, should raise a ValueError
    c = BytesIO()
    assert_raises(ValueError, np.savetxt, c, a, fmt=99)
def test_usecols(self):
    # Test the selection of columns
    # Select 1 column
    control = np.array([[1, 2], [3, 4]], float)
    data = TextIO()
    np.savetxt(data, control)
    data.seek(0)
    test = np.ndfromtxt(data, dtype=float, usecols=(1,))
    assert_equal(test, control[:, 1])
    #
    control = np.array([[1, 2, 3], [3, 4, 5]], float)
    data = TextIO()
    np.savetxt(data, control)
    data.seek(0)
    test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
    assert_equal(test, control[:, 1:])
    # Testing with arrays instead of tuples.
    data.seek(0)
    test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
    assert_equal(test, control[:, 1:])
def create_scatter_plot(outfile_results, config):
    true_vs_pred = os.path.join(config.output_dir,
                                config.name + "_results.csv")
    true_vs_pred_plot = os.path.join(config.output_dir,
                                     config.name + "_results.png")
    with hdf.open_file(outfile_results, 'r') as f:
        prediction = f.get_node("/", "Prediction").read()
        y_true = f.get_node("/", "y_true").read()
        np.savetxt(true_vs_pred, X=np.vstack([y_true, prediction]).T,
                   delimiter=',')
        plt.figure()
        plt.scatter(y_true, prediction)
        plt.title('true vs prediction')
        plt.xlabel('True')
        plt.ylabel('Prediction')
        plt.savefig(true_vs_pred_plot)
def WordToVec(bet_list):
    model = Word2Vec.load('word2vector.model')
    # Build one 50-dim vector per "between" text by summing its word vectors
    bet_vec_list = []  # one vector per entry in bet_list
    for bet in bet_list:
        bet = bet.strip()
        num, line = bet.split(':', 1)
        line = line.split()
        between_vec = np.array([0] * 50)  # running sum for this line
        for word in line:
            if word in model:
                between_vec = between_vec + np.array(model[word])
        bet_vec_list.append(between_vec)
    bet_vec_list = np.array(bet_vec_list)
    np.savetxt('2.csv', bet_vec_list)
    return bet_vec_list
def compar_pic(path1, path2):
    global net
    # Load the first image
    X = read_image(path1)
    test_num = np.shape(X)[0]
    # Forward pass through the network
    out = net.forward_all(data=X)
    # The fc7 activations serve as the feature vector
    feature1 = np.float64(out['fc7'])
    feature1 = np.reshape(feature1, (test_num, 4096))
    #np.savetxt('feature1.txt', feature1, delimiter=',')
    # Load the second image
    X = read_image(path2)
    out = net.forward_all(data=X)
    feature2 = np.float64(out['fc7'])
    feature2 = np.reshape(feature2, (test_num, 4096))
    #np.savetxt('feature2.txt', feature2, delimiter=',')
    # Cosine similarity between the two feature vectors
    predicts = pw.cosine_similarity(feature1, feature2)
    return predicts
def run(self):
    """
    extract and resize images then write manifest files to disk.
    """
    cfg_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train.cfg')
    log_file = os.path.join(self.orig_out_dir, 'train.log')
    manifest_list_cfg = ', '.join([k + ':' + v for k, v in self.manifests.items()])

    with open(cfg_file, 'w') as f:
        f.write('manifest = [{}]\n'.format(manifest_list_cfg))
        f.write('manifest_root = {}\n'.format(self.out_dir))
        f.write('log = {}\n'.format(log_file))
        f.write('epochs = 90\nrng_seed = 0\nverbose = True\neval_freq = 1\n')

    for setn, manifest in self.manifests.items():
        if not os.path.exists(manifest):
            pairs = self.train_or_val_pairs(setn)
            records = [(os.path.relpath(fname, self.out_dir),
                        os.path.relpath(self._target_filename(int(tgt)), self.out_dir))
                       for fname, tgt in pairs]
            np.savetxt(manifest, records, fmt='%s,%s')
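
The manifest write relies on np.savetxt accepting string records when fmt is a plain format string; a minimal sketch with hypothetical paths:

import numpy as np

records = [('img/0001.jpg', 'labels/7.txt'),
           ('img/0002.jpg', 'labels/2.txt')]  # hypothetical file pairs
np.savetxt('train_manifest.csv', records, fmt='%s,%s')  # one 'path,target' line each
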
def main(self, input_ring):
    """Initiate the writing to filename
    @param[in] input_rings First ring in this list will be used for
        data
    @param[out] output_rings This list of rings won't be used."""
    span_generator = self.iterate_ring_read(input_ring)
    data_accumulate = None
    for span in span_generator:
        if self.nbit < 8:
            unpacked_data = unpack(span.data_view(self.dtype), self.nbit)
        else:
            if self.dtype == np.complex64:
                unpacked_data = span.data_view(self.dtype).view(np.float32)
            elif self.dtype == np.complex128:
                unpacked_data = span.data_view(self.dtype).view(np.float64)
            else:
                unpacked_data = span.data_view(self.dtype)
        if data_accumulate is not None:
            data_accumulate = np.concatenate((data_accumulate, unpacked_data[0]))
        else:
            data_accumulate = unpacked_data[0]
    # Write the accumulated data once, closing the handle when done
    with open(self.filename, 'a') as text_file:
        np.savetxt(text_file, data_accumulate.reshape((1, -1)))
def saveData(self):
    try:
        os.mkdir(self.savedir)
    except OSError:  # a bare except would also swallow unrelated errors
        print('directory exists. overwriting')
    print('saving to ', self.savedir)
    if self.calibrateOnlyADC:  # create ideal dataset for PV1, PV2
        np.savetxt(os.path.join(self.savedir, 'PV1_ERR.csv'), np.column_stack([np.linspace(-5, 5, 4096), np.linspace(-5, 5, 4096)]))
        np.savetxt(os.path.join(self.savedir, 'PV2_ERR.csv'), np.column_stack([np.linspace(-3.3, 3.3, 4096), np.linspace(-3.3, 3.3, 4096)]))
    else:
        np.savetxt(os.path.join(self.savedir, 'PV1_ERR.csv'), np.column_stack([self.A.ADC24['AIN5'], self.A.DAC_VALS['PV1']]))
        np.savetxt(os.path.join(self.savedir, 'PV2_ERR.csv'), np.column_stack([self.A.ADC24['AIN6'], self.A.DAC_VALS['PV2']]))
        np.savetxt(os.path.join(self.savedir, 'PV3_ERR.csv'), np.column_stack([self.A.ADC24['AIN7'], self.A.DAC_VALS['PV3']]))
        np.savetxt(os.path.join(self.savedir, 'CALIB_INL.csv'), np.column_stack([self.A.ADC24['AIN7'], self.A.ADCPIC_INL]))
    for a in self.INPUTS:
        if self.I.analogInputSources[a].gainEnabled:
            for b in range(8):
                raw = self.A.ADC_VALUES[a][b]
                np.savetxt(os.path.join(self.savedir, 'CALIB_%s_%dx.csv' % (a, self.I.gain_values[b])),
                           np.column_stack([np.array(self.A.ADC24['AIN6'])[self.A.ADC_ACTUALS[a][b]], raw]))
        else:
            np.savetxt(os.path.join(self.savedir, 'CALIB_%s_%dx.csv' % (a, 1)),
                       np.column_stack([np.array(self.A.ADC24['AIN6'])[self.A.ADC_ACTUALS[a][0]], self.A.ADC_VALUES[a][0]]))
def main():
    import sys
    num_samples = int(sys.argv[1])
    num_variables = int(sys.argv[2])
    if num_variables < 9:
        raise ValueError('needed at least 9 variables')
    print('Generation of %d samples with %d variables...' % (num_samples,
                                                             num_variables), end=' ')
    X, Y = correlated_dataset(num_samples, num_variables, (5, 5, 5), [1.0]*15)
    np.savetxt('data.txt', X)
    np.savetxt('labels.txt', Y)
    print('done')
def dump(self, base):
    header = ",".join(["x" + str(x) for x in range(1, 1 + self.X_train.shape[1])])
    header += ","
    header += ",".join(["y" + str(x) for x in range(1, 1 + self.y_train_nn.shape[1])])
    np.savetxt(base + "_train.csv",
               np.hstack((self.X_train, self.y_train_nn)),
               fmt='%10.5f', delimiter=',', header=header, comments="")
    np.savetxt(base + "_validate.csv",
               np.hstack((self.X_validate, self.y_validate_nn)),
               fmt='%10.5f', delimiter=',', header=header, comments="")
    np.savetxt(base + "_train_norm.csv",
               np.hstack((self.X_train_norm, self.y_train_nn)),
               fmt='%10.5f', delimiter=',', header=header, comments="")
    np.savetxt(base + "_validate_norm.csv",
               np.hstack((self.X_validate_norm, self.y_validate_nn)),
               fmt='%10.5f', delimiter=',', header=header, comments="")
# Human readable time elapsed string.
def save_raw_csv(raw, soln, dir_csv):
    np.savetxt(os.path.join(dir_csv, str(raw['axis0_type'])+'.csv'), raw['axis0'])
    np.savetxt(os.path.join(dir_csv, 'pressure.csv'), raw['pressure'])
    np.savetxt(os.path.join(dir_csv, 'temperature.csv'), raw['temperature'])
    np.savetxt(os.path.join(dir_csv, 'mole_fraction.csv'), raw['mole_fraction'], delimiter=',')
    np.savetxt(os.path.join(dir_csv, 'net_reaction_rate.csv'), raw['net_reaction_rate'], delimiter=',')
    if 'speed' in raw.keys():
        np.savetxt(os.path.join(dir_csv, 'speed.csv'), raw['speed'], delimiter=',')
    with open(os.path.join(dir_csv, 'species_list.csv'), 'w') as f:
        for sp in soln.species_names:
            f.write(sp+'\n')
    with open(os.path.join(dir_csv, 'reaction_list.csv'), 'w') as f:
        for rxn in soln.reaction_equations():
            f.write(rxn+'\n')
def save_adjacency_matrix_for_gephi(matrix, name, root_dir=None, notebook_mode=True, class_names=None):
    if root_dir is None:
        root_dir = os.getcwd()
    directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['GEPHI_DIR']),
                                    notebook_mode=notebook_mode)
    filename = join_paths(directory, '%s.csv' % name)
    m, n = np.shape(matrix)
    assert m == n, '%s should be a square matrix.' % name
    if not class_names:
        class_names = [str(k) for k in range(n)]
    left = np.array([class_names]).T
    matrix = np.hstack([left, matrix])
    up = np.vstack([[''], left]).T
    matrix = np.vstack([up, matrix])
    np.savetxt(filename, matrix, delimiter=';', fmt='%s')
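
A quick check of the bordering logic with a toy 2x2 matrix (the class names are hypothetical):

import numpy as np

matrix = np.arange(4).reshape(2, 2).astype(str)
class_names = ['a', 'b']
left = np.array([class_names]).T
bordered = np.vstack([np.vstack([[''], left]).T,
                      np.hstack([left, matrix])])
np.savetxt('toy.csv', bordered, delimiter=';', fmt='%s')
# toy.csv contents:
# ;a;b
# a;0;1
# b;2;3
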
def GenerateData(nf=256, ns=16384):
    try:    # Try to read data from file
        A = np.loadtxt('bdatA.csv', delimiter=',')
        Y = np.loadtxt('bdatY.csv', delimiter=',').reshape(-1, 1)
    except OSError:     # New data needs to be generated
        x = np.linspace(-1, 1, num=ns).reshape(-1, 1)
        A = np.concatenate([x] * nf, axis=1)
        Y = ((np.sum(A, axis=1) / nf) ** 2).reshape(-1, 1)
        A = (A + np.random.rand(ns, nf)) / 2.0
        np.savetxt('bdatA.csv', A, delimiter=',')
        np.savetxt('bdatY.csv', Y, delimiter=',')
    return (A, Y)
#R: Regressor network to use
#A: The sample data matrix
#Y: Target data matrix
#nt: Number of times to divide the sample matrix
#fn: File name to write results
def MakeBenchDataFeature(R, A, Y, nt, fn):
    #Divide the features into nt chunks; for each i run the benchmark with chunks 0, 1, ..., i
    step = A.shape[1] // nt
    TT = np.zeros((nt, 3))
    for i in range(1, nt):
        #Number of features
        TT[i, 0] = len(range(0, (i * step)))
        print('{:8d} feature benchmark.'.format(int(TT[i, 0])))
        #Training and testing times respectively
        TT[i, 1], TT[i, 2] = RunBenchmark(R, A[:, 0:(i * step)], Y[:, 0:(i * step)])
    #Save benchmark data to csv file (first column is the feature count)
    np.savetxt(fn, TT, delimiter=',', header='Features,Train,Test')
#R: Regressor network to use
#A: The sample data matrix
#Y: Target data matrix
#nt: Number of times to divide the sample matrix
#fn: File name to write results