def merge_results(sol, files):
    """Merge per-run inversion result CSVs into a single batch CSV.

    Reads ``Results/<f>/INV_<sol.model>-<model>_<f>.csv`` for every entry
    in *files*, pads shorter result rows with NaN so all runs align to the
    widest header found, and writes one merged CSV under
    ``<cwd>//Batch results/``.

    Parameters
    ----------
    sol : object
        Solver/solution object; only its ``model`` attribute is read here,
        plus whatever ``get_model_type`` derives from it (defined elsewhere
        in this module).
    files : sequence of str
        Run identifiers; their order is preserved in the merged output and
        the first/last ones name the output file.

    Side effects: creates the save directory if missing, writes one CSV
    file, and prints progress messages.
    """
    model = get_model_type(sol)  # model-type tag embedded in the file names
    save_where = '/Batch results/'
    working_path = getcwd().replace("\\", "/") + "/"
    save_path = working_path + save_where

    # Find the run whose CSV has the widest header, so the merged array can
    # hold every column.  NOTE(review): skip_footer=1 assumes each per-run
    # CSV is exactly one header row + one data row -- confirm with the code
    # that writes these files.
    print("\nChecking for longest csv file")
    lengths = []
    for f in files:
        to_merge_temp = working_path + "/Results/%s/INV_%s-%s_%s.csv" % (f, sol.model, model, f)
        headers_temp = np.genfromtxt(to_merge_temp, delimiter=",", dtype=str, skip_footer=1)
        lengths.append(len(headers_temp))
    # Hoisted: the original recomputed lengths.index(max(lengths)) twice.
    widest = lengths.index(max(lengths))
    to_merge_max = working_path + "/Results/%s/INV_%s-%s_%s.csv" % (files[widest], sol.model, model, files[widest])
    headers = np.genfromtxt(to_merge_max, delimiter=",", dtype=str, skip_footer=1)

    print("\nMerging csv files")
    if not path.exists(save_path):
        makedirs(save_path)

    # NaN-fill so runs with fewer columns than the widest header stay
    # aligned; missing trailing columns remain NaN in the merged output.
    merged_inv_results = np.full((len(files), len(headers)), np.nan)
    for i, f in enumerate(files):
        to_add = np.loadtxt(working_path + "/Results/%s/INV_%s-%s_%s.csv" % (f, sol.model, model, f),
                            delimiter=",", skiprows=1)
        # assumes to_add is 1-D (a single data row per file) so shape[0] is
        # the column count -- TODO confirm against the result writer
        merged_inv_results[i][:to_add.shape[0]] = to_add

    rows = np.array(files, dtype=str)[:, np.newaxis]  # ID column (one row per run)
    hd = ",".join(["ID"] + list(headers))
    # comments="" is required: np.savetxt otherwise prefixes the header
    # line with "# ", which corrupts the first column name when the merged
    # CSV is read back as a normal CSV.
    np.savetxt(save_path + "Merged_%s-%s_%s_TO_%s.csv" % (sol.model, model, files[0], files[-1]),
               np.hstack((rows, merged_inv_results)), delimiter=",",
               header=hd, comments="", fmt="%s")
    print("Batch file successfully saved in:\n", save_path)
# (removed scraped web-page artifacts -- "comment list" / "table of
# contents" navigation text that is not valid Python)