import pickle

import numpy as np

# Generate a dataset from all available files.
# STORE_DIR and generateSingleDataset are defined elsewhere in audio_converter.py.
def generateDatasets(train_files, cut_data, len_mfcc, step_mfcc, hop_len, freq):
    X, Y = [], []
    for tf in train_files:
        train_data, labels = generateSingleDataset(tf, cut_data, len_mfcc, step_mfcc, hop_len, freq)
        X.append(train_data)
        Y.append(labels)
    X = np.concatenate(X)
    Y = np.concatenate(Y)
    # Encode the generation parameters and the dataset shape in the filename
    prefix = 'dataset_CUT_' if cut_data else 'dataset_'
    filename = STORE_DIR + prefix + '_'.join(
        str(v) for v in (freq, hop_len, len_mfcc, step_mfcc,
                         X.shape[0], X.shape[1], X.shape[2])) + '.pickle'
    print(filename)
    # Pickled data must be written through a binary file handle
    with open(filename, 'wb') as f:
        pickle.dump([X, Y], f)
    return X, Y
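A minimal usage sketch follows. The file glob, the MFCC parameter values, and the loadDataset helper are all hypothetical illustrations; generateSingleDataset and STORE_DIR are assumed to come from the rest of audio_converter.py, as in the snippet above.

import glob
import pickle

# Hypothetical input location and MFCC parameters
train_files = sorted(glob.glob('data/train/*.wav'))
X, Y = generateDatasets(train_files, cut_data=True,
                        len_mfcc=20, step_mfcc=10, hop_len=512, freq=16000)

# loadDataset is a hypothetical helper for reading a stored dataset back:
# a pickle written with mode 'wb' must be read back with mode 'rb'.
def loadDataset(path):
    with open(path, 'rb') as f:
        X, Y = pickle.load(f)
    return X, Y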
Source file: audio_converter.py (Python)