def predict_on_long_clips():
"""Load the saved model and perform inference/prediction on features obtained from inputs.
Splits the audio into 10second chunks and predicts on those chunks."""
with open(FILENAMES,"r") as fh:
filecontents=fh.read()
filenames=filecontents.splitlines()
random.shuffle(filenames)
filenames=filenames[:5] #[:5] is for quickly verifying if things work
filenames = [DATASET_LOCATION+f for f in filenames]
session = tf.Session()
saver = tf.train.import_meta_graph(IMPORT_META_GRAPH)
saver.restore(session, tf.train.latest_checkpoint(IMPORT_LATEST_CHECKPOINT))
tf.global_variables_initializer().run(session=session)
test_x = {}
for f in filenames:
s, sr = librosa.load(f)
total_chunks = s.shape[0]/max_audio_length
waveforms = [s[max_audio_length*i:max_audio_length*(i+1)] for i in range(total_chunks)]
test_x[f] = extract_features_from_waveforms(waveforms)
print "FILENAME: ", f
predictions = session.run(tf.argmax(pred, 1), feed_dict={X: test_x[f]})
print [possible_categories[p] for p in predictions]
# Source file: predict_convnet_10sec_model.py
# (web-page metadata from the original code-sharing site removed -- the bare
# "views/likes/comments" counter lines were not Python and broke the file)