def rh_e2p_sample_plot(self):
    # sanity check: the sampling results must exist before plotting
    if not self.attr_check(["y_samples"]):
        return
    pl.ioff()
    # 2a. plot sampling results
    pl.suptitle("%s step 1 + 2: learning proprio, then learning e2p" % (self.mode,))
    ax = pl.subplot(211)
    pl.title("Exteroceptive state S_e, proprio to extero mapping p2e")
    self.S_ext = ax.plot(self.logs["S_ext"], "k-", alpha=0.8, label="S_e")
    p2e = ax.plot(self.logs["P2E_pred"], "r-", alpha=0.8, label="p2e")
    handles, labels = ax.get_legend_handles_labels()
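    # plotting a multi-column log array creates one line handle per column,
    # all sharing the same label; keep only the first handle of each group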
    ax.legend(handles=[handles[i] for i in [0, 2]],
              labels=[labels[i] for i in [0, 2]])
    ax2 = pl.subplot(212)
    pl.title("Proprioceptive state S_p, extero to proprio mapping e2p")
    ax2.plot(self.logs["M_prop_pred"], "k-", label="S_p")
    # pl.plot(self.logs["E2P_pred"], "y-", label="E2P knn")
    ax2.plot(self.y_samples, "g-", label="E2P gmm cond", alpha=0.8, linewidth=2)
    ax2.plot(self.logs["X__"][:,:3], "r-", label="goal")
    for y_sample in self.y_samples_:
        plausibility = y_sample - self.logs["X__"][:,:3]
        # print("y_sample.shape = %s, plausibility.shape = %s" % (y_sample.shape, plausibility.shape))
        # print("y_sample", np.sum(y_sample), y_sample - self.logs["X__"][:,:3])
        plausibility_norm = np.linalg.norm(plausibility, 2, axis=1)
        print("plausibility = %f" % np.mean(plausibility_norm))
        if np.mean(plausibility_norm) < 0.8: # plausibility threshold: only plot samples close to the goal, thinning out the clutter
            ax2.plot(y_sample, "b.", label="E2P gmm samples", alpha=0.2)
    handles, labels = ax2.get_legend_handles_labels()
    print("handles, labels = %s, %s" % (handles, labels))
    legidx = slice(0, 12, 3)
    ax2.legend(handles[legidx], labels[legidx])
    # ax.legend(handles=[handles[i] for i in [0, 2]],
    #           labels=[labels[i] for i in [0, 2]])
    pl.show()
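# Hedged sketch (illustration only, not part of the original class): the
# plausibility filter used above, in isolation. A sampled sequence is kept
# only if its mean per-step L2 distance to the goal sequence is below a
# threshold. The names (goal, samples), the shapes, and the 0.8 threshold
# are assumptions matching the plot code above; np is assumed to be numpy.
#
#   import numpy as np
#   goal = np.zeros((100, 3))                        # goal sequence, shape (T, 3)
#   samples = [goal + np.random.normal(0.0, s, goal.shape) for s in (0.1, 0.5, 2.0)]
#   for sample in samples:
#       err = np.linalg.norm(sample - goal, 2, axis=1)   # per-step L2 error
#       if np.mean(err) < 0.8:                           # plausibility threshold
#           print("plausible sample, mean error = %f" % np.mean(err))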