def sample_cond(self, X):
    """smpHebbianSOM.sample_cond: draw a single sample from the model conditioned on X.

    Freezes both SOMs (learning rate 0), activates the input (e) network
    with X, propagates the activation through the Hebbian associative
    links to the output (p) network, samples the output network, and
    returns one Gaussian draw around the winning neuron's weight vector.

    Parameters
    ----------
    X : array-like
        Conditioning input; forwarded to ``self.filter_e.learn``.

    Returns
    -------
    numpy.ndarray, shape (1, self.odim)
        Single sample drawn from the conditional distribution.
    """
    # Freeze both SOMs by zeroing their learning rates so sampling does
    # not modify the trained maps. The previous rates are stashed on
    # self.filter_e_lr / self.filter_p_lr.
    # NOTE(review): the rates are never restored inside this method —
    # presumably a caller (or a later fit) restores them; verify.
    self.filter_e_lr = self.filter_e.map._learning_rate
    self.filter_p_lr = self.filter_p.map._learning_rate
    self.filter_e.map._learning_rate = self.CT(0.0)
    self.filter_p.map._learning_rate = self.CT(0.0)

    # Flattened column shape of the input map's unit grid.
    e_shape = (np.prod(self.filter_e.map._shape), 1)

    # Activate the input network with the conditioning input (learning
    # rate is 0, so only the activity changes, not the weights).
    self.filter_e.learn(X)

    # Propagate activation to the output network via the Hebbian links.
    if self.hebblink_use_activity:
        e_ = self.filter_e.activity.reshape(e_shape)
        # Winner-take-all: keep only the maximally active unit(s).
        e_ = (e_ == np.max(e_)) * 1.0
        e2p_activation = np.dot(self.hebblink_filter.T, e_)
        # Normalize to a probability-like activity map; the 1e-9 guards
        # against division by zero when the activation is all zeros.
        self.filter_p.activity = np.clip(
            (e2p_activation / (np.sum(e2p_activation) + 1e-9)).reshape(self.filter_p.map._shape),
            0, np.inf)
    else:
        # BUGFIX: the original referenced an undefined name `e` here,
        # raising NameError on this branch. The distance-based
        # propagation conditions on the same input X as the activity
        # branch above.
        e2p_activation = np.dot(
            self.hebblink_filter.T,
            self.filter_e.distances(X).flatten().reshape(e_shape))

    # Sample the output network repeatedly and take the mode (most
    # frequent winning unit) as the conditional sample index.
    sidxs = self.filter_p.sample(100)
    sidx = stats.mode(sidxs)[0][0]

    # Draw one Gaussian sample around the winning neuron's weight
    # vector, with std = sqrt of that neuron's variance estimate.
    e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(sidx))
    ret = np.random.normal(e2p_w_p_weights, np.sqrt(self.filter_p.sigmas[sidx]), (1, self.odim))
    return ret