def __init__(self, n_features=None, n_dim=8, lossfun=F.mean_squared_error,
             lambda0=5e-3, lambda1=5e-3, lambda2=5e-3, init_bias=0.0,
             intx_term=True, total_nobs=1):
    """Build a factorization-machine model.

    Parameters
    ----------
    n_features : int
        Number of input features (must be given; used to size the
        embedding tables, so ``None`` will fail at construction time).
    n_dim : int
        Dimensionality of each feature's latent interaction vector.
    lossfun : callable
        Loss function applied by the training loop
        (default: mean squared error).
    lambda0, lambda1, lambda2 : float
        Regularization strengths — presumably for the bias, slope, and
        latent terms respectively; verify against the loss code.
    init_bias : float
        Initial value for the global bias term.
    intx_term : bool
        Whether the pairwise-interaction term is used (flag is only
        stored here; consumed elsewhere).
    total_nobs : int
        Total number of observations (stored for use elsewhere,
        e.g. to scale regularization per minibatch — confirm in caller).
    """
    self.n_dim = n_dim
    self.n_features = n_features
    self.lossfun = lossfun
    self.lambda0 = lambda0
    self.lambda1 = lambda1
    self.lambda2 = lambda2
    self.intx_term = intx_term
    self.total_nobs = total_nobs
    # Learned parameters: a scalar global bias, a per-feature slope
    # (linear term), and a per-feature latent interaction vector.
    super(FM, self).__init__(bias=L.Bias(shape=(1,)),
                             slope=L.EmbedID(n_features, 1),
                             latent=L.EmbedID(n_features, n_dim))
    # Scale random-normal weights down by sqrt(fan size) so initial
    # activations stay small ("Xavier"-style initialization).
    c = np.sqrt(n_features * n_dim)
    self.latent.W.data[...] = np.random.randn(n_features, n_dim) / c
    d = np.sqrt(n_features)
    self.slope.W.data[...] = np.random.randn(n_features, 1) / d
    # Single broadcast assignment replaces the original
    # "*= 0.0 then += init_bias" two-step — identical result.
    self.bias.b.data[...] = init_bias