Python mean_squared_error() usage examples
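The snippets below, collected from open-source projects, show chainer.functions.mean_squared_error in use, mostly as a regression, distillation, or reconstruction loss. The function computes the mean of the element-wise squared differences between its two inputs and returns a scalar Variable. A minimal sketch of the call itself (the arrays are made-up illustration data; only NumPy and Chainer are assumed):

import numpy as np
import chainer.functions as F

x0 = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
x1 = np.array([[1.5, 2.0], [2.0, 4.0]], dtype=np.float32)

# mean((x0 - x1) ** 2) over all elements, returned as a scalar Variable
loss = F.mean_squared_error(x0, x1)
print(loss.data)  # (0.25 + 0.0 + 1.0 + 0.0) / 4 = 0.3125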

Source file: train1_unsupervised.py (project: SketchSimplification, author: La4La)
def loss_gen(self, gen, G_out, gt, batchsize, alpha=1):
    xp = self.gen.xp
    # MSE times the total element count gives a summed squared error
    loss_L = F.mean_squared_error(G_out, gt) * G_out.data.size
    loss = loss_L
    chainer.report({'loss': loss, "loss_L": loss_L}, gen)
    return loss
Source file: train2_paired.py (project: SketchSimplification, author: La4La)
def loss_gen(self, gen, G_p_rough, D_p_rough, p_line, batchsize, alpha=0.1):
    xp = self.gen.xp
    # Reconstruction term: MSE scaled by the batch size
    loss_L = F.mean_squared_error(G_p_rough, p_line) * G_p_rough.data.shape[0]
    # Adversarial term: push the discriminator toward labeling the output as class 0
    loss_adv = F.softmax_cross_entropy(D_p_rough, Variable(xp.zeros(batchsize, dtype=np.int32)))
    # loss_line = self.line_loss(G_p_rough, p_line)
    loss = loss_L + alpha * loss_adv  # + loss_line
    chainer.report({'loss': loss, "loss_L": loss_L, 'loss_adv': loss_adv}, gen)
    return loss
Source file: task.py (project: cloud-ml-sdk, author: XiaoMi)
import numpy as np
from chainer import Variable, optimizers
import chainer.functions as F
import chainer.links as L


def main():
  # Define train function
  def linear_train(train_data, train_target, n_epochs=200):
    for _ in range(n_epochs):
      output = linear_function(train_data)
      loss = F.mean_squared_error(train_target, output)
      linear_function.zerograds()
      loss.backward()
      optimizer.update()

  # Construct train data
  x = 30 * np.random.rand(1000).astype(np.float32)
  y = 7 * x + 10
  y += 10 * np.random.randn(1000).astype(np.float32)

  linear_function = L.Linear(1, 1)

  x_var = Variable(x.reshape(1000, -1))
  y_var = Variable(y.reshape(1000, -1))

  optimizer = optimizers.MomentumSGD(lr=0.001)
  optimizer.setup(linear_function)

  for i in range(150):
    linear_train(x_var, y_var, n_epochs=20)
    y_pred = linear_function(x_var).data

  slope = linear_function.W.data[0, 0]
  intercept = linear_function.b.data[0]

  print("Final Line: {0:.3}x + {1:.3}".format(slope, intercept))
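Because the training data were generated as y = 7x + 10 plus Gaussian noise, the printed slope and intercept should come out close to 7 and 10 once the 150 rounds of 20 epochs have run.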
Source file: ddqn_agent.py (project: doubleDQN, author: masataka46)
def forward(self, state, action, Reward, state_dash, episode_end):
    num_of_batch = state.shape[0]
    s = Variable(state)
    s_dash = Variable(state_dash)

    Q = self.Q_func(s)  # Get Q-value
    # Generate target signals (Double DQN): the online network chooses the
    # action, the target network evaluates it
    tmp2 = self.Q_func(s_dash)
    tmp2 = list(map(np.argmax, tmp2.data.get()))  # argmax Q(s', a)
    tmp = self.Q_func_target(s_dash)  # Q'(s', *)
    tmp = list(tmp.data.get())
    # select Q'(s', a) at the argmax actions chosen above
    res1 = []
    for i in range(num_of_batch):
        res1.append(tmp[i][tmp2[i]])

    #max_Q_dash = np.asanyarray(tmp, dtype=np.float32)
    max_Q_dash = np.asanyarray(res1, dtype=np.float32)
    target = np.asanyarray(Q.data.get(), dtype=np.float32)
    for i in range(num_of_batch):
        if not episode_end[i][0]:
            tmp_ = np.sign(Reward[i]) + self.gamma * max_Q_dash[i]
        else:
            tmp_ = np.sign(Reward[i])

        action_index = self.action_to_index(action[i])
        target[i, action_index] = tmp_
    # TD-error clipping
    td = Variable(cuda.to_gpu(target)) - Q  # TD error
    td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
    td_clip = td * (abs(td.data) <= 1) + td / abs(td_tmp) * (abs(td.data) > 1)

    zero_val = Variable(cuda.to_gpu(np.zeros((self.replay_size, self.num_of_actions), dtype=np.float32)))
    loss = F.mean_squared_error(td_clip, zero_val)
    return loss, Q
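Two details worth noting in the snippet above: entries of target other than the updated action_index are copied verbatim from Q, so their TD error is exactly zero and they add nothing to the loss; and the masking arithmetic keeps td where |td| <= 1 while replacing it with td / |td| (its sign) elsewhere, so F.mean_squared_error(td_clip, zero_val) amounts to a clipped, Huber-like squared error on the selected actions.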
Source file: NN.py (project: learning2rank, author: shiba24)
def __call__(self, x, t):
    h1 = F.relu(self.l1(x))
    h2 = F.relu(self.l2(h1))
    h = F.relu(self.l3(h2))
    self.loss = F.mean_squared_error(h, t)
    return self.loss
Source file: rnin.py (project: deel, author: uei)
def getLossDistill(self, x, t):
    self.loss = F.mean_squared_error(x, t)
    return self.loss
Source file: nin.py (project: deel, author: uei)
def getLossDistill(self, x, t):
    # Re-wrap the teacher signal in a fresh Variable (Chainer v1 API;
    # volatile='off' keeps graph construction enabled)
    _t = chainer.Variable(t.data, volatile='off')
    self.loss = F.mean_squared_error(x, _t)
    return self.loss
Source file: __init__.py (project: deel, author: uei)
def backprop(self, t, x=None):
    if x is None:
        x = Tensor.context
    # loss = F.mean_squared_error(x.content, t.content)
    loss = F.softmax_cross_entropy(x.content, t.content)
    if Deel.train:
        loss.backward()
    accuracy = F.accuracy(x.content, t.content)
    self.optimizer.update()
    return loss.data, accuracy.data
Source file: chain.py (project: chainer-examples, author: nocotan)
def __call__(self, x, t):
    h = F.sigmoid(self.l1(x))
    h = F.sigmoid(self.l2(h))
    y = F.mean_squared_error(h, t)
    return y
Source file: vfm.py (project: vfm, author: cemoody)
def __init__(self, n_features=None, n_dim=8, lossfun=F.mean_squared_error,
             lambda0=1, lambda1=1, lambda2=1, init_bias_mu=0.0,
             init_bias_lv=0.0, intx_term=True, total_nobs=1):
    self.n_dim = n_dim
    self.n_features = n_features
    self.lossfun = lossfun
    self.lambda0 = lambda0
    self.lambda1 = lambda1
    self.lambda2 = lambda2
    self.intx_term = intx_term
    self.total_nobs = total_nobs

    # In contrast to the FM model, the slopes and latent vectors
    # will have means (mu) and log variances (lv) for each component.
    super(VFM, self).__init__(bias_mu=L.Bias(shape=(1,)),
                              bias_lv=L.Bias(shape=(1,)),
                              slop_mu=L.Bias(shape=(1, 1)),
                              slop_lv=L.Bias(shape=(1, 1)),
                              slop_delta_mu=L.EmbedID(n_features, 1,
                                                      ignore_label=-1),
                              slop_delta_lv=L.EmbedID(n_features, 1,
                                                      ignore_label=-1),
                              feat_mu_vec=L.Bias(shape=(1, 1, n_dim)),
                              feat_lv_vec=L.Bias(shape=(1, 1, n_dim)),
                              feat_delta_mu=L.EmbedID(n_features, n_dim,
                                                      ignore_label=-1),
                              feat_delta_lv=L.EmbedID(n_features, n_dim,
                                                      ignore_label=-1))

    # Initialize weights near Xavier scale, shrunk by an extra factor of 1e3
    c = np.sqrt(n_features * n_dim) * 1e3
    d = np.sqrt(n_features) * 1e3
    self.feat_delta_mu.W.data[...] = np.random.randn(n_features, n_dim) / c
    self.feat_delta_lv.W.data[...] = np.random.randn(n_features, n_dim) / c
    self.slop_delta_mu.W.data[...] = np.random.randn(n_features, 1) / d
    self.slop_delta_lv.W.data[...] = np.random.randn(n_features, 1) / d
    self.bias_mu.b.data[...] *= 0.0
    self.bias_mu.b.data[...] += init_bias_mu
    self.bias_lv.b.data[...] *= 0.0
    self.bias_lv.b.data[...] += init_bias_lv
Source file: fm.py (project: vfm, author: cemoody)
def __init__(self, n_features=None, n_dim=8, lossfun=F.mean_squared_error,
             lambda0=5e-3, lambda1=5e-3, lambda2=5e-3, init_bias=0.0,
             intx_term=True, total_nobs=1):
    self.n_dim = n_dim
    self.n_features = n_features
    self.lossfun = lossfun
    self.lambda0 = lambda0
    self.lambda1 = lambda1
    self.lambda2 = lambda2
    self.intx_term = intx_term
    self.total_nobs = total_nobs

    # These are all the learned weights corresponding
    # to the overall bias, slope per feature, and latent
    # interaction vector per feature
    super(FM, self).__init__(bias=L.Bias(shape=(1,)),
                             slope=L.EmbedID(n_features, 1),
                             latent=L.EmbedID(n_features, n_dim))

    # Xavier initialize weights
    c = np.sqrt(n_features * n_dim)
    self.latent.W.data[...] = np.random.randn(n_features, n_dim) / c
    d = np.sqrt(n_features)
    self.slope.W.data[...] = np.random.randn(n_features, 1) / d
    self.bias.b.data[...] *= 0.0
    self.bias.b.data[...] += init_bias
Source file: ram.py (project: ram, author: amasky)
def __call__(self, x, t, train=True):
    x = chainer.Variable(self.xp.asarray(x), volatile=not train)
    t = chainer.Variable(self.xp.asarray(t), volatile=not train)
    bs = x.data.shape[0]  # batch size
    self.clear(bs, train)

    # init mean location
    l = np.random.uniform(-1, 1, size=(bs, 2)).astype(np.float32)
    l = chainer.Variable(self.xp.asarray(l), volatile=not train)

    # forward n_steps times
    sum_ln_pi = 0
    self.forward(x, train, action=False, init_l=l)
    for i in range(1, self.n_steps):
        action = True if (i == self.n_steps - 1) else False
        l, ln_pi, y, b = self.forward(x, train, action)
        if train:
            sum_ln_pi += ln_pi

    # loss with softmax cross entropy
    self.loss_action = F.softmax_cross_entropy(y, t)
    self.loss = self.loss_action
    self.accuracy = F.accuracy(y, t)

    if train:
        # reward: 1 if the prediction is correct, else 0
        conditions = self.xp.argmax(y.data, axis=1) == t.data
        r = self.xp.where(conditions, 1., 0.).astype(np.float32)

        # squared error between reward and baseline
        self.loss_base = F.mean_squared_error(r, b)
        self.loss += self.loss_base

        # loss with REINFORCE rule
        mean_ln_pi = sum_ln_pi / (self.n_steps - 1)
        self.loss_reinforce = F.sum(-mean_ln_pi * (r - b)) / bs
        self.loss += self.loss_reinforce

    return self.loss
Source file: iris.py (project: workspace, author: nojima)
def __call__(self, x, y):
    return F.mean_squared_error(self.forward(x), y)
Source file: iris.py (project: workspace, author: nojima)
def __call__(self, x):
    return F.mean_squared_error(self.forward(x), x)
Source file: ae.py (project: workspace, author: nojima)
def __call__(self, x: Variable) -> Variable:
    output = self.forward(x)
    return F.mean_squared_error(output, x)
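In the last two snippets the target of mean_squared_error is the input x itself, so the call serves as a reconstruction loss, the standard autoencoder objective.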
Source file: gan_rl_fitter.py (project: gan-rl, author: iaroslav-ai)
def __call__(self, X, D, G):
    D.reset_state()
    G.reset_state()

    r = 0.0

    mg = w_init
    for x in X:
        f = D(x, G(x))
        # target of ones with the same shape as f
        r += F.mean_squared_error(f, f * 0.0 + 1.0) * mg
        mg = 1.0

    return r
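The expression f * 0.0 + 1.0 builds a tensor of ones matching f's shape and device, so each term drives the discriminator output toward 1; the objective resembles a least-squares GAN loss. The weight w_init is presumably defined elsewhere in the source and scales only the first step's term.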
Source file: sklearn_wrapper.py (project: chainer_sklearn, author: corochann)
def __init__(self,
             predictor=None,
             lossfun=mean_squared_error,
             accfun=None,
             device=-1,
             **sk_params
             ):
    super(SklearnWrapperRegressor, self).__init__(
        predictor=predictor,
        lossfun=lossfun,
        accfun=accfun,
        device=device,
        **sk_params
    )
Source file: RL_Q_reversi.py (project: RL_reversi, author: ryogrid)
def __call__(self, x, t=None, train=False):
    h = F.leaky_relu(self.l1(x))
    h = F.leaky_relu(self.l2(h))
    h = F.leaky_relu(self.l3(h))
    h = self.l4(h)

    if train:
        return F.mean_squared_error(h, t)
    else:
        return h
Source file: RL_ttt.py (project: RL_reversi, author: ryogrid)
def __call__(self, x, t=None, train=False):
    h = F.leaky_relu(self.l1(x))
    h = F.leaky_relu(self.l2(h))
    h = F.leaky_relu(self.l3(h))
    h = self.l4(h)

    if train:
        return F.mean_squared_error(h, t)
    else:
        return h
Source file: seranet_v1.py (project: SeRanet, author: corochann)
def __call__(self, x, t=None):
    self.clear()
    h1 = F.leaky_relu(self.conv1(x), slope=0.1)
    h1 = F.leaky_relu(self.conv2(h1), slope=0.1)
    h1 = F.leaky_relu(self.conv3(h1), slope=0.1)

    h2 = self.seranet_v1_crbm(x)
    # Fusion
    h12 = F.concat((h1, h2), axis=1)

    lu = F.leaky_relu(self.convlu6(h12), slope=0.1)
    lu = F.leaky_relu(self.convlu7(lu), slope=0.1)
    lu = F.leaky_relu(self.convlu8(lu), slope=0.1)
    ru = F.leaky_relu(self.convru6(h12), slope=0.1)
    ru = F.leaky_relu(self.convru7(ru), slope=0.1)
    ru = F.leaky_relu(self.convru8(ru), slope=0.1)
    ld = F.leaky_relu(self.convld6(h12), slope=0.1)
    ld = F.leaky_relu(self.convld7(ld), slope=0.1)
    ld = F.leaky_relu(self.convld8(ld), slope=0.1)
    rd = F.leaky_relu(self.convrd6(h12), slope=0.1)
    rd = F.leaky_relu(self.convrd7(rd), slope=0.1)
    rd = F.leaky_relu(self.convrd8(rd), slope=0.1)

    # Splice
    h = CF.splice(lu, ru, ld, rd)

    h = F.leaky_relu(self.conv9(h), slope=0.1)
    h = F.leaky_relu(self.conv10(h), slope=0.1)
    h = F.leaky_relu(self.conv11(h), slope=0.1)
    h = F.clipped_relu(self.conv12(h), z=1.0)
    if self.train:
        self.loss = F.mean_squared_error(h, t)
        return self.loss
    else:
        return h

