Python softplus() usage examples (source code)

Source file: function.py (project: ddnn, author: kunglab)
def to_function(self):
        if self.nonlinearity.lower() == "clipped_relu":
            return clipped_relu()
        if self.nonlinearity.lower() == "crelu":
            return crelu()
        if self.nonlinearity.lower() == "elu":
            return elu()
        if self.nonlinearity.lower() == "hard_sigmoid":
            return hard_sigmoid()
        if self.nonlinearity.lower() == "leaky_relu":
            return leaky_relu()
        if self.nonlinearity.lower() == "relu":
            return relu()
        if self.nonlinearity.lower() == "sigmoid":
            return sigmoid()
        if self.nonlinearity.lower() == "softmax":
            return softmax()
        if self.nonlinearity.lower() == "softplus":
            return softplus()
        if self.nonlinearity.lower() == "tanh":
            return tanh()
        if self.nonlinearity.lower() == "bst":
            return bst()
        raise NotImplementedError()
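The chained comparisons above re-evaluate self.nonlinearity.lower() on every branch. The same dispatch can be written as one dictionary lookup; the sketch below is a hypothetical, self-contained variant that maps names directly to chainer.functions callables (the original file instead returns small wrapper objects, and its project-specific "bst" activation is omitted here):

import chainer.functions as F

# Hypothetical dict-based equivalent of to_function(): normalize the
# name once, then look up the matching chainer.functions callable.
ACTIVATIONS = {
    "clipped_relu": F.clipped_relu,
    "crelu": F.crelu,
    "elu": F.elu,
    "hard_sigmoid": F.hard_sigmoid,
    "leaky_relu": F.leaky_relu,
    "relu": F.relu,
    "sigmoid": F.sigmoid,
    "softmax": F.softmax,
    "softplus": F.softplus,
    "tanh": F.tanh,
}

def to_function(name):
    try:
        return ACTIVATIONS[name.lower()]
    except KeyError:
        raise NotImplementedError(name)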
Source file: functions.py (project: unrolled-gan, author: musyoku)
def to_function(self):
        if self.nonlinearity.lower() == "clipped_relu":
            return clipped_relu()
        if self.nonlinearity.lower() == "crelu":
            return crelu()
        if self.nonlinearity.lower() == "elu":
            return elu()
        if self.nonlinearity.lower() == "hard_sigmoid":
            return hard_sigmoid()
        if self.nonlinearity.lower() == "leaky_relu":
            return leaky_relu()
        if self.nonlinearity.lower() == "relu":
            return relu()
        if self.nonlinearity.lower() == "sigmoid":
            return sigmoid()
        if self.nonlinearity.lower() == "softmax":
            return softmax()
        if self.nonlinearity.lower() == "softplus":
            return softplus()
        if self.nonlinearity.lower() == "tanh":
            return tanh()
        raise NotImplementedError()
Source file: functions.py (project: LSGAN, author: musyoku)
def to_function(self):
        if self.nonlinearity.lower() == "clipped_relu":
            return clipped_relu()
        if self.nonlinearity.lower() == "crelu":
            return crelu()
        if self.nonlinearity.lower() == "elu":
            return elu()
        if self.nonlinearity.lower() == "hard_sigmoid":
            return hard_sigmoid()
        if self.nonlinearity.lower() == "leaky_relu":
            return leaky_relu()
        if self.nonlinearity.lower() == "relu":
            return relu()
        if self.nonlinearity.lower() == "sigmoid":
            return sigmoid()
        if self.nonlinearity.lower() == "softmax":
            return softmax()
        if self.nonlinearity.lower() == "softplus":
            return softplus()
        if self.nonlinearity.lower() == "tanh":
            return tanh()
        raise NotImplementedError()
Source file: functions.py (project: adgm, author: musyoku)
def to_function(self):
        if self.nonlinearity.lower() == "clipped_relu":
            return clipped_relu()
        if self.nonlinearity.lower() == "crelu":
            return crelu()
        if self.nonlinearity.lower() == "elu":
            return elu()
        if self.nonlinearity.lower() == "hard_sigmoid":
            return hard_sigmoid()
        if self.nonlinearity.lower() == "leaky_relu":
            return leaky_relu()
        if self.nonlinearity.lower() == "relu":
            return relu()
        if self.nonlinearity.lower() == "sigmoid":
            return sigmoid()
        if self.nonlinearity.lower() == "softmax":
            return softmax()
        if self.nonlinearity.lower() == "softplus":
            return softplus()
        if self.nonlinearity.lower() == "tanh":
            return tanh()
        raise NotImplementedError()
Source file: loss_functions.py (project: chainer-gan-experiments, author: Aixile)
def loss_func_dcgan_dis_real(y_real):
    return F.sum(F.softplus(-y_real)) / np.prod(y_real.data.shape)
Source file: loss_functions.py (project: chainer-gan-experiments, author: Aixile)
def loss_func_dcgan_dis_fake(y_fake):
    return F.sum(F.softplus(y_fake)) / np.prod(y_fake.data.shape)
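Both losses rest on two softplus identities: softplus(-y) = -log(sigmoid(y)) and softplus(y) = -log(1 - sigmoid(y)). The two functions above are therefore the mean negative log-likelihood of the discriminator labeling real samples real and fake samples fake. A quick NumPy check of both identities:

import numpy as np

# softplus(-y) == -log(sigmoid(y)); softplus(y) == -log(1 - sigmoid(y))
y = np.linspace(-5.0, 5.0, 11)
sig = 1.0 / (1.0 + np.exp(-y))
assert np.allclose(np.log1p(np.exp(-y)), -np.log(sig))
assert np.allclose(np.log1p(np.exp(y)), -np.log(1.0 - sig))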
Source file: loss_functions.py (project: chainer-gan-experiments, author: Aixile)
def loss_sigmoid_cross_entropy_with_logits(x, t):
    return F.average(x - x*t + F.softplus(-x))  # / x.data.shape[0]
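The expression x - x*t + softplus(-x) is the standard logits form of binary cross-entropy: substituting log(sigmoid(x)) = -softplus(-x) and log(1 - sigmoid(x)) = -x - softplus(-x) into -[t*log(sigmoid(x)) + (1 - t)*log(1 - sigmoid(x))] collapses to exactly this term. A NumPy sanity check:

import numpy as np

# Compare the closed form against cross-entropy computed from probabilities.
rng = np.random.default_rng(0)
x = rng.normal(size=100)                        # logits
t = rng.integers(0, 2, size=100).astype(float)  # binary targets
closed_form = x - x * t + np.log1p(np.exp(-x))
sig = 1.0 / (1.0 + np.exp(-x))
direct = -(t * np.log(sig) + (1.0 - t) * np.log(1.0 - sig))
assert np.allclose(closed_form, direct)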
Source file: gaussian_policy.py (project: chainerrl, author: chainer)
def compute_mean_and_var(self, x):
        h = x
        for layer in self.hidden_layers:
            h = self.nonlinearity(layer(h))
        mean = self.mean_layer(h)
        if self.bound_mean:
            mean = bound_by_tanh(mean, self.min_action, self.max_action)
        var = F.broadcast_to(F.softplus(self.var_layer(h)), mean.shape) + \
            self.min_var
        return mean, var
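softplus maps the unconstrained output of var_layer to a strictly positive value, and min_var floors it so the policy's Gaussian cannot collapse to zero variance. A small NumPy illustration with a hypothetical floor:

import numpy as np

# softplus is positive for any real input; the (hypothetical) floor
# min_var keeps the variance bounded away from zero.
raw = np.array([-10.0, -1.0, 0.0, 1.0, 10.0])
min_var = 1e-2
var = np.log1p(np.exp(raw)) + min_var
assert (var > min_var).all()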
Source file: gaussian_policy.py (project: chainerrl, author: chainer)
def __call__(self, x):
        mean = self.hidden_layers(x)
        var = F.broadcast_to(
            F.softplus(self.var_param),
            mean.shape)
        return distribution.GaussianDistribution(mean, var)
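Unlike the state-dependent variant above, the variance here comes from a single learned parameter (var_param) pushed through softplus and broadcast to the batch shape, giving a state-independent covariance.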
Source file: gaussian_policy.py (project: chainerrl, author: chainer)
def compute_mean_and_var(self, x):
        # mean = self.mean_layer(x)
        mean = F.tanh(self.mean_layer(x)) * 2.0
        var = F.softplus(self.var_layer(x))
        return mean, var
Source file: gaussian_policy.py (project: chainerrl, author: chainer)
def compute_mean_and_var(self, x):
        # mean = self.mean_layer(x)
        mean = F.tanh(self.mean_layer(x)) * 2.0
        var = F.softplus(F.broadcast_to(self.var_layer(x), mean.data.shape))
        return mean, var
Source file: nn.py (project: chainer-speech-recognition, author: musyoku)
def __call__(self, x):
        return functions.softplus(x, self.beta)
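Here the second argument is the sharpness parameter: functions.softplus(x, beta) computes log(1 + exp(beta * x)) / beta (see the test below), which tends to relu(x) as beta grows.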
Source file: test_softplus.py (project: chainer-deconv, author: germanRos)
def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.softplus(x, beta=self.beta)
        x_value = cuda.to_cpu(x_data)
        y_exp = numpy.log(1 + numpy.exp(self.beta * x_value)) / self.beta
        self.assertEqual(y.data.dtype, self.dtype)
        gradient_check.assert_allclose(
            y_exp, y.data, **self.check_forward_options)
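The reference expression log(1 + exp(beta * x)) / beta used in this test overflows once beta * x is large, since exp() overflows float64 near 710. A numerically stable rewrite, as a hedged sketch:

import numpy as np

def stable_softplus(x, beta=1.0):
    # softplus(z) = max(z, 0) + log1p(exp(-|z|)) never exponentiates a
    # large positive number, so it cannot overflow.
    z = beta * np.asarray(x, dtype=np.float64)
    return (np.maximum(z, 0.0) + np.log1p(np.exp(-np.abs(z)))) / beta

# Agrees with the naive formula over a range where the latter is safe.
x = np.linspace(-30.0, 30.0, 13)
assert np.allclose(stable_softplus(x), np.log1p(np.exp(x)))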
Source file: function.py (project: ddnn, author: kunglab)
def __init__(self, use_cudnn=True):
        self._function = "softplus"
        self.use_cudnn = use_cudnn
Source file: function.py (project: ddnn, author: kunglab)
def __call__(self, x):
        # Note: in current Chainer, the second positional argument of
        # F.softplus is beta, not use_cudnn; True coerces to beta=1.0,
        # so this still computes the default softplus.
        return F.softplus(x, self.use_cudnn)
Source file: nn.py (project: adversarial-autoencoder, author: musyoku)
def __call__(self, x):
        return functions.softplus(x, self.beta)
Source file: functions.py (project: unrolled-gan, author: musyoku)
def __init__(self, use_cudnn=True):
        self._function = "softplus"
        self.use_cudnn = use_cudnn
Source file: functions.py (project: unrolled-gan, author: musyoku)
def __call__(self, x):
        return F.softplus(x, self.use_cudnn)
Source file: functions.py (project: LSGAN, author: musyoku)
def __init__(self, use_cudnn=True):
        self._function = "softplus"
        self.use_cudnn = use_cudnn
Source file: functions.py (project: LSGAN, author: musyoku)
def __call__(self, x):
        return F.softplus(x, self.use_cudnn)
Source file: functions.py (project: adgm, author: musyoku)
def __init__(self, use_cudnn=True):
        self._function = "softplus"
        self.use_cudnn = use_cudnn
Source file: functions.py (project: adgm, author: musyoku)
def __call__(self, x):
        return F.softplus(x, self.use_cudnn)
Source file: adgm.py (project: adgm, author: musyoku)
def bernoulli_nll_keepbatch(self, x, y):
        nll = F.softplus(y) - x * y
        return F.sum(nll, axis=1)
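This is the same logits-form binary cross-entropy as loss_sigmoid_cross_entropy_with_logits above: since softplus(y) = y + softplus(-y), the per-pixel term softplus(y) - x*y equals y - x*y + softplus(-y); here it is summed over each example's pixels instead of averaged.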
Source file: vae_m2.py (project: variational-autoencoder, author: musyoku)
def __init__(self):
        self.image_width = 28
        self.image_height = 28
        self.ndim_x = 28 * 28
        self.ndim_y = 10
        self.ndim_z = 50

        # True : y = f(BN(Wx + b))
        # False: y = f(W*BN(x) + b)
        self.batchnorm_before_activation = True

        # gaussianmarg | gaussian
        self.type_pz = "gaussianmarg"
        self.type_qz = "gaussianmarg"

        self.encoder_xy_z_hidden_units = [500]
        self.encoder_xy_z_activation_function = "softplus"
        self.encoder_xy_z_apply_dropout = False
        self.encoder_xy_z_apply_batchnorm = True
        self.encoder_xy_z_apply_batchnorm_to_input = True

        self.encoder_x_y_hidden_units = [500]
        self.encoder_x_y_activation_function = "softplus"
        self.encoder_x_y_apply_dropout = False
        self.encoder_x_y_apply_batchnorm = True
        self.encoder_x_y_apply_batchnorm_to_input = True

        self.decoder_hidden_units = [500]
        self.decoder_activation_function = "softplus"
        self.decoder_apply_dropout = False
        self.decoder_apply_batchnorm = True
        self.decoder_apply_batchnorm_to_input = True

        self.gpu_enabled = True
        self.learning_rate = 0.0003
        self.gradient_momentum = 0.9
        self.gradient_clipping = 5.0
Source file: vae_m2.py (project: variational-autoencoder, author: musyoku)
def bernoulli_nll_keepbatch(self, x, y):
        nll = F.softplus(y) - x * y
        return F.sum(nll, axis=1)
Source file: vae_m2.py (project: variational-autoencoder, author: musyoku)
def __init__(self, **layers):
        super(SoftmaxEncoder, self).__init__(**layers)
        self.activation_function = "softplus"
        self.apply_batchnorm_to_input = True
        self.apply_batchnorm = True
        self.apply_dropout = False
        self.batchnorm_before_activation = True
Source file: vae_m2.py (project: variational-autoencoder, author: musyoku)
def __init__(self, **layers):
        super(GaussianEncoder, self).__init__(**layers)
        self.activation_function = "softplus"
        self.apply_batchnorm_to_input = True
        self.apply_batchnorm = True
        self.apply_dropout = False
        self.batchnorm_before_activation = True
Source file: vae_m1.py (project: variational-autoencoder, author: musyoku)
def __init__(self):
        self.image_width = 28
        self.image_height = 28
        self.ndim_x = 28 * 28
        self.ndim_z = 100
        self.batchnorm_before_activation = True

        # gaussianmarg | gaussian
        # We recommend using "gaussianmarg" when the decoder is gaussian.
        self.type_pz = "gaussianmarg"
        self.type_qz = "gaussianmarg"

        # e.g.
        # ndim_x (input) -> 2000 -> 1000 -> 100 (output)
        # encoder_hidden_units = [2000, 1000]
        self.encoder_hidden_units = [600, 600]
        self.encoder_activation_function = "softplus"
        self.encoder_apply_dropout = True
        self.encoder_apply_batchnorm = True
        self.encoder_apply_batchnorm_to_input = True

        self.decoder_hidden_units = [600, 600]
        self.decoder_activation_function = "softplus"
        self.decoder_apply_dropout = True
        self.decoder_apply_batchnorm = True
        self.decoder_apply_batchnorm_to_input = True

        self.gpu_enabled = True
        self.learning_rate = 0.0003
        self.gradient_momentum = 0.9
        self.gradient_clipping = 1.0
Source file: vae_m1.py (project: variational-autoencoder, author: musyoku)
def __init__(self, **layers):
        super(Encoder, self).__init__(**layers)
        self.activation_function = "softplus"
        self.apply_batchnorm_to_input = True
        self.apply_batchnorm = True
        self.apply_dropout = True
        self.batchnorm_before_activation = True
Source file: vae_m1.py (project: variational-autoencoder, author: musyoku)
def __init__(self, **layers):
        super(BernoulliDecoder, self).__init__(**layers)
        self.activation_function = "softplus"
        self.apply_batchnorm_to_input = True
        self.apply_batchnorm = True
        self.apply_dropout = True
        self.batchnorm_before_activation = True

