Python code examples for binary_crossentropy()

Source file: 5_variational_autoencoder.py · Project: DLPlaying · Author: Honlan
def vae_loss(x, x_decoded_mean):
    # reconstruction term: per-pixel binary cross-entropy, scaled up to a sum
    # over the original_dim input features
    xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    # KL divergence between the approximate posterior N(z_mean, exp(z_log_var)) and N(0, I)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
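Note: this vae_loss closes over tensors and constants (original_dim, z_mean, z_log_var) defined in the surrounding model graph, so it is not runnable in isolation. The following is a minimal sketch of the wiring it assumes, modeled on the classic Keras MNIST VAE example; the layer sizes, optimizer, and added variable names are illustrative and not taken from the DLPlaying project.

from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K
from keras import objectives  # renamed keras.losses in later Keras releases

original_dim = 784       # flattened 28x28 MNIST images
intermediate_dim = 256
latent_dim = 2

x = Input(shape=(original_dim,))
h = Dense(intermediate_dim, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)

def sampling(args):
    # reparameterization trick: z = mu + sigma * epsilon
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim))
    return z_mean + K.exp(z_log_var / 2) * epsilon

z = Lambda(sampling)([z_mean, z_log_var])
h_decoded = Dense(intermediate_dim, activation='relu')(z)
x_decoded_mean = Dense(original_dim, activation='sigmoid')(h_decoded)

def vae_loss(x, x_decoded_mean):
    xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss

vae = Model(x, x_decoded_mean)
vae.compile(optimizer='rmsprop', loss=vae_loss)
# vae.fit(x_train, x_train, batch_size=100, epochs=50) on binarized, flattened images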
Source file: VAE_mnist.py · Project: Siamese · Author: ascourge21
def vae_loss(x, x_decoded_mean):
    # unlike the previous example, the reconstruction term is not scaled by the input
    # dimension and the KL term is averaged (K.mean) rather than summed over latent units
    xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
    return xent_loss + kl_loss
Source file: VAELeuven.py · Project: Siamese · Author: ascourge21
def vae_loss(x, x_decoded_mean):
    xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
    return xent_loss + kl_loss
Source file: model.py · Project: latplan · Author: guicho271828
def _build(self,input_shape):
        _encoder = self.build_encoder(input_shape)
        _decoder = self.build_decoder(input_shape)
        self.gs = self.build_gs()
        self.gs2 = self.build_gs()

        x = Input(shape=input_shape)
        # autoencoder path: encode x, discretize with the Gumbel-Softmax layer, then decode
        z = Sequential([flatten, *_encoder, self.gs])(x)
        y = Sequential(_decoder)(flatten(z))

        # second path starting from an externally supplied latent code z2:
        # decode it to y2, then re-encode y2 through the second Gumbel-Softmax layer
        z2 = Input(shape=(self.parameters['N'], self.parameters['M']))
        y2 = Sequential(_decoder)(flatten(z2))
        w2 = Sequential([*_encoder, self.gs2])(flatten(y2))

        data_dim = np.prod(input_shape)
        def rec(x, y):
            # reconstruction error: binary cross-entropy over the flattened inputs
            #return K.mean(K.binary_crossentropy(x,y))
            return bce(K.reshape(x,(K.shape(x)[0],data_dim,)),
                       K.reshape(y,(K.shape(x)[0],data_dim,)))

        def loss(x, y):
            # total loss = reconstruction error + the Gumbel-Softmax variational term
            return rec(x,y) + self.gs.loss()

        # anneal the Gumbel-Softmax temperatures after every epoch and expose tau for logging
        self.callbacks.append(LambdaCallback(on_epoch_end=self.gs.cool))
        self.callbacks.append(LambdaCallback(on_epoch_end=self.gs2.cool))
        self.custom_log_functions['tau'] = lambda: K.get_value(self.gs.tau)
        self.loss = loss
        self.metrics.append(rec)
        self.encoder     = Model(x, z)
        self.decoder     = Model(z2, y2)
        self.autoencoder = Model(x, y)
        self.autodecoder = Model(z2, w2)
        self.net = self.autoencoder
        y2_downsample = Sequential([
            Reshape((*input_shape,1)),
            MaxPooling2D((2,2))
            ])(y2)
        shape = K.int_shape(y2_downsample)[1:3]
        self.decoder_downsample = Model(z2, Reshape(shape)(y2_downsample))
        self.features = Model(x, Sequential([flatten, *_encoder[:-2]])(x))
        if 'lr_epoch' in self.parameters:
            ratio = self.parameters['lr_epoch']
        else:
            ratio = 0.5
        # drop the learning rate to 10% once `ratio` of the full training epochs have elapsed
        self.callbacks.append(
            LearningRateScheduler(
                lambda epoch: self.parameters['lr']
                if epoch < self.parameters['full_epoch'] * ratio
                else self.parameters['lr'] * 0.1))
        self.custom_log_functions['lr'] = lambda: K.get_value(self.net.optimizer.lr)
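Both latplan snippets rely on the object returned by build_gs(): a Gumbel-Softmax layer that is callable on logits, exposes a temperature tau, an epoch-end annealing hook cool, and a variational loss() term. That helper is defined elsewhere in the latplan code base; the sketch below is a hypothetical minimal stand-in illustrating the standard Gumbel-Softmax relaxation. The class name, default temperature schedule, and the uniform-prior KL term are assumptions for illustration, not latplan's actual implementation.

import numpy as np
from keras import backend as K

class GumbelSoftmaxSketch:
    # hypothetical stand-in for the object returned by build_gs()
    def __init__(self, N, M, tau_start=5.0, anneal_rate=0.95, tau_min=0.1):
        self.N, self.M = N, M
        self.tau = K.variable(tau_start)
        self.anneal_rate, self.tau_min = anneal_rate, tau_min

    def __call__(self, logits):
        # logits: (batch, N, M); add Gumbel(0,1) noise and take a softened argmax
        self.logits = logits
        u = K.random_uniform(K.shape(logits), 0, 1)
        gumbel = -K.log(-K.log(u + 1e-20) + 1e-20)
        return K.softmax((logits + gumbel) / self.tau)

    def loss(self):
        # KL(q || uniform over M classes) = sum_i q_i log q_i + log M
        q = K.softmax(self.logits)
        return K.mean(K.sum(q * K.log(q + 1e-20), axis=-1) + np.log(self.M))

    def cool(self, epoch, logs=None):
        # hooked into LambdaCallback(on_epoch_end=...): lower tau each epoch
        K.set_value(self.tau, max(self.tau_min,
                                  K.get_value(self.tau) * self.anneal_rate))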
Source file: model.py · Project: latplan · Author: guicho271828
def _build(self,input_shape):

        dim = np.prod(input_shape) // 2
        print("{} latent bits".format(dim))
        M, N = self.parameters['M'], self.parameters['N']

        x = Input(shape=input_shape)

        # split the concatenated transition into its first half (pre-state)
        # and its second half (successor state)
        _pre = tf.slice(x, [0,0],   [-1,dim])
        _suc = tf.slice(x, [0,dim], [-1,dim])

        pre = wrap(x,_pre,name="pre")
        suc = wrap(x,_suc,name="suc")

        print("encoder")
        _encoder = self.build_encoder([dim])
        action_logit = ConditionalSequential(_encoder, pre, axis=1)(suc)

        gs = self.build_gs()
        action = gs(action_logit)

        print("decoder")
        _decoder = self.build_decoder([dim])
        suc_reconstruction = ConditionalSequential(_decoder, pre, axis=1)(flatten(action))
        y = Concatenate(axis=1)([pre,suc_reconstruction])

        action2 = Input(shape=(N,M))
        pre2    = Input(shape=(dim,))
        suc_reconstruction2 = ConditionalSequential(_decoder, pre2, axis=1)(flatten(action2))
        y2 = Concatenate(axis=1)([pre2,suc_reconstruction2])

        def rec(x, y):
            # binary cross-entropy over the flattened (pre, suc) pair
            return bce(K.reshape(x,(K.shape(x)[0],dim*2,)),
                       K.reshape(y,(K.shape(x)[0],dim*2,)))
        def loss(x, y):
            # total loss = reconstruction error + the Gumbel-Softmax variational term
            kl_loss = gs.loss()
            reconstruction_loss = rec(x, y)
            return reconstruction_loss + kl_loss

        self.metrics.append(rec)
        self.callbacks.append(LambdaCallback(on_epoch_end=gs.cool))
        self.custom_log_functions['tau'] = lambda: K.get_value(gs.tau)
        self.loss = loss
        self.encoder     = Model(x, [pre,action])
        self.decoder     = Model([pre2,action2], y2)

        self.net = Model(x, y)
        self.autoencoder = self.net

