def _build(self, input_shape):
    """Build a binary MLP classifier: flatten -> num_layers x (BN, Dense, Dropout) -> sigmoid.

    Sets self.loss and self.net, and registers gradient-based early stopping.
    (Removed the unused local ``N = input_shape[0] // 2`` and stale
    commented-out scheduling/logging code.)
    """
    x = Input(shape=input_shape)
    hidden = [Sequential([BN(),
                          Dense(self.parameters['layer'],
                                activation=self.parameters['activation']),
                          Dropout(self.parameters['dropout'])])
              for _ in range(self.parameters['num_layers'])]
    y = Sequential([flatten, *hidden, Dense(1, activation="sigmoid")])(x)
    self.loss = bce
    self.net = Model(x, y)
    # Stop training once the loss stops improving fast enough over epochs.
    self.callbacks.append(
        GradientEarlyStopping(verbose=1, epoch=50,
                              min_grad=self.parameters['min_grad']))
python类binary_crossentropy()的实例源码
def _build(self, input_shape):
    """Assemble the autoencoder: encoder x->z, decoder z->y, plus a
    standalone decoder path driven directly from a latent input z2."""
    x = Input(shape=input_shape)
    enc_layers = self.build_encoder(input_shape)
    dec_layers = self.build_decoder(input_shape)
    z = Sequential([flatten, *enc_layers])(x)
    self.encoder = Model(x, z)
    y = Sequential(dec_layers)(flatten(z))
    self.net = Model(x, y)
    self.autoencoder = self.net
    # Second input lets callers decode an arbitrary latent code.
    z2 = Input(shape=K.int_shape(z)[1:])
    y2 = Sequential(dec_layers)(flatten(z2))
    self.decoder = Model(z2, y2)
    self.loss = bce
def report(self, train_data,
           epoch=200, batch_size=1000, optimizer=None,
           test_data=None,
           train_data_to=None,
           test_data_to=None,):
    """Print reconstruction BCE on train data (and validation data).

    Returns self for chaining.

    Fix: the default was ``optimizer=Adam(0.001)`` — a mutable default
    argument. The Adam instance (with its internal moment state) was
    created once at definition time and silently shared across calls.
    ``None`` now means "a fresh Adam(0.001) per call", which is
    backward-compatible for all callers.
    """
    if optimizer is None:
        optimizer = Adam(0.001)
    # Fall back to train data when no separate validation set is given.
    test_data = train_data if test_data is None else test_data
    train_data_to = train_data if train_data_to is None else train_data_to
    test_data_to = test_data if test_data_to is None else test_data_to
    opts = {'verbose': 0, 'batch_size': batch_size}
    def test_both(msg, fn):
        # Evaluate on train data, then on the (possibly identical) validation data.
        print(msg.format(fn(train_data)))
        if test_data is not None:
            print((msg + " (validation)").format(fn(test_data)))
    self.autoencoder.compile(optimizer=optimizer, loss=bce)
    test_both("Reconstruction BCE: {}",
              lambda data: self.autoencoder.evaluate(data, data, **opts))
    return self
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
    """Build the convolutional VAE encoder head.

    Returns a tuple (vae_loss, z) where z is sampled from the learned
    (z_mean, z_log_var) via the reparameterization trick, and vae_loss
    closes over those tensors.
    NOTE(review): ``std=`` in K.random_normal and the ``objectives`` module
    are Keras 1.x API — confirm which Keras version this targets.
    """
    h = Convolution1D(9, 9, activation = 'relu', name='conv_1')(x)
    h = Convolution1D(9, 9, activation = 'relu', name='conv_2')(h)
    h = Convolution1D(10, 11, activation = 'relu', name='conv_3')(h)
    h = Flatten(name='flatten_1')(h)
    h = Dense(435, activation = 'relu', name='dense_1')(h)
    def sampling(args):
        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I),
        # where sigma = exp(z_log_var / 2).
        z_mean_, z_log_var_ = args
        batch_size = K.shape(z_mean_)[0]
        epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., std = epsilon_std)
        return z_mean_ + K.exp(z_log_var_ / 2) * epsilon
    z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
    z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)
    def vae_loss(x, x_decoded_mean):
        # binary_crossentropy averages over the last axis, so flatten first
        # and rescale by max_length to get a per-sequence sum.
        x = K.flatten(x)
        x_decoded_mean = K.flatten(x_decoded_mean)
        xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
        # KL(q(z|x) || N(0, I)), averaged over latent dimensions.
        kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis = -1)
        return xent_loss + kl_loss
    return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))
def vae_loss(x, x_hat):
    """VAE loss: KL term plus either BCE or MSE reconstruction, per use_loss.

    Closes over n, use_loss, z_mean, z_log_var from the enclosing scope.
    """
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    xent_loss = n * objectives.binary_crossentropy(x, x_hat)
    mse_loss = n * objectives.mse(x, x_hat)
    if use_loss == 'xent':
        return xent_loss + kl_loss
    elif use_loss == 'mse':
        return mse_loss + kl_loss
    else:
        # Was ``raise Expception, 'Nonknow loss!'`` — Python-2-only raise
        # syntax (a SyntaxError on Python 3) naming a misspelled, undefined
        # exception class.
        raise ValueError('Unknown loss: {}'.format(use_loss))
def vae_loss(x, x_decoded_mean):
    """Summed reconstruction BCE plus KL divergence (closes over z_mean, z_log_var)."""
    # Mean per-unit BCE rescaled to a sum over all original_dim input units.
    reconstruction = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    # KL(q(z|x) || N(0, I)) summed over latent dimensions.
    divergence = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return reconstruction + divergence
variational_autoencoder_deconv.py 文件源码
项目:VAE_NOTES
作者: FanhuaandLuomu
项目源码
文件源码
阅读 24
收藏 0
点赞 0
评论 0
def vae_loss(x, x_decoded_mean):
    """VAE loss for images: summed pixel BCE plus KL divergence."""
    # binary_crossentropy expects (batch_size, dim) tensors, so both the
    # target and the reconstruction are flattened before the loss.
    x_flat = K.flatten(x)
    decoded_flat = K.flatten(x_decoded_mean)
    xent = img_rows * img_cols * objectives.binary_crossentropy(x_flat, decoded_flat)
    kl = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent + kl
# input_shape: (100,1,28,28)
# output_shape: (100,1,28,28)
def _load(self):
    """Restore parameters from aux.json, rebuild the network, and compile it."""
    import json
    with open(self.local('aux.json'), 'r') as f:
        saved = json.load(f)
    self.parameters = saved["parameters"]
    # input_shape was serialized as a list; build() expects a tuple.
    self.build(tuple(saved["input_shape"]))
    self.net.compile(Adam(0.0001), bce)
def report(self,train_data,
           test_data=None,
           train_data_to=None,
           test_data_to=None,
           batch_size=1000,
           **kwargs):
    """Print reconstruction MSE under clean and noise-corrupted inputs.

    Evaluates the autoencoder on train data (and validation data, when
    provided) for clean input plus gaussian/salt/pepper corruption, then
    reports the mean latent activation. Returns self for chaining.
    """
    # Fall back to the train data when no separate validation set is given.
    test_data = train_data if test_data is None else test_data
    train_data_to = train_data if train_data_to is None else train_data_to
    test_data_to = test_data if test_data_to is None else test_data_to
    opts = {'verbose':0,'batch_size':batch_size}
    def test_both(msg, fn):
        # Run the same evaluation on train and on validation data.
        print(msg.format(fn(train_data)))
        if test_data is not None:
            print((msg+" (validation)").format(fn(test_data)))
    self.autoencoder.compile(optimizer='adam', loss=mse)
    test_both("Reconstruction MSE: {}",
              lambda data: self.autoencoder.evaluate(data,data,**opts))
    # Robustness checks: reconstruct the clean target from corrupted input.
    test_both("Reconstruction MSE (gaussian 0.3): {}",
              lambda data: self.autoencoder.evaluate(gaussian(data),data,**opts))
    test_both("Reconstruction MSE (salt 0.06): {}",
              lambda data: self.autoencoder.evaluate(salt(data),data,**opts))
    test_both("Reconstruction MSE (pepper 0.06): {}",
              lambda data: self.autoencoder.evaluate(pepper(data),data,**opts))
    # self.autoencoder.compile(optimizer=optimizer, loss=bce)
    # test_both("Reconstruction BCE: {}",
    #           lambda data: self.autoencoder.evaluate(data,data,**opts))
    # test_both("Noise reconstruction BCE (gaussian 0.3): {}",
    #           lambda data: self.autoencoder.evaluate(gaussian(data),data,**opts))
    # test_both("Noise reconstruction BCE (salt 0.1): {}",
    #           lambda data: self.autoencoder.evaluate(salt(data),data,**opts))
    # test_both("Noise reconstruction BCE (pepper 0.1): {}",
    #           lambda data: self.autoencoder.evaluate(pepper(data),data,**opts))
    # NOTE(review): this lambda ignores its ``data`` argument, so the
    # "(validation)" line repeats the train-data figure — confirm intended.
    test_both("Latent activation: {}",
              lambda data: self.encode_binary(train_data,batch_size=batch_size,).mean())
    return self
def _build(self,input_shape):
    """Build a Gumbel-Softmax discrete autoencoder.

    Main path: x -> encoder -> gs (discrete latent) -> decoder -> gs2 -> y.
    A second path z2 -> decoder -> gs3 -> y2 exposes a standalone decoder.
    Loss = reconstruction BCE + annealing losses of gs and gs2.
    """
    data_dim = np.prod(input_shape)
    self.gs = self.build_gs()
    # Separate Gumbel-Softmax layers for the two decoder instantiations.
    self.gs2 = self.build_gs(N=data_dim)
    self.gs3 = self.build_gs(N=data_dim)
    _encoder = self.build_encoder(input_shape)
    _decoder = self.build_decoder(input_shape)
    x = Input(shape=input_shape)
    z = Sequential([flatten, *_encoder, self.gs])(x)
    y = Sequential([flatten,
                    *_decoder,
                    self.gs2,
                    Lambda(take_true),
                    Reshape(input_shape)])(z)
    # Direct latent input; shape (N, M) categorical codes per parameters.
    z2 = Input(shape=(self.parameters['N'], self.parameters['M']))
    y2 = Sequential([flatten,
                     *_decoder,
                     self.gs3,
                     Lambda(take_true),
                     Reshape(input_shape)])(z2)
    def rec(x, y):
        # Reconstruction BCE over inputs/outputs reshaped to (batch, data_dim).
        return bce(K.reshape(x,(K.shape(x)[0],data_dim,)),
                   K.reshape(y,(K.shape(x)[0],data_dim,)))
    def loss(x, y):
        # Total loss: reconstruction + GS annealing regularizers.
        # NOTE(review): gs3's loss is not included — confirm intended.
        return rec(x,y) + self.gs.loss() + self.gs2.loss()
    # Anneal each Gumbel-Softmax temperature at the end of every epoch.
    self.callbacks.append(LambdaCallback(on_epoch_end=self.gs.cool))
    self.callbacks.append(LambdaCallback(on_epoch_end=self.gs2.cool))
    self.callbacks.append(LambdaCallback(on_epoch_end=self.gs3.cool))
    # Log the current temperature alongside training metrics.
    self.custom_log_functions['tau'] = lambda: K.get_value(self.gs.tau)
    self.loss = loss
    self.metrics.append(rec)
    self.encoder = Model(x, z)
    self.decoder = Model(z2, y2)
    self.net = Model(x, y)
    self.autoencoder = self.net
def _build(self,input_shape):
    """Build a convolutional binary classifier with a sigmoid output."""
    x = Input(shape=input_shape)
    y = Sequential([
        # Three conv blocks, each followed by BN, dropout, and 2x2 pooling.
        Convolution2D(self.parameters['clayer'], (3,3), padding='same', activation=self.parameters['activation']),
        BN(),
        Dropout(self.parameters['dropout']),
        MaxPooling2D((2,2)),
        Convolution2D(self.parameters['clayer'], (3,3), padding='same', activation=self.parameters['activation']),
        BN(),
        Dropout(self.parameters['dropout']),
        MaxPooling2D((2,2)),
        Convolution2D(self.parameters['clayer'], (3,3), padding='same', activation=self.parameters['activation']),
        BN(),
        Dropout(self.parameters['dropout']),
        MaxPooling2D((2,2)),
        flatten,
        Dense(self.parameters['layer'], activation=self.parameters['activation']),
        # BN(),
        # Dropout(self.parameters['dropout'])
        # *[Sequential([,])
        # for i in range(self.parameters['num_layers']) ],
        Dense(1,activation="sigmoid")
    ])(x)
    def loss(x,y):
        # Thin wrapper around bce; NOTE(review): functionally identical to
        # assigning bce directly — possibly kept so the loss is named 'loss'.
        return bce(x,y)
    self.loss = loss
    self.net = Model(x, y)
def _build(self,input_shape):
    """Build a per-action classifier bank.

    The input concatenates a state vector (first N units) with an action
    indicator (last num_actions units, presumably one-hot — confirm with
    callers). One small MLP is built per action; the output selects the
    active action's score via a dot product with the action part.
    """
    num_actions = 128
    N = input_shape[0] - num_actions
    x = Input(shape=input_shape)
    # Split the input into the state part and the action-indicator part.
    pre = wrap(x,tf.slice(x, [0,0], [-1,N]),name="pre")
    action = wrap(x,tf.slice(x, [0,N], [-1,num_actions]),name="action")
    ys = []
    for i in range(num_actions):
        # A fresh MLP per action, all applied to the same state tensor.
        _x = Input(shape=(N,))
        _y = Sequential([
            flatten,
            *[Sequential([BN(),
                          Dense(self.parameters['layer'],activation=self.parameters['activation']),
                          Dropout(self.parameters['dropout']),])
              for i in range(self.parameters['num_layers']) ],
            Dense(1,activation="sigmoid")
        ])(_x)
        _m = Model(_x,_y,name="action_"+str(i))
        ys.append(_m(pre))
    ys = Concatenate()(ys)
    # Dot with the action indicator picks out the i-th classifier's output.
    y = Dot(-1)([ys,action])
    self.loss = bce
    self.net = Model(x, y)
    self.callbacks.append(GradientEarlyStopping(verbose=1,epoch=50,min_grad=self.parameters['min_grad']))
def vae_loss(x, x_decoded_mean):
    """Negative ELBO for a Gaussian-latent VAE on flat inputs."""
    # KL(q(z|x) || N(0, I)) summed over latent dimensions.
    kl_term = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    # Mean BCE rescaled to a sum over the original_dim input units.
    recon_term = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    return recon_term + kl_term
def vae_loss(x, x_decoded_mean):
    """Image VAE loss: flattened pixel BCE plus KL divergence."""
    # binary_crossentropy wants (batch_size, dim) tensors, hence the flatten.
    flat_true = K.flatten(x)
    flat_pred = K.flatten(x_decoded_mean)
    xent = img_rows * img_cols * objectives.binary_crossentropy(flat_true, flat_pred)
    kl = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent + kl
def _vae_loss(self, x, x_decoded_mean):
    """VAE loss that recovers z_mean/z_log_var from the built model graph.

    NOTE(review): ``layer.inbound_nodes`` is a legacy/private attribute
    (later Keras renames it ``_inbound_nodes``) — confirm the Keras version
    this targets.
    """
    n_inputs = self._model.get_input_shape_at(0)[1]
    # Look up the latent statistic tensors by their layer names.
    z_mean = self._model.get_layer('z_mean').inbound_nodes[0].output_tensors[0]
    z_log_var = self._model.get_layer('z_log_var').inbound_nodes[0].output_tensors[0]
    # Mean BCE rescaled to a sum over the n_inputs units.
    xent_loss = n_inputs * objectives.binary_crossentropy(x, x_decoded_mean)
    # KL(q(z|x) || N(0, I)) summed over latent dimensions.
    kl_loss = - 0.5 * K.sum(1 + z_log_var
                            - K.square(z_mean)
                            - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
def vae_loss(x_, x_reconstruct):
    """Reconstruction BCE plus a KL term parameterized by (z_mean, z_std)."""
    rec = binary_crossentropy(x_, x_reconstruct)
    # KL divergence in std (not log-var) form; 1e-10 guards log against z_std == 0.
    kl = -0.5 * K.mean(1 + 2 * K.log(z_std + 1e-10) - z_mean ** 2 - z_std ** 2, axis=-1)
    return rec + kl
def vae_loss(x, x_decoded_mean):
    """Summed reconstruction BCE + KL regularizer (standard VAE objective)."""
    kl = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    xent = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    return xent + kl
variational_autoencoder_deconv.py 文件源码
项目:keras-customized
作者: ambrite
项目源码
文件源码
阅读 20
收藏 0
点赞 0
评论 0
def vae_loss(x, x_decoded_mean):
    """VAE loss for img_rows x img_cols images."""
    # binary_crossentropy expects a batch_size-by-dim layout for both
    # tensors, so flatten before computing the loss.
    target = K.flatten(x)
    prediction = K.flatten(x_decoded_mean)
    reconstruction = img_rows * img_cols * objectives.binary_crossentropy(target, prediction)
    divergence = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return reconstruction + divergence
def vae_loss(x, x_decoded_mean):
    """Mean-BCE reconstruction plus KL term (log-std parameterization)."""
    kl = -0.5 * K.mean(1 + z_log_std - K.square(z_mean) - K.exp(z_log_std), axis=-1)
    xent = objectives.binary_crossentropy(x, x_decoded_mean)
    return xent + kl
variational_autoencoder_deconv_gpu.py 文件源码
项目:keras-mxnet-benchmarks
作者: sandeep-krishnamurthy
项目源码
文件源码
阅读 19
收藏 0
点赞 0
评论 0
def vae_loss(x, x_decoded_mean):
    """Image VAE objective: summed pixel BCE + mean KL divergence."""
    # Flatten both tensors — binary_crossentropy expects (batch_size, dim).
    xf = K.flatten(x)
    yf = K.flatten(x_decoded_mean)
    xent_term = img_rows * img_cols * objectives.binary_crossentropy(xf, yf)
    kl_term = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_term + kl_term
variational_autoencoder_gpu.py 文件源码
项目:keras-mxnet-benchmarks
作者: sandeep-krishnamurthy
项目源码
文件源码
阅读 21
收藏 0
点赞 0
评论 0
def vae_loss(x, x_decoded_mean):
    """Standard VAE loss: summed BCE reconstruction + KL divergence."""
    recon = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    kld = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return recon + kld
def e_binary_crossentropy(self, y_true, y_pred):
    """Per-sample mean binary crossentropy on the error predictions."""
    if self.p:
        # Map both tensors back from the norm-centered encoding first.
        y_true = undo_normcentererr(y_true, self.p)
        y_pred = undo_normcentererr(y_pred, self.p)
    elementwise = K.binary_crossentropy(y_pred, y_true)
    return K.mean(elementwise, axis=-1)
def s_binary_crossentropy(self, y_true, y_pred):
    """Binary crossentropy on the syndrome implied by the error predictions.

    s_true is the mod-2 dot product of the true error with the check
    matrix H. s_pred appears to compute, for independent bits with flip
    probabilities y_pred, the probability that each parity check fires:
    (1 - prod(1 - 2p_i)) / 2, with the product evaluated as
    exp(sum(log(2p - 1))) in complex64 so negative factors are
    representable, then the real part taken — confirm against the
    derivation in the accompanying paper/notes.
    """
    if self.p:
        # Undo norm-centering preprocessing before interpreting as probabilities.
        y_pred = undo_normcentererr(y_pred, self.p)
        y_true = undo_normcentererr(y_true, self.p)
    s_true = K.dot(y_true, K.transpose(self.H))%2
    twopminusone = 2*y_pred-1
    s_pred = ( 1 - tf.real(K.exp(K.dot(K.log(tf.cast(twopminusone, tf.complex64)), tf.cast(K.transpose(self.H), tf.complex64)))) ) / 2
    return K.mean(K.binary_crossentropy(s_pred, s_true), axis=-1)
def create_model(L, hidden_sizes=(4,), hidden_act='tanh', act='sigmoid', loss='binary_crossentropy',
                 Z=True, X=False, learning_rate=0.002,
                 normcentererr_p=None, batchnorm=0):
    """Build and compile a dense decoder network for code syndromes.

    Input has L**2 * (X+Z) units; output has 2 * L**2 * (X+Z) units.
    Each entry of hidden_sizes is a width multiplier on the output
    dimension. ``loss`` may be one of the named custom losses or any
    value accepted by model.compile.

    Fix: the default ``hidden_sizes=[4]`` was a mutable default argument;
    replaced with the behaviorally equivalent immutable tuple ``(4,)``
    (indexing and slicing are unchanged).
    """
    in_dim = L**2 * (X + Z)
    out_dim = 2 * L**2 * (X + Z)
    model = Sequential()
    model.add(Dense(int(hidden_sizes[0] * out_dim), input_dim=in_dim,
                    kernel_initializer='glorot_uniform'))
    if batchnorm:
        model.add(BatchNormalization(momentum=batchnorm))
    model.add(Activation(hidden_act))
    for s in hidden_sizes[1:]:
        model.add(Dense(int(s * out_dim), kernel_initializer='glorot_uniform'))
        if batchnorm:
            model.add(BatchNormalization(momentum=batchnorm))
        model.add(Activation(hidden_act))
    model.add(Dense(out_dim, kernel_initializer='glorot_uniform'))
    if batchnorm:
        model.add(BatchNormalization(momentum=batchnorm))
    model.add(Activation(act))
    c = CodeCosts(L, ToricCode, Z, X, normcentererr_p)
    # Resolve named custom losses; unknown names fall through to Keras.
    losses = {'e_binary_crossentropy': c.e_binary_crossentropy,
              's_binary_crossentropy': c.s_binary_crossentropy,
              'se_binary_crossentropy': c.se_binary_crossentropy}
    model.compile(loss=losses.get(loss, loss),
                  optimizer=Nadam(lr=learning_rate),
                  metrics=[c.triv_no_error, c.e_binary_crossentropy, c.s_binary_crossentropy]
                  )
    return model
def vae_loss(x, x_decoded_mean):
    """Negative ELBO: per-sample summed BCE plus KL divergence."""
    # Rescale mean BCE to a sum over all original_dim units.
    xent_loss = objectives.binary_crossentropy(x, x_decoded_mean) * original_dim
    kl_loss = K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) * (-0.5)
    return xent_loss + kl_loss
def vae_loss(x, x_decoded_mean):
    """VAE loss on image tensors, flattened for binary_crossentropy."""
    # binary_crossentropy expects 2-D (batch_size, dim) inputs.
    true_flat = K.flatten(x)
    mean_flat = K.flatten(x_decoded_mean)
    bce_term = img_rows * img_cols * objectives.binary_crossentropy(true_flat, mean_flat)
    kl_term = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return bce_term + kl_term
def VAELoss(x, x_decoded_mean):
    """VAE loss for square images of side imageSize."""
    # binary_crossentropy expects (batchSize, dim) tensors — flatten both.
    flat_x = K.flatten(x)
    flat_mean = K.flatten(x_decoded_mean)
    xent = imageSize * imageSize * objectives.binary_crossentropy(flat_x, flat_mean)
    kl = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent + kl
# Convolutional models
def vae_loss(x, x_decoded_mean):
    """Reconstruction BCE (summed over original_dim units) plus KL term."""
    kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    return xent_loss + kl_loss
def vae_loss(x, x_decoded_mean):
    """Standard VAE training objective (closes over z_mean, z_log_var)."""
    reconstruction = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    # Closed-form KL between the diagonal Gaussian posterior and N(0, I).
    kl = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return reconstruction + kl
def vae_loss(x, x_hat):
    """VAE loss selecting BCE or MSE reconstruction via use_loss.

    Closes over n, use_loss, z_mean, z_log_var from the enclosing scope.
    """
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    xent_loss = n * objectives.binary_crossentropy(x, x_hat)
    mse_loss = n * objectives.mse(x, x_hat)
    if use_loss == 'xent':
        return xent_loss + kl_loss
    elif use_loss == 'mse':
        return mse_loss + kl_loss
    else:
        # Was ``raise Expception, 'Nonknow loss!'`` — Python-2-only raise
        # syntax (a SyntaxError on Python 3) with a misspelled, undefined
        # exception name.
        raise ValueError('Unknown loss: {}'.format(use_loss))