from keras.layers import (Input, Conv2D, MaxPooling2D, BatchNormalization,
    LeakyReLU, Flatten, Dense)
from keras import models
from args import Args  # assumed location of the project's config (kernel_initializer, bn_momentum, alpha_D, noise_shape, ...)

def build_discriminator( shape, build_disc=True ) :
    '''
    Build the discriminator.
    Set build_disc=False to build an encoder network instead, for testing
    the encoding/discrimination capability with an autoencoder.
    '''
    def conv2d( x, filters, shape=(4, 4), **kwargs ) :
        '''
        Shorthand for a strided Conv2D + BatchNorm + LeakyReLU block,
        so I don't have to repeat the lengthy parameters.
        '''
        x = Conv2D( filters, shape, strides=(2, 2),
            padding='same',
            kernel_initializer=Args.kernel_initializer,
            **kwargs )( x )
        #x = MaxPooling2D()( x )
        x = BatchNormalization(momentum=Args.bn_momentum)( x )
        x = LeakyReLU(alpha=Args.alpha_D)( x )
        return x
    # https://github.com/tdrussell/IllustrationGAN
    # As they propose, and contrary to the usual GAN hacks, MaxPooling seems to work
    # better for anime datasets. However, animeGAN doesn't use it, so I'll keep this
    # closer to DCGAN. (A pooling-based variant is sketched below for reference.)
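    # A minimal sketch (not in the original code) of the MaxPooling variant mentioned
    # above, assuming the same Args config. It is unused; swap it in for conv2d()
    # in the stack below if you want to try pooling instead of strided convolutions.
    def conv2d_pool( x, filters, shape=(3, 3), **kwargs ) :
        x = Conv2D( filters, shape, strides=(1, 1),
            padding='same',
            kernel_initializer=Args.kernel_initializer,
            **kwargs )( x )
        x = MaxPooling2D()( x )  # default 2x2 pooling halves the spatial resolution
        x = BatchNormalization(momentum=Args.bn_momentum)( x )
        x = LeakyReLU(alpha=Args.alpha_D)( x )
        return x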
    face = Input( shape=shape )
    x = face
    # Warning: Don't batchnorm the first set of Conv2D.
    x = Conv2D( 64, (4, 4), strides=(2, 2),
        padding='same',
        kernel_initializer=Args.kernel_initializer )( x )
    x = LeakyReLU(alpha=Args.alpha_D)( x )
    # 32x32
    x = conv2d( x, 128 )
    # 16x16
    x = conv2d( x, 256 )
    # 8x8
    x = conv2d( x, 512 )
    # 4x4
    if build_disc:
        x = Flatten()(x)
        # add 16 features. Run 1D conv of size 3.
        #x = MinibatchDiscrimination(16, 3)( x )
        #x = Dense(1024, kernel_initializer=Args.kernel_initializer)( x )
        #x = LeakyReLU(alpha=Args.alpha_D)( x )
        # 1 when "real", 0 when "fake".
        x = Dense(1, activation='sigmoid',
            kernel_initializer=Args.kernel_initializer)( x )
        return models.Model( inputs=face, outputs=x )
    else:
        # Build the encoder: the (4, 4) valid-padding conv collapses the 4x4 feature
        # map into a 1x1 x Args.noise_shape[2] code.
        x = Conv2D(Args.noise_shape[2], (4, 4), activation='tanh')(x)
        return models.Model( inputs=face, outputs=x )
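
# A minimal usage sketch, not part of the original code. It assumes 64x64 RGB inputs
# and that Args.noise_shape is something like (1, 1, 128); adjust to your config.
if __name__ == '__main__':
    disc = build_discriminator( (64, 64, 3), build_disc=True )
    disc.summary()  # 64 -> 32 -> 16 -> 8 -> 4, then a single sigmoid unit (1=real, 0=fake)

    enc = build_discriminator( (64, 64, 3), build_disc=False )
    enc.summary()   # same conv stack, ending in a 1x1xArgs.noise_shape[2] tanh code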