from tensorflow.keras.layers import Input, Conv2D, LeakyReLU, BatchNormalization, Flatten, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.initializers import RandomNormal

# Kernel initializer shared by all Conv2D layers (assumed here: DCGAN's N(0, 0.02)).
init = RandomNormal(mean = 0.0, stddev = 0.02)

def Discriminator(image_size = 64):
    """DCGAN-style discriminator: maps an L x L RGB image to a single raw score."""
    L = int(image_size)
    images = Input(shape = (L, L, 3))
    x = Conv2D(64, (4, 4), strides = (2, 2),
               kernel_initializer = init, padding = 'same')(images)  # shape (L/2, L/2, 64)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(128, (4, 4), strides = (2, 2),
               kernel_initializer = init, padding = 'same')(x)       # shape (L/4, L/4, 128)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(256, (4, 4), strides = (2, 2),
               kernel_initializer = init, padding = 'same')(x)       # shape (L/8, L/8, 256)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(512, (4, 4), strides = (2, 2),
               kernel_initializer = init, padding = 'same')(x)       # shape (L/16, L/16, 512)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    x = Flatten()(x)
    outputs = Dense(1)(x)                                            # no activation: raw score (logit)
    model = Model(inputs = images, outputs = outputs)
    model.summary()
    return model
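As a quick sanity check, the discriminator can be built and run on a random batch by itself. The Adam settings (lr = 0.0002, beta_1 = 0.5) and the from_logits loss below are common DCGAN choices and are only an assumption here, since the final Dense(1) layer has no activation; the actual GAN training loop may wire the loss differently.

import numpy as np
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import BinaryCrossentropy

disc = Discriminator(image_size = 64)
disc.compile(optimizer = Adam(learning_rate = 0.0002, beta_1 = 0.5),
             loss = BinaryCrossentropy(from_logits = True),          # assumed: raw-score output
             metrics = ['accuracy'])

# Feed a random batch of 8 "images" in [-1, 1] and check the output shape.
fake_batch = np.random.uniform(-1.0, 1.0, size = (8, 64, 64, 3)).astype('float32')
scores = disc.predict(fake_batch)
print(scores.shape)                                                  # (8, 1): one score per image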