def __init__(self, bottom_width=8, ch=512, wscale=0.005):
w = chainer.initializers.Normal(wscale)
super(DiscriminatorPFN, self).__init__(
c0_0=L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w),
c0_1=L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w),
c1_0=L.Convolution2D(ch // 4, ch // 4, 3, 1, 1, initialW=w),
c1_1=L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w),
c2_0=L.Convolution2D(ch // 2, ch // 2, 3, 1, 1, initialW=w),
c2_1=L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w),
c3_0=L.Convolution2D(ch // 1, ch // 1, 3, 1, 1, initialW=w),
l4=L.Linear(bottom_width * bottom_width * ch, 1, initialW=w),
bn0_1=L.BatchNormalization(ch // 4, use_gamma=False),
bn1_0=L.BatchNormalization(ch // 4, use_gamma=False),
bn1_1=L.BatchNormalization(ch // 2, use_gamma=False),
bn2_0=L.BatchNormalization(ch // 2, use_gamma=False),
bn2_1=L.BatchNormalization(ch // 1, use_gamma=False),
bn3_0=L.BatchNormalization(ch // 1, use_gamma=False),
)
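For context, a forward pass over these links might look like the following sketch (not from the original source; it assumes chainer.functions is imported as F and the leaky-ReLU activations conventional for DCGAN-style discriminators):
def __call__(self, x):
    # assumes: import chainer.functions as F
    # 3x3 stride-1 convs refine features; 4x4 stride-2 convs halve the resolution.
    h = F.leaky_relu(self.c0_0(x))
    h = F.leaky_relu(self.bn0_1(self.c0_1(h)))
    h = F.leaky_relu(self.bn1_0(self.c1_0(h)))
    h = F.leaky_relu(self.bn1_1(self.c1_1(h)))
    h = F.leaky_relu(self.bn2_0(self.c2_0(h)))
    h = F.leaky_relu(self.bn2_1(self.c2_1(h)))
    h = F.leaky_relu(self.bn3_0(self.c3_0(h)))
    return self.l4(h)  # raw logit; the GAN loss applies the sigmoid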
def __init__(self, density=1, size=64, latent_size=128, channel=3):
assert (size % 16 == 0)
initial_size = size / 16
super(Generator, self).__init__(
g1=L.Linear(latent_size, initial_size * initial_size * 256 * density, wscale=0.02 * math.sqrt(latent_size)),
norm1=L.BatchNormalization(initial_size * initial_size * 256 * density),
g2=L.Deconvolution2D(256 * density, 128 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 256 * density)),
norm2=L.BatchNormalization(128 * density),
g3=L.Deconvolution2D(128 * density, 64 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 128 * density)),
norm3=L.BatchNormalization(64 * density),
g4=L.Deconvolution2D(64 * density, 32 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 64 * density)),
norm4=L.BatchNormalization(32 * density),
g5=L.Deconvolution2D(32 * density, channel, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 32 * density)),
)
self.density = density
self.latent_size = latent_size
self.initial_size = initial_size
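A plausible forward pass for this generator (my sketch, not part of the original listing): project the latent vector, reshape to feature maps, then upsample 16x back to the image size.
def __call__(self, z):
    # assumes: import chainer.functions as F
    h = F.relu(self.norm1(self.g1(z)))
    h = F.reshape(h, (z.shape[0], 256 * self.density,
                      self.initial_size, self.initial_size))
    h = F.relu(self.norm2(self.g2(h)))
    h = F.relu(self.norm3(self.g3(h)))
    h = F.relu(self.norm4(self.g4(h)))
    return F.tanh(self.g5(h))  # pixel values in [-1, 1]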
def __init__(self, density=1, size=64, channel=3):
assert (size % 16 == 0)
initial_size = size / 16
super(Discriminator, self).__init__(
dis1=L.Convolution2D(channel, 32 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * channel * density)),
dis2=L.Convolution2D(32 * density, 64 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 32 * density)),
norm2=L.BatchNormalization(64 * density),
dis3=L.Convolution2D(64 * density, 128 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 64 * density)),
norm3=L.BatchNormalization(128 * density),
dis4=L.Convolution2D(128 * density, 256 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 128 * density)),
norm4=L.BatchNormalization(256 * density),
dis5=L.Linear(initial_size * initial_size * 256 * density, 512,
wscale=0.02 * math.sqrt(initial_size * initial_size * 256 * density)),
norm5=L.BatchNormalization(512),
dis6=L.Linear(512, 2, wscale=0.02 * math.sqrt(512)),
)
def __init__(self, density=1, size=64, latent_size=100, channel=3):
assert (size % 16 == 0)
    initial_size = size // 16
super(Encoder_origin, self).__init__(
enc1=L.Convolution2D(channel, 32 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * channel * density)),
enc2=L.Convolution2D(32 * density, 64 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 32 * density)),
norm2=L.BatchNormalization(64 * density),
enc3=L.Convolution2D(64 * density, 128 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 64 * density)),
norm3=L.BatchNormalization(128 * density),
enc4=L.Convolution2D(128 * density, 256 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 128 * density)),
norm4=L.BatchNormalization(256 * density),
mean=L.Linear(initial_size * initial_size * 256 * density, latent_size,
wscale=0.02 * math.sqrt(initial_size * initial_size * 256 * density)),
ln_var=L.Linear(initial_size * initial_size * 256 * density, latent_size,
wscale=0.02 * math.sqrt(initial_size * initial_size * 256 * density)),
)
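Since mean and ln_var parameterize a diagonal Gaussian over the latent code, encoding ends with the usual VAE reparameterization trick. A hedged sketch using Chainer's F.gaussian (the encode name and activations are my assumptions):
def encode(self, x):
    # assumes: import chainer.functions as F
    h = F.leaky_relu(self.enc1(x))
    h = F.leaky_relu(self.norm2(self.enc2(h)))
    h = F.leaky_relu(self.norm3(self.enc3(h)))
    h = F.leaky_relu(self.norm4(self.enc4(h)))
    mu, ln_var = self.mean(h), self.ln_var(h)
    z = F.gaussian(mu, ln_var)  # z = mu + exp(ln_var / 2) * eps
    return z, mu, ln_var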
def __init__(self, density=1, size=64, latent_size=100, channel=3):
assert (size % 16 == 0)
    initial_size = size // 16
super(Generator_origin, self).__init__(
g1=L.Linear(latent_size, initial_size * initial_size * 256 * density, wscale=0.02 * math.sqrt(latent_size)),
norm1=L.BatchNormalization(initial_size * initial_size * 256 * density),
g2=L.Deconvolution2D(256 * density, 128 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 256 * density)),
norm2=L.BatchNormalization(128 * density),
g3=L.Deconvolution2D(128 * density, 64 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 128 * density)),
norm3=L.BatchNormalization(64 * density),
g4=L.Deconvolution2D(64 * density, 32 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 64 * density)),
norm4=L.BatchNormalization(32 * density),
g5=L.Deconvolution2D(32 * density, channel, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 32 * density)),
)
self.density = density
self.latent_size = latent_size
self.initial_size = initial_size
def __init__(self, density=1, size=64, channel=3):
assert (size % 16 == 0)
    initial_size = size // 16
super(Discriminator_org, self).__init__(
dis1=L.Convolution2D(channel, 32 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * channel * density)),
dis2=L.Convolution2D(32 * density, 64 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 32 * density)),
norm2=L.BatchNormalization(64 * density),
dis3=L.Convolution2D(64 * density, 128 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 64 * density)),
norm3=L.BatchNormalization(128 * density),
dis4=L.Convolution2D(128 * density, 256 * density, 4, stride=2, pad=1,
wscale=0.02 * math.sqrt(4 * 4 * 128 * density)),
norm4=L.BatchNormalization(256 * density),
dis5=L.Linear(initial_size * initial_size * 256 * density, 2,
wscale=0.02 * math.sqrt(initial_size * initial_size * 256 * density)),
)
def __init__(self, size=64, ch=512, wscale=0.005):
assert (size % 16 == 0)
initial_size = size // 16
w = chainer.initializers.Normal(wscale)
super(Discriminator, self).__init__(
c0_0=L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w),
c0_1=L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w),
c1_1=L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w),
c2_1=L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w),
c3_0=L.Convolution2D(ch // 1, ch // 1, 4, 2, 1, initialW=w),
l4=L.Linear(initial_size * initial_size * ch, 1, initialW=w),
bn0_1=L.BatchNormalization(ch // 4, use_gamma=False),
bn1_1=L.BatchNormalization(ch // 2, use_gamma=False),
bn2_1=L.BatchNormalization(ch // 1, use_gamma=False),
bn3_0=L.BatchNormalization(ch // 1, use_gamma=False),
)
def __init__(self, size=64, ch=512, wscale=0.005):
assert (size % 16 == 0)
initial_size = size // 16
w = chainer.initializers.Normal(wscale)
super(Discriminator2, self).__init__(
c0_0=L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w),
c0_1=L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w),
c1_1=L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w),
c2_1=L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w),
c3_0=L.Convolution2D(ch // 1, ch // 1, 4, 2, 1, initialW=w),
l4=L.Linear(initial_size * initial_size * ch, 1, initialW=w),
bn0_1=L.BatchNormalization(ch // 4),
bn1_1=L.BatchNormalization(ch // 2),
bn2_1=L.BatchNormalization(ch // 1),
bn3_0=L.BatchNormalization(ch // 1),
)
def __init__(self, size=64, n_hidden=128, ch=512, wscale=0.02):
assert (size % 16 == 0)
    initial_size = size // 16
w = chainer.initializers.Normal(wscale)
super(Encoder, self).__init__(
c0_0=L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w),
c0_1=L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w),
c1_1=L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w),
c2_1=L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w),
c3_0=L.Convolution2D(ch // 1, ch // 1, 4, 2, 1, initialW=w),
mean=L.Linear(initial_size * initial_size * ch, n_hidden, initialW=w),
ln_var=L.Linear(initial_size * initial_size * ch, n_hidden, initialW=w),
bn0_1=L.BatchNormalization(ch // 4, use_gamma=False),
bn1_1=L.BatchNormalization(ch // 2, use_gamma=False),
bn2_1=L.BatchNormalization(ch // 1, use_gamma=False),
bn3_0=L.BatchNormalization(ch // 1, use_gamma=False),
)
# noinspection PyCallingNonCallable,PyUnresolvedReferences
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False, noise=False):
self.bn = bn
self.activation = activation
self.dropout = dropout
self.noise = noise
layers = {}
w = chainer.initializers.Normal(0.02)
if sample == 'down':
layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
elif sample == 'up':
layers['c'] = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
    elif sample == 'c7s1':
        layers['c'] = L.Convolution2D(ch0, ch1, 7, 1, 3, initialW=w)
    else:
        raise ValueError("sample must be 'down', 'up', or 'c7s1'")
if bn:
if self.noise:
layers['batchnorm'] = L.BatchNormalization(ch1, use_gamma=False)
else:
layers['batchnorm'] = L.BatchNormalization(ch1)
super(CBR, self).__init__(**layers)
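The flags stored above suggest a __call__ along these lines (a sketch under my own assumptions; the noise helper used by the original pix2pix-style code is not shown on this page, so the additive Gaussian noise and its sigma=0.2 are guesses):
def __call__(self, x):
    # assumes: import chainer; import chainer.functions as F
    h = self.c(x)
    if self.bn:
        h = self.batchnorm(h)
    if self.noise:
        # hypothetical noise injection; sigma is a placeholder value
        xp = chainer.cuda.get_array_module(h.data)
        h = h + 0.2 * xp.random.randn(*h.shape).astype(h.dtype)
    if self.dropout:
        h = F.dropout(h)
    if self.activation is not None:
        h = self.activation(h)
    return h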
def __init__(self, in_size, ch, out_size, stride=2):
super(BottleNeckA, self).__init__()
initialW = initializers.HeNormal()
with self.init_scope():
self.conv1 = L.Convolution2D(
in_size, ch, 1, stride, 0, initialW=initialW, nobias=True)
self.bn1 = L.BatchNormalization(ch)
self.conv2 = L.Convolution2D(
ch, ch, 3, 1, 1, initialW=initialW, nobias=True)
self.bn2 = L.BatchNormalization(ch)
self.conv3 = L.Convolution2D(
ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
self.bn3 = L.BatchNormalization(out_size)
self.conv4 = L.Convolution2D(
in_size, out_size, 1, stride, 0,
initialW=initialW, nobias=True)
self.bn4 = L.BatchNormalization(out_size)
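The four conv/bn pairs form the standard ResNet bottleneck plus its projection shortcut; a typical forward pass would be (sketch, assuming chainer.functions as F):
def __call__(self, x):
    h1 = F.relu(self.bn1(self.conv1(x)))   # 1x1, reduces channels and resolution
    h1 = F.relu(self.bn2(self.conv2(h1)))  # 3x3
    h1 = self.bn3(self.conv3(h1))          # 1x1, restores out_size channels
    h2 = self.bn4(self.conv4(x))           # projection shortcut
    return F.relu(h1 + h2)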
def __init__(self):
super(FastStyleNet, self).__init__(
c1=L.Convolution2D(3, 32, 9, stride=1, pad=4),
c2=L.Convolution2D(32, 64, 4, stride=2, pad=1),
        c3=L.Convolution2D(64, 128, 4, stride=2, pad=1),
r1=ResidualBlock(128, 128),
r2=ResidualBlock(128, 128),
r3=ResidualBlock(128, 128),
r4=ResidualBlock(128, 128),
r5=ResidualBlock(128, 128),
d1=L.Deconvolution2D(128, 64, 4, stride=2, pad=1),
d2=L.Deconvolution2D(64, 32, 4, stride=2, pad=1),
d3=L.Deconvolution2D(32, 3, 9, stride=1, pad=4),
b1=L.BatchNormalization(32),
b2=L.BatchNormalization(64),
b3=L.BatchNormalization(128),
b4=L.BatchNormalization(64),
b5=L.BatchNormalization(32),
)
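Reading off the layer shapes, the intended flow is downsample, five residual blocks, then upsample back to the input resolution; a sketch of the forward pass (my reconstruction, not the original code):
def __call__(self, x):
    # assumes: import chainer.functions as F
    h = F.relu(self.b1(self.c1(x)))
    h = F.relu(self.b2(self.c2(h)))
    h = F.relu(self.b3(self.c3(h)))
    h = self.r5(self.r4(self.r3(self.r2(self.r1(h)))))
    h = F.relu(self.b4(self.d1(h)))
    h = F.relu(self.b5(self.d2(h)))
    return self.d3(h)  # often rescaled afterwards, e.g. (F.tanh(h) + 1) * 127.5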
def __init__(self, in_channels, n_layers, growth_rate,
dropout_ratio=None):
super(DenseBlock, self).__init__()
self._layers = []
sum_channels = in_channels
for l in range(n_layers):
W = initializers.HeNormal()
conv = L.Convolution2D(sum_channels, growth_rate, 3, pad=1,
initialW=W)
norm = L.BatchNormalization(sum_channels)
self.add_link('conv{}'.format(l + 1), conv)
self.add_link('norm{}'.format(l + 1), norm)
self._layers.append((conv, norm))
sum_channels += growth_rate
self.add_persistent('dropout_ratio', dropout_ratio)
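Each stored (conv, norm) pair consumes everything produced so far, so the forward pass concatenates along the channel axis; a hedged sketch (assumes chainer.functions as F):
def __call__(self, x):
    h = x
    for conv, norm in self._layers:
        # pre-activation ordering: BN -> ReLU -> 3x3 conv
        y = conv(F.relu(norm(h)))
        if self.dropout_ratio is not None:
            y = F.dropout(y, self.dropout_ratio)
        h = F.concat((h, y), axis=1)  # channel count grows by growth_rate
    return h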
def __init__(self, depth=40, growth_rate=12, in_channels=16,
dropout_ratio=0.2, n_class=10):
assert (depth - 4) % 3 == 0
n_layers = int((depth - 4) / 3)
n_ch = [in_channels + growth_rate * n_layers * i for i in range(4)]
dropout_ratio = dropout_ratio if dropout_ratio > 0 else None
super(DenseNet, self).__init__(
conv0=L.Convolution2D(3, n_ch[0], 3, pad=1),
dense1=DenseBlock(
n_ch[0], n_layers, growth_rate, dropout_ratio),
trans1=TransitionLayer(n_ch[1], n_ch[1], dropout_ratio),
dense2=DenseBlock(
n_ch[1], n_layers, growth_rate, dropout_ratio),
trans2=TransitionLayer(n_ch[2], n_ch[2], dropout_ratio),
dense3=DenseBlock(
n_ch[2], n_layers, growth_rate, dropout_ratio),
norm4=L.BatchNormalization(n_ch[3]),
fc4=L.Linear(n_ch[3], n_class),
)
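Putting the blocks together, a forward pass consistent with these channel counts might be (my sketch; the global average pooling before the classifier is an assumption, though it is the usual DenseNet choice):
def __call__(self, x):
    # assumes: import chainer.functions as F
    h = self.conv0(x)
    h = self.trans1(self.dense1(h))
    h = self.trans2(self.dense2(h))
    h = F.relu(self.norm4(self.dense3(h)))
    h = F.average_pooling_2d(h, h.shape[2:])  # global average pooling
    return self.fc4(h)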
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
super(Generator, self).__init__()
self.n_hidden = n_hidden
self.ch = ch
self.bottom_width = bottom_width
with self.init_scope():
w = chainer.initializers.Normal(wscale)
self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
initialW=w)
self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
self.bn1 = L.BatchNormalization(ch // 2)
self.bn2 = L.BatchNormalization(ch // 4)
self.bn3 = L.BatchNormalization(ch // 8)
def __init__(self, bottom_width=4, ch=512, wscale=0.02):
w = chainer.initializers.Normal(wscale)
super(Discriminator, self).__init__()
with self.init_scope():
self.c0_0 = L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w)
self.c0_1 = L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w)
self.c1_0 = L.Convolution2D(ch // 4, ch // 4, 3, 1, 1, initialW=w)
self.c1_1 = L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w)
self.c2_0 = L.Convolution2D(ch // 2, ch // 2, 3, 1, 1, initialW=w)
self.c2_1 = L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w)
self.c3_0 = L.Convolution2D(ch // 1, ch // 1, 3, 1, 1, initialW=w)
self.l4 = L.Linear(bottom_width * bottom_width * ch, 1, initialW=w)
self.bn0_1 = L.BatchNormalization(ch // 4, use_gamma=False)
self.bn1_0 = L.BatchNormalization(ch // 4, use_gamma=False)
self.bn1_1 = L.BatchNormalization(ch // 2, use_gamma=False)
self.bn2_0 = L.BatchNormalization(ch // 2, use_gamma=False)
self.bn2_1 = L.BatchNormalization(ch // 1, use_gamma=False)
self.bn3_0 = L.BatchNormalization(ch // 1, use_gamma=False)
def __init__(self):
initializer = initializers.HeNormal()
dis = Discriminator()
chainer.serializers.load_npz('result/dis_iter_500000.npz', dis)
super(DiscriminatorClassifier, self).__init__(
        c0=L.Convolution2D(1, 64, 4, stride=2, pad=1, initialW=dis.c0.W.data, initial_bias=dis.c0.b.data),
        c1=L.Convolution2D(64, 128, 4, stride=2, pad=1, initialW=dis.c1.W.data, initial_bias=dis.c1.b.data),
        l2=L.Linear(7 * 7 * 128, 10, initialW=initializer),
        bn1=L.BatchNormalization(128),
)
self.c0.disable_update()
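A plausible forward pass for this fine-tuned classifier (sketch; the 7*7*128 Linear implies 28x28 inputs, e.g. MNIST):
def __call__(self, x):
    # assumes: import chainer.functions as F
    h = F.leaky_relu(self.c0(x))            # frozen GAN features, 28 -> 14
    h = F.leaky_relu(self.bn1(self.c1(h)))  # 14 -> 7
    return self.l2(h)                       # 10-way logits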
# def __init__(self):
# initializer = initializers.HeNormal()
# super(DiscriminatorClassifier, self).__init__(
# c0 = L.Convolution2D(1, 64, 4, stride=2, pad=1, initialW=initializer),
# c1 = L.Convolution2D(64, 128, 4, stride=2, pad=1, initialW=initializer),
# l2 = L.Linear(7*7*128, 10, initialW = initializer),
# bn1 = L.BatchNormalization(128),
# )
#
def build_network(self):
config.check()
wscale = config.q_wscale
# Fully connected part of Q-Network
fc_attributes = {}
    fc_units = list(zip(config.q_fc_units[:-1], config.q_fc_units[1:]))  # list(): zip() is an iterator under Python 3 and len() is taken below
for i, (n_in, n_out) in enumerate(fc_units):
fc_attributes["layer_%i" % i] = L.Linear(n_in, n_out, wscale=wscale)
fc_attributes["batchnorm_%i" % i] = BatchNormalization(n_out)
fc = FullyConnectedNetwork(**fc_attributes)
fc.n_hidden_layers = len(fc_units) - 1
fc.activation_function = config.q_fc_activation_function
fc.apply_batchnorm = config.apply_batchnorm
fc.apply_dropout = config.q_fc_apply_dropout
fc.apply_batchnorm_to_input = config.q_fc_apply_batchnorm_to_input
if config.use_gpu:
fc.to_gpu()
return fc
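FullyConnectedNetwork itself is not shown on this page. A minimal sketch of how such a chain could consume the layer_i / batchnorm_i links it is built with (my assumption only; the real class also honors the dropout and input-batchnorm flags set above, and activation_function is assumed to be a callable):
class FullyConnectedNetwork(chainer.Chain):
    def __call__(self, x):
        h = x
        for i in range(self.n_hidden_layers + 1):
            h = self["layer_%i" % i](h)
            if i < self.n_hidden_layers:
                if self.apply_batchnorm:
                    h = self["batchnorm_%i" % i](h)
                h = self.activation_function(h)
        return h  # final layer left linear for Q-value regression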
def build_network(self, units=None):
    if units is None:
        raise ValueError("units must be specified")
config.check()
wscale = config.q_wscale
# Fully connected part of Q-Network
fc_attributes = {}
    units = list(units)  # copy so the caller's list is not mutated
    units[-1] *= config.q_k_heads
    fc_units = list(zip(units[:-1], units[1:]))
for i, (n_in, n_out) in enumerate(fc_units):
fc_attributes["layer_%i" % i] = L.Linear(n_in, n_out, wscale=wscale)
fc_attributes["batchnorm_%i" % i] = BatchNormalization(n_out)
fc = FullyConnectedNetwork(**fc_attributes)
fc.n_hidden_layers = len(fc_units) - 1
fc.activation_function = config.q_fc_activation_function
fc.apply_batchnorm = config.apply_batchnorm
fc.apply_dropout = config.q_fc_apply_dropout
fc.apply_batchnorm_to_input = config.q_fc_apply_batchnorm_to_input
if config.use_gpu:
fc.to_gpu()
return fc
def build_head(self, k=0, units=None):
    if units is None:
        raise ValueError("units must be specified")
config.check()
wscale = config.q_wscale
# Fully connected part of Q-Network
fc_attributes = {}
    fc_units = list(zip(units[:-1], units[1:]))
for i, (n_in, n_out) in enumerate(fc_units):
fc_attributes["layer_%i" % i] = LinearHead(n_in, n_out, config.q_k_heads, wscale=wscale)
fc_attributes["batchnorm_%i" % i] = BatchNormalization(n_out)
fc = FullyConnectedNetwork(**fc_attributes)
fc.n_hidden_layers = len(fc_units) - 1
fc.activation_function = config.q_fc_activation_function
fc.apply_dropout = config.q_fc_apply_dropout
if config.use_gpu:
fc.to_gpu()
return fc