def __init__(self):
    """Image-transform network: three downsampling convolutions, five
    residual blocks, then three deconvolutions back to a 3-channel image."""
    layers = {}
    layers['c1'] = L.Convolution2D(3, 32, 9, stride=1, pad=4)
    layers['c2'] = L.Convolution2D(32, 64, 4, stride=2, pad=1)
    layers['c3'] = L.Convolution2D(64, 128, 4, stride=2, pad=1)
    # Five identical residual blocks at the bottleneck resolution.
    for i in range(1, 6):
        layers['r{}'.format(i)] = ResidualBlock(128, 128)
    layers['d1'] = L.Deconvolution2D(128, 64, 4, stride=2, pad=1)
    layers['d2'] = L.Deconvolution2D(64, 32, 4, stride=2, pad=1)
    layers['d3'] = L.Deconvolution2D(32, 3, 9, stride=1, pad=4)
    # Batch normalization sizes follow the conv/deconv channel counts.
    for i, n_ch in enumerate((32, 64, 128, 64, 32), start=1):
        layers['b{}'.format(i)] = L.BatchNormalization(n_ch)
    super(FastStyleNet, self).__init__(**layers)
Python example source code using the class Deconvolution2D()
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
    """DCGAN-style generator: a linear projection to a small feature map,
    followed by four deconvolutions that grow it into a 3-channel image."""
    super(Generator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    with self.init_scope():
        init_w = chainer.initializers.Normal(wscale)
        # Size of the flattened feature map produced by the first layer.
        n_units = bottom_width * bottom_width * ch
        self.l0 = L.Linear(self.n_hidden, n_units, initialW=init_w)
        # Channel count halves at each upsampling step.
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=init_w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w)
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w)
        self.bn0 = L.BatchNormalization(n_units)
        self.bn1 = L.BatchNormalization(ch // 2)
        self.bn2 = L.BatchNormalization(ch // 4)
        self.bn3 = L.BatchNormalization(ch // 8)
def setUp(self):
    """Build a Deconvolution2D link with random parameters plus random
    input/gradient arrays sized by the deconvolution output arithmetic."""
    self.link = L.Deconvolution2D(
        self.in_channels, self.out_channels, self.ksize,
        stride=self.stride, pad=self.pad, nobias=self.nobias)
    # Randomize the weight (and bias, if present) so gradients are non-trivial.
    self.link.W.data[...] = numpy.random.uniform(
        -1, 1, self.link.W.data.shape).astype(numpy.float32)
    if not self.nobias:
        self.link.b.data[...] = numpy.random.uniform(
            -1, 1, self.link.b.data.shape).astype(numpy.float32)
    # zerograds() is deprecated in Chainer; cleargrads() is the
    # supported replacement for resetting gradients before a test.
    self.link.cleargrads()

    N = 2
    h, w = 3, 2
    kh, kw = _pair(self.ksize)
    # Expected output spatial size for the given kernel/stride/pad.
    out_h = conv.get_deconv_outsize(h, kh, self.stride, self.pad)
    out_w = conv.get_deconv_outsize(w, kw, self.stride, self.pad)
    self.gy = numpy.random.uniform(
        -1, 1, (N, self.out_channels, out_h, out_w)).astype(numpy.float32)
    self.x = numpy.random.uniform(
        -1, 1, (N, self.in_channels, h, w)).astype(numpy.float32)
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
    """DCGAN generator: latent vector -> linear projection -> four
    deconvolutions producing a 3-channel image."""
    super(Generator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    with self.init_scope():
        init_w = chainer.initializers.Normal(wscale)
        # Flattened size of the initial bottom_width x bottom_width map.
        n_units = bottom_width * bottom_width * ch
        self.l0 = L.Linear(self.n_hidden, n_units, initialW=init_w)
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=init_w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w)
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w)
        self.bn0 = L.BatchNormalization(n_units)
        self.bn1 = L.BatchNormalization(ch // 2)
        self.bn2 = L.BatchNormalization(ch // 4)
        self.bn3 = L.BatchNormalization(ch // 8)
def __init__(self, n_hidden=128, bottom_width=4, ch=512, wscale=0.02):
    """DCGAN generator with a 128-dim latent default: linear projection
    followed by four deconvolutions down to 3 output channels."""
    super(Generator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    with self.init_scope():
        init_w = chainer.initializers.Normal(wscale)
        # Units of the flattened starting feature map.
        n_units = bottom_width * bottom_width * ch
        self.l0 = L.Linear(self.n_hidden, n_units, initialW=init_w)
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=init_w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w)
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w)
        self.bn0 = L.BatchNormalization(n_units)
        self.bn1 = L.BatchNormalization(ch // 2)
        self.bn2 = L.BatchNormalization(ch // 4)
        self.bn3 = L.BatchNormalization(ch // 8)
def __init__(self, n_hidden=128, bottom_width=4, ch=512, wscale=0.02):
    """DCGAN generator (128-dim latent default); mirrors the standard
    linear-then-deconvolution architecture."""
    super(Generator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    with self.init_scope():
        init_w = chainer.initializers.Normal(wscale)
        n_units = bottom_width * bottom_width * ch  # flattened initial map
        self.l0 = L.Linear(self.n_hidden, n_units, initialW=init_w)
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=init_w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w)
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w)
        self.bn0 = L.BatchNormalization(n_units)
        self.bn1 = L.BatchNormalization(ch // 2)
        self.bn2 = L.BatchNormalization(ch // 4)
        self.bn3 = L.BatchNormalization(ch // 8)
def __init__(self, n_hidden, activate='sigmoid', size=64, ch=512, wscale=0.02):
    """Generator that upsamples a latent vector by 16x through four
    stride-2 deconvolutions; output activation chosen by name."""
    assert size % 16 == 0
    initial_size = size // 16
    self.n_hidden = n_hidden
    # Dispatch the output-activation name to its function.
    activations = {'sigmoid': F.sigmoid, 'tanh': F.tanh}
    if activate not in activations:
        raise ValueError('invalid activate function')
    self.activate = activations[activate]
    self.ch = ch
    self.initial_size = initial_size
    init_w = chainer.initializers.Normal(wscale)
    feat = initial_size * initial_size * ch  # flattened initial feature map
    super(Generator, self).__init__(
        l0=L.Linear(self.n_hidden, feat, initialW=init_w),
        dc1=L.Deconvolution2D(ch // 1, ch // 2, 4, 2, 1, initialW=init_w),
        dc2=L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w),
        dc3=L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w),
        dc4=L.Deconvolution2D(ch // 8, 3, 4, 2, 1, initialW=init_w),
        bn0=L.BatchNormalization(feat),
        bn1=L.BatchNormalization(ch // 2),
        bn2=L.BatchNormalization(ch // 4),
        bn3=L.BatchNormalization(ch // 8),
    )
def __init__(self, n_hidden, activate='sigmoid', size=64, ch=512, wscale=0.02):
    """Generator2: upsamples 8x (three stride-2 deconvolutions plus a
    final stride-1 layer); output activation chosen by name."""
    assert size % 8 == 0
    initial_size = size // 8
    self.n_hidden = n_hidden
    self.ch = ch
    self.initial_size = initial_size
    # Dispatch the output-activation name to its function.
    activations = {'sigmoid': F.sigmoid, 'tanh': F.tanh}
    if activate not in activations:
        raise ValueError('invalid activate function')
    self.activate = activations[activate]
    init_w = chainer.initializers.Normal(wscale)
    feat = initial_size * initial_size * ch  # flattened initial feature map
    super(Generator2, self).__init__(
        l0=L.Linear(self.n_hidden, feat, initialW=init_w),
        dc1=L.Deconvolution2D(ch // 1, ch // 2, 4, 2, 1, initialW=init_w),
        dc2=L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w),
        dc3=L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w),
        dc4=L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w),
        bn0=L.BatchNormalization(feat),
        bn1=L.BatchNormalization(ch // 2),
        bn2=L.BatchNormalization(ch // 4),
        bn3=L.BatchNormalization(ch // 8),
    )
def __init__(self, n_hidden, activate='sigmoid', size=64, ch=512, wscale=0.02):
    """Generator variant without batch normalization; upsamples 8x and
    selects the output activation by name."""
    assert size % 8 == 0
    initial_size = size // 8
    self.n_hidden = n_hidden
    # Dispatch the output-activation name to its function.
    activations = {'sigmoid': F.sigmoid, 'tanh': F.tanh}
    if activate not in activations:
        raise ValueError('invalid activate function')
    self.activate = activations[activate]
    self.ch = ch
    self.initial_size = initial_size
    init_w = chainer.initializers.Normal(wscale)
    super(Generator, self).__init__(
        l0=L.Linear(self.n_hidden, initial_size * initial_size * ch,
                    initialW=init_w),
        dc1=L.Deconvolution2D(ch // 1, ch // 2, 4, 2, 1, initialW=init_w),
        dc2=L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w),
        dc3=L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w),
        dc4=L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w),
    )
def __init__(self, density=1, size=64, latent_size=128, channel=3):
    """DCGAN-style generator producing size x size images with `channel`
    output channels; `density` scales every layer's channel count.

    Fix: `size / 16` is true division under Python 3 and yields a float,
    which breaks the integer size arguments of L.Linear and
    L.BatchNormalization. Use floor division instead.
    """
    assert size % 16 == 0
    initial_size = size // 16  # was `size / 16`: float in Python 3
    super(Generator, self).__init__(
        g1=L.Linear(latent_size, initial_size * initial_size * 256 * density,
                    wscale=0.02 * math.sqrt(latent_size)),
        norm1=L.BatchNormalization(initial_size * initial_size * 256 * density),
        g2=L.Deconvolution2D(256 * density, 128 * density, 4, stride=2, pad=1,
                             wscale=0.02 * math.sqrt(4 * 4 * 256 * density)),
        norm2=L.BatchNormalization(128 * density),
        g3=L.Deconvolution2D(128 * density, 64 * density, 4, stride=2, pad=1,
                             wscale=0.02 * math.sqrt(4 * 4 * 128 * density)),
        norm3=L.BatchNormalization(64 * density),
        g4=L.Deconvolution2D(64 * density, 32 * density, 4, stride=2, pad=1,
                             wscale=0.02 * math.sqrt(4 * 4 * 64 * density)),
        norm4=L.BatchNormalization(32 * density),
        g5=L.Deconvolution2D(32 * density, channel, 4, stride=2, pad=1,
                             wscale=0.02 * math.sqrt(4 * 4 * 32 * density)),
    )
    self.density = density
    self.latent_size = latent_size
    self.initial_size = initial_size
def __init__(self, density=1, size=64, latent_size=100, channel=3):
    """Original-architecture generator (100-dim latent default); `density`
    scales the channel count of every layer.

    Fix: `size / 16` is true division under Python 3 and yields a float,
    breaking the integer size arguments of L.Linear and
    L.BatchNormalization. Use floor division instead.
    """
    assert size % 16 == 0
    initial_size = size // 16  # was `size / 16`: float in Python 3
    super(Generator_origin, self).__init__(
        g1=L.Linear(latent_size, initial_size * initial_size * 256 * density,
                    wscale=0.02 * math.sqrt(latent_size)),
        norm1=L.BatchNormalization(initial_size * initial_size * 256 * density),
        g2=L.Deconvolution2D(256 * density, 128 * density, 4, stride=2, pad=1,
                             wscale=0.02 * math.sqrt(4 * 4 * 256 * density)),
        norm2=L.BatchNormalization(128 * density),
        g3=L.Deconvolution2D(128 * density, 64 * density, 4, stride=2, pad=1,
                             wscale=0.02 * math.sqrt(4 * 4 * 128 * density)),
        norm3=L.BatchNormalization(64 * density),
        g4=L.Deconvolution2D(64 * density, 32 * density, 4, stride=2, pad=1,
                             wscale=0.02 * math.sqrt(4 * 4 * 64 * density)),
        norm4=L.BatchNormalization(32 * density),
        g5=L.Deconvolution2D(32 * density, channel, 4, stride=2, pad=1,
                             wscale=0.02 * math.sqrt(4 * 4 * 32 * density)),
    )
    self.density = density
    self.latent_size = latent_size
    self.initial_size = initial_size
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False, noise=False):
    """Conv/deconv + optional batch-normalization block.

    sample: 'down' (stride-2 conv), 'up' (stride-2 deconv), or 'c7s1'
    (7x7 stride-1 conv). Any other value is rejected.

    Fix: previously an unrecognized `sample` fell through all branches and
    silently built a block with no 'c' layer, failing obscurely at call
    time; now it raises ValueError immediately.
    """
    self.bn = bn
    self.activation = activation
    self.dropout = dropout
    self.noise = noise
    layers = {}
    w = chainer.initializers.Normal(0.02)
    if sample == 'down':
        layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
    elif sample == 'up':
        layers['c'] = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
    elif sample == 'c7s1':
        layers['c'] = L.Convolution2D(ch0, ch1, 7, 1, 3, initialW=w)
    else:
        raise ValueError('unknown sample mode: {}'.format(sample))
    if bn:
        if self.noise:
            # Gamma is disabled when noise is injected after this block.
            layers['batchnorm'] = L.BatchNormalization(ch1, use_gamma=False)
        else:
            layers['batchnorm'] = L.BatchNormalization(ch1)
    super(CBR, self).__init__(**layers)
def __init__(self, ch=128, wscale=0.02):
    """Residual image-to-image generator: three convolutions, nine
    residual blocks, then two deconvolutions and an output convolution."""
    w = chainer.initializers.Normal(wscale)
    layers = {
        'conv1': L.Convolution2D(3, ch // 4, 5, 1, 2, initialW=w),
        'conv2': L.Convolution2D(ch // 4, ch // 2, 3, 2, 1, initialW=w),
        'conv3': L.Convolution2D(ch // 2, ch, 3, 2, 1, initialW=w),
        'dc1': L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w),
        'dc2': L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w),
        'dc3': L.Convolution2D(ch // 4, 3, 5, 1, 2, initialW=w),
    }
    # Nine identical residual blocks without batch normalization.
    for i in range(1, 10):
        layers['res{}'.format(i)] = ResBlock(ch, ch, bn=False)
    super(GeneratorOld, self).__init__(**layers)
# noinspection PyCallingNonCallable,PyUnresolvedReferences
fcn32s.py — example source file
Project: Semantic-Segmentation-using-Adversarial-Networks
Author: oyam
(project source / file source — 22 views, 0 favorites, 0 likes, 0 comments)
def __init__(self, n_class=21):
    """FCN-32s: VGG16-style convolutional backbone, 1x1 scoring layer, and
    a stride-32 deconvolution that upsamples the score map."""
    self.train = True
    layers = {}
    # Backbone specs: (name, in_channels, out_channels, pad). All use
    # ksize=3, stride=1; conv1_1 carries the large pad=100 of the
    # original definition.
    backbone = [
        ('conv1_1', 3, 64, 100), ('conv1_2', 64, 64, 1),
        ('conv2_1', 64, 128, 1), ('conv2_2', 128, 128, 1),
        ('conv3_1', 128, 256, 1), ('conv3_2', 256, 256, 1),
        ('conv3_3', 256, 256, 1),
        ('conv4_1', 256, 512, 1), ('conv4_2', 512, 512, 1),
        ('conv4_3', 512, 512, 1),
        ('conv5_1', 512, 512, 1), ('conv5_2', 512, 512, 1),
        ('conv5_3', 512, 512, 1),
    ]
    for name, in_ch, out_ch, pad in backbone:
        layers[name] = L.Convolution2D(in_ch, out_ch, 3, stride=1, pad=pad)
    # Fully-convolutional classifier head.
    layers['fc6'] = L.Convolution2D(512, 4096, 7, stride=1, pad=0)
    layers['fc7'] = L.Convolution2D(4096, 4096, 1, stride=1, pad=0)
    layers['score_fr'] = L.Convolution2D(
        4096, n_class, 1, stride=1, pad=0, nobias=True,
        initialW=np.zeros((n_class, 4096, 1, 1)))
    # Upsampling initialized with a bilinear interpolation kernel.
    layers['upscore'] = L.Deconvolution2D(
        n_class, n_class, 64, stride=32, pad=0, nobias=True,
        initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=64))
    super(FCN32s, self).__init__(**layers)
def __init__(self):
    """Fast style-transfer network: downsampling convs, five residual
    blocks, then upsampling deconvs back to an RGB-sized output."""
    layers = dict(
        c1=L.Convolution2D(3, 32, 9, stride=1, pad=4),
        c2=L.Convolution2D(32, 64, 4, stride=2, pad=1),
        c3=L.Convolution2D(64, 128, 4, stride=2, pad=1),
        d1=L.Deconvolution2D(128, 64, 4, stride=2, pad=1),
        d2=L.Deconvolution2D(64, 32, 4, stride=2, pad=1),
        d3=L.Deconvolution2D(32, 3, 9, stride=1, pad=4),
    )
    # Residual blocks r1..r5 at the bottleneck resolution.
    for i in range(1, 6):
        layers['r{}'.format(i)] = ResidualBlock(128, 128)
    # Batch-normalization layers b1..b5 matching the channel progression.
    for i, n_ch in enumerate((32, 64, 128, 64, 32), start=1):
        layers['b{}'.format(i)] = L.BatchNormalization(n_ch)
    super(FastStyleNet, self).__init__(**layers)
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
    """DCGAN generator: latent vector projected to a small feature map and
    upsampled through four deconvolutions."""
    super(Generator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    with self.init_scope():
        init_w = chainer.initializers.Normal(wscale)
        n_units = bottom_width * bottom_width * ch  # flattened initial map
        self.l0 = L.Linear(self.n_hidden, n_units, initialW=init_w)
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=init_w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w)
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w)
        self.bn0 = L.BatchNormalization(n_units)
        self.bn1 = L.BatchNormalization(ch // 2)
        self.bn2 = L.BatchNormalization(ch // 4)
        self.bn3 = L.BatchNormalization(ch // 8)
def check_add_deconv_layers(self, nobias=True):
    """Add a deconvolutional layer for each convolutional layer already
    defined in the network."""
    # Already mirrored once; nothing more to add.
    if len(self.deconv_blocks) == len(self.conv_blocks):
        return
    for conv_block in self.conv_blocks:
        deconv_block = []
        for conv in conv_block:
            out_channels, in_channels, kh, kw = conv.W.data.shape
            on_gpu = isinstance(conv.W.data, cuda.ndarray)
            # Seed the deconvolution with (a host copy of) the conv weights.
            initialW = cuda.cupy.asnumpy(conv.W.data) if on_gpu else conv.W.data
            deconv = L.Deconvolution2D(out_channels, in_channels,
                                       (kh, kw), stride=conv.stride,
                                       pad=conv.pad,
                                       initialW=initialW,
                                       nobias=nobias)
            if on_gpu:
                deconv.to_gpu()
            self.add_link('de{}'.format(conv.name), deconv)
            deconv_block.append(deconv)
        self.deconv_blocks.append(deconv_block)
def __init__(self, n_class=21):
    """FCN-16s: VGG16-style backbone with scoring layers and two
    deconvolutions (x2 and x16) for upsampling."""
    self.n_class = n_class
    # All conv layers start from zero weights and biases.
    zero_init = {
        'initialW': chainer.initializers.Zero(),
        'initial_bias': chainer.initializers.Zero(),
    }
    super(FCN16s, self).__init__()
    with self.init_scope():
        # (name, in_channels, out_channels, ksize, stride, pad)
        conv_specs = [
            ('conv1_1', 3, 64, 3, 1, 100),
            ('conv1_2', 64, 64, 3, 1, 1),
            ('conv2_1', 64, 128, 3, 1, 1),
            ('conv2_2', 128, 128, 3, 1, 1),
            ('conv3_1', 128, 256, 3, 1, 1),
            ('conv3_2', 256, 256, 3, 1, 1),
            ('conv3_3', 256, 256, 3, 1, 1),
            ('conv4_1', 256, 512, 3, 1, 1),
            ('conv4_2', 512, 512, 3, 1, 1),
            ('conv4_3', 512, 512, 3, 1, 1),
            ('conv5_1', 512, 512, 3, 1, 1),
            ('conv5_2', 512, 512, 3, 1, 1),
            ('conv5_3', 512, 512, 3, 1, 1),
            ('fc6', 512, 4096, 7, 1, 0),
            ('fc7', 4096, 4096, 1, 1, 0),
            ('score_fr', 4096, n_class, 1, 1, 0),
            ('score_pool4', 512, n_class, 1, 1, 0),
        ]
        # setattr inside init_scope registers each link on the chain.
        for name, in_ch, out_ch, ksize, stride, pad in conv_specs:
            setattr(self, name,
                    L.Convolution2D(in_ch, out_ch, ksize, stride, pad,
                                    **zero_init))
        # Upsampling layers with fixed upsampling initial weights.
        self.upscore2 = L.Deconvolution2D(
            n_class, n_class, 4, 2, nobias=True,
            initialW=initializers.UpsamplingDeconvWeight())
        self.upscore16 = L.Deconvolution2D(
            n_class, n_class, 32, 16, nobias=True,
            initialW=initializers.UpsamplingDeconvWeight())
def __init__(self, n_class=21):
    """FCN-32s: VGG16-style backbone, 1x1 scoring layer, and a single
    stride-32 deconvolution for upsampling."""
    self.n_class = n_class
    # All conv layers start from zero weights and biases.
    zero_init = {
        'initialW': chainer.initializers.Zero(),
        'initial_bias': chainer.initializers.Zero(),
    }
    super(FCN32s, self).__init__()
    with self.init_scope():
        # (name, in_channels, out_channels, ksize, stride, pad)
        conv_specs = [
            ('conv1_1', 3, 64, 3, 1, 100),
            ('conv1_2', 64, 64, 3, 1, 1),
            ('conv2_1', 64, 128, 3, 1, 1),
            ('conv2_2', 128, 128, 3, 1, 1),
            ('conv3_1', 128, 256, 3, 1, 1),
            ('conv3_2', 256, 256, 3, 1, 1),
            ('conv3_3', 256, 256, 3, 1, 1),
            ('conv4_1', 256, 512, 3, 1, 1),
            ('conv4_2', 512, 512, 3, 1, 1),
            ('conv4_3', 512, 512, 3, 1, 1),
            ('conv5_1', 512, 512, 3, 1, 1),
            ('conv5_2', 512, 512, 3, 1, 1),
            ('conv5_3', 512, 512, 3, 1, 1),
            ('fc6', 512, 4096, 7, 1, 0),
            ('fc7', 4096, 4096, 1, 1, 0),
            ('score_fr', 4096, n_class, 1, 1, 0),
        ]
        # setattr inside init_scope registers each link on the chain.
        for name, in_ch, out_ch, ksize, stride, pad in conv_specs:
            setattr(self, name,
                    L.Convolution2D(in_ch, out_ch, ksize, stride, pad,
                                    **zero_init))
        self.upscore = L.Deconvolution2D(
            n_class, n_class, 64, 32, 0, nobias=True,
            initialW=initializers.UpsamplingDeconvWeight())
def __init__(self):
    """Generator1: expand a 100-dim code to 1024 channels with a 4x4
    deconvolution, then hand off to an UpSampling module."""
    w_init = chainer.initializers.Normal(0.02)
    super(Generator1, self).__init__(
        conv1=L.Deconvolution2D(100, 1024, 4, initialW=w_init),
        bn1=L.BatchNormalization(1024),
        up=UpSampling(4, 1024, 64),
    )
def __init__(self):
    """MyFcn: VGG16-style backbone with per-pool scoring layers and
    learned upsampling deconvolutions."""
    layers = {}
    # Backbone specs: (name, in_channels, out_channels); all are
    # 3x3 convolutions with stride=1, pad=1.
    backbone = [
        ('conv1_1', 3, 64), ('conv1_2', 64, 64),
        ('conv2_1', 64, 128), ('conv2_2', 128, 128),
        ('conv3_1', 128, 256), ('conv3_2', 256, 256), ('conv3_3', 256, 256),
        ('conv4_1', 256, 512), ('conv4_2', 512, 512), ('conv4_3', 512, 512),
        ('conv5_1', 512, 512), ('conv5_2', 512, 512), ('conv5_3', 512, 512),
    ]
    for name, in_ch, out_ch in backbone:
        layers[name] = L.Convolution2D(in_ch, out_ch, 3, stride=1, pad=1)
    # 1x1 scoring layers mapping features to class scores.
    for name, in_ch in (('score_pool3', 256), ('score_pool4', 512),
                        ('score_pool5', 512)):
        layers[name] = L.Convolution2D(in_ch, MyFcn.CLASSES, 1, stride=1, pad=0)
    # Learned upsampling at x2, x4, and x8.
    layers['upsample_pool4'] = L.Deconvolution2D(
        MyFcn.CLASSES, MyFcn.CLASSES, ksize=4, stride=2, pad=1)
    layers['upsample_pool5'] = L.Deconvolution2D(
        MyFcn.CLASSES, MyFcn.CLASSES, ksize=8, stride=4, pad=2)
    layers['upsample_final'] = L.Deconvolution2D(
        MyFcn.CLASSES, MyFcn.CLASSES, ksize=16, stride=8, pad=4)
    super(MyFcn, self).__init__(**layers)
    self.train = True
def __init__(self):
    """Four-layer deconvolutional decoder producing a 3-channel output;
    batch normalization follows each layer except the last."""
    layers = dict(
        dc1=L.Deconvolution2D(None, 256, 4, stride=1, pad=0, nobias=True),
        dc2=L.Deconvolution2D(256, 128, 4, stride=2, pad=1, nobias=True),
        dc3=L.Deconvolution2D(128, 64, 4, stride=2, pad=1, nobias=True),
        dc4=L.Deconvolution2D(64, 3, 4, stride=2, pad=1, nobias=True),
        bn_dc1=L.BatchNormalization(256),
        bn_dc2=L.BatchNormalization(128),
        bn_dc3=L.BatchNormalization(64),
    )
    super().__init__(**layers)
def __init__(self, size=None):
    """Four-layer deconvolutional decoder producing a 3-channel output.
    `size` is accepted for interface compatibility but unused here."""
    layers = dict(
        dc1=L.Deconvolution2D(None, 256, 4, stride=1, pad=0, nobias=True),
        dc2=L.Deconvolution2D(256, 128, 4, stride=2, pad=1, nobias=True),
        dc3=L.Deconvolution2D(128, 64, 4, stride=2, pad=1, nobias=True),
        dc4=L.Deconvolution2D(64, 3, 4, stride=2, pad=1, nobias=True),
        bn_dc1=L.BatchNormalization(256),
        bn_dc2=L.BatchNormalization(128),
        bn_dc3=L.BatchNormalization(64),
    )
    super().__init__(**layers)
def __init__(self, size=None):
    """Four-layer deconvolutional decoder producing a single-channel
    output (note dc3 uses pad=2, unlike the 3-channel variants).
    `size` is accepted for interface compatibility but unused here."""
    layers = dict(
        dc1=L.Deconvolution2D(None, 256, 4, stride=1, pad=0, nobias=True),
        dc2=L.Deconvolution2D(256, 128, 4, stride=2, pad=1, nobias=True),
        dc3=L.Deconvolution2D(128, 64, 4, stride=2, pad=2, nobias=True),
        dc4=L.Deconvolution2D(64, 1, 4, stride=2, pad=1, nobias=True),
        bn_dc1=L.BatchNormalization(256),
        bn_dc2=L.BatchNormalization(128),
        bn_dc3=L.BatchNormalization(64),
    )
    super().__init__(**layers)
def __init__(self, feature_map_nc, output_nc, w_init=None):
    """Encoder-decoder generator with eight 4x4 stride-2 convolutions down
    and eight deconvolutions up; batch normalization on every level except
    the first encoder layer and the final output layer."""
    nc = feature_map_nc
    layers = {}
    # Encoder channel multipliers for c1..c8.
    enc_mults = [1, 2, 4, 8, 8, 8, 8, 8]
    for i, m in enumerate(enc_mults, start=1):
        layers['c{}'.format(i)] = L.Convolution2D(
            None, m * nc, ksize=4, stride=2, pad=1, initialW=w_init)
    # Decoder channel multipliers for dc1..dc7; dc8 maps to output_nc.
    dec_mults = [8, 8, 8, 8, 4, 2, 1]
    for i, m in enumerate(dec_mults, start=1):
        layers['dc{}'.format(i)] = L.Deconvolution2D(
            None, m * nc, ksize=4, stride=2, pad=1, initialW=w_init)
    layers['dc8'] = L.Deconvolution2D(
        None, output_nc, ksize=4, stride=2, pad=1, initialW=w_init)
    # Batch normalization: b2..b8 on the encoder (skipping c1),
    # b1_d..b7_d on the decoder (skipping dc8).
    for i, m in enumerate(enc_mults[1:], start=2):
        layers['b{}'.format(i)] = L.BatchNormalization(m * nc)
    for i, m in enumerate(dec_mults, start=1):
        layers['b{}_d'.format(i)] = L.BatchNormalization(m * nc)
    super(Generator, self).__init__(**layers)
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False):
    """Conv/deconv + optional batch-normalization block: 'down' halves
    the resolution with a conv; any other mode doubles it with a deconv."""
    self.bn = bn
    self.activation = activation
    self.dropout = dropout
    w = chainer.initializers.Normal(0.02)
    if sample == 'down':
        conv_layer = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
    else:
        conv_layer = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
    layers = {'c': conv_layer}
    if bn:
        layers['batchnorm'] = L.BatchNormalization(ch1)
    super(CBR, self).__init__(**layers)
def __init__(self, nz=30):
    """Single-channel generator: project the latent code to 6x6x128, then
    apply three stride-2 deconvolutions."""
    gain = 0.02  # common scale for all wscale initializations
    super(Generator, self).__init__(
        l0z=L.Linear(nz, 6 * 6 * 128, wscale=gain * math.sqrt(nz)),
        dc1=L.Deconvolution2D(128, 64, 4, stride=2, pad=1,
                              wscale=gain * math.sqrt(4 * 4 * 128)),
        dc2=L.Deconvolution2D(64, 32, 4, stride=2, pad=1,
                              wscale=gain * math.sqrt(4 * 4 * 64)),
        dc3=L.Deconvolution2D(32, 1, 4, stride=2, pad=1,
                              wscale=gain * math.sqrt(4 * 4 * 32)),
        bn0l=L.BatchNormalization(6 * 6 * 128),
        bn0=L.BatchNormalization(128),
        bn1=L.BatchNormalization(64),
        bn2=L.BatchNormalization(32),
    )
def __init__(self, ch=512, wscale=0.02):
    """Discriminator shaped like an autoencoder: a conv encoder, a linear
    bottleneck, and a deconv decoder that mirrors the encoder."""
    init_w = chainer.initializers.Normal(wscale)
    self.ch = ch
    bottleneck = 4 * 4 * ch  # flattened size at the encoder's output
    super(Discriminator, self).__init__()
    with self.init_scope():
        # Encoder: channels grow from ch/8 up to ch.
        self.c0 = L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=init_w)
        self.c1 = L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=init_w)
        self.c2 = L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=init_w)
        self.c3 = L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=init_w)
        # Linear bottleneck down to 128 units and back.
        self.l4 = L.Linear(bottleneck, 128, initialW=init_w)
        self.l5 = L.Linear(128, bottleneck, initialW=init_w)
        # Decoder mirrors the encoder back to a 3-channel output.
        self.dc3 = L.Deconvolution2D(ch // 1, ch // 2, 4, 2, 1, initialW=init_w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w)
        self.dc1 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w)
        self.dc0 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w)
def __init__(self, z_dim):
    """Generator: five stride-2 deconvolutions from a z_dim latent code to
    a single-channel 28x28 output (outsize pinned on the last layer)."""
    layers = dict(
        l1=L.Deconvolution2D(z_dim, 128, 3, 2, 0),
        bn1=L.BatchNormalization(128),
        l2=L.Deconvolution2D(128, 128, 3, 2, 1),
        bn2=L.BatchNormalization(128),
        l3=L.Deconvolution2D(128, 128, 3, 2, 1),
        bn3=L.BatchNormalization(128),
        l4=L.Deconvolution2D(128, 128, 3, 2, 2),
        bn4=L.BatchNormalization(128),
        l5=L.Deconvolution2D(128, 1, 3, 2, 2, outsize=(28, 28)),
    )
    super(Generator, self).__init__(**layers)
    self.train = True
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False):
    """Conv/deconv + optional batch-normalization block; 'down' selects a
    stride-2 convolution, anything else a stride-2 deconvolution."""
    self.bn = bn
    self.activation = activation
    self.dropout = dropout
    init_w = chainer.initializers.Normal(0.02)
    downsampling = (sample == 'down')
    if downsampling:
        conv_layer = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=init_w)
    else:
        conv_layer = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=init_w)
    layers = {'c': conv_layer}
    if bn:
        layers['batchnorm'] = L.BatchNormalization(ch1)
    super(CBR, self).__init__(**layers)