def _generator(self, z, dims, train_phase, activation=tf.nn.relu, scope_name="generator"):
    """Build the DCGAN generator network.

    Projects the latent vector ``z`` to a small spatial feature map, then
    upsamples it with strided transposed convolutions until the output
    reaches ``self.resized_image_size``.

    Args:
        z: Latent noise tensor of shape [batch, self.z_dim].
        dims: Per-layer channel sizes; dims[-1] is the output image depth.
        train_phase: Boolean flag/tensor forwarded to batch norm.
        activation: Nonlinearity applied after each batch-normalized layer.
        scope_name: Variable scope that holds all generator variables.

    Returns:
        Tensor of generated images squashed to [-1, 1] by tanh.
    """
    N = len(dims)
    # Spatial size of the first feature map; doubled by every deconv layer.
    image_size = self.resized_image_size // (2 ** (N - 1))
    with tf.variable_scope(scope_name):
        # Fully-connected projection of z, reshaped into a 4-D feature map.
        W_z = utils.weight_variable([self.z_dim, dims[0] * image_size * image_size], name="W_z")
        b_z = utils.bias_variable([dims[0] * image_size * image_size], name="b_z")
        h_z = tf.matmul(z, W_z) + b_z
        h_z = tf.reshape(h_z, [-1, image_size, image_size, dims[0]])
        h_bnz = utils.batch_norm(h_z, dims[0], train_phase, scope="gen_bnz")
        h = activation(h_bnz, name='h_z')
        utils.add_activation_summary(h)
        # Intermediate deconv layers: each doubles the spatial resolution.
        for index in range(N - 2):
            image_size *= 2
            W = utils.weight_variable([5, 5, dims[index + 1], dims[index]], name="W_%d" % index)
            b = utils.bias_variable([dims[index + 1]], name="b_%d" % index)
            # BUGFIX: tf.pack was renamed to tf.stack in TensorFlow 1.0 and
            # later removed; this also matches the other copy of this method.
            deconv_shape = tf.stack([tf.shape(h)[0], image_size, image_size, dims[index + 1]])
            h_conv_t = utils.conv2d_transpose_strided(h, W, b, output_shape=deconv_shape)
            h_bn = utils.batch_norm(h_conv_t, dims[index + 1], train_phase, scope="gen_bn%d" % index)
            h = activation(h_bn, name='h_%d' % index)
            utils.add_activation_summary(h)
        # Output layer: deconv to image depth, tanh squashing, no batch norm.
        image_size *= 2
        W_pred = utils.weight_variable([5, 5, dims[-1], dims[-2]], name="W_pred")
        b_pred = utils.bias_variable([dims[-1]], name="b_pred")
        deconv_shape = tf.stack([tf.shape(h)[0], image_size, image_size, dims[-1]])
        h_conv_t = utils.conv2d_transpose_strided(h, W_pred, b_pred, output_shape=deconv_shape)
        pred_image = tf.nn.tanh(h_conv_t, name='pred_image')
        utils.add_activation_summary(pred_image)
        return pred_image
# Example source snippets using bias_variable() (translated from a Chinese aggregator-page heading)
def _discriminator(self, input_images, dims, train_phase, activation=tf.nn.relu, scope_name="discriminator",
                   scope_reuse=False):
    """Build the DCGAN discriminator: a strided conv stack plus a linear head.

    Args:
        input_images: Batch of input images.
        dims: Per-layer channel sizes; dims[-1] is the logit dimension.
        train_phase: Boolean flag/tensor forwarded to batch norm.
        activation: Nonlinearity applied after each conv layer.
        scope_name: Variable scope holding all discriminator variables.
        scope_reuse: When True, reuse previously created variables.

    Returns:
        Tuple of (sigmoid probability, raw logit, last conv feature map).
    """
    N = len(dims)
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()
        h = input_images
        for index in range(N - 2):
            W = utils.weight_variable([5, 5, dims[index], dims[index + 1]], name="W_%d" % index)
            b = utils.bias_variable([dims[index + 1]], name="b_%d" % index)
            h_conv = utils.conv2d_strided(h, W, b)
            # The very first conv layer is not batch-normalized.
            if index == 0:
                h_bn = h_conv
            else:
                h_bn = utils.batch_norm(h_conv, dims[index + 1], train_phase, scope="disc_bn%d" % index)
            h = activation(h_bn, name="h_%d" % index)
            utils.add_activation_summary(h)
        # Flatten the final feature map and apply a linear layer for the logit.
        shape = h.get_shape().as_list()
        image_size = self.resized_image_size // (2 ** (N - 2))  # dims has input dim and output dim
        flat_size = image_size * image_size * shape[3]
        h_reshaped = tf.reshape(h, [self.batch_size, flat_size])
        W_pred = utils.weight_variable([flat_size, dims[-1]], name="W_pred")
        b_pred = utils.bias_variable([dims[-1]], name="b_pred")
        h_pred = tf.matmul(h_reshaped, W_pred) + b_pred
        return tf.nn.sigmoid(h_pred), h_pred, h
def fc_layer(x, shape, name):
    """Fully-connected layer with a sigmoid activation.

    ``shape`` is (num_inputs, num_outputs); variables are named
    "<name>/W" and "<name>/b".
    """
    _, num_outputs = shape
    weights = utils.weight_variable(shape, 1.0, name + "/W")
    biases = utils.bias_variable([num_outputs], 0.0, name + "/b")
    return tf.nn.sigmoid(tf.matmul(x, weights) + biases)
def bias_variable(shape, value, name):
    """Create a trainable bias variable of ``shape`` filled with ``value``."""
    return tf.Variable(tf.constant(value, shape=shape), name=name)
def conv_layer(x, filter_shape, stride, sigmoid, name):
    """1-D convolution layer with a sigmoid or tanh activation.

    ``filter_shape`` is (filter_width, num_inputs, num_outputs); the
    ``sigmoid`` flag selects sigmoid (True) or tanh (False) activation.
    """
    num_outputs = filter_shape[2]
    W = weight_variable(filter_shape, 0.1, name + "/W")
    b = bias_variable([num_outputs], 0.0, name + "/b")
    pre_activation = tf.nn.conv1d(x, W, stride=stride, padding='SAME') + b
    if sigmoid:
        return tf.nn.sigmoid(pre_activation)
    return tf.nn.tanh(pre_activation)
def _generator(self, z, dims, train_phase, activation=tf.nn.relu, scope_name="generator"):
    """DCGAN generator: project the latent code, then deconvolve up to full size.

    Args:
        z: Latent noise tensor of shape [batch, self.z_dim].
        dims: Per-layer channel sizes; dims[-1] is the output image depth.
        train_phase: Boolean flag/tensor forwarded to batch norm.
        activation: Nonlinearity applied after each batch-normalized layer.
        scope_name: Variable scope that holds all generator variables.

    Returns:
        Tensor of generated images squashed to [-1, 1] by tanh.
    """
    num_layers = len(dims)
    # Starting spatial size; doubled by every deconvolution below.
    size = self.resized_image_size // (2 ** (num_layers - 1))
    with tf.variable_scope(scope_name):
        # Linear projection of the latent code into the first feature map.
        W_z = utils.weight_variable([self.z_dim, dims[0] * size * size], name="W_z")
        b_z = utils.bias_variable([dims[0] * size * size], name="b_z")
        projected = tf.reshape(tf.matmul(z, W_z) + b_z, [-1, size, size, dims[0]])
        normed = utils.batch_norm(projected, dims[0], train_phase, scope="gen_bnz")
        h = activation(normed, name='h_z')
        utils.add_activation_summary(h)
        # Hidden deconvolution layers, each doubling spatial resolution.
        for index in range(num_layers - 2):
            size *= 2
            W = utils.weight_variable([5, 5, dims[index + 1], dims[index]], name="W_%d" % index)
            b = utils.bias_variable([dims[index + 1]], name="b_%d" % index)
            out_shape = tf.stack([tf.shape(h)[0], size, size, dims[index + 1]])
            conv_t = utils.conv2d_transpose_strided(h, W, b, output_shape=out_shape)
            bn = utils.batch_norm(conv_t, dims[index + 1], train_phase, scope="gen_bn%d" % index)
            h = activation(bn, name='h_%d' % index)
            utils.add_activation_summary(h)
        # Output layer: deconv to image depth, tanh squashing, no batch norm.
        size *= 2
        W_pred = utils.weight_variable([5, 5, dims[-1], dims[-2]], name="W_pred")
        b_pred = utils.bias_variable([dims[-1]], name="b_pred")
        out_shape = tf.stack([tf.shape(h)[0], size, size, dims[-1]])
        final = utils.conv2d_transpose_strided(h, W_pred, b_pred, output_shape=out_shape)
        pred_image = tf.nn.tanh(final, name='pred_image')
        utils.add_activation_summary(pred_image)
        return pred_image
def _discriminator(self, input_images, dims, train_phase, activation=tf.nn.relu, scope_name="discriminator",
                   scope_reuse=False):
    """Build the DCGAN discriminator: a strided conv stack plus a linear head.

    Args:
        input_images: Batch of input images.
        dims: Per-layer channel sizes; dims[-1] is the logit dimension.
        train_phase: Boolean flag/tensor forwarded to batch norm.
        activation: Nonlinearity applied after each conv layer.
        scope_name: Variable scope holding all discriminator variables.
        scope_reuse: When True, reuse previously created variables (e.g. for
            the second call on generated images).

    Returns:
        Tuple of (sigmoid probability, raw logit, last conv feature map).
    """
    N = len(dims)
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()
        h = input_images
        skip_bn = True  # First layer of discriminator skips batch norm
        for index in range(N - 2):
            W = utils.weight_variable([5, 5, dims[index], dims[index + 1]], name="W_%d" % index)
            b = utils.bias_variable([dims[index + 1]], name="b_%d" % index)
            h_conv = utils.conv2d_strided(h, W, b)
            if skip_bn:
                # First conv output goes straight to the activation.
                h_bn = h_conv
                skip_bn = False
            else:
                h_bn = utils.batch_norm(h_conv, dims[index + 1], train_phase, scope="disc_bn%d" % index)
            h = activation(h_bn, name="h_%d" % index)
            utils.add_activation_summary(h)
        # Flatten the final feature map and apply a linear layer for the logit.
        shape = h.get_shape().as_list()
        image_size = self.resized_image_size // (2 ** (N - 2))  # dims has input dim and output dim
        h_reshaped = tf.reshape(h, [self.batch_size, image_size * image_size * shape[3]])
        W_pred = utils.weight_variable([image_size * image_size * shape[3], dims[-1]], name="W_pred")
        b_pred = utils.bias_variable([dims[-1]], name="b_pred")
        h_pred = tf.matmul(h_reshaped, W_pred) + b_pred
        return tf.nn.sigmoid(h_pred), h_pred, h