def create_model(self, model_input, num_classes=2, l2_penalty=1e-8, **unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
        num_classes: The number of classes in the dataset.
    Returns:
        A dictionary with a tensor containing the probability predictions of the
        model in the 'predictions' key. The dimensions of the tensor are
        batch_size x (num_classes - 1)."""
net = slim.flatten(model_input)
    # With the default num_classes=2 this emits a single sigmoid unit,
    # i.e. plain binary logistic regression.
    output = slim.fully_connected(
        net, num_classes - 1, activation_fn=tf.nn.sigmoid,
        weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
def create_architecture(self, mode, tag=None):
training = mode == 'TRAIN'
testing = mode == 'TEST'
    assert tag is not None
# handle most of the regularizers here
weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
biases_regularizer = weights_regularizer
# list as many types of layers as possible, even if they are not used now
with arg_scope([slim.conv2d, slim.conv2d_in_plane,
slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
weights_regularizer=weights_regularizer,
biases_regularizer=biases_regularizer,
biases_initializer=tf.constant_initializer(0.0)):
self.build_network()
elbo = self.add_losses()
self._summary_op = tf.summary.merge_all()
return elbo
def fc_net(inp, layers, out_layers, scope, lamba=1e-3, activation=tf.nn.relu, reuse=None,
weights_initializer=initializers.xavier_initializer(uniform=False)):
with slim.arg_scope([slim.fully_connected],
activation_fn=activation,
normalizer_fn=None,
weights_initializer=weights_initializer,
reuse=reuse,
weights_regularizer=slim.l2_regularizer(lamba)):
if layers:
h = slim.stack(inp, slim.fully_connected, layers, scope=scope)
if not out_layers:
return h
else:
h = inp
outputs = []
for i, (outdim, activation) in enumerate(out_layers):
o1 = slim.fully_connected(h, outdim, activation_fn=activation, scope=scope + '_{}'.format(i + 1))
outputs.append(o1)
return outputs if len(outputs) > 1 else outputs[0]
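A usage sketch for fc_net (hypothetical shapes and scope name; assumes the same imports as above):

# Hypothetical usage sketch: two shared hidden layers, then two linear heads.
x = tf.placeholder(tf.float32, shape=(None, 25))
mu, log_sigma = fc_net(x, layers=[200, 200],
                       out_layers=[(1, None), (1, None)], scope='potential')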
def create_model(self, model_input, vocab_size, num_mixtures=None,
l2_penalty=1e-8, sub_scope="", original_input=None, **unused_params):
num_supports = FLAGS.num_supports
num_layers = FLAGS.hidden_chain_layers
relu_cells = FLAGS.hidden_chain_relu_cells
next_input = model_input
support_predictions = []
    for layer in range(num_layers):  # xrange in the original (Python 2 only)
sub_relu = slim.fully_connected(
next_input,
relu_cells,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope=sub_scope+"relu-%d"%layer)
sub_prediction = self.sub_model(sub_relu, vocab_size, sub_scope=sub_scope+"prediction-%d"%layer)
relu_norm = tf.nn.l2_normalize(sub_relu, dim=1)
        # Variant A: re-concatenate the *original* input with the newest features.
        next_input = tf.concat([model_input, relu_norm], axis=1)
support_predictions.append(sub_prediction)
main_predictions = self.sub_model(next_input, vocab_size, sub_scope=sub_scope+"-main")
support_predictions = tf.concat(support_predictions, axis=1)
return {"predictions": main_predictions, "support_predictions": support_predictions}
def create_model(self, model_input, vocab_size, num_mixtures=None,
l2_penalty=1e-8, sub_scope="", original_input=None, **unused_params):
num_supports = FLAGS.num_supports
num_layers = FLAGS.hidden_chain_layers
relu_cells = FLAGS.hidden_chain_relu_cells
next_input = model_input
support_predictions = []
    for layer in range(num_layers):  # xrange in the original (Python 2 only)
sub_relu = slim.fully_connected(
next_input,
relu_cells,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope=sub_scope+"relu-%d"%layer)
sub_prediction = self.sub_model(sub_relu, vocab_size, sub_scope=sub_scope+"prediction-%d"%layer)
relu_norm = tf.nn.l2_normalize(sub_relu, dim=1)
        # Variant B (contrast with the model above): concatenate with the running
        # input, so features from all earlier layers accumulate densely.
        next_input = tf.concat([next_input, relu_norm], axis=1)
support_predictions.append(sub_prediction)
main_predictions = self.sub_model(next_input, vocab_size, sub_scope=sub_scope+"-main")
support_predictions = tf.concat(support_predictions, axis=1)
return {"predictions": main_predictions, "support_predictions": support_predictions}
def _region_classification(self, fc7, is_training, initializer, initializer_bbox):
cls_score = slim.fully_connected(fc7, self._num_classes,
weights_initializer=initializer,
trainable=is_training,
activation_fn=None, scope='cls_score')
cls_prob = self._softmax_layer(cls_score, "cls_prob")
cls_pred = tf.argmax(cls_score, axis=1, name="cls_pred")
bbox_pred = slim.fully_connected(fc7, self._num_classes * 4,
weights_initializer=initializer_bbox,
trainable=is_training,
activation_fn=None, scope='bbox_pred')
self._predictions["cls_score"] = cls_score
self._predictions["cls_pred"] = cls_pred
self._predictions["cls_prob"] = cls_prob
self._predictions["bbox_pred"] = bbox_pred
return cls_prob, bbox_pred
def arg_scope(self):
"""Configure the neural network's layers."""
batch_norm_params = {
"is_training" : self.is_training,
"decay" : 0.9997,
"epsilon" : 0.001,
"variables_collections" : {
"beta" : None,
"gamma" : None,
"moving_mean" : ["moving_vars"],
"moving_variance" : ["moving_vars"]
}
}
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(
stddev=self._hparams.init_stddev),
weights_regularizer=slim.l2_regularizer(
self._hparams.regularize_constant),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
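A sketch of how the returned scope is typically consumed (hypothetical layer stack; `self.arg_scope()` as defined above):

# Hypothetical usage sketch.
with slim.arg_scope(self.arg_scope()):
    net = slim.conv2d(images, 32, [3, 3], scope='conv1')
    logits = slim.fully_connected(slim.flatten(net), num_classes,
                                  activation_fn=None, normalizer_fn=None, scope='logits')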
def up(self,h):
h_up = slim.fully_connected(h,self.hps.h_size,activation_fn=tf.nn.relu)
if self.var_type == 'discrete':
# q_z
self.K = K = self.hps.K
self.N = N = self.hps.N
h_up = slim.fully_connected(h_up,K*N,activation_fn=None)
self.logits_q = tf.reshape(h_up,[-1,K]) # unnormalized logits for N separate K-categorical distributions (shape=(batch_size*N,K))
h_out = slim.fully_connected(h_up,self.hps.h_size,activation_fn=None)
elif self.var_type == 'continuous':
hps = self.hps
z_size = hps.z_size
h_size = hps.h_size
            h_up = slim.fully_connected(h_up, h_size, activation_fn=None)
            # The original passed `h` to the next layer, silently discarding the
            # projection above; feeding `h_up` forward appears to be the intent.
            h_up = slim.fully_connected(h_up, z_size * 2 + h_size, activation_fn=None)
self.qz_mean, self.qz_logsd, h_out = split(h_up, 1, [z_size, z_size, h_size])
if self.hps.resnet:
return h + 0.2 * h_out
else:
return h_out
def ar_layer(z0,hps,n_hidden=10):
''' old iaf layer '''
# Repeat input
z_rep = tf.reshape(tf.tile(z0,[1,hps.z_size]),[-1,hps.z_size])
# make mask
mask = tf.sequence_mask(tf.range(hps.z_size),hps.z_size)[None,:,:]
mask = tf.reshape(tf.tile(mask,[tf.shape(z0)[0],1,1]),[-1,hps.z_size])
# predict mu and sigma
z_mask = z_rep * tf.to_float(mask)
mid = slim.fully_connected(z_mask,n_hidden,activation_fn=tf.nn.relu)
pars = slim.fully_connected(mid,2,activation_fn=None)
pars = tf.reshape(pars,[-1,hps.z_size,2])
mu, log_sigma = tf.unstack(pars,axis=2)
return mu, log_sigma
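A sketch of how such a masked layer is consumed in one IAF step (hypothetical; follows the standard gated update from the IAF paper):

# Hypothetical usage sketch.
mu, log_sigma = ar_layer(z0, hps)
gate = tf.sigmoid(log_sigma)          # gating in (0, 1)
z1 = gate * z0 + (1.0 - gate) * mu    # autoregressive affine update
# Density bookkeeping: log q(z1) = log q(z0) - reduce_sum(log(gate), axis=1)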
def __init__(self, lr, s_size, a_size):
self.state_in = tf.placeholder(shape=[1], dtype=tf.int32)
state_in_OH = slim.one_hot_encoding(self.state_in, s_size)
output = slim.fully_connected(state_in_OH,
a_size,
biases_initializer=None,
activation_fn=tf.nn.sigmoid,
weights_initializer=tf.ones_initializer())
self.output = tf.reshape(output, [-1])
self.chosen_action = tf.argmax(self.output, 0)
self.reward_holder = tf.placeholder(shape=[1], dtype=tf.float32)
self.action_holder = tf.placeholder(shape=[1], dtype=tf.int32)
self.responsible_weight = tf.slice(self.output, self.action_holder, [1])
self.loss = -(tf.log(self.responsible_weight) * self.reward_holder)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr)
self.update = optimizer.minimize(self.loss)
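A minimal training-step sketch for this bandit agent (hypothetical: the `Agent` class name wrapping this constructor and the environment values are assumptions):

# Hypothetical usage sketch.
agent = Agent(lr=0.001, s_size=3, a_size=4)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    state, reward = 0, 1.0  # supplied by the environment
    action = sess.run(agent.chosen_action, {agent.state_in: [state]})
    sess.run(agent.update, {agent.state_in: [state],
                            agent.action_holder: [action],
                            agent.reward_holder: [reward]})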
def setUp(self):
super(TestVirtualAdversarialMethod, self).setUp()
import tensorflow as tf
import tensorflow.contrib.slim as slim
def dummy_model(x):
net = slim.fully_connected(x, 60)
return slim.fully_connected(net, 10, activation_fn=None)
self.sess = tf.Session()
self.sess.as_default()
self.model = tf.make_template('dummy_model', dummy_model)
self.attack = VirtualAdversarialMethod(self.model, sess=self.sess)
# initialize model
with tf.name_scope('dummy_model'):
self.model(tf.placeholder(tf.float32, shape=(None, 1000)))
self.sess.run(tf.global_variables_initializer())
def setUp(self):
super(TestSaliencyMapMethod, self).setUp()
import tensorflow as tf
import tensorflow.contrib.slim as slim
def dummy_model(x):
net = slim.fully_connected(x, 60)
return slim.fully_connected(net, 10, activation_fn=None)
self.sess = tf.Session()
self.sess.as_default()
self.model = tf.make_template('dummy_model', dummy_model)
self.attack = SaliencyMapMethod(self.model, sess=self.sess)
# initialize model
with tf.name_scope('dummy_model'):
self.model(tf.placeholder(tf.float32, shape=(None, 1000)))
self.sess.run(tf.global_variables_initializer())
def adversarial_discriminator(net, layers, scope='adversary', leaky=False):
if leaky:
activation_fn = tflearn.activations.leaky_relu
else:
activation_fn = tf.nn.relu
with ExitStack() as stack:
stack.enter_context(tf.variable_scope(scope))
stack.enter_context(
slim.arg_scope(
[slim.fully_connected],
activation_fn=activation_fn,
weights_regularizer=slim.l2_regularizer(2.5e-5)))
for dim in layers:
net = slim.fully_connected(net, dim)
net = slim.fully_connected(net, 2, activation_fn=None)
return net
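A usage sketch (hypothetical feature shape; a two-way source-vs-target domain classifier, as used in adversarial domain adaptation):

# Hypothetical usage sketch.
features = tf.placeholder(tf.float32, shape=(None, 256))
domain_logits = adversarial_discriminator(features, layers=[500, 500])
# domain_logits: shape (batch_size, 2)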
def vgg_arg_scope(weight_decay=0.0005):
"""Defines the VGG arg scope.
Args:
weight_decay: The l2 regularization coefficient.
Returns:
An arg_scope.
"""
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
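The scope is consumed the usual slim way (the `vgg_16` network function is assumed to be defined alongside, as in the slim model zoo):

# Hypothetical usage sketch.
with slim.arg_scope(vgg_arg_scope(weight_decay=0.0005)):
    logits, end_points = vgg_16(images, num_classes=1000, is_training=True)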
def create_model(self, model_input, num_classes=10, l2_penalty=1e-8, **unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
num_classes: The number of classes in the dataset.
Returns:
        A dictionary with a tensor containing the unnormalized logits of the
        model in the 'predictions' key (activation_fn is None, so these are not
        probabilities). The dimensions of the tensor are batch_size x num_classes."""
net = slim.flatten(model_input)
output = slim.fully_connected(
net, num_classes, activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
def _createModel(self):
self.input = tf.placeholder('float', shape=[None,self.stateSize])
        # slim.fully_connected defaults to ReLU; make the first layer explicitly
        # linear so the tf.nn.relu below is the only activation applied.
        x1 = slim.fully_connected(self.input, 64, activation_fn=None, scope='fc/fc_1')
        x1 = tf.nn.relu(x1)
        # Q-values can be negative, so the output layer must be linear
        # (the original relied on the ReLU default here, which clips Q-values at 0).
        self.Qout = slim.fully_connected(x1, self.actionSize, activation_fn=None)
self.tdTarget = tf.placeholder(shape=[None, self.actionSize],dtype=tf.float32)
self.loss = tf.reduce_mean(tf.square(self.tdTarget - self.Qout ) )
self.trainer = tf.train.RMSPropOptimizer(learning_rate=0.00025)
self.updateModel = self.trainer.minimize(self.loss)
tdTargetLogger= tf.summary.scalar('tdTarget', tf.reduce_mean(self.tdTarget))
lossLogger= tf.summary.scalar('loss', self.loss)
self.log = tf.summary.merge([tdTargetLogger, lossLogger])
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):
with slim.arg_scope(
[slim.conv2d, slim.conv2d_transpose],
padding='SAME',
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,) as arg_sc:
with slim.arg_scope(
[slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
activation_fn=activation_fn,
normalizer_fn=normalizer_fn) as arg_sc:
return arg_sc
def squeezenet(inputs,
num_classes=1000,
is_training=True,
keep_prob=0.5,
spatial_squeeze=True,
scope='squeeze'):
"""
squeezenetv1.1
"""
with tf.name_scope(scope, 'squeeze', [inputs]) as sc:
end_points_collection = sc + '_end_points'
        # Collect outputs for conv2d, max_pool2d, avg_pool2d and fire_module.
with slim.arg_scope([slim.conv2d, slim.max_pool2d,
slim.avg_pool2d, fire_module],
outputs_collections=end_points_collection):
nets = squeezenet_inference(inputs, is_training, keep_prob)
nets = slim.conv2d(nets, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='logits')
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
nets = tf.squeeze(nets, [1, 2], name='logits/squeezed')
return nets, end_points
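A usage sketch (hypothetical input size; SqueezeNet v1.1 was trained on 227x227 crops):

# Hypothetical usage sketch.
images = tf.placeholder(tf.float32, shape=(None, 227, 227, 3))
logits, end_points = squeezenet(images, num_classes=1000, is_training=True)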
# From humancritic_tensorflow.py in the LearningFromHumanPreferences project (author: ZachisGit).
def _create_model(self, input_data, reuse=False):
    with self.graph.as_default():
        with tf.variable_scope("hp_model"):
            model = input_data
            # Programmatically define the hidden layers.
            for i in range(self.LAYER_COUNT):
                model = slim.fully_connected(model, self.NEURON_SIZE, activation_fn=tf.nn.relu,
                                             scope="hp_model_" + str(i),
                                             reuse=reuse, weights_initializer=self.initializer)
            output = slim.fully_connected(model, 1, scope="output",
                                          reuse=reuse, weights_initializer=self.initializer)
            # With constant mean 0 and variance 1 this batch_normalization is effectively
            # a fixed rescaling of the scalar output; retained from the original code.
            output = tf.nn.batch_normalization(output, tf.constant(0.0, shape=[1]),
                                               tf.constant(1.0, shape=[1]), None, None, 1e-5)
            return output
def create_model(self, model_input, vocab_size, l2_penalty=1e-4, **unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes."""
with tf.name_scope('MyNNModel0'):
h1Units = 2400
a1 = slim.fully_connected(
model_input, h1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC1')
output = slim.fully_connected(
a1, vocab_size, activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC2')
return {"predictions": output}
def create_model(self, model_input, vocab_size, l2_penalty=1e-8, **unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes."""
output = slim.fully_connected(
model_input, vocab_size, activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope = 'Logistic_FC')
return {"predictions": output}
def inference(images, keep_probability, phase_train=True,
bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# force in-place updates of mean and variance estimates
'updates_collections': None,
        # Moving averages end up in the trainable variables collection
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v2(images, is_training=phase_train,
dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
def inference(images, keep_probability, phase_train=True,
bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# force in-place updates of mean and variance estimates
'updates_collections': None,
        # Moving averages end up in the trainable variables collection
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v1(images, is_training=phase_train,
dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
def encoder(self, images, is_training):
activation_fn = leaky_relu # tf.nn.relu
weight_decay = 0.0
with tf.variable_scope('encoder'):
with slim.arg_scope([slim.batch_norm],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=self.batch_norm_params):
net = images
net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')
net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')
net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
net = slim.flatten(net)
fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
return fc1, fc2
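The two linear heads are presumably the mean and log-variance of q(z|x); a reparameterization-trick sketch (hypothetical usage, not from the original file):

# Hypothetical usage sketch.
mean, log_var = self.encoder(images, is_training=True)  # fc1, fc2 above
eps = tf.random_normal(tf.shape(mean))
z = mean + tf.exp(0.5 * log_var) * eps  # differentiable sample from q(z|x)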
def encoder(self, images, is_training):
activation_fn = leaky_relu # tf.nn.relu
weight_decay = 0.0
with tf.variable_scope('encoder'):
with slim.arg_scope([slim.batch_norm],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=self.batch_norm_params):
net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
net = slim.flatten(net)
fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
return fc1, fc2
def encoder(x):
# Variational posterior q(y|x), i.e. the encoder (shape=(batch_size, 200))
net = slim.stack(x,
slim.fully_connected,
[512, 256])
    # Unnormalized logits for N separate K-categorical distributions.
logits_y = tf.reshape(slim.fully_connected(net,
FLAGS.num_classes*FLAGS.num_cat_dists,
activation_fn=None),
[-1, FLAGS.num_cat_dists])
q_y = tf.nn.softmax(logits_y)
log_q_y = tf.log(q_y + 1e-20)
return logits_y, q_y, log_q_y
def decoder(tau, logits_y):
y = tf.reshape(gumbel_softmax(logits_y, tau, hard=False),
[-1, FLAGS.num_cat_dists, FLAGS.num_classes])
# Generative model p(x|y), i.e. the decoder (shape=(batch_size, 200))
net = slim.stack(slim.flatten(y),
slim.fully_connected,
[256, 512])
logits_x = slim.fully_connected(net,
784,
activation_fn=None)
# (shape=(batch_size, 784))
p_x = bernoulli(logits=logits_x)
return p_x
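The `gumbel_softmax` helper used above is not shown in this excerpt; a standard sketch follows (after Jang et al.; assumes TF1 APIs such as `keep_dims`):

def sample_gumbel(shape, eps=1e-20):
    # Standard Gumbel noise: -log(-log(U)), U ~ Uniform(0, 1).
    u = tf.random_uniform(shape, minval=0, maxval=1)
    return -tf.log(-tf.log(u + eps) + eps)

def gumbel_softmax(logits, temperature, hard=False):
    # Differentiable (approximately one-hot) sample from a categorical distribution.
    y = tf.nn.softmax((logits + sample_gumbel(tf.shape(logits))) / temperature)
    if hard:
        # Straight-through estimator: one-hot forward pass, soft gradients backward.
        y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype)
        y = tf.stop_gradient(y_hard - y) + y
    return y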