def __init__(self, arch, is_training=False):
    self.arch = arch
    self._sanity_check()
    self.is_training = is_training

    with tf.name_scope('SpeakerRepr'):
        self.y_emb = self._unit_embedding(
            self.arch['y_dim'],
            self.arch['z_dim'],
            'y_embedding')

    with tf.variable_scope('Tau'):
        # Trainable temperature, kept strictly positive (floored at 0.1).
        self.tau = tf.nn.relu(
            10. * tf.Variable(
                tf.ones([1]),
                name='tau')) + 0.1

    # Templates so repeated calls to each sub-network share one set of variables.
    self._generate = tf.make_template(
        'Generator',
        self._generator)

    self._discriminate = tf.make_template(
        'Discriminator',
        self._discriminator)

    self._encode = tf.make_template(
        'Encoder',
        self._encoder)

    self._classify = tf.make_template(
        'Classifier',
        self._classifier)
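Wrapping each sub-network in tf.make_template is what lets the model call, say, self._generate several times while building only one set of generator variables. A minimal sketch of that sharing behaviour (my_net is a hypothetical stand-in; assumes TF 1.x):

import tensorflow as tf

def my_net(x):
    # Inside a template, get_variable resolves to the same variable on every call.
    w = tf.get_variable('w', shape=[], initializer=tf.ones_initializer())
    return x * w

net = tf.make_template('MyNet', my_net)
a = net(tf.constant(2.0))  # first call creates MyNet/w
b = net(tf.constant(3.0))  # second call reuses MyNet/w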
def ready_for_reuse(name):
    def decorator(func):
        # Create the template once, outside the wrapper: if it were rebuilt on
        # every call, each call would get a fresh uniquified scope
        # ('name', 'name_1', ...) and nothing would actually be reused.
        temp_func = tf.make_template(name, func)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return temp_func(*args, **kwargs)
        return wrapper
    return decorator
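A hypothetical use of the decorator above (the dense function is illustrative, not from the original source; assumes TF 1.x):

@ready_for_reuse('shared_dense')
def dense(x, units=8):
    w = tf.get_variable('w', shape=[x.shape[-1].value, units])
    return tf.matmul(x, w)

x1 = tf.placeholder(tf.float32, [None, 4])
x2 = tf.placeholder(tf.float32, [None, 4])
h1 = dense(x1)  # creates shared_dense/w
h2 = dense(x2)  # reuses shared_dense/w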
def __init__(self,
             f,
             g,
             num_layers=1,
             f_side_input=None,
             g_side_input=None,
             use_efficient_backprop=True):
    # f and g may each be a single callable or one callable per layer.
    if isinstance(f, list):
        assert len(f) == num_layers
    else:
        f = [f] * num_layers
    if isinstance(g, list):
        assert len(g) == num_layers
    else:
        g = [g] * num_layers

    scope_prefix = "revblock/revlayer_%d/"
    f_scope = scope_prefix + "f"
    g_scope = scope_prefix + "g"
    # Wrap each layer function in its own template so its variables are
    # created once and shared by every subsequent call.
    f = [
        tf.make_template(f_scope % i, fn, create_scope_now_=True)
        for i, fn in enumerate(f)
    ]
    g = [
        tf.make_template(g_scope % i, fn, create_scope_now_=True)
        for i, fn in enumerate(g)
    ]
    self.f = f
    self.g = g
    self.num_layers = num_layers
    self.f_side_input = f_side_input or []
    self.g_side_input = g_side_input or []
    self._use_efficient_backprop = use_efficient_backprop
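Assuming this constructor belongs to a reversible-block class (call it RevBlock, as the scope prefix suggests), construction might look like the following illustrative sketch:

def residual_fn(x):
    # Hypothetical per-layer function; any variable-creating callable works.
    return tf.layers.dense(x, x.shape[-1].value, activation=tf.nn.relu)

block = RevBlock(residual_fn, residual_fn, num_layers=2)
# block.f[0] lives under revblock/revlayer_0/f, block.f[1] under
# revblock/revlayer_1/f, and each template reuses its variables when called again.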
def build(self, *args, **kwargs):
    """Builds the module and sets the scope.

    This function is called automatically when the module is called.
    """
    if self._is_built:
        logging.info('Module `{}` is already built.'.format(self.name))
        return
    self._is_built = True
    self._template = tf.make_template(self.name, self._build, create_scope_now_=True)
    self._unique_name = self._template.variable_scope.name.split('/')[-1]
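Note that create_scope_now_=True makes the template claim its (uniquified) variable scope immediately, which is why self._template.variable_scope.name can be read on the very next line rather than only after the first call.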
def __call__(self, func):
    this = self
    templated_func = tf.make_template(this.scope, func)

    @wraps(func, assigned=TfTemplate.available_attrs(func))
    def inner(*args, **kwargs):
        return templated_func(*args, **kwargs)
    return inner
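As in the decorator earlier, the template is created once at decoration time, so every call to inner shares the variables created under this.scope.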
def layer(func):
    class Layer(object):

        def __init__(self, *args, **kwargs):
            self.func = func
            self.args = args
            self.kwargs = kwargs
            self.name = self.kwargs.get("name", self.func.__name__)
            self._template = tf.make_template(self.name, self.func, create_scope_now_=True)
            self._unique_name = self._template.variable_scope.name.split("/")[-1]
            self._summary_added = False

        def __call__(self, x):
            out = self.template(x, *self.args, **self.kwargs)
            self._layer_logging(x, out)
            self._add_summary()
            return out

        def __rrshift__(self, other):
            """Supports `input >> layer` piping."""
            return self.__call__(other)

        def _layer_logging(self, other, out):
            tf.logging.info("  {} {} {} -> {}".format(
                self.unique_name, "shape", str(other.get_shape()), str(out.get_shape())))

        def _add_summary(self):
            if not self.kwargs.get("summary"):
                return None
            if self.summary_added:
                return None
            for var in self.get_variables_in_scope():
                # TODO: different summary types
                tf.summary.scalar(var.name, tf.reduce_mean(var))
            self._summary_added = True

        def get_variables_in_scope(self):
            assert self.template._variables_created, "Variables not yet created or undefined."
            return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.variable_scope_name)

        @property
        def template(self):
            return self._template

        @property
        def unique_name(self):
            return self._unique_name

        @property
        def variable_scope_name(self):
            return self.template._variable_scope._name

        @property
        def summary_added(self):
            return self._summary_added

    return Layer
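A hypothetical layer built with this decorator, using the >> piping defined by __rrshift__ (illustrative names; assumes TF 1.x):

@layer
def my_dense(x, units=16):
    return tf.layers.dense(x, units)

x = tf.placeholder(tf.float32, [None, 8])
out = x >> my_dense(units=32)  # equivalent to my_dense(units=32)(x)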
def __init__(self, corpus, **opts):
    self.corpus = corpus
    self.opts = opts

    self.global_step = get_or_create_global_step()
    self.increment_global_step_op = tf.assign(self.global_step, self.global_step + 1, name="increment_global_step")

    self.corpus_size = get_corpus_size(self.corpus["train"])
    self.corpus_size_valid = get_corpus_size(self.corpus["valid"])

    self.word2idx, self.idx2word = build_vocab(self.corpus["train"])
    self.vocab_size = len(self.word2idx)

    self.generator_template = tf.make_template(GENERATOR_PREFIX, generator)
    self.discriminator_template = tf.make_template(DISCRIMINATOR_PREFIX, discriminator)

    self.enqueue_data, _, source, target, sequence_length = \
        prepare_data(self.corpus["train"], self.word2idx, num_threads=7, **self.opts)

    # TODO: option to either do pretrain or just generate?
    self.g_tensors_pretrain = self.generator_template(
        source, target, sequence_length, self.vocab_size, **self.opts)

    self.enqueue_data_valid, self.input_ph, source_valid, target_valid, sequence_length_valid = \
        prepare_data(self.corpus["valid"], self.word2idx, num_threads=1, **self.opts)
    self.g_tensors_pretrain_valid = self.generator_template(
        source_valid, target_valid, sequence_length_valid, self.vocab_size, **self.opts)

    self.decoder_fn = prepare_custom_decoder(
        sequence_length, self.g_tensors_pretrain.embedding_matrix, self.g_tensors_pretrain.output_projections)

    self.g_tensors_fake = self.generator_template(
        source, target, sequence_length, self.vocab_size, decoder_fn=self.decoder_fn, **self.opts)

    self.g_tensors_fake_valid = self.generator_template(
        source_valid, target_valid, sequence_length_valid, self.vocab_size, decoder_fn=self.decoder_fn, **self.opts)

    # TODO: using the rnn outputs from pretraining as "real" instead of target embeddings (aka professor forcing)
    self.d_tensors_real = self.discriminator_template(
        self.g_tensors_pretrain.rnn_outputs, sequence_length, is_real=True, **self.opts)

    # TODO: check to see if sequence_length is correct
    self.d_tensors_fake = self.discriminator_template(
        self.g_tensors_fake.rnn_outputs, None, is_real=False, **self.opts)

    self.g_tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=GENERATOR_PREFIX)
    self.d_tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=DISCRIMINATOR_PREFIX)
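Because generator_template wraps the generator in a single template, all four calls above (pretrain, pretrain-valid, fake, fake-valid) build against one shared set of generator weights, which is also why the trainable variables can be collected by scope prefix at the end.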
def build_model(self):
    sc = predictron_arg_scope()

    with tf.variable_scope('state'):
        with slim.arg_scope(sc):
            state = slim.conv2d(self.inputs, 32, [3, 3], scope='conv1')
            state = layers.batch_norm(state, activation_fn=tf.nn.relu, scope='conv1/preact')
            state = slim.conv2d(state, 32, [3, 3], scope='conv2')
            state = layers.batch_norm(state, activation_fn=tf.nn.relu, scope='conv2/preact')

    iter_template = tf.make_template('iter', self.iter_func, unique_name_='iter')

    rewards_arr = []
    gammas_arr = []
    lambdas_arr = []
    values_arr = []

    for k in range(self.max_depth):
        state, reward, gamma, lambda_, value = iter_template(state)
        rewards_arr.append(reward)
        gammas_arr.append(gamma)
        lambdas_arr.append(lambda_)
        values_arr.append(value)

    _, _, _, _, value = iter_template(state)
    # K + 1 elements
    values_arr.append(value)

    bs = tf.shape(self.inputs)[0]

    # [batch_size, K, maze_size]
    self.rewards = tf.stack(rewards_arr, axis=1)
    self.rewards = tf.reshape(self.rewards, [bs, self.max_depth, self.maze_size])
    # [batch_size, K + 1, maze_size]
    self.rewards = tf.concat(values=[tf.zeros(shape=[bs, 1, self.maze_size], dtype=tf.float32), self.rewards],
                             axis=1, name='rewards')

    # [batch_size, K, maze_size]
    self.gammas = tf.stack(gammas_arr, axis=1)
    self.gammas = tf.reshape(self.gammas, [bs, self.max_depth, self.maze_size])
    # [batch_size, K + 1, maze_size]
    self.gammas = tf.concat(values=[tf.ones(shape=[bs, 1, self.maze_size], dtype=tf.float32), self.gammas],
                            axis=1, name='gammas')

    # [batch_size, K, maze_size]
    self.lambdas = tf.stack(lambdas_arr, axis=1)
    self.lambdas = tf.reshape(self.lambdas, [-1, self.max_depth, self.maze_size])

    # [batch_size, K + 1, maze_size]
    self.values = tf.stack(values_arr, axis=1)
    self.values = tf.reshape(self.values, [-1, (self.max_depth + 1), self.maze_size])

    self.build_preturns()
    self.build_lambda_preturns()
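The single iter_template is what gives this model its weight-tied core: all max_depth unrolled steps, plus the final value-only call, reuse one set of variables under the 'iter' scope.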
def build_model(self):
    lstm_state = tf.contrib.rnn.LSTMStateTuple(self.initial_lstm_state[0], self.initial_lstm_state[1])
    encoder_network_template = tf.make_template('vpn_encoder', self.encoder_template)
    decoder_network_template = tf.make_template('vpn_decoder', self.decoder_template)

    with tf.name_scope('training_graph'):
        net_unwrap = []
        for i in range(self.config.truncated_steps):
            encoder_state, lstm_state = encoder_network_template(self.sequences[:, i], lstm_state)
            step_out = decoder_network_template(encoder_state, self.sequences[:, i + 1])
            net_unwrap.append(step_out)
        self.final_lstm_state = lstm_state

    with tf.name_scope('wrap_out'):
        net_unwrap = tf.stack(net_unwrap)
        self.output = tf.transpose(net_unwrap, [1, 0, 2, 3, 4])
        for i in range(self.config.truncated_steps):
            Logger.summarize_images(tf.expand_dims(tf.cast(tf.argmax(self.output[:, i], 3), tf.float32), 3),
                                    'frame_{0}'.format(i), 'vpn', 1)

    with tf.name_scope('loss'):
        labels = tf.one_hot(tf.cast(tf.squeeze(self.sequences[:, 1:]), tf.int32),
                            256,
                            axis=-1,
                            dtype=tf.float32)
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.output, labels=labels))
        self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.config.learning_rate).minimize(self.loss)

    with tf.name_scope('inference_graph'):
        lstm_state = tf.contrib.rnn.LSTMStateTuple(self.initial_lstm_state[0], self.initial_lstm_state[1])
        self.encoder_state, lstm_state = encoder_network_template(self.inference_prev_frame, lstm_state)
        self.inference_lstm_state = lstm_state
        self.inference_output = decoder_network_template(self.inference_encoder_state, self.inference_current_frame)

    with tf.name_scope('test_frames'):
        self.test_summaries = []
        for i in range(self.config.truncated_steps):
            Logger.summarize_images(tf.expand_dims(tf.cast(tf.argmax(self.inference_output, 3), tf.float32), 3),
                                    'test_frame_{0}'.format(i), 'vpn_test_{0}'.format(i), 1)
            self.test_summaries.append(tf.summary.merge_all('vpn_test_{0}'.format(i)))

    self.summaries = tf.summary.merge_all('vpn')
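Here too, the encoder and decoder templates are called in both the training and inference graphs, so inference automatically runs with the weights learned during training; only the surrounding name scopes differ.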