def _extract_argmax_and_embed(embedding,
                              output_projection=None,
                              update_embedding=True):
  """Get a loop_function that extracts the previous symbol and embeds it.

  Args:
    embedding: embedding tensor for symbols.
    output_projection: None or a pair (W, B). If provided, each fed previous
      output will first be multiplied by W and added B.
    update_embedding: Boolean; if False, the gradients will not propagate
      through the embeddings.

  Returns:
    A loop function.
  """
  def loop_function(prev, _):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(
          prev, output_projection[0], output_projection[1])
    prev_symbol = math_ops.argmax(prev, 1)
    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
    if not update_embedding:
      emb_prev = array_ops.stop_gradient(emb_prev)
    return emb_prev
  return loop_function
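
This loop function is meant to be handed to a decoder (for example, the loop_function argument of the old seq2seq decoders), which calls it at every step with the previous step's output logits. A minimal NumPy sketch of the argmax-then-embed step, using made-up embedding and logit values:

import numpy as np

# Hypothetical values: a vocabulary of 4 symbols, each embedded in 3 dims.
embedding = np.array([[0.1, 0.2, 0.3],
                      [0.4, 0.5, 0.6],
                      [0.7, 0.8, 0.9],
                      [1.0, 1.1, 1.2]])
# Decoder output logits for a batch of 2 examples over the 4 symbols.
prev = np.array([[0.1, 2.0, 0.3, 0.4],
                 [1.5, 0.2, 0.1, 0.0]])
prev_symbol = np.argmax(prev, axis=1)  # greedy symbol choice -> [1, 0]
emb_prev = embedding[prev_symbol]      # gather rows 1 and 0 of the table
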
def _predictions(logits, n_classes):
  """Returns predictions for the given logits and n_classes."""
  predictions = {}
  if n_classes == 2:
    predictions[_LOGISTIC] = math_ops.sigmoid(logits)
    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
  predictions[_PROBABILITIES] = nn.softmax(logits)
  predictions[_CLASSES] = array_ops.reshape(
      math_ops.argmax(logits, 1), shape=(-1, 1))
  return predictions
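
In the binary branch, the single logit column is padded with a column of zeros so that a softmax over the pair reproduces the sigmoid, and argmax reduces to thresholding the logit at zero. A NumPy sketch of that equivalence, with illustrative values:

import numpy as np

logits = np.array([[0.5], [-1.2], [2.0]])  # one logit per example
sigmoid = 1.0 / (1.0 + np.exp(-logits))
two_class = np.concatenate([np.zeros_like(logits), logits], axis=1)
e = np.exp(two_class - two_class.max(axis=1, keepdims=True))
softmax = e / e.sum(axis=1, keepdims=True)
assert np.allclose(softmax[:, 1:], sigmoid)  # softmax([0, z])[1] == sigmoid(z)
classes = np.argmax(two_class, axis=1).reshape(-1, 1)  # 1 iff logit > 0
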
def testPredictAsIterable(self):
  """Tests predict() and predict_proba() call with as_iterable set to True."""
  def _input_fn(num_epochs=None):
    features = {
        'age': tf.train.limit_epochs(tf.constant([[.9], [.1], [.1]]),
                                     num_epochs=num_epochs),
        'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                    indices=[[0, 0], [0, 1], [2, 0]],
                                    shape=[3, 2])
    }
    target = tf.constant([[1], [0], [0]], dtype=tf.int64)
    return features, target

  sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
      'language', hash_bucket_size=20)
  feature_columns = [
      tf.contrib.layers.embedding_column(sparse_column, dimension=1),
      tf.contrib.layers.real_valued_column('age')
  ]
  classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
      n_classes=3,
      n_samples=2,
      n_labels=1,
      feature_columns=feature_columns,
      hidden_units=[4, 4])
  classifier.fit(input_fn=_input_fn, steps=1)

  predict_input_fn = functools.partial(_input_fn, num_epochs=1)
  # Test the output of predict() and predict_proba() with as_iterable=True.
  predictions = list(
      classifier.predict(input_fn=predict_input_fn, as_iterable=True))
  predictions_proba = list(
      classifier.predict_proba(input_fn=predict_input_fn, as_iterable=True))
  self.assertTrue(np.array_equal(predictions,
                                 np.argmax(predictions_proba, 1)))
def testCustomMetrics(self):
  """Tests the use of a custom metric."""
  def _input_fn():
    features = {
        'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                    indices=[[0, 0], [0, 1], [2, 0]],
                                    shape=[3, 2])
    }
    target = tf.constant([[1], [0], [0]], dtype=tf.int64)
    return features, target

  def _my_metric_op(predictions, targets):
    """Simply multiplies predictions and targets to return [1, 0, 0]."""
    prediction_classes = math_ops.argmax(predictions, 1)
    return tf.mul(prediction_classes, tf.reshape(targets, [-1]))

  sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
      'language', hash_bucket_size=20)
  embedding_features = [
      tf.contrib.layers.embedding_column(sparse_column, dimension=1)
  ]
  classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
      n_classes=3,
      n_samples=2,
      n_labels=1,
      feature_columns=embedding_features,
      hidden_units=[4, 4],
      optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
      config=tf.contrib.learn.RunConfig(tf_random_seed=5))
  # Test that the model actually trains.
  classifier.fit(input_fn=_input_fn, steps=50)
  metrics = {('my_metric', 'probabilities'): _my_metric_op}
  evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1,
                                        metrics=metrics)
  self.assertListEqual([1, 0, 0], list(evaluate_output['my_metric']))
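
For the three examples in _input_fn, a well-trained model should predict classes [1, 0, 0], and _my_metric_op multiplies those predicted class indices element-wise with the flattened targets. A NumPy check of that arithmetic, with illustrative probabilities:

import numpy as np

probabilities = np.array([[0.1, 0.8, 0.1],   # argmax -> 1
                          [0.7, 0.2, 0.1],   # argmax -> 0
                          [0.6, 0.3, 0.1]])  # argmax -> 0
targets = np.array([[1], [0], [0]])
prediction_classes = np.argmax(probabilities, axis=1)
print(prediction_classes * targets.reshape(-1))  # -> [1 0 0]
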
def _convert_to_estimator_model_result(self, logits_fn_result):
  logits, loss, train_op = logits_fn_result
  return {
      Classifier.CLASS_OUTPUT:
          math_ops.argmax(logits, len(logits.get_shape()) - 1),
      Classifier.PROBABILITY_OUTPUT: nn.softmax(logits)
  }, loss, train_op
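
Using len(logits.get_shape()) - 1 as the argmax axis makes the class extraction rank-agnostic: it always reduces over the last (class) dimension. The NumPy equivalent, for illustration:

import numpy as np

logits = np.random.rand(4, 7, 3)  # e.g. [batch, time, classes]
classes = np.argmax(logits, axis=logits.ndim - 1)  # shape (4, 7)
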
def logits_to_predictions(self, logits, proba=False):
  if self.num_label_columns == 1:
    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
  if proba:
    return nn.softmax(logits)
  else:
    return math_ops.argmax(logits, 1)
def logits_to_predictions(self, logits, proba=False):
  if proba:
    raise ValueError(
        "logits to probabilities is not supported for _BinarySvmTargetColumn")
  logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
  return math_ops.argmax(logits, 1)

# TODO(zakaria): use contrib losses.
def _accuracy(probabilities, targets):
  predictions = math_ops.argmax(probabilities, 1)
  # Undo one-hot encoding of the targets to recover integer labels.
  labels = math_ops.argmax(targets, 1)
  return metric_ops.streaming_accuracy(predictions, labels)
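
Targets arrive one-hot encoded here, so a second argmax recovers the integer labels before comparison. A NumPy sketch:

import numpy as np

probabilities = np.array([[0.1, 0.9], [0.8, 0.2]])
targets = np.array([[0, 1], [1, 0]])            # one-hot labels
predictions = np.argmax(probabilities, axis=1)  # -> [1, 0]
labels = np.argmax(targets, axis=1)             # -> [1, 0]
accuracy = np.mean(predictions == labels)       # -> 1.0
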
def _predictions(probabilities, unused_targets):
  return math_ops.argmax(probabilities, 1)
def _mode(self):
  ret = math_ops.argmax(self.logits, dimension=self._batch_rank)
  ret = math_ops.cast(ret, self.dtype)
  ret.set_shape(self.get_batch_shape())
  return ret
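
The mode of a categorical distribution is the class with the largest logit, so argmax over the event dimension computes it directly. A NumPy sketch for a batch of two distributions:

import numpy as np

logits = np.array([[0.2, 1.5, -0.3],
                   [2.0, 0.1, 0.1]])
mode = np.argmax(logits, axis=-1).astype(np.int64)  # -> [1, 0]
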
def _logits_to_predictions(self, logits):
  """See `_MultiClassHead`."""
  predictions = {}
  predictions[prediction_key.PredictionKey.LOGITS] = logits
  logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
  predictions[prediction_key.PredictionKey.CLASSES] = math_ops.argmax(
      logits, 1)
  return predictions
def _single_value_predictions(
    activations, sequence_length, target_column, predict_probabilities):
  """Maps `activations` from the RNN to predictions for single-value models.

  If `predict_probabilities` is `False`, this function returns a `dict`
  containing a single entry with key `PREDICTIONS_KEY`. If
  `predict_probabilities` is `True`, it will contain a second entry with key
  `PROBABILITIES_KEY`. The value of this entry is a `Tensor` of probabilities
  with shape `[batch_size, num_classes]`.

  Args:
    activations: Output from an RNN. Should have dtype `float32` and shape
      `[batch_size, padded_length, ?]`.
    sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
      containing the length of each sequence in the batch. If `None`,
      sequences are assumed to be unpadded.
    target_column: An initialized `TargetColumn`, used to calculate
      predictions.
    predict_probabilities: A Python boolean, indicating whether probabilities
      should be returned. Should only be set to `True` for
      classification/logistic regression problems.

  Returns:
    A `dict` mapping strings to `Tensors`.
  """
  with ops.name_scope('SingleValuePrediction'):
    last_activations = select_last_activations(activations, sequence_length)
    if predict_probabilities:
      probabilities = target_column.logits_to_predictions(
          last_activations, proba=True)
      prediction_dict = {
          RNNKeys.PROBABILITIES_KEY: probabilities,
          RNNKeys.PREDICTIONS_KEY: math_ops.argmax(probabilities, 1)}
    else:
      predictions = target_column.logits_to_predictions(
          last_activations, proba=False)
      prediction_dict = {RNNKeys.PREDICTIONS_KEY: predictions}
    return prediction_dict
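
The selection done by select_last_activations can be sketched in NumPy: for each sequence, take the activation at index sequence_length - 1, then classify it. Shapes and values below are assumptions for illustration:

import numpy as np

batch_size, padded_length, num_classes = 2, 5, 3
activations = np.random.rand(batch_size, padded_length, num_classes)
sequence_length = np.array([3, 5])
last_activations = activations[np.arange(batch_size), sequence_length - 1]
predictions = np.argmax(last_activations, axis=1)  # shape [batch_size]
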
def predict(
    self, x=None, input_fn=None, axis=None, batch_size=None, outputs=None,
    as_iterable=True):
  """Returns predictions for given features.

  Args:
    x: features.
    input_fn: Input function. If set, x must be None.
    axis: Axis on which to argmax (for classification).
      Last axis is used by default.
    batch_size: Override default batch size.
    outputs: list of `str`, names of the outputs to predict.
      If `None`, returns all.
    as_iterable: If True, return an iterable which keeps yielding predictions
      for each example until inputs are exhausted. Note: The inputs must
      terminate if you want the iterable to terminate (e.g. be sure to pass
      num_epochs=1 if you are using something like read_batch_features).

  Returns:
    Numpy array of predicted classes or regression values (or an iterable of
    predictions if as_iterable is True).
  """
  results = self._estimator.predict(
      x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
      as_iterable=as_iterable)
  predict_name = (eval_metrics.INFERENCE_PROB_NAME if self.params.regression
                  else eval_metrics.INFERENCE_PRED_NAME)
  if as_iterable:
    return (x[predict_name] for x in results)
  else:
    return results[predict_name]