# Shared imports for the examples below.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from typing import Optional, Sequence, Union

import numpy as np
import six
import tensorflow as tf

from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.contrib.keras.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops.init_ops import Constant
from tensorflow.python.ops.init_ops import Initializer
from tensorflow.python.ops.init_ops import Ones
from tensorflow.python.ops.init_ops import Orthogonal
from tensorflow.python.ops.init_ops import RandomNormal
from tensorflow.python.ops.init_ops import RandomUniform
from tensorflow.python.ops.init_ops import TruncatedNormal
from tensorflow.python.ops.init_ops import VarianceScaling
from tensorflow.python.ops.init_ops import Zeros
# Example source code using the Initializer class.
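# A minimal sketch of the two generic_utils helpers imported above: round-trip
# an initializer through its Keras-style config dict ({'class_name': ...,
# 'config': ...}). This assumes the init_ops classes expose get_config(), as
# they do in the TF 1.x releases this code targets.
def _demo_initializer_serialization():
    config = serialize_keras_object(TruncatedNormal(mean=0.0, stddev=0.05))
    restored = deserialize_keras_object(config,
                                        module_objects=globals(),
                                        printable_module_name='initializer')
    return restored  # an equivalent TruncatedNormal instance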
# Note: add_weight is a method excerpted from a Keras Layer subclass; the
# add_variable method, the constraints dict, and the backend module K are all
# provided by that class and the imports above.
def add_weight(self,
name,
shape,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
constraint=None):
"""Adds a weight variable to the layer.
Arguments:
name: String, the name for the weight variable.
shape: The shape tuple of the weight.
dtype: The dtype of the weight.
initializer: An Initializer instance (callable).
regularizer: An optional Regularizer instance.
trainable: A boolean, whether the weight should
be trained via backprop or not (assuming
that the layer itself is also trainable).
constraint: An optional Constraint instance.
Returns:
The created weight variable.
"""
if dtype is None:
dtype = K.floatx()
weight = self.add_variable(
name, shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, trainable=trainable)
if constraint is not None:
self.constraints[weight] = constraint
return weight
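# Hedged usage sketch for add_weight: in Keras it is called from a layer's
# build(). ScaleLayer below is an illustrative stand-in (not from the source),
# built on tf.layers' base Layer, which provides the add_variable method that
# add_weight wraps.
from tensorflow.python.layers import base as base_layers

class ScaleLayer(base_layers.Layer):
    """Multiplies its input by a learned per-feature scale."""

    def build(self, input_shape):
        # Same call pattern as add_weight above: name + shape + initializer.
        self.scale = self.add_variable(name="scale",
                                       shape=[input_shape[-1].value],
                                       initializer=Ones(),
                                       trainable=True)

    def call(self, inputs):
        return inputs * self.scale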
def linear(input: tf.Tensor,
output_size: int,
weight_initializer: Optional[Initializer] = None,
bias_initializer: Optional[Initializer] = None,
name: str = "linear") -> tf.Tensor:
"""
Apply a linear transformation to a tensor.
Parameters
----------
input: tf.Tensor
The tensor which should be linearly transformed
output_size: int
The desired output size of the linear transformation
    weight_initializer: Initializer, optional
        A custom initializer for the weight matrix of the linear transformation
    bias_initializer: Initializer, optional
        A custom initializer for the bias vector of the linear transformation
name: str, optional
A name for the operation (default "linear")
Returns
-------
tf.Tensor
The linearly transformed input tensor
"""
shape = input.get_shape().as_list()
with tf.variable_scope(name):
weights = tf.get_variable(name="weights",
shape=[shape[-1], output_size],
dtype=tf.float32,
initializer=weight_initializer)
bias = tf.get_variable(name="bias",
shape=[output_size],
initializer=bias_initializer)
return tf.matmul(input, weights) + bias
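# time_distributed_linear below calls flatten_time and restore_time, which are
# not part of this excerpt. The sketches here are plausible implementations,
# assuming the time-major [max_time, batch_size, num_features] layout its
# docstring describes.
def flatten_time(inputs: tf.Tensor) -> tf.Tensor:
    # Merge the time and batch axes so a per-step op such as linear() can be
    # applied to all steps at once:
    # [max_time, batch_size, num_features] -> [max_time * batch_size, num_features]
    num_features = inputs.shape.as_list()[-1]
    return tf.reshape(inputs, [-1, num_features])


def restore_time(inputs: tf.Tensor,
                 max_time: tf.Tensor,
                 batch_size: tf.Tensor,
                 num_features: int) -> tf.Tensor:
    # Inverse of flatten_time:
    # [max_time * batch_size, num_features] -> [max_time, batch_size, num_features]
    return tf.reshape(inputs, [max_time, batch_size, num_features])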
def time_distributed_linear(inputs: tf.Tensor,
output_size: int,
weight_initializer: Optional[Initializer] = None,
bias_initializer: Optional[Initializer] = None,
name: str = "time_dist_linear") -> tf.Tensor:
"""
Applies the same linear transformation to all time steps of a sequence.
Parameters
----------
inputs: tf.Tensor
The input sequences, of shape [max_time, batch_size, num_features]
output_size: int
The desired number of features in the output sequences
    weight_initializer: Initializer, optional
        A custom initializer for the weight matrix of the linear transformation
    bias_initializer: Initializer, optional
        A custom initializer for the bias vector of the linear transformation
name: str, optional
A name for the operation (default "time_dist_linear")
Returns
-------
tf.Tensor
The linearly transformed input sequences, of shape [max_time, batch_size, output_size]
"""
max_time, batch_size, _ = tf.unstack(tf.shape(inputs))
static_shape = inputs.shape.as_list()
with tf.variable_scope(name):
result = flatten_time(inputs)
result = linear(result,
output_size=output_size,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
result = restore_time(result, max_time, batch_size, output_size)
result.set_shape([static_shape[0], static_shape[1], output_size])
return result
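# Hedged usage sketch for time_distributed_linear(): project every step of a
# time-major sequence from 128 features down to 64.
def _demo_time_distributed_linear():
    sequences = tf.placeholder(tf.float32, shape=[None, None, 128])
    # Result shape: [max_time, batch_size, 64].
    return time_distributed_linear(sequences, output_size=64)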
def conv2d(input: tf.Tensor,
output_dim: int,
kernel_width: int = 5,
kernel_height: int = 5,
horizontal_stride: int = 2,
vertical_stride: int = 2,
weight_initializer: Optional[Initializer] = None,
bias_initializer: Optional[Initializer] = None,
name: str = "conv2d"):
"""
Apply a 2D-convolution to a tensor.
Parameters
----------
input: tf.Tensor
The tensor to which the convolution should be applied. Must be of shape [batch_size, height, width, channels]
output_dim: int
The number of convolutional filters
kernel_width: int, optional
The width of the convolutional filters (default 5)
kernel_height: int, optional
The height of the convolutional filters (default 5)
horizontal_stride: int, optional
The horizontal stride of the convolutional filters (default 2)
vertical_stride: int, optional
The vertical stride of the convolutional filters (default 2)
    weight_initializer: Initializer, optional
        A custom initializer for the weight matrices of the filters
    bias_initializer: Initializer, optional
        A custom initializer for the bias vectors of the filters
name: str, optional
A name for the operation (default "conv2d")
Returns
-------
tf.Tensor
The result of applying a 2D-convolution to the input tensor.
"""
shape = input.get_shape().as_list()
with tf.variable_scope(name):
weights = tf.get_variable(name="weights",
shape=[kernel_height, kernel_width, shape[-1], output_dim],
initializer=weight_initializer)
bias = tf.get_variable(name="bias",
shape=[output_dim],
initializer=bias_initializer)
conv = tf.nn.conv2d(input,
filter=weights,
strides=[1, vertical_stride, horizontal_stride, 1],
padding='SAME')
conv = tf.nn.bias_add(conv, bias)
return conv
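# Hedged usage sketch for conv2d(): with the default 5x5 kernels, stride 2,
# and 'SAME' padding, 32 filters halve the spatial resolution of 28x28 inputs.
def _demo_conv2d():
    images = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    # Result shape: [None, 14, 14, 32].
    return conv2d(images, output_dim=32,
                  weight_initializer=TruncatedNormal(stddev=0.02))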
def deconv2d(input: tf.Tensor,
output_shape: Sequence[Union[int, tf.Tensor]],
kernel_width: int = 5,
kernel_height: int = 5,
horizontal_stride: int = 2,
vertical_stride: int = 2,
weight_initializer: Optional[Initializer] = None,
bias_initializer: Optional[Initializer] = None,
name: str = "deconv2d"):
"""
Applies a 2D-deconvolution to a tensor.
Parameters
----------
input: tf.Tensor
The tensor to which a 2D-deconvolution should be applied. Must be of shape [batch_size, height, width, channels]
output_shape: list of int or tf.Tensor
The desired output shape.
kernel_width: int, optional
The width of the convolutional filters (default 5)
kernel_height: int, optional
The height of the convolutional filters (default 5)
horizontal_stride: int, optional
The horizontal stride of the convolutional filters (default 2)
vertical_stride: int, optional
The vertical stride of the convolutional filters (default 2)
    weight_initializer: Initializer, optional
        A custom initializer for the weight matrices of the filters
    bias_initializer: Initializer, optional
        A custom initializer for the bias vectors of the filters
name: str, optional
A name for the operation (default "deconv2d")
Returns
-------
tf.Tensor
The result of applying a 2D-deconvolution to the input tensor
"""
shape = input.get_shape().as_list()
with tf.variable_scope(name):
# filter : [height, width, output_channels, in_channels]
weights = tf.get_variable(name="weights",
shape=[kernel_height, kernel_width, output_shape[-1], shape[-1]],
initializer=weight_initializer)
biases = tf.get_variable(name="bias",
shape=[output_shape[-1]],
initializer=bias_initializer)
deconv = tf.nn.conv2d_transpose(input,
filter=weights,
output_shape=output_shape,
strides=[1, vertical_stride, horizontal_stride, 1])
deconv = tf.nn.bias_add(deconv, biases)
        # set_shape requires static dimensions, so any dimension supplied as a
        # tensor is recorded as unknown.
        static_output_shape = [dim if isinstance(dim, int) else None
                               for dim in output_shape]
        deconv.set_shape([None] + static_output_shape[1:])
return deconv
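# Hedged usage sketch for deconv2d(): upsample a 14x14 feature map back to
# 28x28. A concrete batch size is used here because output_shape is passed
# straight to tf.nn.conv2d_transpose.
def _demo_deconv2d():
    features = tf.placeholder(tf.float32, shape=[16, 14, 14, 32])
    # Result shape: [16, 28, 28, 1].
    return deconv2d(features, output_shape=[16, 28, 28, 1])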