def conv_layer(self, bottom, in_channels, out_channels, name):
    with tf.variable_scope(name):
        filter_size_h, filter_size_w = self.__convKernelSize
        filt = tf.get_variable(name=name + "_filters",
                               shape=[filter_size_h, filter_size_w, in_channels, out_channels],
                               initializer=init_ops.random_normal_initializer(stddev=0.01))
        conv_biases = tf.get_variable(name=name + "_biases",
                                      shape=[out_channels],
                                      initializer=init_ops.random_normal_initializer(stddev=0.01))

        def _inner_conv(bott):
            conv = tf.nn.conv2d(bott, filt, [1, 1, 1, 1], padding='SAME')
            bias = tf.nn.bias_add(conv, conv_biases)
            relu = tf.nn.relu(bias)
            return relu

        # Apply the same (shared) filter to every slice along axis 0.
        _bottoms = tf.unstack(bottom, axis=0)
        output = tf.stack([_inner_conv(bott) for bott in _bottoms], axis=0)
        return output
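The unstack/stack wrapper above applies one shared filter to each slice along the leading axis, which is useful when the input carries an extra leading dimension such as time. A minimal standalone sketch of the same pattern (shapes and names here are illustrative):

import tensorflow as tf

# 5-D input: [time, batch, height, width, channels]
seq = tf.placeholder(tf.float32, [4, 2, 8, 8, 3])
filt = tf.get_variable("shared_filt", shape=[3, 3, 3, 16],
                       initializer=tf.random_normal_initializer(stddev=0.01))

# One conv2d per time step, all sharing the same filter variable.
frames = tf.unstack(seq, axis=0)
output = tf.stack([tf.nn.conv2d(f, filt, [1, 1, 1, 1], padding='SAME')
                   for f in frames], axis=0)  # [time, batch, 8, 8, 16]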
Python examples of random_normal_initializer()
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
                           seed=None):
    """Instantiates a variable with values drawn from a normal distribution.

    Arguments:
        shape: Tuple of integers, shape of returned Keras variable.
        mean: Float, mean of the normal distribution.
        scale: Float, standard deviation of the normal distribution.
        dtype: String, dtype of returned Keras variable.
        name: String, name of returned Keras variable.
        seed: Integer, random seed.

    Returns:
        A Keras variable, filled with drawn samples.

    Example:
    ```python
        # TensorFlow example
        >>> kvar = K.random_normal_variable((2,3), 0, 1)
        >>> kvar
        <tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
        >>> K.eval(kvar)
        array([[ 1.19591331,  0.68685907, -0.63814116],
               [ 0.92629528,  0.28055015,  1.70484698]], dtype=float32)
    ```
    """
    if dtype is None:
        dtype = floatx()
    shape = tuple(map(int, shape))
    tf_dtype = _convert_string_dtype(dtype)
    if seed is None:
        # ensure that randomness is conditioned by the Numpy RNG
        seed = np.random.randint(10e8)
    value = init_ops.random_normal_initializer(
        mean, scale, dtype=tf_dtype, seed=seed)(shape)
    return variable(value, dtype=dtype, name=name)
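Since every snippet on this page ultimately calls init_ops.random_normal_initializer, here is a minimal self-contained sketch of the initializer on its own (TF 1.x public API; the variable name is illustrative):

import tensorflow as tf

# Draws initial values from N(mean, stddev^2); seed makes it reproducible.
init = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=42)
w = tf.get_variable("w_demo", shape=[3, 4], initializer=init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w))  # 3x4 matrix of small normally distributed values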
def separable_conv(self, bottom, in_channels, out_channels, name):
    # Note: despite the name, this is a pointwise (1x1) convolution,
    # not a depthwise-separable convolution.
    with tf.variable_scope(name):
        filter_size_h = 1
        filter_size_w = 1
        filt = tf.get_variable(name=name + "_filters",
                               shape=[filter_size_h, filter_size_w, in_channels, out_channels],
                               initializer=init_ops.random_normal_initializer(stddev=0.01))
        conv_biases = tf.get_variable(name=name + "_biases",
                                      shape=[out_channels],
                                      initializer=init_ops.random_normal_initializer(stddev=0.01))
        conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
        bias = tf.nn.bias_add(conv, conv_biases)
        relu = tf.nn.relu(bias)
        return relu
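For comparison, a true depthwise-separable convolution in TF 1.x goes through tf.nn.separable_conv2d with separate depthwise and pointwise filters (a minimal sketch; shapes and names are illustrative):

import tensorflow as tf

inp = tf.placeholder(tf.float32, [None, 32, 32, 8])
# One 3x3 filter per input channel (channel multiplier 1) ...
depthwise = tf.get_variable("dw", shape=[3, 3, 8, 1],
                            initializer=tf.random_normal_initializer(stddev=0.01))
# ... followed by a 1x1 convolution that mixes across channels.
pointwise = tf.get_variable("pw", shape=[1, 1, 8, 16],
                            initializer=tf.random_normal_initializer(stddev=0.01))
out = tf.nn.separable_conv2d(inp, depthwise, pointwise,
                             strides=[1, 1, 1, 1], padding='SAME')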
def _conv(self, input, in_channels, out_channels, name):
    with tf.variable_scope(name):
        filter_size_h, filter_size_w = self.__convKernelSize
        filt = tf.get_variable(name=name + "_filters",
                               shape=[filter_size_h, filter_size_w, in_channels, out_channels],
                               initializer=init_ops.random_normal_initializer(stddev=0.01))
        conv_biases = tf.get_variable(name=name + "_biases",
                                      shape=[out_channels],
                                      initializer=init_ops.random_normal_initializer(stddev=0.01))
        conv = tf.nn.conv2d(input, filt, [1, 1, 1, 1], padding='SAME')
        bias = tf.nn.bias_add(conv, conv_biases)
        relu = tf.nn.relu(bias)
        return relu
def fc_layer(self, bottom, out_size, name):
    with tf.variable_scope(name):
        _, _height, _width, _channel = bottom.get_shape().as_list()
        size = _height * _width * _channel
        weights = tf.get_variable(name=name + "_weights", shape=[size, out_size],
                                  initializer=init_ops.random_normal_initializer(stddev=0.01))
        biases = tf.get_variable(name=name + "_biases", shape=[out_size],
                                 initializer=init_ops.random_normal_initializer(stddev=0.01))
        print(weights)
        x = tf.reshape(bottom, [-1, size])
        fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
        return fc
def _fc(self, bottom, out_size, name):
    with tf.variable_scope(name):
        _, size = bottom.get_shape().as_list()
        weights = tf.get_variable(name=name + "_weights", shape=[size, out_size],
                                  initializer=init_ops.random_normal_initializer(stddev=0.01))
        biases = tf.get_variable(name=name + "_biases", shape=[out_size],
                                 initializer=init_ops.random_normal_initializer(stddev=0.01))
        print(weights)
        fc = tf.nn.bias_add(tf.matmul(bottom, weights), biases)
        return fc
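Both fully connected variants follow the same pattern: fc_layer flattens a 4-D feature map before the matmul, while _fc expects an already flat [batch, size] tensor. A hypothetical call sequence (net stands for an instance of the enclosing class):

pool5 = tf.placeholder(tf.float32, [None, 7, 7, 512])  # conv feature map
fc6 = net.fc_layer(pool5, out_size=4096, name="fc6")   # reshapes, then matmul
fc7 = net._fc(fc6, out_size=1000, name="fc7")          # input is already flat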
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
    """Creates linear regression TensorFlow subgraph.

    Args:
        x: tensor or placeholder for input features.
        y: tensor or placeholder for target.
        init_mean: the mean value to use for initialization.
        init_stddev: the standard deviation to use for initialization.

    Returns:
        Predictions and loss tensors.

    Side effects:
        The variables linear_regression.weights and linear_regression.bias are
        initialized as follows. If init_mean is not None, then initialization
        will be done using a random normal initializer with the given init_mean
        and init_stddev. (These may be set to 0.0 each if a zero initialization
        is desirable for convex use cases.) If init_mean is None, then the
        uniform_unit_scaling_initializer will be used.
    """
    with vs.variable_scope('linear_regression'):
        scope_name = vs.get_variable_scope().name
        logging_ops.histogram_summary('%s.x' % scope_name, x)
        logging_ops.histogram_summary('%s.y' % scope_name, y)
        dtype = x.dtype.base_dtype
        y_shape = y.get_shape()
        if len(y_shape) == 1:
            output_shape = 1
        else:
            output_shape = y_shape[1]
        # Set up the requested initialization.
        if init_mean is None:
            weights = vs.get_variable(
                'weights', [x.get_shape()[1], output_shape], dtype=dtype)
            bias = vs.get_variable('bias', [output_shape], dtype=dtype)
        else:
            weights = vs.get_variable('weights', [x.get_shape()[1], output_shape],
                                      initializer=init_ops.random_normal_initializer(
                                          init_mean, init_stddev, dtype=dtype),
                                      dtype=dtype)
            bias = vs.get_variable('bias', [output_shape],
                                   initializer=init_ops.random_normal_initializer(
                                       init_mean, init_stddev, dtype=dtype),
                                   dtype=dtype)
        logging_ops.histogram_summary('%s.weights' % scope_name, weights)
        logging_ops.histogram_summary('%s.bias' % scope_name, bias)
        return losses_ops.mean_squared_error_regressor(x, y, weights, bias)
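As a usage sketch (assuming the tf.contrib.learn-style imports for vs, init_ops, logging_ops, and losses_ops that surround this function), the zero-initialized convex case described in the docstring looks like:

x = tf.placeholder(tf.float32, [None, 10])
y = tf.placeholder(tf.float32, [None, 1])
# init_mean=0.0 selects the random-normal branch; with stddev 0.0 the
# draw is deterministic, i.e. zero initialization for the convex case.
predictions, loss = linear_regression(x, y, init_mean=0.0, init_stddev=0.0)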
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
    """Creates linear regression TensorFlow subgraph.

    Args:
        x: tensor or placeholder for input features.
        y: tensor or placeholder for labels.
        init_mean: the mean value to use for initialization.
        init_stddev: the standard deviation to use for initialization.

    Returns:
        Predictions and loss tensors.

    Side effects:
        The variables linear_regression.weights and linear_regression.bias are
        initialized as follows. If init_mean is not None, then initialization
        will be done using a random normal initializer with the given init_mean
        and init_stddev. (These may be set to 0.0 each if a zero initialization
        is desirable for convex use cases.) If init_mean is None, then the
        uniform_unit_scaling_initializer will be used.
    """
    with vs.variable_scope('linear_regression'):
        scope_name = vs.get_variable_scope().name
        summary.histogram('%s.x' % scope_name, x)
        summary.histogram('%s.y' % scope_name, y)
        dtype = x.dtype.base_dtype
        y_shape = y.get_shape()
        if len(y_shape) == 1:
            output_shape = 1
        else:
            output_shape = y_shape[1]
        # Set up the requested initialization.
        if init_mean is None:
            weights = vs.get_variable(
                'weights', [x.get_shape()[1], output_shape], dtype=dtype)
            bias = vs.get_variable('bias', [output_shape], dtype=dtype)
        else:
            weights = vs.get_variable('weights', [x.get_shape()[1], output_shape],
                                      initializer=init_ops.random_normal_initializer(
                                          init_mean, init_stddev, dtype=dtype),
                                      dtype=dtype)
            bias = vs.get_variable('bias', [output_shape],
                                   initializer=init_ops.random_normal_initializer(
                                       init_mean, init_stddev, dtype=dtype),
                                   dtype=dtype)
        summary.histogram('%s.weights' % scope_name, weights)
        summary.histogram('%s.bias' % scope_name, bias)
        return losses_ops.mean_squared_error_regressor(x, y, weights, bias)
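This variant is identical to the previous one except that summaries are recorded through summary.histogram, the API that replaced the deprecated logging_ops.histogram_summary in later TF 1.x releases.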