Example source code for Python's truncated_normal_initializer()
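
tf.truncated_normal_initializer samples initial weights from a normal distribution and re-draws any value that falls more than two standard deviations from the mean, which keeps large outliers out of freshly initialized layers. A minimal TF 1.x sketch of the API (graph mode; the variable name w is illustrative):

import tensorflow as tf

init = tf.truncated_normal_initializer(mean=0.0, stddev=0.1)
w = tf.get_variable('w', shape=[3, 3], initializer=init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w))  # every entry lies within two stddevs of the mean

The snippets below are collected from public projects and show the initializer in context.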

cifar10.py (project: ml, author: hohoins)
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    # weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
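
A hedged usage sketch for the helper above (the shape and wd value are illustrative, and cross_entropy stands in for whatever data loss the model computes; _variable_on_cpu is assumed to be defined elsewhere in the file):

kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
                                     stddev=5e-2, wd=0.004)
# Decay terms accumulate in the 'losses' collection; the training objective
# is typically their sum plus the data term.
total_loss = tf.add_n(tf.get_collection('losses') + [cross_entropy],
                      name='total_loss')
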
bbbc006.py (project: dcan-tensorflow, author: lisjin)
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.
    Returns:
        Variable Tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = _variable_on_cpu(
        name,
        shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None and not tf.get_variable_scope().reuse:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
model.py (project: aapm_thoracic_challenge, author: xf4j)
def conv3d(input_, output_dim, f_size, is_training, scope='conv3d'):
    with tf.variable_scope(scope) as scope:
        # VGG network uses two 3*3 conv layers to effectively increase receptive field
        w1 = tf.get_variable('w1', [f_size, f_size, f_size, input_.get_shape()[-1], output_dim],
                             initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1 = tf.nn.conv3d(input_, w1, strides=[1, 1, 1, 1, 1], padding='SAME')
        b1 = tf.get_variable('b1', [output_dim], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.bias_add(conv1, b1)
        bn1 = tf.contrib.layers.batch_norm(conv1, is_training=is_training, scope='bn1',
                                           variables_collections=['bn_collections'])
        r1 = tf.nn.relu(bn1)

        w2 = tf.get_variable('w2', [f_size, f_size, f_size, output_dim, output_dim],
                             initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2 = tf.nn.conv3d(r1, w2, strides=[1, 1, 1, 1, 1], padding='SAME')
        b2 = tf.get_variable('b2', [output_dim], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.bias_add(conv2, b2)
        bn2 = tf.contrib.layers.batch_norm(conv2, is_training=is_training, scope='bn2',
                                           variables_collections=['bn_collections'])
        r2 = tf.nn.relu(bn2)
        return r2
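
A hedged call sketch (the 5-D NDHWC input shape and the placeholder names are illustrative):

volume = tf.placeholder(tf.float32, [None, 32, 32, 32, 1])  # batch, D, H, W, C
is_training = tf.placeholder(tf.bool)
features = conv3d(volume, output_dim=32, f_size=3, is_training=is_training)
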
model_cifar10.py (project: easy-tensorflow, author: khanhptnk)
def arg_scope(self):
    """Configure the neural network's layers."""
    batch_norm_params = {
      "is_training" : self.is_training,
      "decay" : 0.9997,
      "epsilon" : 0.001,
      "variables_collections" : {
        "beta" : None,
        "gamma" : None,
        "moving_mean" : ["moving_vars"],
        "moving_variance" : ["moving_vars"]
      }
    }

    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(
                          stddev=self._hparams.init_stddev),
                        weights_regularizer=slim.l2_regularizer(
                          self._hparams.regularize_constant),
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params) as sc:
      return sc
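
A hedged sketch of consuming the returned scope inside another method of the same model class (the images tensor and layer sizes are illustrative):

with slim.arg_scope(self.arg_scope()):
    net = slim.conv2d(images, 64, [3, 3], scope='conv1')  # inherits initializer, regularizer, BN, relu
    net = slim.max_pool2d(net, [2, 2], scope='pool1')
    logits = slim.fully_connected(slim.flatten(net), 10, activation_fn=None,
                                  normalizer_fn=None, scope='logits')
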
pose_model.py (project: Face-Pose-Net, author: fengju514)
def _variable_with_weight_decay(self, name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.                                                                                                               
    Note that the Variable is initialized with a truncated normal distribution.                                                                                                                             
    A weight decay is added only if one is specified.                                                                                                                                                       
    Args:                                                                                                                                                                                                   
    name: name of the variable                                                                                                                                                                              
    shape: list of ints                                                                                                                                                                                     
    stddev: standard deviation of a truncated Gaussian                                                                                                                                                      
    wd: add L2Loss weight decay multiplied by this float. If None, weight                                                                                                                                   
        decay is not added for this Variable.                                                                                                                                                               
    Returns:                                                                                                                                                                                                
    Variable Tensor                                                                                                                                                                                         
    """
    dtype = tf.float32 #if FLAGS.use_fp16 else tf.float32                                                                                                                                                    
    var = self._variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None:
      weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
      tf.add_to_collection('losses', weight_decay)
    return var
discriminator.py (project: SRGAN-tensorflow, author: zoharli)
def __init__(self, input, name='disc'):
        with tf.variable_scope(name):
            conv1 = conv_layer(input, [3, 3, 3, 64], 1)
            lrelu1 = leaky_relu(conv1)
            ochannels = [64, 128, 128, 256, 256, 512, 512]
            stride = [2, 1]
            block = [lrelu1]
            for i in xrange(7):
                block.append(self.get_block(block[-1], ochannels[i], stride[i % 2]))
            dense1 = tf.layers.dense(block[-1], 1024,
                                     kernel_initializer=tf.truncated_normal_initializer())
            lrelu2 = leaky_relu(dense1)
            self.dense2 = tf.layers.dense(lrelu2, 1,
                                          kernel_initializer=tf.truncated_normal_initializer(),
                                          activation=tf.sigmoid)
nnet_cartpole_pg.py 文件源码 项目:rlflow 作者: tpbarron 项目源码 文件源码 阅读 38 收藏 0 点赞 0 评论 0
def build_network(name_scope, env):
    w_init_dense = tf.truncated_normal_initializer() #contrib.layers.xavier_initializer()
    b_init = tf.constant_initializer(value=0.0)

    with tf.variable_scope(name_scope):
        input_tensor = tf.placeholder(tf.float32,
                                      shape=tf_utils.get_input_tensor_shape(env),
                                      name='policy_input_'+name_scope)
        net = tf.contrib.layers.fully_connected(input_tensor,
                                                32,  # alternatively env.action_space.n
                                                activation_fn=tf.nn.tanh,  # or tf.nn.sigmoid
                                                weights_initializer=w_init_dense,
                                                biases_initializer=b_init,
                                                scope='dense1_'+name_scope)
        net = tf.contrib.layers.fully_connected(net,
                                                env.action_space.n,
                                                weights_initializer=w_init_dense,
                                                biases_initializer=b_init,
                                                scope='dense2_'+name_scope)
        net = tf.contrib.layers.softmax(net)

    return [input_tensor], [net]
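
A hedged usage sketch (assumes a discrete-action Gym environment and that tf_utils.get_input_tensor_shape returns a [None, obs_dim] shape):

import gym

env = gym.make('CartPole-v0')
inputs, outputs = build_network('pg_policy', env)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    obs = env.reset()
    # Action probabilities for a batch of one observation.
    probs = sess.run(outputs[0], feed_dict={inputs[0]: [obs]})
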
net_model.py (project: 3D_CNN_jonas, author: 2015ZxEE)
def variable_with_weight_decay(name, shape, stddev, wd):
    """
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    Args:
        name   -> name of the variable
        shape  -> list of ints
        stddev -> standard deviation of a truncated Gaussian
        wd     -> add L2Loss weight decay multiplied by this float.
                        If None, weight decay is not added for this Variable.
    Rtns:
        var    -> variable tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var   = variable_on_cpu(name,shape,
                    tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
network.py (project: tf_base, author: ozansener)
def fc(self, input, num_out, name, relu=True):
        with tf.variable_scope(name) as scope:
            input_shape = input.get_shape()
            if input_shape.ndims == 4:
                # The input is spatial. Vectorize it first.
                dim = 1
                for d in input_shape[1:].as_list():
                    dim *= d
                feed_in = tf.reshape(input, [-1, dim])
            else:
                feed_in, dim = (input, input_shape[-1].value)
            weights = self.make_var('weights', shape=[dim, num_out],
                                    init_func=tf.truncated_normal_initializer(stddev=0.1))
            biases = self.make_var('biases', [num_out], init_func=tf.constant_initializer(0.1))
            op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
            fc = op(feed_in, weights, biases, name=scope.name)
            return fc
model.py (project: brats17, author: xf4j)
def conv3d(input_, output_dim, f_size, is_training, scope='conv3d'):
    with tf.variable_scope(scope) as scope:
        # VGG network uses two 3*3 conv layers to effectively increase receptive field
        w1 = tf.get_variable('w1', [f_size, f_size, f_size, input_.get_shape()[-1], output_dim],
                             initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1 = tf.nn.conv3d(input_, w1, strides=[1, 1, 1, 1, 1], padding='SAME')
        b1 = tf.get_variable('b1', [output_dim], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.bias_add(conv1, b1)
        bn1 = tf.contrib.layers.batch_norm(conv1, is_training=is_training, scope='bn1', decay=0.9,
                                           zero_debias_moving_mean=True, variables_collections=['bn_collections'])
        r1 = tf.nn.relu(bn1)

        w2 = tf.get_variable('w2', [f_size, f_size, f_size, output_dim, output_dim],
                             initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2 = tf.nn.conv3d(r1, w2, strides=[1, 1, 1, 1, 1], padding='SAME')
        b2 = tf.get_variable('b2', [output_dim], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.bias_add(conv2, b2)
        bn2 = tf.contrib.layers.batch_norm(conv2, is_training=is_training, scope='bn2', decay=0.9,
                                           zero_debias_moving_mean=True, variables_collections=['bn_collections'])
        r2 = tf.nn.relu(bn2)
        return r2
net_frame.py (project: DRLModule, author: halleanwoo)
def _action_norm_dist(inpt, num_actions, w_init, activation_fn_v, activation_fn_a):
    mu = layers.fully_connected(inpt, num_outputs=num_actions, weights_initializer=w_init, activation_fn=activation_fn_v)
    sigma = layers.fully_connected(inpt, num_outputs=num_actions, weights_initializer=w_init, activation_fn=activation_fn_a)
    return mu, sigma
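
A hedged sketch of turning the two heads into a stochastic policy (assumes TF 1.5+ where tf.distributions.Normal is available; features is an illustrative input tensor, and activation_fn_a should produce strictly positive values, e.g. tf.nn.softplus, so sigma is a valid scale):

mu, sigma = _action_norm_dist(features, num_actions=2,
                              w_init=tf.truncated_normal_initializer(stddev=0.3),
                              activation_fn_v=None, activation_fn_a=tf.nn.softplus)
dist = tf.distributions.Normal(loc=mu, scale=sigma)
action = dist.sample()            # one action per batch row
log_prob = dist.log_prob(action)  # used for the policy-gradient loss
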



# # cnn network frame
# def cnn_frame_continu(hiddens, kerners, strides, inpt, num_actions, scope=None, activation_fn=tf.nn.relu, activation_fn_mu=tf.nn.relu, activation_fn_sigma=tf.nn.relu, reuse=None):
#     with tf.variable_scope(scope, reuse=reuse):
#         out = inpt
#         for kerner, stride in zip(kerners, strides):
#             out = tf.nn.conv2d(input=out, filter=kerner, strides=stride, padding='SAME')
#         out = layers.flatten(out)
#         with tf.name_scope("out"):
#             mu = layers.fully_connected(out, num_outputs=num_actions, weights_initializer=tf.truncated_normal_initializer(0 , 0.3), activation_fn=None)
#             sigma = layers.fully_connected(out, num_outputs=num_actions, weights_initializer=tf.truncated_normal_initializer(0 , 0.3), activation_fn=tf.nn.softplus)
#         return mu, sigma
clock_model.py (project: deep-time-reading, author: felixduvallet)
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = _variable_on_cpu(
        name,
        shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
ocr.py (project: tf-cnn-lstm-ocr-captcha, author: Luonic)
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
general.py (project: qrn, author: uwnlp)
def variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = variable_on_cpu(name, shape,
                          tf.truncated_normal_initializer(stddev=stddev))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
fpn.py (project: canshi, author: hungsing92)
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):

  with slim.arg_scope(
      [slim.conv2d, slim.conv2d_transpose],
      padding='SAME',
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,) as arg_sc:
    with slim.arg_scope(
      [slim.fully_connected],
          weights_regularizer=slim.l2_regularizer(weight_decay),
          weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
          activation_fn=activation_fn,
          normalizer_fn=normalizer_fn) as arg_sc:
          return arg_sc
ops.py (project: DeepVideo, author: AniketBajpai)
def conv2d(input_, output_shape, is_train,
           k=5, s=2, stddev=0.01,
           name='conv2d', with_w=False):
    k_h = k_w = k
    s_h = s_w = s
    with tf.variable_scope(name):
        weights = tf.get_variable('weights', [k_h, k_w, input_.get_shape()[-1], output_shape[-1]],
                                  initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, weights, strides=[1, s_h, s_w, 1], padding='SAME')

        biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        bn = tf.contrib.layers.batch_norm(conv, center=True, scale=True, decay=0.9,
                                          is_training=is_train, updates_collections=None)
        out = lrelu(bn, name='lrelu')
        if with_w:
            return out, weights, biases
        else:
            return out
ops.py (project: DeepVideo, author: AniketBajpai)
def conv3d(input_, output_shape, is_train,
           k=4, s=2, stddev=0.01,
           name='conv3d', with_w=False):
    k_d = k_h = k_w = k
    s_d = s_h = s_w = s
    with tf.variable_scope(name):
        weights = tf.get_variable('weights', [k_d, k_h, k_w, input_.get_shape()[-1], output_shape[-1]],
                                  initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv3d(input_, weights, strides=[1, s_d, s_h, s_w, 1], padding='SAME')

        biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        bn = tf.contrib.layers.batch_norm(conv, center=True, scale=True, decay=0.9,
                                          is_training=is_train, updates_collections=None)
        out = lrelu(bn, name='lrelu')

        if with_w:
            return out, weights, biases
        else:
            return out
ops.py (project: PixelDCN, author: HongyangGao)
def conv(inputs, out_num, kernel_size, scope, data_type='2D', norm=True):
    if data_type == '2D':
        outs = tf.layers.conv2d(
            inputs, out_num, kernel_size, padding='same', name=scope+'/conv',
            kernel_initializer=tf.truncated_normal_initializer())
    else:
        shape = list(kernel_size) + [inputs.shape[-1].value, out_num]
        weights = tf.get_variable(
            scope+'/conv/weights', shape,
            initializer=tf.truncated_normal_initializer())
        outs = tf.nn.conv3d(
            inputs, weights, (1, 1, 1, 1, 1), padding='SAME',
            name=scope+'/conv')
    if norm:
        return tf.contrib.layers.batch_norm(
            outs, decay=0.9, epsilon=1e-5, activation_fn=tf.nn.relu,
            updates_collections=None, scope=scope+'/batch_norm')
    else:
        return tf.contrib.layers.batch_norm(
            outs, decay=0.9, epsilon=1e-5, activation_fn=None,
            updates_collections=None, scope=scope+'/batch_norm')
ops.py (project: PixelDCN, author: HongyangGao)
def deconv(inputs, out_num, kernel_size, scope, data_type='2D', **kws):
    if data_type == '2D':
        outs = tf.layers.conv2d_transpose(
            inputs, out_num, kernel_size, (2, 2), padding='same', name=scope,
            kernel_initializer=tf.truncated_normal_initializer())
    else:
        shape = list(kernel_size) + [out_num, out_num]
        input_shape = inputs.shape.as_list()
        out_shape = [input_shape[0]] + \
            list(map(lambda x: x*2, input_shape[1:-1])) + [out_num]
        weights = tf.get_variable(
            scope+'/deconv/weights', shape,
            initializer=tf.truncated_normal_initializer())
        outs = tf.nn.conv3d_transpose(
            inputs, weights, out_shape, (1, 2, 2, 2, 1), name=scope+'/deconv')
    return tf.contrib.layers.batch_norm(
        outs, decay=0.9, epsilon=1e-5, activation_fn=tf.nn.relu,
        updates_collections=None, scope=scope+'/batch_norm')
ops.py (project: hyperchamber, author: 255BITS)
def constrained_conv2d(input_, output_dim,
           k_h=6, k_w=6, d_h=2, d_w=2, stddev=0.02,
           name="conv2d"):
    assert k_h % d_h == 0
    assert k_w % d_w == 0
    # constrained to have stride be a factor of kernel width
    # this is intended to reduce convolution artifacts
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))

        # This is meant to reduce boundary artifacts
        padded = tf.pad(input_, [[0, 0],
            [k_h-1, 0],
            [k_w-1, 0],
            [0, 0]])
        # Convolve the padded tensor; 'VALID' keeps the manual padding from
        # being doubled by implicit framework padding.
        conv = tf.nn.conv2d(padded, w, strides=[1, d_h, d_w, 1], padding='VALID')

        biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        conv = tf.nn.bias_add(conv, biases)

        return conv
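
A hedged usage sketch (the input shape is illustrative; the kernel/stride pair must satisfy the k % d == 0 constraint, as with the 6/2 defaults):

images = tf.placeholder(tf.float32, [None, 64, 64, 3])
feat = constrained_conv2d(images, output_dim=32, k_h=6, k_w=6, d_h=2, d_w=2)
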
cifar10.py (project: facial-emotion-detection-dl, author: dllatas)
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
ops.py (project: 3D_Dense_Transformer_Networks, author: JohnYC1995)
def deconv(inputs, out_num, kernel_size, scope, data_type='2D'):
    if data_type == '2D':
        outputs = tf.layers.conv2d_transpose(
            inputs, out_num, kernel_size, (2, 2), padding='same', name=scope,
            kernel_initializer=tf.truncated_normal_initializer())
    else:
        shape = list(kernel_size) + [out_num, out_num]
        input_shape = inputs.shape.as_list()
        out_shape = [input_shape[0]] + \
            list(map(lambda x: x*2, input_shape[1:-1])) + [out_num]
        weights = tf.get_variable(
            scope+'/deconv/weights', shape, initializer=tf.truncated_normal_initializer())
        outputs = tf.nn.conv3d_transpose(
            inputs, weights, out_shape, (1, 2, 2, 2, 1), name=scope+'/deconv')
    return tf.contrib.layers.batch_norm(
        outputs, decay=0.9, epsilon=1e-5, activation_fn=tf.nn.relu,
        updates_collections=None, scope=scope+'/batch_norm')
model.py (project: IDNNs, author: ravidziv)
def initilizae_layer(self, name_scope, row_size, col_size, activation_function, last_hidden):
        # Build a layer of the network with weights and biases
        weights = get_scope_variable(name_scope=name_scope, var="weights",
                                     shape=[row_size, col_size],
                                     initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1.0 / np.sqrt(
                                         float(row_size))))
        biases = get_scope_variable(name_scope=name_scope, var='biases', shape=[col_size],
                                    initializer=tf.constant_initializer(0.0))

        self.weights_all.append(weights)
        self.biases_all.append(biases)
        variable_summaries(weights)
        variable_summaries(biases)
        with tf.variable_scope(name_scope) as scope:
            input = tf.matmul(last_hidden, weights) + biases
            if activation_function is None:
                output = input
            else:
                output = activation_function(input, name='output')
        self.inputs.append(input)
        self.hidden.append(output)
        return output
cifar10_gtf.py (project: deep_learning_study, author: jowettcz)
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
mlp_test.py (project: sonnet, author: deepmind)
def setUp(self):
    super(MLPTest, self).setUp()

    self.output_sizes = [11, 13, 17]
    self.batch_size = 5
    self.input_size = 7
    self.module_name = "mlp"
    self.initializers = {
        "w": tf.truncated_normal_initializer(stddev=1.0),
    }
    self.regularizers = {
        "w": tf.contrib.layers.l1_regularizer(scale=0.1),
    }
    self.partitioners = {
        "w": tf.fixed_size_partitioner(num_shards=2),
    }
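
A hedged sketch of how these dicts are typically consumed (assumes Sonnet v1, where snt.nets.MLP accepts initializers, partitioners, and regularizers keyed by "w" and "b"):

import sonnet as snt

mlp = snt.nets.MLP(output_sizes=self.output_sizes,
                   initializers=self.initializers,
                   regularizers=self.regularizers,
                   partitioners=self.partitioners)
inputs = tf.placeholder(tf.float32, [self.batch_size, self.input_size])
outputs = mlp(inputs)  # variables are created on first connection
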
basic.py (project: sonnet, author: deepmind)
def _build(self):
    """Connects the TrainableTensor module into the graph.

    Returns:
      A Tensor of shape as determined in the constructor.
    """
    if "w" not in self._initializers:
      stddev = 1 / math.sqrt(np.prod(self._shape))
      self._initializers["w"] = tf.truncated_normal_initializer(stddev=stddev)

    self._w = tf.get_variable("w",
                              shape=self._shape,
                              dtype=self._dtype,
                              initializer=self._initializers["w"],
                              partitioner=self._partitioners.get("w", None),
                              regularizer=self._regularizers.get("w", None))
    return self._w

