Example source code for Python's variable_op_scope()
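The snippets below are collected from open-source projects; all of them use the pre-1.0 TensorFlow API tf.variable_op_scope(values, name_or_scope, default_name, ...), which opens a variable scope that falls back to default_name when the caller passes no explicit scope. A minimal sketch of the pattern (illustrative names; assumes a TensorFlow version <= 0.12, in which the function still exists):

import tensorflow as tf

def my_layer(x, scope=None, reuse=None):
    # `scope` may be None; the scope then gets the default name 'MyLayer',
    # auto-uniquified to 'MyLayer_1', 'MyLayer_2', ... on repeated calls.
    with tf.variable_op_scope([x], scope, 'MyLayer', reuse=reuse):
        w = tf.get_variable('w', [int(x.get_shape()[-1]), 10])
        return tf.matmul(x, w)

# TensorFlow >= 1.0 equivalent (variable_op_scope was removed):
# with tf.variable_scope(scope, default_name='MyLayer', values=[x], reuse=reuse):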

nn.py (project: tf_practice, author: juho-lee)
def batch_norm(input, is_train, scope=None, reuse=None, decay=0.9):
    shape = input.get_shape()
    num_out = shape[-1]  # normalize over the channel (last) dimension

    with tf.variable_op_scope([input], scope, 'BN', reuse=reuse):
        # Learnable per-channel shift (beta) and scale (gamma).
        beta = tf.get_variable('beta', [num_out],
                initializer=tf.constant_initializer(0.0),
                trainable=True)
        gamma = tf.get_variable('gamma', [num_out],
                initializer=tf.constant_initializer(1.0),
                trainable=True)

        # 4-D (NHWC) inputs reduce over batch and spatial axes;
        # 2-D inputs reduce over the batch axis only.
        batch_mean, batch_var = tf.nn.moments(input, [0,1,2], name='moments') \
                if len(shape)==4 else tf.nn.moments(input, [0], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            # Update the moving averages, then return the batch statistics.
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        # Training: batch statistics (updating the moving averages as a
        # side effect). Inference: the accumulated moving averages.
        mean, var = tf.cond(is_train,
                mean_var_with_update,
                lambda: (ema.average(batch_mean), ema.average(batch_var)))
        return tf.nn.batch_normalization(input, mean, var, beta, gamma, 1e-3)
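A usage sketch (illustrative shapes and names, not from the source). is_train must be a boolean tensor because tf.cond consumes it, and the moving averages are updated only when the training branch runs:

x = tf.placeholder(tf.float32, [None, 32, 32, 64])   # NHWC activations
is_train = tf.placeholder(tf.bool, [])               # feed True during training
y = batch_norm(x, is_train, scope='bn1')             # creates bn1/beta, bn1/gamma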
fractal_block.py (project: FractalNet, author: tensorpro)
def join(columns,
         coin):
  """Takes mean of the columns, applies drop path if
     `tflearn.get_training_mode()` is True.

  Args:
    columns: columns of fractal block.
    is_training: boolean in tensor form. Determines whether drop path
      should be used.
    coin: boolean in tensor form. Determines whether drop path is
     local or global.
  """
  if len(columns)==1:
    return columns[0]
  with tf.variable_op_scope(columns, None, "Join"):
    columns = tf.convert_to_tensor(columns)
    columns = tf.cond(tflearn.get_training_mode(),
                      lambda: drop_path(columns, coin),
                      lambda: columns)
    out = tf.reduce_mean(columns, 0)
  return out
nnUtils.py (project: BinaryNet.tf, author: itayhubara)
def BinarizedSpatialConvolution(nOutputPlane, kW, kH, dW=1, dH=1,
        padding='VALID', bias=True, reuse=None, name='BinarizedSpatialConvolution'):
    def b_conv2d(x, is_training=True):
        nInputPlane = x.get_shape().as_list()[3]
        with tf.variable_op_scope([x], None, name, reuse=reuse):
            w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane],
                            initializer=tf.contrib.layers.xavier_initializer_conv2d())
            bin_w = binarize(w)
            bin_x = binarize(x)
            '''
            Note that we use binarized versions of the input and the weights. Since the binarize
            function uses the STE (straight-through estimator), the gradients are computed with
            bin_x and bin_w, but the update is applied to w (the full-precision version).
            '''
            out = tf.nn.conv2d(bin_x, bin_w, strides=[1, dH, dW, 1], padding=padding)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane], initializer=tf.zeros_initializer)
                out = tf.nn.bias_add(out, b)
            return out
    return b_conv2d
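The binarized layers above call a binarize helper that is not shown in this listing. A plausible sketch of such a straight-through-estimator (STE) binarizer, using the standard gradient-override trick (an assumption for illustration, not necessarily the exact BinaryNet.tf implementation):

def binarize(x):
    # Forward pass: sign(x) in {-1, +1}. Backward pass: the gradient of
    # Sign is overridden to Identity, so gradients flow straight through.
    g = tf.get_default_graph()
    with g.gradient_override_map({'Sign': 'Identity'}):
        return tf.sign(x)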
nnUtils.py (project: BinaryNet.tf, author: itayhubara)
def BinarizedWeightOnlySpatialConvolution(nOutputPlane, kW, kH, dW=1, dH=1,
        padding='VALID', bias=True, reuse=None, name='BinarizedWeightOnlySpatialConvolution'):
    '''
    This function is used only at the first layer of the model, as we don't want to binarize the RGB images.
    '''
    def bc_conv2d(x, is_training=True):
        nInputPlane = x.get_shape().as_list()[3]
        with tf.variable_op_scope([x], None, name, reuse=reuse):
            w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane],
                            initializer=tf.contrib.layers.xavier_initializer_conv2d())
            bin_w = binarize(w)
            out = tf.nn.conv2d(x, bin_w, strides=[1, dH, dW, 1], padding=padding)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane], initializer=tf.zeros_initializer)
                out = tf.nn.bias_add(out, b)
            return out
    return bc_conv2d
nnUtils.py (project: BinaryNet.tf, author: itayhubara)
def BinarizedAffine(nOutputPlane, bias=True, name=None, reuse=None):
    def b_affineLayer(x, is_training=True):
        with tf.variable_op_scope([x], name, 'Affine', reuse=reuse):
            '''
            Note that we use binarized versions of the input (bin_x) and the weights (bin_w). Since
            the binarize function uses the STE, we calculate the gradients using bin_x and bin_w
            but update w (the full-precision version).
            '''
            bin_x = binarize(x)
            reshaped = tf.reshape(bin_x, [x.get_shape().as_list()[0], -1])
            nInputPlane = reshaped.get_shape().as_list()[1]
            w = tf.get_variable('weight', [nInputPlane, nOutputPlane], initializer=tf.contrib.layers.xavier_initializer())
            bin_w = binarize(w)
            output = tf.matmul(reshaped, bin_w)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane], initializer=tf.zeros_initializer)
                output = tf.nn.bias_add(output, b)
        return output
    return b_affineLayer
ops.py (project: Tensormodels, author: asheshjain399)
def repeat_op(repetitions, inputs, op, *args, **kwargs):
  """Build a sequential Tower starting from inputs by using an op repeatedly.

  It creates new scopes for each operation by increasing the counter.
  Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
    it will repeat the given op under the following variable_scopes:
      conv1/Conv
      conv1/Conv_1
      conv1/Conv_2

  Args:
    repetitions: number of repetitions.
    inputs: a tensor of size [batch_size, height, width, channels].
    op: an operation.
    *args: args for the op.
    **kwargs: kwargs for the op.

  Returns:
    a tensor: the result of applying op, repetitions times.
  Raises:
    ValueError: if the op is unknown or wrong.
  """
  scope = kwargs.pop('scope', None)
  with tf.variable_op_scope([inputs], scope, 'RepeatOp'):
    tower = inputs
    for _ in range(repetitions):
      tower = op(tower, *args, **kwargs)
    return tower
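A usage sketch matching the docstring example; ops.conv2d is the slim-style convolution wrapper these projects define in the same ops.py, and images is an illustrative input tensor:

net = repeat_op(3, images, ops.conv2d, 64, [3, 3], scope='conv1')
# Builds conv1/Conv -> conv1/Conv_1 -> conv1/Conv_2, feeding each
# repetition's output into the next.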
The same repeat_op snippet appears verbatim in ops.py of several other projects: piecewisecrf (Vaan5), the-neural-perspective (GokuMohandas), darkskies-challenge (LiberiFatali), dcn.tf (beopst), tensorflow_web_deploy (hetaoaoao), inception_v3 (Cyber-Neuron), and Net2Net (paengs).
fractal_block.py (project: FractalNet, author: tensorpro)
def coin_flip(prob=.5):
  """Random boolean variable, with `prob` chance of being true.

  Used to choose between local and global drop path.

  Args:
    prob:float, probability of being True.
  """
  with tf.variable_op_scope([],None,"CoinFlip"):
    coin = tf.random_uniform([1])[0]>prob
  return coin
fractal_block.py (project: FractalNet, author: tensorpro)
def drop_path(columns,
              coin):
  with tf.variable_op_scope([columns], None, "DropPath"):
    # Depending on the coin flip, either drop a subset of the columns
    # or keep a single randomly chosen column.
    out = tf.cond(coin,
                  lambda: drop_some(columns),
                  lambda: random_column(columns))
  return out
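A sketch of how the three FractalNet helpers compose (drop_some and random_column are defined elsewhere in fractal_block.py; columns is the list of parallel column outputs):

coin = coin_flip(prob=.5)      # pick local vs. global drop path at random
merged = join(columns, coin)   # mean of the columns, drop path in training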
nnUtils.py (project: BinaryNet.tf, author: itayhubara)
def SpatialConvolution(nOutputPlane, kW, kH, dW=1, dH=1,
        padding='VALID', bias=True, reuse=None, name='SpatialConvolution'):
    def conv2d(x, is_training=True):
        nInputPlane = x.get_shape().as_list()[3]
        with tf.variable_op_scope([x], None, name, reuse=reuse):
            w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane],
                            initializer=tf.contrib.layers.xavier_initializer_conv2d())
            out = tf.nn.conv2d(x, w, strides=[1, dH, dW, 1], padding=padding)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane], initializer=tf.zeros_initializer)
                out = tf.nn.bias_add(out, b)
            return out
    return conv2d
nnUtils.py (project: BinaryNet.tf, author: itayhubara)
def Affine(nOutputPlane, bias=True, name=None, reuse=None):
    def affineLayer(x, is_training=True):
        with tf.variable_op_scope([x], name, 'Affine', reuse=reuse):
            reshaped = tf.reshape(x, [x.get_shape().as_list()[0], -1])
            nInputPlane = reshaped.get_shape().as_list()[1]
            w = tf.get_variable('weight', [nInputPlane, nOutputPlane], initializer=tf.contrib.layers.xavier_initializer())
            output = tf.matmul(reshaped, w)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane], initializer=tf.zeros_initializer)
                output = tf.nn.bias_add(output, b)
        return output
    return affineLayer
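These factories return layer closures, so a network is assembled by applying the returned functions in sequence. A usage sketch with illustrative sizes:

conv1 = SpatialConvolution(64, 3, 3, padding='SAME', name='conv1')
fc1 = Affine(10, name='fc1')

h = conv1(images)        # creates conv1/weight and conv1/bias on first call
logits = fc1(h)          # flattens h, then creates fc1/weight and fc1/bias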

