def Dropout(p, name='Dropout'):
    def dropout_layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            # def drop(): return tf.nn.dropout(x, p)
            # def no_drop(): return x
            # return tf.cond(is_training, drop, no_drop)
            if is_training:
                return tf.nn.dropout(x, p)
            else:
                return x
    return dropout_layer
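
Note that the `if is_training:` branch is resolved at graph-construction time, so it only behaves as intended when `is_training` is a Python bool. When `is_training` is fed as a boolean tensor (e.g. a placeholder), the commented-out `tf.cond` variant is the one that works at run time. A minimal sketch, assuming the same pre-1.0 TensorFlow API that provides `tf.variable_op_scope`:

import tensorflow as tf

def DropoutCond(p, name='Dropout'):
    # Variant accepting a boolean *tensor* for is_training; the branch
    # is deferred to run time via tf.cond.
    def dropout_layer(x, is_training):
        with tf.variable_op_scope([x], None, name):
            def drop(): return tf.nn.dropout(x, p)   # p is keep_prob here
            def no_drop(): return x
            return tf.cond(is_training, drop, no_drop)
    return dropout_layer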
def ReLU(name='ReLU'):
    def layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            return tf.nn.relu(x)
    return layer

def HardTanh(name='HardTanh'):
    def layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            return tf.clip_by_value(x, -1, 1)
    return layer
def View(shape, name='View'):
    # Reshape layer, following the same closure pattern as the other modules
    def layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            return tf.reshape(x, shape)
    return layer
def SpatialMaxPooling(kW, kH=None, dW=None, dH=None, padding='VALID',
                      name='SpatialMaxPooling'):
    kH = kH or kW    # kernel defaults to square
    dW = dW or kW    # stride defaults to kernel size (non-overlapping pooling)
    dH = dH or kH
    def max_pool(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            # tf.nn.max_pool reads ksize/strides as [batch, height, width, channels]
            return tf.nn.max_pool(x, ksize=[1, kW, kH, 1],
                                  strides=[1, dW, dH, 1], padding=padding)
    return max_pool
def Sequential(moduleList):
    def model(x, is_training=True):
        # Chain the modules, recording each activation
        output = x
        # with tf.variable_op_scope([x], None, name):
        for i, m in enumerate(moduleList):
            output = m(output, is_training=is_training)
            tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, output)
        return output
    return model
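
A quick usage sketch chaining the combinators above (the input tensor and shapes are illustrative):

x_input = tf.placeholder(tf.float32, [None, 32, 32, 3])  # illustrative NHWC input
model = Sequential([
    SpatialMaxPooling(2),
    ReLU(),
    Dropout(0.5),
])
y = model(x_input, is_training=True)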
def Concat(moduleList, dim=3):
    def model(x, is_training=True):
        # Apply every module to the same input, then concatenate along `dim`
        outputs = []
        for i, m in enumerate(moduleList):
            name = 'layer_' + str(i)
            with tf.variable_op_scope([x], name, 'Layer'):
                outputs.append(m(x, is_training=is_training))
        output = tf.concat(dim, outputs)
        return output
    return model
def Residual(moduleList, name='Residual'):
    m = Sequential(moduleList)
    def model(x, is_training=True):
        # Create model
        with tf.variable_op_scope([x], None, name):
            output = tf.add(m(x, is_training=is_training), x)
            return output
    return model
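
`Residual` adds the input back to the wrapped sub-network's output, so the modules inside must preserve the tensor shape. For example, reusing `x_input` from the sketch above:

res_block = Residual([ReLU(), HardTanh()])   # shape-preserving modules only
y = res_block(x_input, is_training=False)    # y = hardtanh(relu(x)) + x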
def repeat_op(repetitions, inputs, op, *args, **kwargs):
    """Build a sequential tower starting from inputs by applying an op repeatedly.

    It creates new scopes for each operation by increasing the counter.
    Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
    it will repeat the given op under the following variable_scopes:
        conv1/Conv
        conv1/Conv_1
        conv1/Conv_2

    Args:
        repetitions: number of repetitions.
        inputs: a tensor of size [batch_size, height, width, channels].
        op: an operation.
        *args: args for the op.
        **kwargs: kwargs for the op.

    Returns:
        a tensor result of applying the operation op, `repetitions` times.

    Raises:
        ValueError: if the op is unknown or wrong.
    """
    scope = kwargs.pop('scope', None)
    with tf.variable_op_scope([inputs], scope, 'RepeatOp'):
        tower = inputs
        for _ in range(repetitions):
            tower = op(tower, *args, **kwargs)
        return tower
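
A usage sketch with a toy op; any callable that takes and returns a tensor works (`ops.conv2d` in the docstring example is the slim-style convolution from the original project, not shown here):

def double(t):
    # Toy op: scales its input by 2
    return t * 2.0

tower = repeat_op(3, x_input, double, scope='double_tower')  # computes x * 8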
def policy(obs, theta, name='policy'):
    with tf.variable_op_scope([obs], name, name):
        h0 = tf.identity(obs, name='h0-obs')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name='h2')
        h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name='h3')
        action = tf.nn.tanh(h3, name='h4-action')
        return action
def qfunction(obs, act, theta, name="qfunction"):
    with tf.variable_op_scope([obs, act], name, name):
        h0 = tf.identity(obs, name='h0-obs')
        h0a = tf.identity(act, name='h0-act')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h1a = tf.concat(1, [h1, act])
        h2 = tf.nn.relu(tf.matmul(h1a, theta[2]) + theta[3], name='h2')
        qs = tf.matmul(h2, theta[4]) + theta[5]
        q = tf.squeeze(qs, [1], name='h3-q')
        return q
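
Both `policy` and `qfunction` expect `theta` as a flat list `[W1, b1, W2, b2, W3, b3]`. A minimal, hypothetical sketch of constructing it for the policy network (the layer widths and initialization ranges are illustrative, in the style of common DDPG implementations; for `qfunction`, `theta[2]` must instead have shape `[l1 + dimA, l2]` because of the concat with the action):

def make_theta(dimO, dimA, l1=400, l2=300):
    # Hypothetical helper: builds [W1, b1, W2, b2, W3, b3] for policy()
    return [
        tf.Variable(tf.random_uniform([dimO, l1], -0.05, 0.05)),
        tf.Variable(tf.zeros([l1])),
        tf.Variable(tf.random_uniform([l1, l2], -0.05, 0.05)),
        tf.Variable(tf.zeros([l2])),
        tf.Variable(tf.random_uniform([l2, dimA], -0.003, 0.003)),
        tf.Variable(tf.zeros([dimA])),
    ]

obs = tf.placeholder(tf.float32, [None, 3])          # illustrative observation
action = policy(obs, make_theta(dimO=3, dimA=1))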
def policy_network(state, theta, name='policy'):
    with tf.variable_op_scope([state], name, name):
        h0 = tf.identity(state, name='h0-state')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name='h2')
        h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name='h3')
        action = tf.nn.tanh(h3, name='h4-action')
        return action
def q_network(state, action, theta, name="q_network"):
    with tf.variable_op_scope([state, action], name, name):
        h0 = tf.identity(state, name='h0-state')
        h0a = tf.identity(action, name='h0-act')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h1a = tf.concat(1, [h1, action])
        h2 = tf.nn.relu(tf.matmul(h1a, theta[2]) + theta[3], name='h2')
        qs = tf.matmul(h2, theta[4]) + theta[5]
        q = tf.squeeze(qs, [1], name='h3-q')
        return q
def fractal_template(inputs,
                     num_columns,
                     block_fn,
                     block_asc,
                     joined=True,
                     is_training=True,
                     reuse=False,
                     scope=None):
    """Template for making fractal blocks.

    Given a function and a corresponding arg_scope, `fractal_template`
    will build a truncated fractal with `num_columns` columns.

    Args:
        inputs: a 4-D tensor `[batch_size, height, width, channels]`.
        num_columns: integer, the number of columns in the fractal.
        block_fn: function to be called within each fractal.
        block_asc: a function that returns an arg_scope for `block_fn`.
        joined: boolean, whether the output columns should be joined.
        is_training: boolean, whether the block is being built for training.
        reuse: whether or not the layer and its variables should be reused.
            To be able to reuse, the layer scope must be given.
        scope: optional scope for `variable_scope`.
    """
    def fractal_expand(inputs, num_columns, joined):
        '''Recursive helper function for building the fractal.'''
        with block_asc():
            output = lambda cols: join(cols, coin) if joined else cols
            if num_columns == 1:
                return output([block_fn(inputs)])
            left = block_fn(inputs)
            right = fractal_expand(inputs, num_columns - 1, joined=True)
            right = fractal_expand(right, num_columns - 1, joined=False)
            cols = [left] + right
            return output(cols)

    with tf.variable_op_scope([inputs], scope, 'Fractal',
                              reuse=reuse) as scope:
        coin = coin_flip()
        net = fractal_expand(inputs, num_columns, joined)
        return net
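
`join` and `coin_flip` are helpers from the surrounding project that this excerpt does not include. A minimal, hypothetical sketch of plausible definitions, using the FractalNet paper's mean-join and omitting drop-path for brevity:

def coin_flip(prob=0.5):
    # Hypothetical: a scalar Bernoulli "coin", intended to gate drop-path
    return tf.less(tf.random_uniform([]), prob)

def join(columns, coin):
    # Hypothetical mean-join: elementwise average of the columns.
    # (The real helper would also apply drop-path, gated by `coin`.)
    if len(columns) == 1:
        return columns[0]
    return tf.add_n(columns) / float(len(columns))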