import tensorflow as tf
import tensorflow.contrib.layers as tfl  # assumed alias: batch_norm's arguments match tf.contrib.layers


def batchnorm(input, orig_graph, is_training):
    # Batch normalization whose parameters (beta, gamma, moving statistics)
    # are initialized from `orig_graph` when available, via the project's
    # `get_val_or_initializer` helper (defined elsewhere).
    return tfl.batch_norm(
        input,
        decay=0.9,       # momentum of the moving-average statistics
        scale=True,      # learn a multiplicative `gamma` in addition to `beta`
        epsilon=1e-5,
        activation_fn=None,
        param_initializers={
            'beta': get_val_or_initializer(orig_graph,
                                           tf.constant_initializer(0.),
                                           'BatchNorm/beta'),
            'gamma': get_val_or_initializer(orig_graph,
                                            tf.random_normal_initializer(1.0, 0.02),
                                            'BatchNorm/gamma'),
            'moving_mean': get_val_or_initializer(orig_graph,
                                                  tf.constant_initializer(0.),
                                                  'BatchNorm/moving_mean'),
            'moving_variance': get_val_or_initializer(orig_graph,
                                                      tf.ones_initializer(),
                                                      'BatchNorm/moving_variance'),
        },
        is_training=is_training,
        fused=True,  # fused-kernel implementation => speedup
    )
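For context, here is a minimal usage sketch. Everything beyond the call to batchnorm is an assumption for illustration: the stub get_val_or_initializer simply falls back to the given initializer (the project's real helper presumably looks the value up in orig_graph first), and the conv layer and input shape are invented.

import tensorflow as tf
import tensorflow.contrib.layers as tfl

# Hypothetical stub so this sketch runs standalone; the real helper is
# defined elsewhere in the project and reads values from orig_graph.
def get_val_or_initializer(orig_graph, initializer, name):
    return initializer

x = tf.placeholder(tf.float32, [None, 64, 64, 3])   # NHWC image batch (illustrative shape)
h = tfl.conv2d(x, num_outputs=32, kernel_size=4, stride=2,
               activation_fn=None, scope='conv1')
h = batchnorm(h, orig_graph=None, is_training=True)  # fresh run, no source graph
h = tf.nn.relu(h)

Note that with is_training=True, tf.contrib.layers.batch_norm registers its moving-average updates in tf.GraphKeys.UPDATE_OPS, so those ops must be run (or wrapped as a control dependency of the train op) for the moving statistics to be maintained.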