def __init__(self,
             input_channels,
             output_shape,
             num_filters,
             hidden_dim,
             init="glorot_uniform"):
    """Configure a per-channel filter generator and build its graph.

    Parameters
    ----------
    input_channels : int
        Number of channels in the input the filters will be applied to.
    output_shape : list_like
        Size of the generated matrix (x, y).
    num_filters : int
        Number of filters to generate per input channel.
    hidden_dim : int
        Number of nodes in the hidden layer.
    init : str
        Keras initializer to use for weights.
    """
    self.input_channels = input_channels
    self.num_filters = num_filters
    self.output_shape = output_shape
    self.hidden_dim = hidden_dim
    # Resolve initializer names to callables; biases always start at zero.
    self.init = initializations.get(init)
    self.bias_init = initializations.get("zero")
    self.setup_weights()
    self.setup_output()
    # Total number of values this generator must produce:
    # one (x, y) kernel per (filter, input channel) pair.
    self.num_param = np.prod(self.output_shape) * self.num_filters * \
        self.input_channels
Example source code for Python's `get()` method
def __init__(self,
             output_shape,
             z_dim,
             layer_sizes,
             scale,
             init="glorot_uniform"):
    """Configure the generator network and build its graph.

    Parameters
    ----------
    output_shape : list_like
        Size of the generated matrix (x, y)
    z_dim : int
        Size of the input z vector
    layer_sizes : list_like
        List of nodes in hidden layers
    scale : float
        Scale used for generating the coordinate matrix
        (see get_coordinates* functions)
    init : str
        Keras initializer to use for weights
    """
    # Plain configuration.
    self.output_shape = output_shape
    self.z_dim = z_dim
    self.layer_sizes = layer_sizes
    self.scale = scale
    # Resolve initializer names into callables; biases start at zero.
    self.init = initializations.get(init)
    self.bias_init = initializations.get("zero")
    # Materialize parameters, then wire up the output computation.
    self.setup_weights()
    self.setup_output()
def __init__(self, epsilon=1e-5, weights=None,
             beta_init='zero', gamma_init='one', **kwargs):
    """Set up instance normalization.

    epsilon: small constant for numerical stability.
    beta_init / gamma_init: Keras initializer names for shift and scale.
    weights: accepted for API symmetry but not stored here —
        NOTE(review): presumably handled elsewhere; confirm.
    """
    self.epsilon = epsilon
    # Turn initializer names into callables up front.
    self.beta_init = initializations.get(beta_init)
    self.gamma_init = initializations.get(gamma_init)
    super(InstanceNormalization, self).__init__(**kwargs)
def __init__(self, weights=None, axis=-1, momentum=0.9,
             beta_init='zero', gamma_init='one', **kwargs):
    """Configure the Scale layer.

    weights: optional initial weight arrays, applied later in build().
    axis: axis along which scaling is applied (default: last).
    momentum: momentum used for running statistics.
    beta_init / gamma_init: Keras initializer names for shift and scale.
    """
    self.axis = axis
    self.momentum = momentum
    # Resolve initializer names to callables.
    self.beta_init = initializations.get(beta_init)
    self.gamma_init = initializations.get(gamma_init)
    self.initial_weights = weights
    super(Scale, self).__init__(**kwargs)
def __init__(self, output_dim,
             init='glorot_uniform', activation='linear', weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             input_dim=None, input_length1=None, input_length2=None, **kwargs):
    """Dense layer applied across two leading time-like dimensions.

    Resolves initializer / activation / regularizer / constraint identifiers
    through the Keras registries and records the input geometry.
    """
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.input_length1 = input_length1
    self.input_length2 = input_length2
    # Turn string/dict identifiers into callable objects.
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.constraints = [self.W_constraint, self.b_constraint]
    self.initial_weights = weights
    if self.input_dim:
        # Fully specified input: expose the shape to Keras via kwargs.
        kwargs['input_shape'] = (self.input_length1, self.input_length2,
                                 self.input_dim)
    # Expects 4D input: (samples, time1, time2, features).
    self.input = K.placeholder(ndim=4)
    super(HigherOrderTimeDistributedDense, self).__init__(**kwargs)
def __init__(self, input_shape, context='word', init='glorot_uniform', activation='tanh', weights=None, **kwargs):
    """Attention over a rank-3 input tensor.

    input_shape: triple (time1, time2, word_dim) describing the input.
    context: attention context mode (e.g. 'word').
    """
    self.context = context
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    # Unpack the three input dimensions for later weight construction.
    self.td1, self.td2, self.wd = input_shape
    self.initial_weights = weights
    kwargs['input_shape'] = input_shape
    super(TensorAttention, self).__init__(**kwargs)
def __init__(self, output_dim, init='glorot_uniform', **kwargs):
    """Two-level hierarchical softmax over `output_dim` targets.

    Factors output_dim into a near-square grid of
    n_classes x n_outputs_per_class so that n_classes * n_outputs_per_class
    >= output_dim.
    """
    from math import ceil, sqrt

    self.init = initializations.get(init)
    self.output_dim = output_dim
    # Near-square factorization: first dimension is ceil(sqrt(n)),
    # second covers the remainder.
    l1 = ceil(sqrt(output_dim))
    l2 = ceil(output_dim / l1)
    self.n_classes, self.n_outputs_per_class = int(l1), int(l2)
    super(HierarchicalSoftmax, self).__init__(**kwargs)
def __init__(self, nb_filter, nb_row, nb_col, transform_bias=-1,
             init='glorot_uniform', activation='relu', weights=None,
             border_mode='same', subsample=(1, 1), dim_ordering='th',
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """2D convolutional highway layer configuration.

    Parameters mirror Keras Convolution2D, plus ``transform_bias``: the
    initial bias of the transform gate (negative values bias the gate
    toward carrying the input through unchanged).
    """
    if border_mode not in {'valid', 'same'}:
        raise Exception('Invalid border mode for Convolution2D:', border_mode)
    # Validate dim_ordering *before* it is used to build the initializer.
    # (Previously the same border_mode condition was also re-checked with a
    # redundant assert further down; that duplicate check is removed.)
    assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.transform_bias = transform_bias
    # Conv initializers need the dim ordering to size the kernel fan-in/out.
    self.init = initializations.get(init, dim_ordering=dim_ordering)
    self.activation = activations.get(activation)
    self.border_mode = border_mode
    self.subsample = tuple(subsample)
    self.dim_ordering = dim_ordering
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(Conv2DHighway, self).__init__(**kwargs)
def __init__(self, weights=None, axis=-1, momentum=0.9,
             beta_init='zero', gamma_init='one', **kwargs):
    """Scale-layer setup: record configuration and resolve initializers.

    axis defaults to the last (feature) axis; momentum drives the running
    statistics; weights, if given, are installed later in build().
    """
    self.momentum = momentum
    self.axis = axis
    self.initial_weights = weights
    # Initializer names -> callables (shift = beta, scale = gamma).
    self.beta_init = initializations.get(beta_init)
    self.gamma_init = initializations.get(gamma_init)
    super(Scale, self).__init__(**kwargs)
Source file: recurrent_convolutional.py
Project: keras-prednet
Author: kunimasa-kawasaki
Views: 21 | Favorites: 0 | Likes: 0 | Comments: 0
def __init__(self, nb_filter, nb_row, nb_col,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid', dim_ordering="tf",
             border_mode="valid", sub_sample=(1, 1),
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    """Convolutional LSTM configuration: LSTM gates computed by 2D convs.

    nb_filter / nb_row / nb_col describe the gate convolution kernels;
    inner_* parameters configure the recurrent transformation;
    dropout_W / dropout_U apply to input and recurrent connections.
    """
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.border_mode = border_mode
    self.subsample = sub_sample
    # Bugfix: the assert message previously contained a stray quote
    # ('{tf,"th}'); the condition itself is unchanged.
    assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
    self.dim_ordering = dim_ordering
    # Forward the conv geometry to the parent constructor via kwargs.
    kwargs["nb_filter"] = nb_filter
    kwargs["nb_row"] = nb_row
    kwargs["nb_col"] = nb_col
    kwargs["dim_ordering"] = dim_ordering
    # NOTE(review): regularizers are stored unresolved (no regularizers.get),
    # unlike sibling layers — presumably the superclass resolves them; confirm.
    self.W_regularizer = W_regularizer
    self.U_regularizer = U_regularizer
    self.b_regularizer = b_regularizer
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    super(LSTMConv2D, self).__init__(**kwargs)
def __init__(self, output_dim, weights=None, activation='linear', return_mask=True, **kwargs):
    """Projection layer setup.

    Uses a fixed glorot-uniform initializer; supports input masking and can
    optionally pass the mask through (return_mask).
    """
    self.supports_masking = True
    self.output_dim = output_dim
    self.return_mask = return_mask
    self.activation = activations.get(activation)
    # Initializer is fixed rather than caller-configurable.
    self.init = initializations.get('glorot_uniform')
    self.initial_weights = weights
    super(Projection, self).__init__(**kwargs)
def on_epoch_end(self, epoch, logs=None):
    """Early-stopping check run at the end of every epoch.

    Tracks the best value of the monitored quantity; once it has failed to
    improve for more than ``self.patience`` epochs, sets
    ``self.model.stop_training`` to halt training.

    Parameters
    ----------
    epoch : int
        Index of the epoch that just finished.
    logs : dict, optional
        Metric values for this epoch, keyed by metric name.
        (Previously a mutable default ``{}``; now None-safe.)
    """
    import warnings

    logs = logs or {}
    current = logs.get(self.monitor)
    if current is None:
        # Bugfix: a missing metric used to flow into monitor_op as None and
        # crash the comparison; warn and skip the check instead.
        warnings.warn('Early stopping requires %s to be available!'
                      % self.monitor, RuntimeWarning)
        return
    if self.monitor_op(current, self.best):
        # Improvement: remember it and reset the patience counter.
        self.best = current
        self.best_epoch = epoch
        self.wait = 0
    else:
        # No improvement: stop once patience is exhausted.
        if self.wait >= self.patience:
            if self.verbose > 0:
                print('Epoch %05d: early stopping' % (epoch))
            self.model.stop_training = True
        self.wait += 1
def __init__(self, init='zero', weights=None, **kwargs):
    """Parametric ReLU with learnable slopes.

    init: Keras initializer name for the alpha slopes.
    weights: optional initial weight arrays.
    """
    self.initial_weights = weights
    self.init = initializations.get(init)
    # Created lazily in build(); will hold the learnable slopes.
    self.alphas = None
    super(MyPReLU, self).__init__(**kwargs)
def __init__(self, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', W_regularizer=None,
             U_regularizer=None, b_regularizer=None,
             dropout_W=0.0, dropout_U=0.0,
             tau=100, dt=20, noise=.1,
             dale_ratio=None, **kwargs):
    """Leaky recurrent layer with optional Dale's-law sign constraints.

    tau / dt: time constant and integration step of the leaky dynamics.
    noise: amplitude of injected noise.
    dale_ratio: fraction of units that are excitatory; when set, a diagonal
        sign matrix (+1 excitatory, -1 inhibitory) is precomputed.
    """
    # Dynamics configuration.
    self.output_dim = output_dim
    self.tau = tau
    self.dt = dt
    self.noise = noise
    self.dale_ratio = dale_ratio
    # Resolve registry identifiers into callables.
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if dale_ratio:
        # Dale's law: the first dale_ratio fraction of units excite (+1),
        # the remainder inhibit (-1).
        sign_vec = np.ones(output_dim)
        sign_vec[int(dale_ratio * output_dim):] = -1
        self.Dale = K.variable(np.diag(sign_vec))
    if self.dropout_W or self.dropout_U:
        # Dropout makes train/test behavior differ.
        self.uses_learning_phase = True
    super(leak_recurrent, self).__init__(**kwargs)
def __init__(self, output_dim, init='glorot_uniform',
             activation='linear', weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=False, input_dim=None, dale_ratio=.8, **kwargs):
    """Dense layer variant that can enforce Dale's law on its input weights.

    dale_ratio: fraction of input units allowed a non-zero (excitatory)
        sign; the remaining inputs are masked to zero via ``self.Dale``.
        Requires ``input_dim`` to be known at construction time.
    """
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]
    # OUR CHANGE
    self.dale_ratio = dale_ratio
    # Bugfix: dale_ratio defaults to a truthy value, so constructing the
    # layer without input_dim used to attempt np.ones((None, 1)) and raise
    # TypeError. The Dale mask is now only built when input_dim is known.
    if dale_ratio and input_dim:
        dale_vec = np.ones((input_dim, 1))
        dale_vec[int(dale_ratio * input_dim):, 0] = 0
        self.Dale = K.variable(dale_vec)
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(Dense, self).__init__(**kwargs)
def __init__(self, weights=None, axis=-1, momentum=0.9, beta_init='zero', gamma_init='one', **kwargs):
    """Record Scale-layer configuration.

    beta (shift) and gamma (scale) initializer names are resolved through
    the Keras initializations registry; weights, if provided, are installed
    later in build().
    """
    self.initial_weights = weights
    self.momentum = momentum
    self.axis = axis
    self.beta_init = initializations.get(beta_init)
    self.gamma_init = initializations.get(gamma_init)
    super(Scale, self).__init__(**kwargs)
def __init__(self, nb_filter, nb_row, nb_col, rate=2,
             init='glorot_uniform', activation='linear', weights=None,
             border_mode='valid', dim_ordering=K.image_dim_ordering(),
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """Atrous (dilated) 2D convolution configuration.

    rate: dilation rate of the convolution. Only supported on the
    TensorFlow backend.
    """
    if K._BACKEND != 'tensorflow':
        # Bugfix: the message previously said "TensorBoard callback"
        # (copy-paste from an unrelated class); this layer is a convolution.
        raise Exception('ATrousConvolution2D only works '
                        'with the TensorFlow backend.')
    if border_mode not in {'valid', 'same'}:
        raise Exception('Invalid border mode for Convolution2D:', border_mode)
    # Validate dim_ordering before it is used to build the initializer.
    # (The redundant second border_mode assert has been removed.)
    assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.rate = rate
    self.init = initializations.get(init, dim_ordering=dim_ordering)
    self.activation = activations.get(activation)
    self.border_mode = border_mode
    self.dim_ordering = dim_ordering
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(ATrousConvolution2D, self).__init__(**kwargs)
def __init__(self, nb_filter, nb_row, nb_col,
             init='glorot_uniform', activation='linear', weights=None,
             border_mode='valid', subsample=(1, 1), dim_ordering=K.image_dim_ordering(),
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """Transposed (fractionally-strided) 2D convolution configuration.

    Parameters mirror Keras Convolution2D; subsample is the stride of the
    equivalent forward convolution.

    NOTE(review): ``dim_ordering`` defaults to K.image_dim_ordering()
    evaluated at import time, so later changes to the global image ordering
    do not affect this default — confirm this is intended.
    """
    if border_mode not in {'valid', 'same'}:
        raise Exception('Invalid border mode for Convolution2D:', border_mode)
    # (A redundant assert repeating the border_mode check has been removed.)
    assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.dim_ordering = dim_ordering
    # Conv initializers need the dim ordering to size the kernel fan-in/out.
    self.init = initializations.get(init, dim_ordering=self.dim_ordering)
    self.activation = activations.get(activation)
    self.border_mode = border_mode
    self.subsample = tuple(subsample)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(ConvolutionTranspose2D, self).__init__(**kwargs)
Source file: KerasBatchNormalization.py
Project: audit-log-detection
Author: twosixlabs
Views: 17 | Favorites: 0 | Likes: 0 | Comments: 0
def __init__(self, epsilon=1e-6, mode=0, axis=-1, momentum=0.9,
             weights=None, beta_init='zero', gamma_init='one', **kwargs):
    """Batch-normalization configuration.

    epsilon: small constant added for numeric stability.
    mode: normalization mode; mode 0 behaves differently between training
        and inference.
    axis: axis to normalize over (default: last).
    momentum: momentum of the running statistics.
    """
    self.epsilon = epsilon
    self.mode = mode
    self.axis = axis
    self.momentum = momentum
    self.initial_weights = weights
    # Initializer names -> callables (shift = beta, scale = gamma).
    self.beta_init = initializations.get(beta_init)
    self.gamma_init = initializations.get(gamma_init)
    if self.mode == 0:
        # Mode 0 uses batch statistics in training, running stats at test.
        self.uses_learning_phase = True
    super(BatchNormalization, self).__init__(**kwargs)
def __init__(self, output_dim, **kwargs):
    """Similarity layer setup with a fixed glorot-uniform initializer."""
    # Initializer is not caller-configurable for this layer.
    self.init = initializations.get('glorot_uniform')
    self.output_dim = output_dim
    super(SimLayer, self).__init__(**kwargs)