def __init__(self, output_dim, init='glorot_uniform', activation='relu', weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None, input_dim=None, **kwargs):
    """Sparse fully-connected layer (Keras-1 style keyword arguments).

    Args:
        output_dim: Size of the output space.
        init: Identifier of the initializer for W (b is always zeros).
        activation: Identifier of the activation function.
        weights: Optional initial weight arrays, applied after build().
        W_regularizer / b_regularizer / activity_regularizer: Optional regularizers.
        W_constraint / b_constraint: Optional constraints.
        input_dim: Optional input size, forwarded to Keras as `input_shape`.
    """
    self.output_dim = output_dim
    self.input_dim = input_dim
    # Resolve string identifiers into concrete Keras objects up front.
    self.W_initializer = initializers.get(init)
    self.b_initializer = initializers.get('zeros')
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.initial_weights = weights
    # This layer consumes rank-2 input: (batch, features).
    self.input_spec = InputSpec(ndim=2)
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(SparseFullyConnectedLayer, self).__init__(**kwargs)
# Collected example snippets of Keras InputSpec() usage.
def build(self, input_shape):
    """Create W and b once the input feature size is known."""
    assert len(input_shape) == 2
    feature_dim = input_shape[1]
    # Pin the feature axis so Keras can validate future callers.
    self.input_spec = InputSpec(ndim=2, axes={1: feature_dim})
    self.W = self.add_weight(
        shape=(feature_dim, self.output_dim),
        initializer=self.W_initializer,
        name='SparseFullyConnected_W',
        regularizer=self.W_regularizer,
        constraint=self.W_constraint)
    self.b = self.add_weight(
        shape=(self.output_dim,),
        initializer=self.b_initializer,
        name='SparseFullyConnected_b',
        regularizer=self.b_regularizer,
        constraint=self.b_constraint)
    # Apply any weights handed to __init__, then drop the reference.
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
def build(self, input_shape):
    """Create recurrent weights; expects [sequence_input, embedding_input] shapes."""
    self.input_spec = [InputSpec(shape=input_shape[0])]
    self.input_dim = input_shape[0][2]
    self.embedding_dim = input_shape[1][1]
    self.states = [None]
    # The three gate matrices are fused along the last axis.
    fused_dim = 3 * self.output_dim
    self.W = self.init((self.input_dim, fused_dim),
                       name='{}_W'.format(self.name))
    self.U = self.inner_init((self.output_dim, fused_dim),
                             name='{}_U'.format(self.name))
    self.C = self.inner_init((self.embedding_dim, fused_dim),
                             name='{}_C'.format(self.name))
    self.V = self.init((self.embedding_dim, self.output_dim),
                       name='{}_V'.format(self.name))
    # Bias starts at zero for all three gates.
    self.b = K.variable(np.hstack((np.zeros(self.output_dim),
                                   np.zeros(self.output_dim),
                                   np.zeros(self.output_dim))),
                        name='{}_b'.format(self.name))
    self.trainable_weights = [self.W, self.U, self.C, self.V, self.b]
def build(self, input_shape):
    """Create per-feature projection weights for the three rank-3 inputs."""
    # One spec per input: (batch, time, features) with batch left free.
    self.input_spec = [
        InputSpec(dtype=K.floatx(), shape=(None, shp[1], shp[2]))
        for shp in (input_shape[0], input_shape[1], input_shape[2])
    ]
    self.W_h = self.init((self.nb_feature, input_shape[0][2], self.output_dim),
                         name='{}_W_h'.format(self.name))
    self.W_y = self.init((self.nb_feature, input_shape[1][2], self.output_dim),
                         name='{}_W_y'.format(self.name))
    self.W_c = self.init((self.nb_feature, input_shape[2][2], self.output_dim),
                         name='{}_W_c'.format(self.name))
    trainable = [self.W_h, self.W_y, self.W_c]
    if self.bias:
        self.b = K.zeros((self.nb_feature, self.output_dim),
                         name='{}_b'.format(self.name))
        trainable = trainable + [self.b]
    self.trainable_weights = trainable
def build(self, input_shape):
    """Create the binarized dense kernel and optional bias.

    Resolves the 'Glorot' placeholders for the clipping range `H` and the
    kernel learning-rate multiplier from fan-in/fan-out, then registers
    the weights and pins the input spec to the observed feature size.
    """
    assert len(input_shape) >= 2
    input_dim = input_shape[1]
    if self.H == 'Glorot':
        # Glorot-style bound derived from fan-in + fan-out.
        self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
    if self.kernel_lr_multiplier == 'Glorot':
        self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
    # Keep latent weights inside [-H, H]; initialize uniformly in that range.
    self.kernel_constraint = Clip(-self.H, self.H)
    self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
    self.kernel = self.add_weight(shape=(input_dim, self.units),
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    if self.use_bias:
        self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
        # Bug fix: bias length must match the kernel's output size
        # (`self.units`); `self.output_dim` is not defined on this layer,
        # which the rest of this method confirms by using `self.units`.
        self.bias = self.add_weight(shape=(self.units,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.lr_multipliers = [self.kernel_lr_multiplier]
        self.bias = None
    self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
    self.built = True
def __init__(self, output_dim, window_size=3, stride=1,
             kernel_initializer='uniform', bias_initializer='zero',
             activation='linear', activity_regularizer=None,
             kernel_regularizer=None, bias_regularizer=None,
             kernel_constraint=None, bias_constraint=None,
             use_bias=True, input_dim=None, input_length=None, **kwargs):
    """Gated convolutional layer over rank-3 (batch, time, features) input."""
    # Convolution hyperparameters; stride is lifted to a 2D stride pair
    # because the temporal convolution is realized as a height-1 conv2d.
    self.output_dim = output_dim
    self.window_size = window_size
    self.strides = (stride, 1)
    self.use_bias = use_bias
    # Resolve string identifiers into concrete Keras objects.
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = [InputSpec(ndim=3)]
    self.input_dim = input_dim
    self.input_length = input_length
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length, self.input_dim)
    super(GCNN, self).__init__(**kwargs)
def build(self, input_shape):
    """Create the gated-conv kernel (doubled output channels for the gate).

    The kernel's last axis is `output_dim * 2`: one half carries values,
    the other half the gating signal.
    """
    input_dim = input_shape[2]
    self.input_dim = input_dim
    self.input_spec = [InputSpec(shape=input_shape)]
    self.kernel_shape = (self.window_size, 1, input_dim, self.output_dim * 2)
    self.kernel = self.add_weight(self.kernel_shape,
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    if self.use_bias:
        self.bias = self.add_weight((self.output_dim * 2,),
                                    initializer=self.bias_initializer,
                                    name='b',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        # Bug fix: define the attribute on the bias-less path too, so later
        # code can test `self.bias` without risking an AttributeError.
        self.bias = None
    self.built = True
def __init__(self, units, window_size=2, stride=1,
             return_sequences=False, go_backwards=False,
             stateful=False, unroll=False, activation='tanh',
             kernel_initializer='uniform', bias_initializer='zero',
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None,
             dropout=0, use_bias=True, input_dim=None, input_length=None,
             **kwargs):
    """Quasi-recurrent layer: convolutional gates plus recurrent pooling."""
    # RNN-style behaviour flags.
    self.return_sequences = return_sequences
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.unroll = unroll
    # Gate-convolution hyperparameters (stride as a height-1 2D pair).
    self.units = units
    self.window_size = window_size
    self.strides = (stride, 1)
    self.use_bias = use_bias
    self.dropout = dropout
    # Resolve string identifiers into concrete Keras objects.
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.supports_masking = True
    self.input_spec = [InputSpec(ndim=3)]
    self.input_dim = input_dim
    self.input_length = input_length
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length, self.input_dim)
    super(QRNN, self).__init__(**kwargs)
def build(self, input_shape):
    """Create the two channel projections: channels -> channels//ratio -> channels."""
    assert len(input_shape) == 4
    self.input_spec = InputSpec(shape=input_shape)
    channel_axis = 1 if self.data_format == 'channels_first' else 3
    channels = input_shape[channel_axis]
    reduced = channels // self.ratio
    # Squeeze projection.
    self.kernel1 = self.add_weight(shape=(channels, reduced),
                                   initializer=self.kernel_initializer,
                                   name='kernel1',
                                   regularizer=self.kernel_regularizer,
                                   constraint=self.kernel_constraint)
    self.bias1 = None
    if self.use_bias:
        self.bias1 = self.add_weight(shape=(reduced,),
                                     initializer=self.bias_initializer,
                                     name='bias1',
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)
    # Excite projection back to the full channel count.
    self.kernel2 = self.add_weight(shape=(reduced, channels),
                                   initializer=self.kernel_initializer,
                                   name='kernel2',
                                   regularizer=self.kernel_regularizer,
                                   constraint=self.kernel_constraint)
    self.bias2 = None
    if self.use_bias:
        self.bias2 = self.add_weight(shape=(channels,),
                                     initializer=self.bias_initializer,
                                     name='bias2',
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)
    self.built = True
def build(self, input_shape):
    """Create the binarized dense kernel and optional bias.

    Resolves the 'Glorot' placeholders for the clipping range `H` and the
    kernel learning-rate multiplier from fan-in/fan-out, then registers
    the weights and pins the input spec to the observed feature size.
    """
    assert len(input_shape) >= 2
    input_dim = input_shape[1]
    if self.H == 'Glorot':
        # Glorot-style bound derived from fan-in + fan-out.
        self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
    if self.kernel_lr_multiplier == 'Glorot':
        self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
    # Keep latent weights inside [-H, H]; initialize uniformly in that range.
    self.kernel_constraint = Clip(-self.H, self.H)
    self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
    self.kernel = self.add_weight(shape=(input_dim, self.units),
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    if self.use_bias:
        self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
        # Bug fix: bias length must match the kernel's output size
        # (`self.units`); `self.output_dim` is not defined on this layer,
        # which the rest of this method confirms by using `self.units`.
        self.bias = self.add_weight(shape=(self.units,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.lr_multipliers = [self.kernel_lr_multiplier]
        self.bias = None
    self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
    self.built = True
def build(self, input_shape):
    """Create broadcastable per-channel scale (gamma) and shift (beta).

    NOTE(review): only supports 4D inputs, assumed to be (B, H, W, C).
    """
    self.input_spec = [InputSpec(shape=input_shape)]
    param_shape = (1, 1, 1, input_shape[-1])
    self.gamma = self.gamma_init(param_shape, name='{}_gamma'.format(self.name))
    self.beta = self.beta_init(param_shape, name='{}_beta'.format(self.name))
    self.trainable_weights = [self.gamma, self.beta]
    self.built = True
def build(self, input_shape):
    """Create per-class scale (gamma) and shift (beta), one row per class.

    NOTE(review): only supports 4D inputs, assumed to be (B, H, W, C).
    """
    self.input_spec = [InputSpec(shape=input_shape)]
    param_shape = (self.nb_classes, 1, 1, input_shape[-1])
    self.gamma = self.gamma_init(param_shape, name='{}_gamma'.format(self.name))
    self.beta = self.beta_init(param_shape, name='{}_beta'.format(self.name))
    self.trainable_weights = [self.gamma, self.beta]
    self.built = True
def build(self,input_shapes):
    '''Build the parent LSTM weights plus the attention projections.

    input_shapes[0] is the main sequence input (batch, time, features);
    input_shapes[1] is the attention-vector input.

    Attention weights (translated from the original comments):
        U_a / b_a: project the input x into the attention space.
        U_m / b_m: project the attention vector into the attention space.
        U_s / b_s: project attention activations ahead of the softmax.
    '''
    input_shape=input_shapes[0]
    # Let the base LSTM create its own recurrent weights first.
    super(AttentionLSTM,self).build(input_shape)
    self.input_spec = [InputSpec(shape=input_shapes[0]),InputSpec(shape=input_shapes[1])]
    #attention_dim=self.input_spec[1].shape[1]
    attention_dim=self.att_dim
    input_dim = input_shape[2]
    # Attention parameters.
    self.U_a=self.inner_init((input_dim,self.output_dim),
                             name='{}_U_a'.format(self.name))
    self.b_a=K.zeros((self.output_dim,),name='{}_b_a'.format(self.name))
    self.U_m=self.inner_init((attention_dim,self.output_dim),
                             name='{}_U_m'.format(self.name))
    self.b_m=K.zeros((self.output_dim,),name='{}_b_m'.format(self.name))
    # Single-parameter attention scores each timestep with one scalar;
    # otherwise a full output_dim-sized weighting is learned.
    if self.single_attention_param:
        self.U_s = self.inner_init((self.output_dim, 1),
                                   name='{}_U_s'.format(self.name))
        self.b_s = K.zeros((1,), name='{}_b_s'.format(self.name))
    else:
        self.U_s = self.inner_init((self.output_dim, self.output_dim),
                                   name='{}_U_s'.format(self.name))
        self.b_s = K.zeros((self.output_dim,), name='{}_b_s'.format(self.name))
    # Extend (not replace) the trainable weights created by the parent LSTM.
    self.trainable_weights+=[self.U_a,self.U_m,self.U_s,
                             self.b_a,self.b_m,self.b_s]
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
def __init__(self, output_dim, nb_feature=4, init='glorot_uniform', bias=True, input_dim=None, **kwargs):
    """Maxout layer over three rank-3 inputs with `nb_feature` linear pieces."""
    self.output_dim = output_dim
    self.nb_feature = nb_feature
    self.init = initializations.get(init)
    self.bias = bias
    # Three rank-3 inputs are expected.
    self.input_spec = [InputSpec(ndim=3) for _ in range(3)]
    self.input_dim = input_dim
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(Maxout, self).__init__(**kwargs)
def build(self, input_shape):
    """Create the depthwise kernel (and bias) once the channel count is known."""
    if len(input_shape) < 4:
        raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
                         'Received input shape:', str(input_shape))
    channel_axis = 1 if self.data_format == 'channels_first' else 3
    if input_shape[channel_axis] is None:
        raise ValueError('The channel dimension of the inputs to '
                         '`DepthwiseConv2D` '
                         'should be defined. Found `None`.')
    input_dim = int(input_shape[channel_axis])
    # One depth_multiplier-sized stack of filters per input channel.
    self.depthwise_kernel = self.add_weight(
        shape=(self.kernel_size[0],
               self.kernel_size[1],
               input_dim,
               self.depth_multiplier),
        initializer=self.depthwise_initializer,
        name='depthwise_kernel',
        regularizer=self.depthwise_regularizer,
        constraint=self.depthwise_constraint)
    self.bias = None
    if self.use_bias:
        # One bias per output channel: input_dim * depth_multiplier outputs.
        self.bias = self.add_weight(shape=(input_dim * self.depth_multiplier,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    # Pin the channel axis so Keras checks later inputs against it.
    self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
    self.built = True
def build(self, input_shape):
    """Create the binarized conv kernel and optional bias.

    Resolves the 'Glorot' placeholders for the clipping range `H` and the
    kernel learning-rate multiplier from receptive-field fan-in/fan-out,
    then registers the weights and pins the channel axis of the input spec.
    """
    if self.data_format == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    if input_shape[channel_axis] is None:
        raise ValueError('The channel dimension of the inputs '
                         'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis]
    kernel_shape = self.kernel_size + (input_dim, self.filters)
    # Fan counts include the spatial receptive field.
    base = self.kernel_size[0] * self.kernel_size[1]
    if self.H == 'Glorot':
        nb_input = int(input_dim * base)
        nb_output = int(self.filters * base)
        self.H = np.float32(np.sqrt(1.5 / (nb_input + nb_output)))
    if self.kernel_lr_multiplier == 'Glorot':
        nb_input = int(input_dim * base)
        nb_output = int(self.filters * base)
        self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (nb_input + nb_output)))
    # Keep latent weights inside [-H, H]; initialize uniformly in that range.
    self.kernel_constraint = Clip(-self.H, self.H)
    self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
    self.kernel = self.add_weight(shape=kernel_shape,
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    if self.use_bias:
        self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
        # Bug fixes: bias length must equal the number of output channels
        # (`self.filters`; `self.output_dim` is not defined on this layer),
        # and the attribute is `bias_initializer`, not `bias_initializers`.
        self.bias = self.add_weight(shape=(self.filters,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.lr_multipliers = [self.kernel_lr_multiplier]
        self.bias = None
    # Set input spec.
    self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
    self.built = True
def build(self, input_shape):
    """Create the binarized conv kernel and optional bias.

    Resolves the 'Glorot' placeholders for the clipping range `H` and the
    kernel learning-rate multiplier from receptive-field fan-in/fan-out,
    then registers the weights and pins the channel axis of the input spec.
    """
    if self.data_format == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    if input_shape[channel_axis] is None:
        raise ValueError('The channel dimension of the inputs '
                         'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis]
    kernel_shape = self.kernel_size + (input_dim, self.filters)
    # Fan counts include the spatial receptive field.
    base = self.kernel_size[0] * self.kernel_size[1]
    if self.H == 'Glorot':
        nb_input = int(input_dim * base)
        nb_output = int(self.filters * base)
        self.H = np.float32(np.sqrt(1.5 / (nb_input + nb_output)))
    if self.kernel_lr_multiplier == 'Glorot':
        nb_input = int(input_dim * base)
        nb_output = int(self.filters * base)
        self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (nb_input + nb_output)))
    # Keep latent weights inside [-H, H]; initialize uniformly in that range.
    self.kernel_constraint = Clip(-self.H, self.H)
    self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
    self.kernel = self.add_weight(shape=kernel_shape,
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    if self.use_bias:
        self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
        # Bug fixes: bias length must equal the number of output channels
        # (`self.filters`; `self.output_dim` is not defined on this layer),
        # and the attribute is `bias_initializer`, not `bias_initializers`.
        self.bias = self.add_weight(shape=(self.filters,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.lr_multipliers = [self.kernel_lr_multiplier]
        self.bias = None
    # Set input spec.
    self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
    self.built = True