def search_model(experiment_label, steps, batch_size=32):
""" This is where we put everythin together.
We get the dataset, build the Training and Experiment objects, and run the experiment.
The experiments logs are generated in ~/minos/experiment_label
We use the CpuEnvironment to have the experiment run on the cpu, with 2 parralel processes.
We could use GpuEnvironment to use GPUs, and specify which GPUs to use, and how many tasks
per GPU
"""
batch_iterator, test_batch_iterator, nb_classes = get_reuters_dataset(batch_size, max_words)
layout = build_layout(max_words, nb_classes)
training = Training(
Objective('categorical_crossentropy'),
Optimizer(optimizer='Adam'),
Metric('categorical_accuracy'),
epoch_stopping_condition(),
batch_size)
parameters = custom_experiment_parameters()
experiment = Experiment(
experiment_label,
layout,
training,
batch_iterator,
test_batch_iterator,
CpuEnvironment(n_jobs=1),
parameters=parameters)
run_ga_search_experiment(
experiment,
population_size=100,
generations=steps,
resume=False,
log_level='DEBUG')
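
A minimal driver sketch for the function above, assuming max_words is defined at module scope (the body references it) and the minos imports are in place; the label and step count are illustrative:

if __name__ == '__main__':
    # Small smoke-test run: 10 GA generations over the Reuters dataset.
    search_model('reuters_ga_search', steps=10, batch_size=32)
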
# Python examples of get(): assorted Keras layer implementations collected below.
def call(self, x, mask=None):
activation = activations.get(self.activation)
return activation(backend.dot(x, self.W) + self.b)
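
For context, activations.get resolves its argument to a callable: a string looks up a built-in activation, None maps to the identity (linear), and a callable is returned unchanged. A minimal sketch:

from keras import activations
from keras import backend

relu_fn = activations.get('relu')             # string identifier -> function
identity = activations.get(None)              # None -> linear (identity)
passthrough = activations.get(backend.tanh)   # callables pass through unchanged
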
def __init__(self, units,
activation='linear',
weights=None,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
kernel_constraint=None,
bias_initializer='uniform',
bias_regularizer=None,
bias_constraint=None,
activity_regularizer=None,
bias=True,
input_dim=None,
factorization=simple_tensor_factorization(),
**kwargs):
self.activation = activations.get(activation)
self.units = units
self.input_dim = input_dim
self.factorization = factorization
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_initializer = get_initializer(kernel_initializer)
self.bias_initializer = get_initializer(bias_initializer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.bias = bias
self.initial_weights = weights
self.input_spec = [InputSpec(ndim=2)]
if self.input_dim:
kwargs['input_shape'] = (self.input_dim,)
super(DenseTensor, self).__init__(**kwargs)
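
A hedged usage sketch for DenseTensor, assuming its build/call methods (defined elsewhere in the repo, not shown here) produce an output of shape (batch, units) like a standard Dense layer:

# Hypothetical usage; layer name and shapes are assumptions, not shown above.
from keras.models import Sequential

model = Sequential()
model.add(DenseTensor(units=64, activation='relu', input_dim=100))
model.compile(optimizer='adam', loss='mse')
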
def __init__(self, inp_size, out_size, activation='tanh', **kwargs):
super(ntn_layer, self).__init__(**kwargs)
self.k = out_size
self.d = inp_size
self.activation = activations.get(activation)
self.test_out = 0
def __init__(self, filters_simple, filters_complex, nb_row, nb_col,
init='glorot_uniform', activation='relu', weights=None,
padding='valid', strides=(1, 1), data_format=K.image_data_format(),
kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
W_constraint=None, bias_constraint=None,
bias=True, **kwargs):
if padding not in {'valid', 'same'}:
raise Exception('Invalid border mode for Convolution2DEnergy:', padding)
self.filters_simple = filters_simple
self.filters_complex = filters_complex
self.nb_row = nb_row
self.nb_col = nb_col
self.init = initializers.get(init, data_format=data_format)
self.activation = activations.get(activation)
assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
self.padding = padding
self.strides = tuple(strides)
    assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
self.data_format = data_format
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.W_constraint = constraints.UnitNormOrthogonal(filters_complex, data_format)
self.bias_constraint = constraints.get(bias_constraint)
self.bias = bias
self.input_spec = [InputSpec(ndim=4)]
self.initial_weights = weights
super(Convolution2DEnergy, self).__init__(**kwargs)
def __init__(self, rank,
kernel_size=3,
data_format=None,
kernel_initialization=.1,
bias_initialization=1,
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(_ConvGDN, self).__init__(**kwargs)
self.rank = rank
self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(1, rank, 'strides')
self.padding = conv_utils.normalize_padding('same')
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(1, rank, 'dilation_rate')
self.kernel_initializer = initializers.Constant(kernel_initialization)
self.bias_initializer = initializers.Constant(bias_initialization)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=self.rank + 2)
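
The class name and the constant-initialized kernel and bias suggest a convolutional generalized divisive normalization (GDN) layer in the spirit of Balle et al.; the call method isn't shown, so as a reference, a NumPy sketch of the classic per-channel GDN transform (hypothetical names beta, gamma):

import numpy as np

def gdn(x, beta, gamma):
    # y_i = x_i / sqrt(beta_i + sum_j gamma_ij * x_j**2)
    # x: (channels,), beta: (channels,), gamma: (channels, channels)
    return x / np.sqrt(beta + gamma @ (x ** 2))
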
def __init__(self, filters,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
kernel_constraint=kconstraints.NonNeg(),
k_initializer='zeros',
k_regularizer=None,
k_constraint=None,
tied_k=False,
activity_regularizer=None,
strides=1,
padding='valid',
dilation_rate=1,
data_format=K.image_data_format(),
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(Conv2DSoftMinMax, self).__init__(**kwargs)
self.filters = filters
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.k_initializer = initializers.get(k_initializer)
self.k_regularizer = regularizers.get(k_regularizer)
self.k_constraint = constraints.get(k_constraint)
self.tied_k = tied_k
self.activity_regularizer = regularizers.get(activity_regularizer)
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
self.padding = conv_utils.normalize_padding(padding)
self.input_spec = InputSpec(min_ndim=2)
self.data_format = data_format
self.supports_masking = True
def __init__(self, weights=None, kernel_initializer='glorot_uniform',
alpha_initializer='ones', alpha_regularizer=None, alpha_constraint=None,
beta_delta_initializer='ones', beta_delta_regularizer=None, beta_delta_constraint=None,
gamma_eta_initializer='ones', gamma_eta_regularizer=None, gamma_eta_constraint=None,
rho_initializer='ones', rho_regularizer=None, rho_constraint=None,
**kwargs):
self.alpha_initializer = initializers.get(alpha_initializer)
self.beta_delta_initializer = initializers.get(beta_delta_initializer)
self.gamma_eta_initializer = initializers.get(gamma_eta_initializer)
self.rho_initializer = initializers.get(rho_initializer)
self.alpha_constraint = constraints.get(alpha_constraint)
self.beta_delta_constraint = constraints.get(beta_delta_constraint)
self.gamma_eta_constraint = constraints.get(gamma_eta_constraint)
self.rho_constraint = constraints.get(rho_constraint)
self.alpha_regularizer = regularizers.get(alpha_regularizer)
self.beta_delta_regularizer = regularizers.get(beta_delta_regularizer)
self.gamma_eta_regularizer = regularizers.get(gamma_eta_regularizer)
self.rho_regularizer = regularizers.get(rho_regularizer)
self.input_spec = [InputSpec(ndim=2)]
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(NakaRushton, self).__init__(**kwargs)
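
The parameter names point at a trainable Naka-Rushton response function; the layer's call isn't shown, so as a reference, a NumPy sketch of the classic fixed-parameter Naka-Rushton nonlinearity (hypothetical names r_max, sigma, n) that the alpha/beta_delta/gamma_eta/rho parameters above presumably generalize:

import numpy as np

def naka_rushton(x, r_max=1.0, sigma=0.5, n=2.0):
    # r(x) = r_max * x^n / (x^n + sigma^n): a saturating contrast-response curve
    xn = np.power(np.maximum(x, 0.0), n)
    return r_max * xn / (xn + sigma ** n)
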
def __init__(self, filters,
sum_axes,
filter_axes,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_activation=None,
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(FilterDims, self).__init__(**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.activation = activations.get(activation)
self.kernel_activation = activations.get(kernel_activation)
self.filters = filters
self.sum_axes = list(sum_axes)
self.sum_axes.sort()
self.filter_axes = list(filter_axes)
self.filter_axes.sort()
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.use_bias = use_bias
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def __init__(self, output_dim,
init='glorot_uniform', activation='linear', weights=None,
W_regularizer=None, b_regularizer=None, activity_regularizer=None,
W_constraint=None, b_constraint=None,
input_dim=None, input_length1=None, input_length2=None, **kwargs):
self.output_dim = output_dim
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.constraints = [self.W_constraint, self.b_constraint]
self.initial_weights = weights
self.input_dim = input_dim
self.input_length1 = input_length1
self.input_length2 = input_length2
if self.input_dim:
kwargs['input_shape'] = (self.input_length1, self.input_length2, self.input_dim)
self.input = K.placeholder(ndim=4)
super(HigherOrderTimeDistributedDense, self).__init__(**kwargs)
def __init__(self, input_shape, context='word', init='glorot_uniform', activation='tanh', weights=None, **kwargs):
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.context = context
self.td1, self.td2, self.wd = input_shape
self.initial_weights = weights
kwargs['input_shape'] = input_shape
super(TensorAttention, self).__init__(**kwargs)
def __init__(self, max_sentences, activation='linear', **kwargs):
self.activation = activations.get(activation)
self.max_sentences = max_sentences
kwargs['input_shape'] = (self.max_sentences, 3)
super(WeightedMean, self).__init__(**kwargs)
def get_initial_state(self, inputs):
print('inputs shape:', inputs.get_shape())
# apply the matrix on the first time step to get the initial s0.
s0 = activations.tanh(K.dot(inputs[:, 0], self.W_s))
# from keras.layers.recurrent to initialize a vector of (batchsize,
# output_dim)
y0 = K.zeros_like(inputs) # (samples, timesteps, input_dims)
y0 = K.sum(y0, axis=(1, 2)) # (samples, )
y0 = K.expand_dims(y0) # (samples, 1)
y0 = K.tile(y0, [1, self.output_dim])
return [y0, s0]
def __init__(self, nb_filter, nb_row, nb_col, transform_bias=-1,
init='glorot_uniform', activation='relu', weights=None,
border_mode='same', subsample=(1, 1), dim_ordering='th',
W_regularizer=None, b_regularizer=None, activity_regularizer=None,
W_constraint=None, b_constraint=None,
bias=True, **kwargs):
if border_mode not in {'valid', 'same'}:
raise Exception('Invalid border mode for Convolution2D:', border_mode)
self.nb_filter = nb_filter
self.nb_row = nb_row
self.nb_col = nb_col
self.transform_bias = transform_bias
self.init = initializations.get(init, dim_ordering=dim_ordering)
self.activation = activations.get(activation)
assert border_mode in {'valid', 'same'}, 'border_mode must be in {valid, same}'
self.border_mode = border_mode
self.subsample = tuple(subsample)
assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
self.dim_ordering = dim_ordering
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
self.input_spec = [InputSpec(ndim=4)]
self.initial_weights = weights
super(Conv2DHighway, self).__init__(**kwargs)
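
For context, a highway layer (Srivastava et al., 2015) gates a learned transform H against a carry of the raw input, and transform_bias=-1 biases the gate toward carrying at initialization so the layer starts out near-identity. A dense backend-style sketch of the gating (not the layer's actual convolutional call, which isn't shown; W, b, W_T, b_T stand in for learned weights):

def highway_gate(x, W, b, W_T, b_T):
    H = K.relu(K.dot(x, W) + b)          # candidate transform
    T = K.sigmoid(K.dot(x, W_T) + b_T)   # transform gate, biased low at init
    return H * T + x * (1.0 - T)         # carry the input where the gate is closed
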
def __init__(self, output_dim, att_dim, attn_activation='tanh',
             attn_inner_activation='tanh',
             single_attn=False, **kwargs):
    '''
    attn_activation / attn_inner_activation: activation functions applied
    when computing the attention weights.
    single_attn: if True, share a single attention parameter across the
    output dimensions at each timestep t.
    '''
    self.attn_activation = activations.get(attn_activation)
    self.attn_inner_activation = activations.get(attn_inner_activation)
    self.single_attention_param = single_attn
    self.input_spec = None
    self.att_dim = att_dim
    super(AttentionLSTM, self).__init__(output_dim, **kwargs)
def __init__(self, units,
n_slots=50,
m_depth=20,
shift_range=3,
controller_model=None,
read_heads=1,
write_heads=1,
activation='sigmoid',
batch_size=777,
stateful=False,
**kwargs):
self.output_dim = units
self.units = units
self.n_slots = n_slots
self.m_depth = m_depth
self.shift_range = shift_range
self.controller = controller_model
self.activation = get_activations(activation)
self.read_heads = read_heads
self.write_heads = write_heads
self.batch_size = batch_size
# self.return_sequence = True
    try:
        # controller_model may be a stateful Keras model; fall back to False
        # if it does not expose the attribute.
        self.controller_with_state = bool(controller_model.state.stateful)
    except AttributeError:
        self.controller_with_state = False
self.controller_read_head_emitting_dim = _controller_read_head_emitting_dim(m_depth, shift_range)
self.controller_write_head_emitting_dim = _controller_write_head_emitting_dim(m_depth, shift_range)
super(NeuralTuringMachine, self).__init__(**kwargs)
# recurrent_convolutional.py (project: keras-prednet, author: kunimasa-kawasaki)
def __init__(self, nb_filter, nb_row, nb_col,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh',
inner_activation='hard_sigmoid', dim_ordering="tf",
border_mode="valid", sub_sample=(1, 1),
W_regularizer=None, U_regularizer=None, b_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
self.nb_filter = nb_filter
self.nb_row = nb_row
self.nb_col = nb_col
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.border_mode = border_mode
self.subsample = sub_sample
    assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
self.dim_ordering = dim_ordering
kwargs["nb_filter"] = nb_filter
kwargs["nb_row"] = nb_row
kwargs["nb_col"] = nb_col
kwargs["dim_ordering"] = dim_ordering
self.W_regularizer = W_regularizer
self.U_regularizer = U_regularizer
self.b_regularizer = b_regularizer
self.dropout_W, self.dropout_U = dropout_W, dropout_U
super(LSTMConv2D, self).__init__(**kwargs)
def __init__(self, output_dim, weights=None, activation='linear', return_mask=True, **kwargs):
self.supports_masking = True
self.output_dim = output_dim
self.init = initializations.get('glorot_uniform')
self.activation = activations.get(activation)
self.initial_weights = weights
self.return_mask = return_mask
super(Projection, self).__init__(**kwargs)
def __init__(self, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', W_regularizer=None,
             U_regularizer=None, b_regularizer=None,
             dropout_W=0.0, dropout_U=0.0,
             tau=100, dt=20, noise=.1,
             dale_ratio=None, **kwargs):
self.output_dim = output_dim
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
self.tau = tau
self.dt = dt
self.noise = noise
self.dale_ratio = dale_ratio
if dale_ratio:
        # build the Dale's-law sign matrix (+1 excitatory, -1 inhibitory)
dale_vec = np.ones(output_dim)
dale_vec[int(dale_ratio*output_dim):] = -1
dale = np.diag(dale_vec)
self.Dale = K.variable(dale)
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(leak_recurrent, self).__init__(**kwargs)
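
A worked example of the Dale's-law mask built above: with output_dim=5 and dale_ratio=0.8, the first four units are excitatory (+1 on the diagonal) and the last is inhibitory (-1):

import numpy as np

output_dim, dale_ratio = 5, 0.8
dale_vec = np.ones(output_dim)
dale_vec[int(dale_ratio * output_dim):] = -1  # units past the cutoff become inhibitory
dale = np.diag(dale_vec)
# dale == diag(1, 1, 1, 1, -1); multiplying the recurrent weights by this
# matrix fixes the sign of each unit's outgoing connections.
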