def test_uniform(tensor_shape):
    _runner(initializations.uniform, tensor_shape, target_mean=0.,
            target_max=0.05, target_min=-0.05)
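The `_runner` helper is not shown on this page. A minimal sketch of such a stats-checking helper, assuming the Keras backend is imported as `K` and an assumed tolerance `lim`, could look like this:

from keras import backend as K

def _runner(init, shape, target_mean=None, target_std=None,
            target_max=None, target_min=None):
    # Hypothetical helper: draw one tensor from the initializer and
    # compare its sample statistics against the given targets.
    variable = init(shape)
    output = K.get_value(variable)
    lim = 1e-2  # assumed tolerance
    if target_std is not None:
        assert abs(output.std() - target_std) < lim
    if target_mean is not None:
        assert abs(output.mean() - target_mean) < lim
    if target_max is not None:
        assert abs(output.max() - target_max) < lim
    if target_min is not None:
        assert abs(output.min() - target_min) < lim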
Python uniform() usage examples (source code)
def create_critic_network(self, state_size, action_dim):
    print("Now we build the model")
    S = Input(shape=[state_size])
    A = Input(shape=[action_dim], name='action2')
    w = Dense(HIDDEN1_UNITS, init='he_uniform', activation='relu')(S)
    h = merge([w, A], mode='concat')  # Keras 1.x functional merge
    h3 = Dense(HIDDEN2_UNITS, init='he_uniform', activation='relu')(h)
    # final layer initialized from U(-3e-3, 3e-3), as in the DDPG paper
    V = Dense(action_dim,
              init=lambda shape, name: uniform(shape, scale=3e-3, name=name),
              activation='linear')(h3)
    model = Model(input=[S, A], output=V)
    adam = Adam(lr=self.LEARNING_RATE)
    model.compile(loss='mse', optimizer=adam)
    return model, A, S
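A hedged usage sketch, not part of the source: the critic returned above is typically fit by regressing Q(s, a) toward a TD target. The names agent, states, actions, and y_t below are assumptions.

# Hypothetical training step; states, actions, y_t are assumed NumPy arrays
# sampled from a replay buffer.
critic, A, S = agent.create_critic_network(state_size=29, action_dim=3)
loss = critic.train_on_batch([states, actions], y_t)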
def create_actor_network(self, state_size, action_dim):
    print("Now we build the model")
    S = Input(shape=[state_size])
    h0 = Dense(100, init='he_uniform', activation='relu')(S)
    h1 = Dense(100, init='he_uniform', activation='relu')(h0)
    # final layer initialized from U(-3e-3, 3e-3), as in the DDPG paper;
    # note: the output size is hard-coded to 8 rather than action_dim
    V = Dense(8,
              init=lambda shape, name: uniform(shape, scale=3e-3, name=name),
              activation='tanh')(h1)
    model = Model(input=S, output=V)
    return model, model.trainable_weights, S
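A minimal usage sketch under the same assumptions (state_batch is an assumed NumPy array of shape (batch, state_size)):

actor, weights, S = agent.create_actor_network(state_size=29, action_dim=8)
action_batch = actor.predict(state_batch)  # tanh outputs in [-1, 1]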
def unitary_ASB2016_init(shape, name=None):
    assert shape[0] == shape[1]
    N = shape[1]
    # rotation angles drawn uniformly from [-pi, pi]
    theta = initializations.uniform((3, N), scale=np.pi, name='{}_theta'.format(name))
    reflection = initializations.glorot_uniform((2, 2 * N), name='{}_reflection'.format(name))
    idxperm = np.random.permutation(N)
    idxpermaug = np.concatenate((idxperm, N + idxperm))
    # augmented identity: real part stacked above the (zero) imaginary part
    Iaug = augLeft(np.concatenate((np.eye(N), np.zeros((N, N))), axis=0),
                   module=np).astype(np.float32)
    Uaug = times_unitary_ASB2016(Iaug, N, [theta, reflection, idxpermaug])
    return Uaug, theta, reflection, idxpermaug
def buildConvolution(self, name):
    filters = self.params.get('filters')
    nb_filter = self.params.get('nb_filter')
    assert filters
    assert nb_filter
    convs = []
    for fsz in filters:
        layer_name = '%s-conv-%d' % (name, fsz)
        conv = Convolution1D(
            nb_filter=nb_filter,
            filter_length=fsz,
            border_mode='valid',
            # activation='relu',
            subsample_length=1,
            init='glorot_uniform',
            # init=init,
            # init=lambda shape, name: initializations.uniform(shape, scale=0.01, name=name),
            W_constraint=maxnorm(self.params.get('w_maxnorm')),
            b_constraint=maxnorm(self.params.get('b_maxnorm')),
            # W_regularizer=regularizers.l2(self.params.get('w_l2')),
            # b_regularizer=regularizers.l2(self.params.get('b_l2')),
            # input_shape=(self.q_length, self.wdim),
            name=layer_name
        )
        convs.append(conv)
    self.layers['%s-convolution' % name] = convs
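A hedged sketch of how such a bank of Convolution1D layers is usually consumed, written as if inside another method of the same class (the embedding shape, the pooling choice, and the 'q' name are assumptions, not part of the source): each filter width runs in parallel over the sequence, is max-pooled over time, and the pooled vectors are concatenated.

from keras import backend as K
from keras.layers import Input, Lambda, merge

x = Input(shape=(30, 300))  # assumed: sequence length 30, embedding dim 300
features = []
for conv in self.layers['q-convolution']:
    c = conv(x)  # (batch, steps, nb_filter)
    # global max-pool over the time axis
    p = Lambda(lambda t: K.max(t, axis=1),
               output_shape=lambda s: (s[0], s[2]))(c)
    features.append(p)
merged = merge(features, mode='concat')  # (batch, len(filters) * nb_filter)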
Source: initializations.py, from the DeepLearaning_TrafficFlowPrediction project (author: KarisM)
def glorot_uniform_sigm(shape, name=None, dim_ordering='th'):
    """Glorot-style weight initializer for sigmoid activations.

    Like keras.initializations.glorot_uniform(), but with the uniform random
    interval used in the Deeplearning.net tutorials. They argue that the
    initialization interval should be
    +/- sqrt(6 / (fan_in + fan_out)) (as in Keras' glorot_uniform()) when tanh
    activations are used, and
    +/- 4 * sqrt(6 / (fan_in + fan_out)) when sigmoid activations are used.

    See: http://deeplearning.net/tutorial/mlp.html#going-from-logistic-regression-to-mlp
    """
    fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
    s = 4. * np.sqrt(6. / (fan_in + fan_out))
    return uniform(shape, s, name=name)
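A minimal usage sketch: in Keras 1.x the init argument of a layer accepts a callable with this (shape, name) signature, so the function can be passed directly (the layer sizes below are assumptions):

from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(256, input_dim=784, init=glorot_uniform_sigm,
                activation='sigmoid'))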
def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    if self.stateful:
        self.reset_states()
    else:
        # initial states: all-zero tensor of shape (output_dim)
        self.states = [None]
    input_dim = input_shape[2]
    self.input_dim = input_dim

    self.W = self.init((input_dim, self.output_dim),
                       name='{}_W'.format(self.name))
    # self.b = K.zeros((self.N,), name='{}_b'.format(self.name))
    self.b = initializations.uniform((self.N,), scale=0.01, name='{}_b'.format(self.name))
    self.baug = K.tile(self.b, [2])

    h0 = self.h0_mean + initializations.uniform((2 * self.N,), scale=0.01).get_value()
    self.h0 = K.variable(h0, name='{}_h0'.format(self.name))

    if 'full' in self.unitary_impl:
        # we're using a full unitary recurrence matrix
        if self.inner_init == 'svd':
            # use SVD to initialize U
            self.U = unitary_svd_init((self.N, self.N), name='{}_U'.format(self.name))
        elif self.inner_init == 'ASB2016':
            # use the parameterization of [ASB2016] to initialize U
            Uaug, _, _, _ = unitary_ASB2016_init((self.N, self.N))
            Uaug = Uaug.eval()
            self.U = K.variable(np.concatenate((Uaug[:self.N, :self.N],
                                                Uaug[:self.N, self.N:]), axis=0),
                                name='{}_U'.format(self.name))
        self.Uaug = augRight(self.U, module=K)
    elif self.unitary_impl == 'ASB2016':
        # we're using the parameterization of [Arjovsky, Shah, Bengio 2016]
        self.Uaug, self.theta, self.reflection, _ = unitary_ASB2016_init((self.N, self.N), name=self.name)

    # set the trainable weights
    if 'full' in self.unitary_impl:
        self.trainable_weights = [self.W, self.U, self.b, self.h0]
    elif self.unitary_impl == 'ASB2016':
        self.trainable_weights = [self.W, self.theta, self.reflection, self.b, self.h0]

    self.regularizers = []
    # if self.W_regularizer:
    #     self.W_regularizer.set_param(self.W)
    #     self.regularizers.append(self.W_regularizer)
    # if self.U_regularizer:
    #     self.U_regularizer.set_param(self.U)
    #     self.regularizers.append(self.U_regularizer)
    # if self.b_regularizer:
    #     self.b_regularizer.set_param(self.b)
    #     self.regularizers.append(self.b_regularizer)

    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights