Python Reshape() class: example source code
def build_generator(latent_size):
'''
Any model with input shape (?, latent_size) and output shape (?, 1, 28, 28) fits here.
'''
model = Sequential()
model.add(Dense(1024, input_dim=latent_size, activation='relu'))
model.add(Dense(28 * 28, activation='tanh'))
model.add(Reshape((1, 28, 28)))
return model
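A minimal smoke test for the generator above, as a sketch: it assumes Keras 2 with the imports shown, a channels-first (1, 28, 28) target as the Reshape suggests, and latent_size = 100 with uniform noise.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Reshape

gen = build_generator(latent_size=100)
noise = np.random.uniform(-1.0, 1.0, size=(16, 100))
print(gen.predict(noise).shape)  # expected: (16, 1, 28, 28)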
def GeneratorDeconv(image_size = 64):
L = int(image_size)
inputs = Input(shape = (100, ))
x = Dense(512*int(L/16)**2)(inputs) #shape(512*(L/16)**2,)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Reshape((int(L/16), int(L/16), 512))(x) # shape(L/16, L/16, 512)
x = Conv2DTranspose(256, (4, 4), strides = (2, 2),
kernel_initializer = init,
padding = 'same')(x) # shape(L/8, L/8, 256)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2DTranspose(128, (4, 4), strides = (2, 2),
kernel_initializer = init,
padding = 'same')(x) # shape(L/4, L/4, 128)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2DTranspose(64, (4, 4), strides = (2, 2),
kernel_initializer = init,
padding = 'same')(x) # shape(L/2, L/2, 64)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2DTranspose(3, (4, 4), strides= (2, 2),
kernel_initializer = init,
padding = 'same')(x) # shape(L, L, 3)
images = Activation('tanh')(x)
model = Model(inputs = inputs, outputs = images)
model.summary()
return model
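GeneratorDeconv doubles the spatial resolution four times (L/16 up to L) with stride-2 transposed convolutions. Note that init is a free variable in this snippet; the calling sketch below assumes a DCGAN-style initializer for it.
import numpy as np
from keras.initializers import RandomNormal

init = RandomNormal(stddev=0.02)  # assumption: the snippet leaves init undefined
g = GeneratorDeconv(image_size=64)
z = np.random.normal(size=(1, 100))
print(g.predict(z).shape)  # expected: (1, 64, 64, 3)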
def build(self):
query = Input(name='query', shape=(self.config['text1_maxlen'],))
show_layer_info('Input', query)
doc = Input(name='doc', shape=(self.config['text2_maxlen'],))
show_layer_info('Input', doc)
embedding = Embedding(self.config['vocab_size'], self.config['embed_size'], weights=[self.config['embed']], trainable = self.embed_trainable)
q_embed = embedding(query)
show_layer_info('Embedding', q_embed)
d_embed = embedding(doc)
show_layer_info('Embedding', d_embed)
q_rep = Bidirectional(LSTM(self.config['hidden_size'], return_sequences=True, dropout=self.config['dropout_rate']))(q_embed)
show_layer_info('Bidirectional-LSTM', q_rep)
d_rep = Bidirectional(LSTM(self.config['hidden_size'], return_sequences=True, dropout=self.config['dropout_rate']))(d_embed)
show_layer_info('Bidirectional-LSTM', d_rep)
cross = Match(match_type='dot')([q_rep, d_rep])
#cross = Dot(axes=[2, 2])([q_embed, d_embed])
show_layer_info('Match-dot', cross)
cross_reshape = Reshape((-1, ))(cross)
show_layer_info('Reshape', cross_reshape)
mm_k = Lambda(lambda x: K.tf.nn.top_k(x, k=self.config['topk'], sorted=True)[0])(cross_reshape)
show_layer_info('Lambda-topk', mm_k)
pool1_flat_drop = Dropout(rate=self.config['dropout_rate'])(mm_k)
show_layer_info('Dropout', pool1_flat_drop)
if self.config['target_mode'] == 'classification':
out_ = Dense(2, activation='softmax')(pool1_flat_drop)
elif self.config['target_mode'] in ['regression', 'ranking']:
out_ = Dense(1)(pool1_flat_drop)
show_layer_info('Dense', out_)
#model = Model(inputs=[query, doc, dpool_index], outputs=out_)
model = Model(inputs=[query, doc], outputs=out_)
return model
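The Lambda above reaches into K.tf (the TensorFlow module exposed by older Keras backends) to keep the topk largest entries of the flattened match matrix. The same operation in plain TensorFlow, as a standalone sketch:
import tensorflow as tf

scores = tf.constant([[0.1, 0.9, 0.4, 0.7]])
top3 = tf.nn.top_k(scores, k=3, sorted=True).values  # [[0.9, 0.7, 0.4]]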
def build(self):
query = Input(name='query', shape=(self.config['text1_maxlen'],))
show_layer_info('Input', query)
doc = Input(name='doc', shape=(self.config['text2_maxlen'],))
show_layer_info('Input', doc)
dpool_index = Input(name='dpool_index', shape=[self.config['text1_maxlen'], self.config['text2_maxlen'], 3], dtype='int32')
show_layer_info('Input', dpool_index)
embedding = Embedding(self.config['vocab_size'], self.config['embed_size'], weights=[self.config['embed']], trainable = self.embed_trainable)
q_embed = embedding(query)
show_layer_info('Embedding', q_embed)
d_embed = embedding(doc)
show_layer_info('Embedding', d_embed)
cross = Dot(axes=[2, 2], normalize=False)([q_embed, d_embed])
show_layer_info('Dot', cross)
cross_reshape = Reshape((self.config['text1_maxlen'], self.config['text2_maxlen'], 1))(cross)
show_layer_info('Reshape', cross_reshape)
conv2d = Conv2D(self.config['kernel_count'], self.config['kernel_size'], padding='same', activation='relu')
dpool = DynamicMaxPooling(self.config['dpool_size'][0], self.config['dpool_size'][1])
conv1 = conv2d(cross_reshape)
show_layer_info('Conv2D', conv1)
pool1 = dpool([conv1, dpool_index])
show_layer_info('DynamicMaxPooling', pool1)
pool1_flat = Flatten()(pool1)
show_layer_info('Flatten', pool1_flat)
pool1_flat_drop = Dropout(rate=self.config['dropout_rate'])(pool1_flat)
show_layer_info('Dropout', pool1_flat_drop)
if self.config['target_mode'] == 'classification':
out_ = Dense(2, activation='softmax')(pool1_flat_drop)
elif self.config['target_mode'] in ['regression', 'ranking']:
out_ = Dense(1)(pool1_flat_drop)
show_layer_info('Dense', out_)
model = Model(inputs=[query, doc, dpool_index], outputs=out_)
return model
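Both build() methods read their hyperparameters from self.config; the sketch below lists the keys this second (MatchPyramid-style) variant expects. All values are illustrative assumptions, and the embedding matrix is a random stand-in for pretrained vectors.
import numpy as np

config = {
    'text1_maxlen': 10, 'text2_maxlen': 40,
    'vocab_size': 30000, 'embed_size': 300,
    'embed': np.random.normal(size=(30000, 300)),  # stand-in for pretrained embeddings
    'kernel_count': 32, 'kernel_size': [3, 3],
    'dpool_size': [3, 10],
    'dropout_rate': 0.5,
    'target_mode': 'ranking',
}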
def build_errors(states,base,pad,dim,size):
# address the numerical viscosity in swirling
s = K.round(states+viscosity_adjustment)
s = Reshape((dim+2*pad,dim+2*pad,1))(s)
s = Cropping2D(((pad,pad),(pad,pad)))(s)
s = K.reshape(s,[-1,size,base,size,base])
s = K.permute_dimensions(s, [0,1,3,2,4])
s = K.reshape(s,[-1,size,size,1,base,base])
s = K.tile(s, [1, 1, 1, 2, 1, 1]) # number of panels: 2
allpanels = K.variable(panels)
allpanels = K.reshape(allpanels, [1,1,1,2,base,base])
allpanels = K.tile(allpanels, [K.shape(s)[0], size,size, 1, 1, 1])
def hash(x):
## 3x3 average hashing (mean over 3x3 sub-blocks, then round)
x = K.reshape(x, [-1,size,size,2, base//3, 3, base//3, 3])
x = K.mean(x, axis=(5,7))
return K.round(x)
## diff hashing (horizontal diff)
# x1 = x[:,:,:,:,:,:-1]
# x2 = x[:,:,:,:,:,1:]
# d = x1 - x2
# return K.round(d)
## just rounding
# return K.round(x)
## do nothing
# return x
# s = hash(s)
# allpanels = hash(allpanels)
# error = K.binary_crossentropy(s, allpanels)
error = K.abs(s - allpanels)
error = hash(error)
error = K.mean(error, axis=(4,5))
return error
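The inner hash() averages 3x3 sub-blocks of each base x base tile and rounds the result. An equivalent NumPy sketch for a single tile, assuming base is divisible by 3:
import numpy as np

base = 9
tile = np.random.randint(0, 2, size=(base, base)).astype(float)
blocks = tile.reshape(base // 3, 3, base // 3, 3)
hashed = np.round(blocks.mean(axis=(1, 3)))  # shape: (base // 3, base // 3)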
def __init__(self,N,M,min,max,anneal_rate):
self.N = N
self.M = M
self.layers = Sequential([
# Dense(N * M),
Reshape((N,M))])
self.min = min
self.max = max
self.anneal_rate = anneal_rate
self.tau = K.variable(max, name="temperature")
def build_decoder(self,input_shape):
data_dim = np.prod(input_shape)
return [
*([Dropout(self.parameters['dropout'])] if self.parameters['dropout_z'] else []),
Dense(self.parameters['layer'], activation='relu', use_bias=False),
BN(),
Dropout(self.parameters['dropout']),
Dense(self.parameters['layer'], activation='relu', use_bias=False),
BN(),
Dropout(self.parameters['dropout']),
Dense(data_dim, activation='sigmoid'),
Reshape(input_shape),]
def _build(self,input_shape):
data_dim = np.prod(input_shape)
self.gs = self.build_gs()
self.gs2 = self.build_gs(N=data_dim)
self.gs3 = self.build_gs(N=data_dim)
_encoder = self.build_encoder(input_shape)
_decoder = self.build_decoder(input_shape)
x = Input(shape=input_shape)
z = Sequential([flatten, *_encoder, self.gs])(x)
y = Sequential([flatten,
*_decoder,
self.gs2,
Lambda(take_true),
Reshape(input_shape)])(z)
z2 = Input(shape=(self.parameters['N'], self.parameters['M']))
y2 = Sequential([flatten,
*_decoder,
self.gs3,
Lambda(take_true),
Reshape(input_shape)])(z2)
def rec(x, y):
return bce(K.reshape(x,(K.shape(x)[0],data_dim,)),
K.reshape(y,(K.shape(x)[0],data_dim,)))
def loss(x, y):
return rec(x,y) + self.gs.loss() + self.gs2.loss()
self.callbacks.append(LambdaCallback(on_epoch_end=self.gs.cool))
self.callbacks.append(LambdaCallback(on_epoch_end=self.gs2.cool))
self.callbacks.append(LambdaCallback(on_epoch_end=self.gs3.cool))
self.custom_log_functions['tau'] = lambda: K.get_value(self.gs.tau)
self.loss = loss
self.metrics.append(rec)
self.encoder = Model(x, z)
self.decoder = Model(z2, y2)
self.net = Model(x, y)
self.autoencoder = self.net
def build_decoder(self,input_shape):
"this function did not converge well. sigh"
data_dim = np.prod(input_shape)
last_convolution = 1 + np.array(input_shape) // 4
first_convolution = last_convolution * 4
diff = tuple(first_convolution - input_shape)
crop = [[0,0],[0,0]]
for i in range(2):
if diff[i] % 2 == 0:
for j in range(2):
crop[i][j] = diff[i] // 2
else:
crop[i][0] = diff[i] // 2
crop[i][1] = diff[i] // 2 + 1
crop = ((crop[0][0],crop[0][1]),(crop[1][0],crop[1][1]))
print(last_convolution,first_convolution,diff,crop)
return [*([Dropout(self.parameters['dropout'])] if self.parameters['dropout_z'] else []),
*[Dense(self.parameters['layer'], activation='relu', use_bias=False),
BN(),
Dropout(self.parameters['dropout']),],
*[Dense(np.prod(last_convolution) * self.parameters['clayer'], activation='relu', use_bias=False),
BN(),
Dropout(self.parameters['dropout']),],
Reshape((*last_convolution, self.parameters['clayer'])),
*[UpSampling2D((2,2)),
Deconvolution2D(self.parameters['clayer'],(3,3), activation='relu',padding='same', use_bias=False),
BN(),
Dropout(self.parameters['dropout']),],
*[UpSampling2D((2,2)),
Deconvolution2D(1,(3,3), activation='sigmoid',padding='same'),],
Cropping2D(crop),
Reshape(input_shape),]
def build_decoder(self,input_shape):
data_dim = np.prod(input_shape)
return [
Dense(self.parameters['layer'], activation='relu', use_bias=False),
BN(),
Dropout(self.parameters['dropout']),
Dense(self.parameters['layer'], activation='relu', use_bias=False),
BN(),
Dropout(self.parameters['dropout']),
Dense(data_dim, activation='sigmoid'),
Reshape(input_shape),]
def combined_discriminate2(data,sae,discriminator,**kwargs):
_data = Input(shape=data.shape[1:])
_data2 = Reshape((*data.shape[1:],1))(_data)
_categorical = wrap(_data,K.concatenate([_data2, 1-_data2],-1),name="categorical")
_images = sae.decoder(_categorical)
_features = sae.features(_images)
_results = discriminator.net(_features)
m = Model(_data, _results)
return m.predict(data,**kwargs)
def __init__(self,sae,discriminator):
_data = Input(shape=(sae.parameters['N'],))
_data2 = Reshape((sae.parameters['N'],1))(_data)
_categorical = wrap(_data,K.concatenate([_data2, 1-_data2],-1),name="categorical")
_images = sae.decoder(_categorical)
_features = sae.features(_images)
_results = discriminator.net(_features)
m = Model(_data, _results)
self.model = m
# action autoencoder ################################################################
def build_decoder(self,input_shape):
data_dim = np.prod(input_shape)
return [
*[
Sequential([
Dense(self.parameters['layer'], activation=self.parameters['decoder_activation'], use_bias=False),
BN(),
Dropout(self.parameters['dropout']),])
for i in range(self.parameters['decoder_layers'])
],
Sequential([
Dense(data_dim, activation='sigmoid'),
Reshape(input_shape),]),]
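Like the other decoders above, this one returns a list of layers for the caller to splice into a model. A sketch of the parameter dict it reads; the values are illustrative assumptions:
parameters = {
    'layer': 1000,                 # width of each hidden Dense block
    'decoder_layers': 2,           # number of Dense/BN/Dropout blocks
    'decoder_activation': 'relu',
    'dropout': 0.4,
}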
def build_generator(latent_size):
# we will map a pair of (z, L), where z is a latent vector and L is a
# label drawn from P_c, to image space (..., 1, 28, 28)
cnn = Sequential()
cnn.add(Dense(1024, input_dim=latent_size, activation='relu'))
cnn.add(Dense(128 * 7 * 7, activation='relu'))
cnn.add(Reshape((128, 7, 7)))
# upsample to (..., 14, 14)
cnn.add(UpSampling2D(size=(2, 2)))
cnn.add(Convolution2D(256, 5, 5, border_mode='same',
activation='relu', init='glorot_normal'))
# upsample to (..., 28, 28)
cnn.add(UpSampling2D(size=(2, 2)))
cnn.add(Convolution2D(128, 5, 5, border_mode='same',
activation='relu', init='glorot_normal'))
# take a channel axis reduction
cnn.add(Convolution2D(1, 2, 2, border_mode='same',
activation='tanh', init='glorot_normal'))
# this is the z space commonly referred to in GAN papers
latent = Input(shape=(latent_size, ))
# this will be our label
image_class = Input(shape=(1,), dtype='int32')
# 10 classes in MNIST
cls = Flatten()(Embedding(10, latent_size,
init='glorot_normal')(image_class))
# Hadamard product between z-space and a class-conditional embedding
h = merge([latent, cls], mode='mul')
fake_image = cnn(h)
return Model(input=[latent, image_class], output=fake_image)
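A sampling sketch for this class-conditional (AC-GAN style) generator, using the Keras 1.x API as in the snippet; batch size and latent size are assumptions:
import numpy as np

g = build_generator(latent_size=100)
noise = np.random.uniform(-1, 1, (10, 100))
labels = np.random.randint(0, 10, (10, 1))
print(g.predict([noise, labels]).shape)  # expected: (10, 1, 28, 28)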
def build_model():
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Activation, Flatten, Reshape
from keras.layers import Convolution2D, MaxPooling2D
nb_classes = 10
nb_filters = 32
pool_size = (2,2)
kernel_size = (3,3)
v = Input(shape=(28,28))
h = Reshape([1,28,28])(v)
h = Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid')(h)
h = Activation('relu')(h)
h = Convolution2D(nb_filters, kernel_size[0], kernel_size[1])(h)
h = Activation('relu')(h)
h = MaxPooling2D(pool_size=pool_size)(h)
h = Dropout(0.25)(h)
h = Flatten()(h)
h = Dense(128)(h)
h = Activation('relu')(h)
h = Dropout(0.5)(h)
h = Dense(nb_classes)(h)
o = Activation('softmax')(h)
model = Model(input=v, output=o)
return model
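Because the model reshapes its (28, 28) input to (1, 28, 28), the convolutions assume channels-first ('th') dim ordering. A forward-pass sketch:
import numpy as np

model = build_model()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
x = np.random.rand(2, 28, 28).astype('float32')
print(model.predict(x).shape)  # expected: (2, 10)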
def build_model():
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Activation, Flatten, Reshape
from keras.layers import Convolution2D, MaxPooling2D
nb_classes = 10
nb_filters = {{nb_filters,choice,[64,32,16,8]}}
pool_size = (2,2)
kernel_size = (3,3)
v = Input(shape=(28,28))
h = Reshape([1,28,28])(v)
h = Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid')(h)
h = Activation('relu')(h)
{{cnn_layer2,maybe,
h = Convolution2D(nb_filters, kernel_size[0], kernel_size[1])(h)
h = Activation('relu')(h)
}}
h = MaxPooling2D(pool_size=pool_size)(h)
h = Dropout(0.25)(h)
h = Flatten()(h)
h = Dense(128)(h)
h = Activation('relu')(h)
h = Dropout(0.5)(h)
h = Dense(nb_classes)(h)
o = Activation('softmax')(h)
model = Model(input=v, output=o)
return model
def emit_Reshape(self, IR_node):
shape_str = self.shapeToStr(IR_node.IR_layer.attr["shape"].list.i)
self.add_body(1, "{:<15} = layers.Reshape(name = '{}', target_shape = ({},))({})".format(
IR_node.variable_name,
IR_node.name,
shape_str,
self.parent_variable_name(IR_node)))
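emit_Reshape is a code-generation hook (MMdnn-style converter): it does not build a layer itself but writes one line of Keras code into the output script. For a hypothetical IR node named reshape_1 with shape (1, 28, 28) fed by input_1, the emitted line would look roughly like:
reshape_1       = layers.Reshape(name = 'reshape_1', target_shape = (1,28,28,))(input_1)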
def _build_model(self):
model = Sequential()
model.add(Reshape((1, 80, 80), input_shape=(self.state_size,)))
model.add(Convolution2D(32, 6, 6, subsample=(3, 3), border_mode='same',
activation='relu', init='he_uniform'))
model.add(Flatten())
model.add(Dense(64, activation='relu', init='he_uniform'))
model.add(Dense(32, activation='relu', init='he_uniform'))
model.add(Dense(self.action_size, activation='softmax'))
opt = Adam(lr=self.learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=opt)
return model
def cnn(height_a, height_q, count):
question_input = Input(shape=(height_q, 1), name='question_input')
embedding_q = Embedding(input_dim=count, output_dim=128, input_length=height_q)(question_input)
re_q = Reshape((height_q, 128, 1), input_shape=(height_q,))(embedding_q)
conv1_Q = Conv2D(128, (2, 128), activation='sigmoid', padding='valid',
kernel_regularizer=regularizers.l2(0.02),
kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.05))(re_q)
Max1_Q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(conv1_Q)
F1_Q = Flatten()(Max1_Q)
Drop1_Q = Dropout(0.5)(F1_Q)
predictQ = Dense(64, activation='relu',
kernel_regularizer=regularizers.l2(0.02),
kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.05))(Drop1_Q)
# kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01)
answer_input = Input(shape=(height_a, 1), name='answer_input')
embedding_a = Embedding(input_dim=count, output_dim=128, input_length=height_a)(answer_input)
re_a = Reshape((height_a, 128, 1), input_shape=(height_a,))(embedding_a)
conv1_A = Conv2D(128, (2, 128), activation='sigmoid', padding='valid',
kernel_regularizer=regularizers.l2(0.02),
kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.05))(re_a)
Max1_A = MaxPooling2D((399, 1), strides=(1, 1), padding='valid')(conv1_A)
F1_A = Flatten()(Max1_A)
Drop1_A = Dropout(0.5)(F1_A)
predictA = Dense(64, activation='relu',
kernel_regularizer=regularizers.l2(0.02),
kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.05))(Drop1_A)
predictions = dot([predictA, predictQ], axes=-1)  # Keras 2 equivalent of the Keras 1 merge([...], mode='dot')
model = Model(inputs=[question_input, answer_input],
outputs=predictions)
model.compile(loss='mean_squared_error',
optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0))
# model.compile(loss='mean_squared_error',
# optimizer='nadam')
return model
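The hard-coded pool heights pin down the sequence lengths: the 'valid' (2, 128) convolution shrinks the height by one, so MaxPooling2D((29, 1)) implies height_q == 30 and (399, 1) implies height_a == 400. A usage sketch under those assumptions (the vocabulary size is also assumed):
import numpy as np

model = cnn(height_a=400, height_q=30, count=5000)
q = np.random.randint(0, 5000, (8, 30, 1))
a = np.random.randint(0, 5000, (8, 400, 1))
print(model.predict([q, a]).shape)  # expected: (8, 1)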