def run_benchmark(self, gpus=0):
    input_dim_1 = 40
    input_dim_2 = 60
    input_shape = (self.num_samples, input_dim_1, input_dim_2)
    x, y = generate_text_input_data(input_shape)

    # Build the model: a single LSTM layer followed by a softmax output.
    model = Sequential()
    model.add(LSTM(128, input_shape=(input_dim_1, input_dim_2)))
    model.add(Dense(input_dim_2, activation='softmax'))
    optimizer = RMSprop(lr=0.01)

    # On the TensorFlow backend, replicate the model across multiple GPUs.
    if keras.backend.backend() == "tensorflow" and gpus > 1:
        model = multi_gpu_model(model, gpus=gpus)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)

    # Create a distributed trainer for CNTK: each worker trains on its own
    # shard of the input data.
    if keras.backend.backend() == "cntk" and gpus > 1:
        start, end = cntk_gpu_mode_config(model, x.shape[0])
        x = x[start:end]
        y = y[start:end]

    time_callback = timehistory.TimeHistory()
    model.fit(x, y,
              batch_size=self.batch_size,
              epochs=self.epochs,
              callbacks=[time_callback])

    # Sum the per-epoch times, skipping the first epoch (warm-up).
    self.total_time = 0
    for i in range(1, self.epochs):
        self.total_time += time_callback.times[i]
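
The benchmark above depends on a timehistory.TimeHistory callback to collect per-epoch timings; its implementation is not shown here. A minimal sketch of such a callback, assuming it only needs to expose a times list of epoch durations, could look like this:

import time
import keras

class TimeHistory(keras.callbacks.Callback):
    """Records the wall-clock duration of every training epoch."""

    def on_train_begin(self, logs=None):
        self.times = []

    def on_epoch_begin(self, epoch, logs=None):
        self.epoch_start = time.time()

    def on_epoch_end(self, epoch, logs=None):
        self.times.append(time.time() - self.epoch_start)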