def train_model(model, args, X_train, X_valid, y_train, y_valid):
    """
    Train the model
    """
    #Saves the model after every epoch.
    #monitor: quantity to monitor; verbose: logging mode (0 or 1).
    #If save_best_only is true, the latest best model according to the quantity monitored will not
    #be overwritten by a worse one.
    #mode: one of {auto, min, max}. If save_best_only=True, the decision to overwrite the current
    #save file is made based on either the maximization or the minimization of the monitored
    #quantity. For val_acc this should be max, for val_loss this should be min, etc. In auto mode,
    #the direction is automatically inferred from the name of the monitored quantity.
    checkpoint = ModelCheckpoint('model-{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=0,
                                 save_best_only=args.save_best_only,
                                 mode='auto')
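    #Note: the 'model-{epoch:03d}.h5' pattern yields files such as model-000.h5, model-001.h5, ...
    #(the exact epoch numbering depends on the Keras version); with save_best_only enabled, only
    #epochs that improve val_loss produce a new file.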
    #Mean squared error: take the difference between the expected steering angle and the predicted
    #steering angle, square it, sum those squared differences over all data points, and divide by
    #the number of points. That value is the mean squared error, which is what we want to minimize
    #via gradient descent.
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=args.learning_rate))
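    #Illustrative sketch only (made-up values, not part of the training flow): the same loss
    #computed by hand with NumPy for a toy batch of steering angles:
    #    y_true = np.array([0.10, -0.20, 0.00])   #expected steering angles
    #    y_pred = np.array([0.15, -0.10, 0.05])   #predicted steering angles
    #    mse    = np.mean(np.square(y_pred - y_true))   #-> (0.05**2 + 0.10**2 + 0.05**2) / 3 = 0.005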
    #Fits the model on data generated batch-by-batch by a Python generator.
    #The generator is run in parallel to the model, for efficiency.
    #For instance, this allows you to do real-time data augmentation on images on the CPU in
    #parallel to training the model on the GPU.
    #So the generator builds batches of training data on the fly and the model trains on them
    #simultaneously as they arrive.
    model.fit_generator(batch_generator(args.data_dir, X_train, y_train, args.batch_size, True),
                        args.samples_per_epoch,
                        args.nb_epoch,
                        max_q_size=1,
                        validation_data=batch_generator(args.data_dir, X_valid, y_valid, args.batch_size, False),
                        nb_val_samples=len(X_valid),
                        callbacks=[checkpoint],
                        verbose=1)
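    #For reference, fit_generator expects the training generator to yield (inputs, targets) batches
    #forever. A rough, hypothetical sketch of such a generator (load_image is an assumed helper;
    #the real batch_generator used above is defined elsewhere in this project and may differ):
    #    def batch_generator(data_dir, image_paths, steering_angles, batch_size, is_training):
    #        while True:
    #            idx = np.random.permutation(len(image_paths))[:batch_size]
    #            images = np.asarray([load_image(data_dir, image_paths[i]) for i in idx])
    #            steers = np.asarray([steering_angles[i] for i in idx])
    #            yield images, steers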
#for command line args