def _upload_metrics(current_model):
bq.upload_metrics_to_bq(test_name=current_model.test_name,
total_time=current_model.total_time,
epochs=current_model.epochs,
batch_size=current_model.batch_size,
backend_type=keras.backend.backend(),
backend_version=get_backend_version(),
cpu_num_cores=config['cpu_num_cores'],
cpu_memory=config['cpu_memory'],
cpu_memory_info=config['cpu_memory_info'],
gpu_count=config['gpus'],
gpu_platform=config['gpu_platform'],
platform_type=config['platform_type'],
platform_machine_type=config['platform_machine_type'],
keras_version=keras.__version__,
sample_type=current_model.sample_type)
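For context, here is a hypothetical shape for the surrounding config dict; the keys match the call above, but the values are illustrative only:

# Illustrative values, not from the source.
config = {
    'cpu_num_cores': 8,
    'cpu_memory': '30GB',
    'cpu_memory_info': 'available: 28GB',
    'gpus': 1,
    'gpu_platform': 'NVIDIA Tesla V100',
    'platform_type': 'GCP',
    'platform_machine_type': 'n1-standard-8',
}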
# MNIST MLP
def prog(self):  # Show progress
    # kv holds the Keras major version (set elsewhere in this module):
    # Keras 1 names the params 'nb_epoch'/'nb_sample', Keras 2 uses 'epochs'.
    epochs = self.params['nb_epoch'] if not kv - 1 else self.params['epochs']
    nb_batches_total = epochs * self.params['nb_sample'] / self.params['batch_size']
    nb_batches_epoch = self.params['nb_sample'] / self.params['batch_size']
    # The +0.01 keeps both fractions nonzero so the ETA divisions below are safe.
    prog_total = (self.t_batches / nb_batches_total if nb_batches_total else 0) + 0.01
    prog_epoch = (self.c_batches / nb_batches_epoch if nb_batches_epoch else 0) + 0.01
    now = time.time()
    eta_t = (now - self.train_start) * ((1 / prog_total) - 1)
    if self.t_epochs:
        # Past epochs recorded: estimate the epoch ETA from their mean duration.
        t_mean = float(sum(self.t_epochs)) / len(self.t_epochs)
        eta_e = t_mean * (1 - prog_epoch)
        epoch_label = str(self.epoch[-1])
    else:
        # Still in the first epoch: extrapolate from elapsed time.
        eta_e = (now - self.train_start) * ((1 / prog_epoch) - 1)
        epoch_label = str(len(self.epoch))
    t_end = time.asctime(time.localtime(now + eta_t))
    e_end = time.asctime(time.localtime(now + eta_e))
    m = ('\nTotal:\nProg:' + str(prog_total * 100.)[:5] + '%'
         + '\nEpoch:' + epoch_label + '/' + str(self.stopped_epoch)
         + '\nETA:' + str(eta_t)[:8] + 'sec'
         + '\nTrain will be finished at ' + t_end
         + '\nCurrent epoch:\nPROG:' + str(prog_epoch * 100.)[:5] + '%'
         + '\nETA:' + str(eta_e)[:8] + 'sec'
         + '\nCurrent epoch will be finished at ' + e_end)
    self.t_send(m)
    print(m)
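The ETA formula is a linear extrapolation: if a fraction p of the batches has been processed after e elapsed seconds, the remaining time is e * (1/p - 1); for example, 25% done after 60 seconds gives 60 * (1/0.25 - 1) = 180 seconds remaining.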
def on_train_begin(self, logs={}):
    self.epoch = []
    self.t_epochs = []
    self.t_batches = 0
    self.logs_batches = {}
    self.logs_epochs = {}
    self.train_start = time.time()
    self.localtime = time.asctime(time.localtime(self.train_start))
    self.mesg = 'Train started at: ' + self.localtime
    self.t_send(self.mesg)
    # Keras 2 renamed 'nb_epoch' to 'epochs'; kv is the Keras major version.
    self.stopped_epoch = (self.params['epochs'] if kv - 1 else self.params['nb_epoch'])
#==============================================================================
#
#==============================================================================
def collect_environment(self):
    import socket
    import os
    import sys
    import pip
    import platform
    env = {}
    import aetros
    env['aetros_version'] = aetros.__version__
    env['python_version'] = platform.python_version()
    env['python_executable'] = sys.executable
    env['hostname'] = socket.gethostname()
    env['variables'] = dict(os.environ)
    # Never ship SSH credentials along with the environment snapshot.
    if 'AETROS_SSH_KEY' in env['variables']:
        del env['variables']['AETROS_SSH_KEY']
    if 'AETROS_SSH_KEY_BASE64' in env['variables']:
        del env['variables']['AETROS_SSH_KEY_BASE64']
    # pip.get_installed_distributions() exists only in pip < 10; newer pip
    # exposes the same information through pkg_resources.working_set.
    env['pip_packages'] = sorted([[i.key, i.version] for i in pip.get_installed_distributions()])
    self.set_system_info('environment', env)
def batchnorm_conversion(layer, name, verbose, **kwargs):
    import keras
    if hasattr(keras, '__version__'):
        keras_version = float(keras.__version__[0:3])  # e.g. '2.0.8' -> 2.0
    else:
        keras_version = 0.2  # very old Keras had no __version__ attribute
    if keras_version <= 0.3:
        # Old Keras stores the running std directly.
        std = np.array(layer.running_std.get_value())
        epsilon = layer.epsilon
    else:
        # Newer Keras stores the running variance; fold epsilon into std here.
        std = np.sqrt(np.array(layer.running_std.get_value() + layer.epsilon))
        epsilon = 0
    return [blobs.BatchNormalization(
        name=name,
        verbose=verbose,
        gamma=np.array(layer.gamma.get_value()),
        beta=np.array(layer.beta.get_value()),
        axis=layer.axis,
        mean=np.array(layer.running_mean.get_value()),
        std=std,
        epsilon=epsilon)]
jsmacifar.py, from project AdversarialMachineLearning_COMP551 (author: arunrawlani)
def conv_2d(filters, kernel_shape, strides, padding):
    """
    Defines the right convolutional layer according to the
    version of Keras that is installed.
    :param filters: (required integer) the dimensionality of the output
                    space (i.e. the number of output filters in the
                    convolution)
    :param kernel_shape: (required tuple or list of 2 integers) specifies
                         the width and height of the convolution kernel
    :param strides: (required tuple or list of 2 integers) specifies the
                    strides of the convolution along the width and height
    :param padding: (required string) can be either 'valid' (no padding around
                    input or feature map) or 'same' (pad to ensure that the
                    output feature map size is identical to the layer input)
    :return: the Keras layer
    """
    if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
        return Conv2D(filters=filters, kernel_size=kernel_shape,
                      strides=strides, padding=padding)
    else:
        return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
                             subsample=strides, border_mode=padding)
# the cnn_model used
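A minimal usage sketch (not from the original source; it assumes keras, LooseVersion, Conv2D, and Convolution2D are imported as in the surrounding file):

# Hypothetical call: a 64-filter 3x3 convolution that builds on Keras 1 or 2.
layer = conv_2d(filters=64, kernel_shape=(3, 3), strides=(1, 1), padding='same')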
def get_backend_version():
if keras.backend.backend() == "tensorflow":
return tf.__version__
if keras.backend.backend() == "theano":
return theano.__version__
if keras.backend.backend() == "cntk":
return cntk.__version__
return "undefined"
#==============================================================================
#
#==============================================================================
def conv_2d(filters, kernel_shape, strides, padding, input_shape=None):
    """
    Defines the right convolutional layer according to the
    version of Keras that is installed.
    :param filters: (required integer) the dimensionality of the output
                    space (i.e. the number of output filters in the
                    convolution)
    :param kernel_shape: (required tuple or list of 2 integers) specifies
                         the width and height of the convolution kernel
    :param strides: (required tuple or list of 2 integers) specifies the
                    strides of the convolution along the width and height
    :param padding: (required string) can be either 'valid' (no padding around
                    input or feature map) or 'same' (pad to ensure that the
                    output feature map size is identical to the layer input)
    :param input_shape: (optional) give input shape if this is the first
                        layer of the model
    :return: the Keras layer
    """
if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
if input_shape is not None:
return Conv2D(filters=filters, kernel_size=kernel_shape,
strides=strides, padding=padding,
input_shape=input_shape)
else:
return Conv2D(filters=filters, kernel_size=kernel_shape,
strides=strides, padding=padding)
else:
if input_shape is not None:
return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
subsample=strides, border_mode=padding,
input_shape=input_shape)
else:
return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
subsample=strides, border_mode=padding)
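Illustrative first-layer usage (hypothetical shapes; same import assumptions as the earlier sketch):

# When used as the first layer of a model, pass input_shape explicitly.
first = conv_2d(filters=32, kernel_shape=(3, 3), strides=(1, 1),
                padding='same', input_shape=(28, 28, 1))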
def _updated_config(self):
'''Shared between different serialization methods.'''
from keras import __version__ as keras_version
config = self.get_config()
model_config = {
'class_name': self.__class__.__name__,
'config': config,
'keras_version': keras_version
}
return model_config
def save_model(model, filepath, overwrite=True):
def get_json_type(obj):
if hasattr(obj, 'get_config'):
return {'class_name': obj.__class__.__name__,
'config': obj.get_config()}
if type(obj).__module__ == np.__name__:
return obj.item()
if callable(obj) or type(obj).__name__ == type.__name__:
return obj.__name__
raise TypeError('Not JSON Serializable:', obj)
import h5py
from keras import __version__ as keras_version
if not overwrite and os.path.isfile(filepath):
proceed = keras.models.ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
f = h5py.File(filepath, 'w')
f.attrs['keras_version'] = str(keras_version).encode('utf8')
    f.attrs['generator_config'] = json.dumps({
        'class_name': model.generator.__class__.__name__,
        'config': model.generator.get_config(),
    }, default=get_json_type).encode('utf8')
f.attrs['discriminator_config'] = json.dumps({
'class_name': model.discriminator.__class__.__name__,
'config': model.discriminator.get_config(),
}, default=get_json_type).encode('utf8')
generator_weights_group = f.create_group('generator_weights')
discriminator_weights_group = f.create_group('discriminator_weights')
model.generator.save_weights_to_hdf5_group(generator_weights_group)
model.discriminator.save_weights_to_hdf5_group(discriminator_weights_group)
f.flush()
f.close()
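A hedged reading counterpart (not part of the original; it relies only on the attribute names save_model writes above):

def load_gan_configs(filepath):
    # Illustrative: recover the two JSON configs written by save_model.
    import json
    import h5py
    with h5py.File(filepath, 'r') as f:
        raw_gen = f.attrs['generator_config']
        raw_disc = f.attrs['discriminator_config']
    # h5py may return str or bytes depending on its version.
    if isinstance(raw_gen, bytes):
        raw_gen = raw_gen.decode('utf8')
    if isinstance(raw_disc, bytes):
        raw_disc = raw_disc.decode('utf8')
    return json.loads(raw_gen), json.loads(raw_disc)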
def keras_version():
return tuple(map(int, keras.__version__.split('.')))
def assert_keras_version():
detected = keras.__version__
required = '.'.join(map(str, minimum_keras_version))
assert(keras_version() >= minimum_keras_version), 'You are using keras version {}. The minimum required version is {}.'.format(detected, required)
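An illustrative guard (minimum_keras_version is a module-level tuple in the original; the value shown here is an assumption):

# Hypothetical floor: require at least Keras 2.0.0 before proceeding.
minimum_keras_version = (2, 0, 0)
assert_keras_version()  # raises AssertionError on older installs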
def set_generator_validation_nb(self, number):
    """
    Sets self.nb_val_samples, which is used in model.fit when the input is a
    generator. The number is rounded up to the next multiple of the batch
    size; under Keras 2 the sample count is then converted to a number of
    validation steps.
    :param number: requested number of validation samples
    :return:
    """
self.nb_val_samples = number
diff_to_batch = number % self.get_batch_size()
if diff_to_batch > 0:
self.nb_val_samples += self.get_batch_size() - diff_to_batch
import keras
if '1' != keras.__version__[0]:
self.nb_val_samples = self.nb_val_samples // self.get_batch_size()
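A worked example (illustrative numbers): with a batch size of 32 and number=1050, diff_to_batch is 1050 % 32 = 26, so nb_val_samples is padded to 1056; on Keras 2 (version prefix other than '1') it is then divided by the batch size, giving 33 validation steps.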
def upload_keras_graph(self, model):
from aetros.keras import model_to_graph
import keras
if keras.__version__[0] == '2':
graph = model_to_graph(model)
self.set_graph(graph)
def test_SimpleRNN(self):
    # The trailing comma makes `params` a one-element tuple of dicts, which is
    # why the code below indexes it as params[0].
    params = dict(
        input_dims=[1, 2, 100], go_backwards=False, activation='tanh',
        stateful=False, unroll=False, return_sequences=True, output_dim=4  # Passes for < 3
    ),
model = Sequential()
if keras.__version__[:2] == '2.':
model.add(SimpleRNN(units=params[0]['output_dim'],
input_shape=(params[0]['input_dims'][1],params[0]['input_dims'][2]),
activation=params[0]['activation'],
return_sequences=params[0]['return_sequences'],
go_backwards=params[0]['go_backwards'],
unroll=True,
))
else:
model.add(SimpleRNN(output_dim=params[0]['output_dim'],
input_length=params[0]['input_dims'][1],
input_dim=params[0]['input_dims'][2],
activation=params[0]['activation'],
return_sequences=params[0]['return_sequences'],
go_backwards=params[0]['go_backwards'],
unroll=True,
))
relative_error, keras_preds, coreml_preds = simple_model_eval(params, model)
for i in range(len(relative_error)):
self.assertLessEqual(relative_error[i], 0.01)
def test_SimpleLSTM(self):
params = dict(
input_dims=[1, 3, 5], go_backwards=True, activation='linear',
stateful=False, unroll=False, return_sequences=False, output_dim=3,
inner_activation='linear'
),
model = Sequential()
if keras.__version__[:2] == '2.':
model.add(LSTM(units=params[0]['output_dim'],
input_shape=(params[0]['input_dims'][1],params[0]['input_dims'][2]),
activation=params[0]['activation'],
return_sequences=params[0]['return_sequences'],
go_backwards=params[0]['go_backwards'],
unroll=True,
recurrent_activation='linear'
))
else:
model.add(LSTM(output_dim=params[0]['output_dim'],
input_length=params[0]['input_dims'][1],
input_dim=params[0]['input_dims'][2],
activation=params[0]['activation'],
return_sequences=params[0]['return_sequences'],
go_backwards=params[0]['go_backwards'],
unroll=True,
inner_activation='linear'
))
relative_error, keras_preds, coreml_preds = simple_model_eval(params, model)
for i in range(len(relative_error)):
self.assertLessEqual(relative_error[i], 0.01)
def test_SimpleGRU(self):
params = dict(
input_dims=[1, 4, 8], go_backwards=False, activation='tanh',
stateful=False, unroll=False, return_sequences=False, output_dim=4
),
model = Sequential()
if keras.__version__[:2] == '2.':
model.add(GRU(units=params[0]['output_dim'],
input_shape=(params[0]['input_dims'][1],params[0]['input_dims'][2]),
activation=params[0]['activation'],
recurrent_activation='sigmoid',
return_sequences=params[0]['return_sequences'],
go_backwards=params[0]['go_backwards'],
unroll=True,
))
else:
model.add(GRU(output_dim=params[0]['output_dim'],
input_length=params[0]['input_dims'][1],
input_dim=params[0]['input_dims'][2],
activation=params[0]['activation'],
inner_activation='sigmoid',
return_sequences=params[0]['return_sequences'],
go_backwards=params[0]['go_backwards'],
unroll=True,
))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
relative_error, keras_preds, coreml_preds = simple_model_eval(params, model)
for i in range(len(relative_error)):
self.assertLessEqual(relative_error[i], 0.01)
def prog(self):  # Show progress (variant using the Keras 2 'epochs'/'samples' keys)
    epochs = self.params['epochs'] if kv - 1 else self.params['nb_epoch']
    samples = self.params['samples'] if kv - 1 else self.params['nb_sample']
    nb_batches_total = epochs * samples / self.params['batch_size']
    nb_batches_epoch = samples / self.params['batch_size']
    # The +0.01 keeps both fractions nonzero so the ETA divisions below are safe.
    prog_total = (self.t_batches / nb_batches_total if nb_batches_total else 0) + 0.01
    prog_epoch = (self.c_batches / nb_batches_epoch if nb_batches_epoch else 0) + 0.01
    now = time.time()
    eta_t = (now - self.train_start) * ((1 / prog_total) - 1)
    if self.t_epochs:
        # Past epochs recorded: estimate the epoch ETA from their mean duration.
        t_mean = float(sum(self.t_epochs)) / len(self.t_epochs)
        eta_e = t_mean * (1 - prog_epoch)
        epoch_label = str(self.epoch[-1])
    else:
        # Still in the first epoch: extrapolate from elapsed time.
        eta_e = (now - self.train_start) * ((1 / prog_epoch) - 1)
        epoch_label = str(len(self.epoch))
    t_end = time.asctime(time.localtime(now + eta_t))
    e_end = time.asctime(time.localtime(now + eta_e))
    m = ('\nTotal:\nProg:' + str(prog_total * 100.)[:5] + '%'
         + '\nEpoch:' + epoch_label + '/' + str(self.stopped_epoch)
         + '\nETA:' + str(eta_t)[:8] + 'sec'
         + '\nTrain will be finished at ' + t_end
         + '\nCurrent epoch:\nPROG:' + str(prog_epoch * 100.)[:5] + '%'
         + '\nETA:' + str(eta_e)[:8] + 'sec'
         + '\nCurrent epoch will be finished at ' + e_end)
    self.t_send(msg=m)
    print(m)
#==============================================================================
#
#==============================================================================
def run(self):
    # Check that the model's Keras version matches the locally installed one.
    f = h5py.File('/home/wil/ros/catkin_ws/src/diy_driverless_car_ROS/rover_ml/behavior_cloning/src/behavior_cloning/model.h5', mode='r')
    model_version = f.attrs.get('keras_version')
    f.close()
    # keras is assumed to be imported at module level.
    keras_version = str(keras.__version__).encode('utf8')
    if model_version != keras_version:
        print('You are using Keras version ', keras_version,
              ', but the model was built using ', model_version)
    model = load_model('/home/wil/ros/catkin_ws/src/diy_driverless_car_ROS/rover_ml/behavior_cloning/src/behavior_cloning/model.h5')
    print("Model loaded.")
    while True:
        # Only run the loop once an image has been received.
        if self.imgRcvd:
            # step 1: resize the incoming camera frame
            self.resized_image = cv2.resize(self.latestImage, (320, 180))
            # step 2: convert the frame to a numpy array for the model
            image_array = np.asarray(self.resized_image)
            # step 3: constant throttle; the steering angle comes from the model
            self.cmdvel.linear.x = 0.2
            self.cmdvel.angular.z = float(model.predict(image_array[None, :, :, :], batch_size=1))
            #print(self.cmdvel.angular.z)
            self.cmdVel_publish(self.cmdvel)
            # Publish the processed image
            self.outputImage = self.latestImage
            self.publish(self.outputImage, self.bridge, self.image_pub)
def make_model(X, class_names, nb_layers=4, try_checkpoint=True,
no_cp_fatal=False, weights_file='weights.hdf5'):
model = None
from_scratch = True
# Initialize weights using checkpoint if it exists.
if (try_checkpoint):
print("Looking for previous weights...")
if ( isfile(weights_file) ):
print ('Weights file detected. Loading from ',weights_file)
model = load_model(weights_file)
from_scratch = False
else:
if (no_cp_fatal):
raise Exception("No weights file detected; can't do anything. Aborting.")
else:
print('No weights file detected, so starting from scratch.')
if from_scratch:
if (LooseVersion(keras.__version__) < LooseVersion("2")):
print("Making Keras 1 version of model")
model = MyCNN(X, nb_classes=len(class_names), nb_layers=nb_layers)
else:
print("Making Keras 2 version of model")
model = MyCNN_Keras2(X, nb_classes=len(class_names), nb_layers=nb_layers)
model.summary()
return model
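An illustrative call (argument values are assumptions matching the signature above):

# Loads weights.hdf5 if present, otherwise builds a fresh 4-layer CNN.
model = make_model(X_train, class_names=['cat', 'dog'], nb_layers=4)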