def setup_tutorial():
"""
Helper function to check correct configuration of tf and keras for tutorial
:return: True if setup checks completed
"""
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
if not hasattr(backend, "tf"):
raise RuntimeError("This tutorial requires keras to be configured"
" to use the TensorFlow backend.")
    # Image dimensions ordering should follow the TensorFlow convention
if keras.backend.image_dim_ordering() != 'tf':
keras.backend.set_image_dim_ordering('tf')
print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' "
"to 'th', temporarily setting to 'tf'")
return True
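A minimal usage sketch (hypothetical; it assumes `tensorflow` and `keras.backend` are imported under the names the function body expects):

import tensorflow as tf
import keras
from keras import backend

# Run the configuration checks once, before building any model.
if setup_tutorial():
    print("Keras is configured for the TensorFlow backend")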
def variable(value, dtype=None, name=None, constraint=None):
if isinstance(value, Tensor):
value = value.value
if isinstance(value, torch.autograd.Variable):
value = value.data
if 'torch' in str(type(value)):
value = value.numpy()
name = _prepare_name(name, 'variable')
if dtype is None:
dtype = keras.backend.floatx()
if value.dtype != dtype:
value = np.cast[dtype](value)
torch_tensor = torch.from_numpy(value)
torch_variable = torch.autograd.Variable(torch_tensor, requires_grad=True)
ktorch_variable = Variable(torch_variable, name=name)
    ktorch_variable.constraint = constraint
make_keras_tensor(ktorch_variable)
return ktorch_variable
def constant(value, dtype=None, shape=None, name=None):
value = np.array(value)
name = _prepare_name(name, 'constant')
if dtype is None:
dtype = keras.backend.floatx()
if value.dtype != dtype:
value = np.cast[dtype](value)
if value.shape == ():
if shape is None:
shape = ()
        value = np.ones(shape, dtype=dtype) * value
torch_tensor = torch.from_numpy(value)
torch_variable = torch.autograd.Variable(torch_tensor, requires_grad=False)
ktorch_variable = Variable(torch_variable, name=name)
make_keras_tensor(ktorch_variable)
return ktorch_variable
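A hedged usage sketch for the two factory functions above; the numpy input and the names `w` and `b` are illustrative assumptions:

import numpy as np

# Trainable variable wrapping a numpy array (requires_grad=True internally)
w = variable(np.random.randn(3, 3).astype('float32'), name='w')
# Non-trainable constant; a scalar value is broadcast to the requested shape
b = constant(0.5, shape=(3,), name='b')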
def saveModel(self, outfile):
    import os
    import shutil
    import tensorflow as tf
    import keras.backend as K
    # Save the Keras model itself (HDF5)
    self.keras_model.save(self.outputDir + outfile)
    # Additionally export the backing TensorFlow session as a checkpoint
    tfsession = K.get_session()
    saver = tf.train.Saver()
    tfoutpath = self.outputDir + outfile + '_tfsession/tf'
    # Recreate the checkpoint directory from scratch
    shutil.rmtree(tfoutpath, ignore_errors=True)
    os.makedirs(tfoutpath)
    saver.save(tfsession, tfoutpath)
    #import h5py
    #f = h5py.File(self.outputDir+outfile, 'r+')
    #del f['optimizer_weights']
    #f.close()
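To load the checkpoint written by saveModel, a restore sketch along these lines should work, assuming the same Keras model (and hence the same TensorFlow graph) has been rebuilt first; `output_dir` and `outfile` here are hypothetical stand-ins for the saveModel arguments:

import tensorflow as tf
import keras.backend as K

# Re-attach the saved variables to the session backing the rebuilt model.
sess = K.get_session()
saver = tf.train.Saver()
saver.restore(sess, output_dir + outfile + '_tfsession/tf')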
def test_experiment_instance_utils(self, get_model):
new_session()
model = get_model()
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
expe = Experiment(model)
expe.model_dict = model
expe.backend_name = 'another_backend'
expe.model_dict = model
assert expe.backend is not None
expe = Experiment()
print(self)
def test_experiment_generator_setups(self, get_generators):
gen_t, data_t, d_stream_t, gen, data, d_stream, nb = get_generators
nb_train, nb_val = nb
test_model = model()
test_model.compile(loss='binary_crossentropy',
optimizer='rmsprop')
expe = Experiment(test_model)
expe.fit_gen([gen_t], [gen], nb_epoch=2,
samples_per_epoch=nb_train,
nb_val_samples=nb_val,
verbose=2, overwrite=True)
close_gens(gen_t, data_t, d_stream_t)
close_gens(gen, data, d_stream)
if K.backend() == 'tensorflow':
K.clear_session()
print(self)
def test_build_predict_func(self, get_model):
"""Test the build of a model"""
new_session()
X_tr = np.ones((train_samples, input_dim))
model = get_model()
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model_name = model.__class__.__name__
pred_func = KTB.build_predict_func(model)
tensors = [X_tr]
if model_name != 'Model':
tensors.append(1.)
res = pred_func(tensors)
assert len(res[0]) == len(X_tr)
if K.backend() == 'tensorflow':
K.clear_session()
print(self)
def test_fit(self, get_model):
"Test the training of a serialized model"
new_session()
data, data_val = make_data(train_samples, test_samples)
model = get_model()
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model_dict = dict()
model_dict['model_arch'] = to_dict_w_opt(model)
res = KTB.train(copy.deepcopy(model_dict['model_arch']), [data],
[data_val], [])
res = KTB.fit(NAME, VERSION, model_dict, [data], 'test', [data_val],
[])
assert len(res) == 4
if K.backend() == 'tensorflow':
K.clear_session()
print(self)
def call(self, x, mask=None):
if K.backend() == 'tensorflow':
        xt = tf.transpose(x, perm=(2, 0, 1))
gt = tf.gather(xt, self.indices)
return tf.transpose(gt, perm=(1, 2, 0))
return x[:, :, self.indices]
def data_cifar10():
"""
Preprocess CIFAR10 dataset
:return:
"""
# These values are specific to CIFAR10
img_rows = 32
img_cols = 32
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
if keras.backend.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, Y_train, X_test, Y_test
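Typical use of the loader (a sketch; normalization and one-hot encoding already happen inside):

X_train, Y_train, X_test, Y_test = data_cifar10()
assert X_train.max() <= 1.0    # pixels scaled into [0, 1]
assert Y_train.shape[1] == 10  # one-hot labels over 10 classes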
def do_sparse():
return K == KTF or KTH.th_sparse_module
def _sort_weights_by_name(self, weights):
"""Sorts weights by name and returns them."""
if not weights:
return []
if K.backend() == 'theano':
key = lambda x: x.name if x.name else x.auto_name
else:
key = lambda x: x.name
weights.sort(key=key)
return weights
def on_registration(self, params):
if not self.registered:
self.registered = True
if self.is_master_process():
self.logger.info("Job %s/%s started." % (self.model_name, self.job_id))
self.logger.info("Open http://%s/model/%s/job/%s to monitor it." % (self.host, self.model_name, self.job_id))
self.logger.debug('Git backend start')
self.git.start()
else:
self.logger.info("Successfully reconnected.")
def on_signusr1(self, signal, frame):
self.logger.warning("USR1: backend job_id=%s (running=%s, ended=%s), client (online=%s, active=%s, registered=%s, "
"connected=%s, queue=%d), git (online=%s, active_thread=%s, last_push_time=%s)." % (
str(self.job_id),
str(self.running),
str(self.ended),
str(self.client.online),
str(self.client.active),
str(self.client.registered),
str(self.client.connected),
len(self.client.queue),
str(self.git.online),
str(self.git.active_thread),
str(self.git.last_push_time),
))
def is_master_process(self):
"""
Master means that aetros.backend.start_job() has been called without using the command `aetros start`.
If master is true, we collect and track some data that usually `aetros start` would do and reset the job's
temp files on the server.
    :return: True if this is the master process, i.e. AETROS_JOB_ID is not set in the environment
"""
return os.getenv('AETROS_JOB_ID') is None
def sync_weights(self, push=True):
if not os.path.exists(self.get_job_model().get_weights_filepath_latest()):
return
self.logger.debug("sync weights...")
self.set_status('SYNC WEIGHTS', add_section=False)
with open(self.get_job_model().get_weights_filepath_latest(), 'rb') as f:
import keras.backend
self.git.commit_file('Added weights', 'aetros/weights/latest.hdf5', f.read())
image_data_format = None
if hasattr(keras.backend, 'set_image_data_format'):
image_data_format = keras.backend.image_data_format()
info = {
'framework': 'keras',
'backend': keras.backend.backend(),
'image_data_format': image_data_format
}
self.git.commit_file('Added weights', 'aetros/weights/latest.json', json.dumps(info))
if push:
self.git.push()
# todo, implement optional saving of self.get_job_model().get_weights_filepath_best()
def start_keras(logger, job_backend):
if 'KERAS_BACKEND' not in os.environ:
os.environ['KERAS_BACKEND'] = 'tensorflow'
from . import keras_model_utils
# we need to import keras here, so we know which backend is used (and whether GPU is used)
os.chdir(job_backend.git.work_tree)
logger.debug("Start simple model")
# we use the source from the job commit directly
with job_backend.git.batch_commit('Git Version'):
job_backend.set_system_info('git_remote_url', job_backend.git.get_remote_url('origin'))
job_backend.set_system_info('git_version', job_backend.git.job_id)
# all our shapes are Tensorflow schema. (height, width, channels)
import keras.backend
if hasattr(keras.backend, 'set_image_dim_ordering'):
keras.backend.set_image_dim_ordering('tf')
if hasattr(keras.backend, 'set_image_data_format'):
keras.backend.set_image_data_format('channels_last')
from .KerasCallback import KerasCallback
trainer = Trainer(job_backend)
keras_logger = KerasCallback(job_backend, job_backend.logger)
job_backend.progress(0, job_backend.job['config']['epochs'])
logger.info("Start training")
keras_model_utils.job_start(job_backend, trainer, keras_logger)
job_backend.done()
def tf_model_eval_distance(sess, x, model1, model2, X_test):
"""
    Compute the L1 distance between predictions on the original and
    squeezed inputs.
    :param sess: TF session to use when evaluating the graph
    :param x: input placeholder
    :param model1: output tensor with predictions on the original inputs
    :param model2: output tensor with predictions on the squeezed inputs
    :param X_test: numpy array with evaluation inputs
    :return: a float vector of per-example L1 distances
"""
    # Define symbolic distances between the two models' predictions
    # (only the L1 distance is evaluated below; tf.sub was renamed
    # tf.subtract in TensorFlow 1.0)
    # acc_value = keras.metrics.categorical_accuracy(y, model)
    l2_diff = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(model1, model2)),
                                    axis=1))
    l_inf_diff = tf.reduce_max(tf.abs(tf.sub(model1, model2)), axis=1)
    l1_diff = tf.reduce_sum(tf.abs(tf.sub(model1, model2)), axis=1)
l1_dist_vec = np.zeros((len(X_test)))
with sess.as_default():
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / FLAGS.batch_size))
assert nb_batches * FLAGS.batch_size >= len(X_test)
for batch in range(nb_batches):
if batch % 100 == 0 and batch > 0:
print("Batch " + str(batch))
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * FLAGS.batch_size
end = min(len(X_test), start + FLAGS.batch_size)
cur_batch_size = end - start
            l1_dist_vec[start:end] = l1_diff.eval(
                feed_dict={x: X_test[start:end],
                           keras.backend.learning_phase(): 0})
assert end >= len(X_test)
return l1_dist_vec
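A sketch of how the evaluator might be invoked; `sess`, the placeholder `x`, the two prediction tensors, and `FLAGS.batch_size` are assumed to be defined by the surrounding script:

# Hypothetical call comparing predictions on original vs. squeezed inputs.
l1_scores = tf_model_eval_distance(sess, x, preds_orig, preds_squeezed, X_test)
print("mean L1 prediction gap:", l1_scores.mean())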
def data_cifar10():
"""
Preprocess CIFAR10 dataset
:return:
"""
# These values are specific to CIFAR10
img_rows = 32
img_cols = 32
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
if keras.backend.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
np.save("cifar10_legitimate.npy",X_test)
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, Y_train, X_test, Y_test
def data_stl10():
"""
    Preprocess the STL-10 dataset
:return:
"""
    # These values are specific to STL-10
img_rows = 96
img_cols = 96
nb_classes = 10
# the data, shuffled and split between train and test sets
#(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = np.load('x_stl10_train.npy')
y_train = np.load('y_stl10_train.npy') - 1
X_test = np.load('x_stl10_test.npy')
y_test = np.load('y_stl10_test.npy') - 1
if keras.backend.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# np.save("cifar10_legitimate.npy",X_test)
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, Y_train, X_test, Y_test
def substitute_model(img_rows=28, img_cols=28, nb_classes=10):
"""
Defines the model architecture to be used by the substitute
:param img_rows: number of rows in input
:param img_cols: number of columns in input
:param nb_classes: number of classes in output
:return: keras model
"""
model = Sequential()
# Find out the input shape ordering
if keras.backend.image_dim_ordering() == 'th':
input_shape = (1, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, 1)
    # Define a fully connected model (different from the black-box architecture)
layers = [Flatten(input_shape=input_shape),
Dense(200),
Activation('relu'),
Dropout(0.5),
Dense(200),
Activation('relu'),
Dropout(0.5),
Dense(nb_classes),
Activation('softmax')]
for layer in layers:
model.add(layer)
return model
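A short sketch compiling the substitute; the optimizer and loss are illustrative choices, not mandated by the snippet:

sub = substitute_model(img_rows=28, img_cols=28, nb_classes=10)
sub.compile(loss='categorical_crossentropy', optimizer='adam',
            metrics=['accuracy'])
sub.summary()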
def feature_to_image(features, height=28, width=28, channels=1, backend=K):
'''
Reshape a flattened image to the input format for convolutions.
Can be used either as a Keras operation using the default backend or
with numpy by using the argument backend=np
Conforms to the image data format setting defined in ~/.keras/keras.json
'''
if K.image_data_format() == "channels_first":
return backend.reshape(features, (-1, channels, height, width))
else:
return backend.reshape(features, (-1, height, width, channels))
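Because `backend` defaults to Keras' K but accepts numpy, the same helper reshapes backend tensors or plain arrays; a quick numpy sketch (output shape shown for the 'channels_last' setting):

import numpy as np

flat = np.zeros((5, 784))  # five flattened 28x28 grayscale images
imgs = feature_to_image(flat, height=28, width=28, channels=1, backend=np)
print(imgs.shape)          # (5, 28, 28, 1) under 'channels_last'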