def decode_predictions(preds, top=5):
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = get_file('imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models')
CLASS_INDEX = json.load(open(fpath))
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
results.append(result)
return results
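Every example on this page ultimately relies on keras.utils.data_utils.get_file, which downloads a file once, caches it under ~/.keras/<cache_subdir>/, and returns the local path on every subsequent call. A minimal, hedged sketch of that call pattern, using the same ImageNet class-index URL the decode_predictions snippets depend on:

from keras.utils.data_utils import get_file

# The URL below is the public class-index file referenced by these examples.
CLASS_INDEX_PATH = ('https://s3.amazonaws.com/deep-learning-models/'
                    'image-models/imagenet_class_index.json')

fpath = get_file('imagenet_class_index.json',
                 CLASS_INDEX_PATH,
                 cache_subdir='models')
print(fpath)  # e.g. ~/.keras/models/imagenet_class_index.json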
Python get_file() usage examples
Source file: imagenet_utils.py — project: video_labelling_using_youtube8m, author: LittleWat
def decode_predictions(preds, top=5):
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = get_file('imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models')
CLASS_INDEX = json.load(open(fpath))
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
results.append(result)
return results
def load_data(path='conll2000.zip', min_freq=2):
path = get_file(path, origin='https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/corpora/conll2000.zip')
print(path)
archive = ZipFile(path, 'r')
train = _parse_data(archive.open('conll2000/train.txt'))
test = _parse_data(archive.open('conll2000/test.txt'))
archive.close()
word_counts = Counter(row[0].lower() for sample in train for row in sample)
vocab = ['<pad>', '<unk>'] + [w for w, f in iter(word_counts.items()) if f >= min_freq]
pos_tags = sorted(list(set(row[1] for sample in train + test for row in sample))) # in alphabetic order
chunk_tags = sorted(list(set(row[2] for sample in train + test for row in sample))) # in alphabetic order
train = _process_data(train, vocab, pos_tags, chunk_tags)
test = _process_data(test, vocab, pos_tags, chunk_tags)
return train, test, (vocab, pos_tags, chunk_tags)
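A brief, hedged usage sketch for the loader above; only the third element of the return value has a structure that is certain from the code, since train and test come from _process_data, which is defined elsewhere in the project:

train, test, (vocab, pos_tags, chunk_tags) = load_data(min_freq=2)
print(len(vocab), 'vocabulary entries (including <pad> and <unk>)')
print(len(pos_tags), 'POS tags,', len(chunk_tags), 'chunk tags')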
Source file: imagenet_utils.py — project: vgg16-vgg19-resnet-inception-xception-example, author: yong-ho
def decode_predictions(preds, top=5):
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = get_file('imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models')
CLASS_INDEX = json.load(open(fpath))
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
results.append(result)
return results
def decode_imagenet_predictions(preds, top=5):
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = get_file('imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models')
CLASS_INDEX = json.load(open(fpath))
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
results.append(result)
return results
def fcn_Resnet50(input_shape = None, weight_decay=0.0002, batch_momentum=0.9, batch_shape=None, classes=40):
img_input = Input(shape=input_shape)
bn_axis = 3
    x = Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same', name='conv1',
               kernel_regularizer=l2(weight_decay))(img_input)
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(3, [64, 64, 256], stage=2, block='a', strides=(1, 1))(x)
x = identity_block(3, [64, 64, 256], stage=2, block='b')(x)
x = identity_block(3, [64, 64, 256], stage=2, block='c')(x)
x = conv_block(3, [128, 128, 512], stage=3, block='a')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='b')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='c')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='d')(x)
x = conv_block(3, [256, 256, 1024], stage=4, block='a')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='b')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='c')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='d')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='e')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='f')(x)
x = conv_block(3, [512, 512, 2048], stage=5, block='a')(x)
x = identity_block(3, [512, 512, 2048], stage=5, block='b')(x)
x = identity_block(3, [512, 512, 2048], stage=5, block='c')(x)
    # classifying layer
    x = Conv2D(filters=classes, kernel_size=(1, 1), strides=(1, 1), kernel_initializer='he_normal',
               activation='linear', padding='valid', kernel_regularizer=l2(weight_decay))(x)
    x = Conv2DTranspose(filters=classes, kernel_initializer='he_normal', kernel_size=(64, 64),
                        strides=(32, 32), padding='valid', use_bias=False, name='upscore2')(x)
x = Cropping2D(cropping=((19, 36),(19, 29)), name='score')(x)
model = Model(img_input, x)
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5', RES_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
model.load_weights(weights_path, by_name=True)
return model
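The last three lines are the part relevant to get_file: the ImageNet-pretrained ResNet-50 "notop" weights are downloaded once, cached under ~/.keras/models, and loaded with by_name=True, so only backbone layers whose names match the checkpoint receive weights while the new classifier, transposed-convolution, and cropping layers stay randomly initialised. A hedged sketch of that pattern in isolation; RES_WEIGHTS_PATH_NO_TOP is defined elsewhere in the project, so the URL below is an assumption (the usual deep-learning-models release):

from keras.utils.data_utils import get_file

# Assumed mirror for the notop ResNet-50 weights; the project may pin a different URL.
RES_WEIGHTS_PATH_NO_TOP = ('https://github.com/fchollet/deep-learning-models/'
                           'releases/download/v0.2/'
                           'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')

weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                        RES_WEIGHTS_PATH_NO_TOP,
                        cache_subdir='models')
# model.load_weights(weights_path, by_name=True)  # only matching layer names are filled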
def get_embeddings_index(embedding_type='glove.42B.300d'):
"""Retrieves embeddings index from embedding name. Will automatically download and cache as needed.
Args:
embedding_type: The embedding type to load.
Returns:
The embeddings indexed by word.
"""
embeddings_index = _EMBEDDINGS_CACHE.get(embedding_type)
if embeddings_index is not None:
return embeddings_index
data_obj = _EMBEDDING_TYPES.get(embedding_type)
if data_obj is None:
raise ValueError("Embedding name should be one of '{}'".format(_EMBEDDING_TYPES.keys()))
cache_dir = os.path.expanduser(os.path.join('~', '.keras-text'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
file_path = get_file(embedding_type, origin=data_obj['url'], extract=True,
cache_dir=cache_dir, cache_subdir='embeddings')
file_path = os.path.join(os.path.dirname(file_path), data_obj['file'])
embeddings_index = _build_embeddings_index(file_path)
_EMBEDDINGS_CACHE[embedding_type] = embeddings_index
return embeddings_index
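A hedged usage sketch; 'glove.42B.300d' is the function's default embedding type, and the example assumes _build_embeddings_index returns a plain dict keyed by word, which the docstring ("embeddings indexed by word") suggests:

embeddings_index = get_embeddings_index('glove.42B.300d')  # large download on first call, then cached
vector = embeddings_index.get('keras')                      # None if the word is out of vocabulary
if vector is not None:
    print(len(vector))                                      # 300-dimensional for this embedding type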
def load(mode=DatasetMode.small):
base_path = get_file(DataConstants.dataset, origin=DataConstants.origin, untar=True)
base_path = os.path.join(base_path, mode)
train_path = os.path.join(base_path, DataConstants.train)
test_path = os.path.join(base_path, DataConstants.test)
song_path = os.path.join(base_path, DataConstants.song_hash)
songs = dict(read_song_hash(song_path))
train, test = read_dataset(train_path, test_path)
return train, test, songs
def from_toml(filename):
from keras.utils.data_utils import get_file
volumes = {}
with open(filename, 'rb') as fin:
datasets = toml.load(fin).get('dataset', [])
for dataset in datasets:
hdf5_file = dataset['hdf5_file']
if dataset.get('use_keras_cache', False):
hdf5_file = get_file(hdf5_file, dataset['download_url'], md5_hash=dataset.get('download_md5', None))
image_dataset = dataset.get('image_dataset', None)
label_dataset = dataset.get('label_dataset', None)
mask_dataset = dataset.get('mask_dataset', None)
mask_bounds = dataset.get('mask_bounds', None)
resolution = dataset.get('resolution', None)
hdf5_pathed_file = os.path.join(os.path.dirname(filename), hdf5_file)
volume = HDF5Volume(hdf5_pathed_file,
image_dataset,
label_dataset,
mask_dataset,
mask_bounds=mask_bounds)
# If the volume configuration specifies an explicit resolution,
# override any provided in the HDF5 itself.
if resolution:
logging.info('Overriding resolution for volume "%s"', dataset['name'])
volume.resolution = np.array(resolution)
volumes[dataset['name']] = volume
return volumes
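A hedged sketch of the kind of [[dataset]] entry from_toml expects, inferred purely from the keys read above; every file name, URL, and dataset path is an illustrative placeholder:

# All values below are placeholders matching the keys from_toml reads.
EXAMPLE_TOML = """
[[dataset]]
name = "example_volume"
hdf5_file = "example_volume.h5"
use_keras_cache = true
download_url = "https://example.org/example_volume.h5"
image_dataset = "volumes/raw"
label_dataset = "volumes/labels"
resolution = [8.0, 8.0, 40.0]
"""

with open('datasets.toml', 'w') as fout:
    fout.write(EXAMPLE_TOML)

volumes = from_toml('datasets.toml')   # {'example_volume': HDF5Volume(...)}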
def decode_predictions(preds):
global CLASS_INDEX
assert len(preds.shape) == 2 and preds.shape[1] == 1000
if CLASS_INDEX is None:
fpath = get_file('imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models')
CLASS_INDEX = json.load(open(fpath))
indices = np.argmax(preds, axis=-1)
results = []
for i in indices:
results.append(CLASS_INDEX[str(i)])
return results
def run_conlleval(X_words_test, y_test, y_pred, index2word, index2chunk, pad_id=0):
'''
Runs the conlleval script for evaluation the predicted IOB-tags.
'''
url = 'http://www.cnts.ua.ac.be/conll2000/chunking/conlleval.txt'
path = get_file('conlleval',
origin=url,
md5_hash='61b632189e5a05d5bd26a2e1ec0f4f9e')
p = Popen(['perl', path], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
y_true = np.squeeze(y_test, axis=2)
sequence_lengths = np.argmax(X_words_test == pad_id, axis=1)
nb_samples = X_words_test.shape[0]
conlleval_input = []
for k in range(nb_samples):
sent_len = sequence_lengths[k]
words = list(map(lambda idx: index2word[idx], X_words_test[k][:sent_len]))
true_tags = list(map(lambda idx: index2chunk[idx], y_true[k][:sent_len]))
pred_tags = list(map(lambda idx: index2chunk[idx], y_pred[k][:sent_len]))
sent = zip(words, true_tags, pred_tags)
for row in sent:
conlleval_input.append(' '.join(row))
conlleval_input.append('')
print()
conlleval_stdout = p.communicate(input='\n'.join(conlleval_input).encode())[0]
print(blue(conlleval_stdout.decode()))
print()
def load_names():
from keras.utils.data_utils import get_file
dirname = 'cifar-10-batches-py'
origin = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
path = get_file(dirname, origin=origin, untar=True)
with open(osp.join(path, 'batches.meta'), 'rb') as f:
return pickle.load(f)['label_names']
def nietzsche():
path = get_file('nietzsche.txt', origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')
text = open(path).read().lower()
return text
def get_densenet_weights_path(dataset_name="CIFAR-10", include_top=True):
assert dataset_name == "CIFAR-10"
if include_top:
weights_path = get_file('densenet_40_12_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('densenet_40_12_tf_dim_ordering_tf_kernels_no_top.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
return weights_path
Source file: models.py — project: Super-Resolution-using-Generative-Adversarial-Networks, author: titu1994
def load_vgg_weight(self, model):
# Loading VGG 16 weights
if K.image_dim_ordering() == "th":
weights = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5', THEANO_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
else:
weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
f = h5py.File(weights)
layer_names = [name for name in f.attrs['layer_names']]
if self.vgg_layers is None:
self.vgg_layers = [layer for layer in model.layers
if 'vgg_' in layer.name]
for i, layer in enumerate(self.vgg_layers):
g = f[layer_names[i]]
weights = [g[name] for name in g.attrs['weight_names']]
layer.set_weights(weights)
# Freeze all VGG layers
for layer in self.vgg_layers:
layer.trainable = False
return model
def get_pred_text_label(pred_id):
CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json'
fpath = get_file('imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models')
label_dict = json.load(open(fpath))
return label_dict[str(pred_id)][1]
def main_thread():
# Build the model and load the pre-trained weights on MPII
model = posereg.build(input_shape, pa16j.num_joints, export_heatmaps=True)
weights_path = get_file(weights_file, TF_WEIGHTS_PATH, md5_hash=md5_hash,
cache_subdir=cache_subdir)
model.load_weights(weights_path)
queue_frames = queue.Queue(2)
queue_poses = queue.Queue(2)
proc = threading.Thread(target=thread_grab_frames,
args=(queue_frames, queue_poses))
proc.daemon = True
proc.start()
clock = pygame.time.Clock()
show_fps_cnt = 0
while True:
x = queue_frames.get()
pred = model.predict(x)
pred.append(x) # Append the input frame
queue_poses.put(pred)
clock.tick()
show_fps_cnt += 1
if show_fps_cnt == 10:
show_fps_cnt = 0
print ('fps: ' + str(clock.get_fps()))
def download_facades_bw(tmp_path, data_folder_path):
# download to .tmp file
downloaded_path = get_file(tmp_path + '/facades_bw.tar', origin=AWS_FACADES_PATH)
# un-tar
untar_file(downloaded_path, data_folder_path + '/facades_bw', remove_tar=False, flags='-xvf')
# move data file
subprocess.call(['rm', '-rf', tmp_path])
def CIFAR10(show_info=True):
"""
    This is a pre-built CIFAR10 model, trained for 250 epochs to 78.00% accuracy.
    :param show_info: whether to print status/error messages
:return: model as Keras.Model
"""
# Getting Config first
config_path = get_file('cifar10_config_2000.json',
CONFIG_PATH,
cache_subdir='models')
# Getting weights next
weights_path = get_file('cifar10_weight_2000.h5',
WEIGHTS_PATH,
cache_subdir='models')
config_found = False
if os.path.isfile(config_path):
config_found = True
else:
if show_info is True:
print("Error: Unable to get the CIFAR10 model configuration on disk..")
weight_found = False
if os.path.isfile(weights_path):
weight_found = True
else:
if show_info is True:
print("Error: Unable to get the CIFAR10 model weights on disk..")
if config_found is False and weight_found is False:
if show_info is True:
print("Error: Unable to get the CIFAR10 model..")
return modelassist.ImportExport.import_keras_model_config_and_weight_and_compile(config_path, weights_path, show_info)
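A hedged usage sketch; CONFIG_PATH and WEIGHTS_PATH are module constants of this project, and the (32, 32, 3) channels-last input shape is the standard CIFAR-10 layout rather than something stated in the code above:

import numpy as np

model = CIFAR10(show_info=True)
dummy = np.zeros((1, 32, 32, 3), dtype='float32')   # assumed CIFAR-10 input layout
probs = model.predict(dummy)                        # one score per CIFAR-10 class
print(probs.shape)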
def MNIST2000(show_info=True):
"""
    This is a pre-built MNIST model, trained for 2000 epochs to 99.38% accuracy.
    :param show_info: whether to print status/error messages
:return: model as Keras.Model
"""
# Getting Config first
config_path = get_file('mnist_config_100.json',
CONFIG_PATH,
cache_subdir='models')
# Getting weights next
weights_path = get_file('mnist_weight_100.h5',
WEIGHTS_PATH,
cache_subdir='models')
config_found = False
if os.path.isfile(config_path):
config_found = True
else:
if show_info is True:
print("Error: Unable to get the MNIST model configuration on disk..")
weight_found = False
if os.path.isfile(weights_path):
weight_found = True
else:
if show_info is True:
print("Error: Unable to get the MNIST model weights on disk..")
if config_found is False and weight_found is False:
if show_info is True:
print("Error: Unable to get the MNIST model..")
return modelassist.ImportExport.import_keras_model_config_and_weight_and_compile(config_path, weights_path, show_info)
def download_from_cloud(model_file_name, json_url, h5_url):
print('Downloading from cloud')
json_file_name, h5_file_name = SequenceModel.get_full_file_names(model_file_name)
downloaded_json = get_file(os.path.normpath(json_file_name), origin=json_url)
if downloaded_json != json_file_name:
shutil.copy(downloaded_json, json_file_name)
downloaded_h5 = get_file(os.path.normpath(h5_file_name), origin=h5_url)
if downloaded_h5 != h5_file_name:
shutil.copy(downloaded_h5, h5_file_name)
def decode_predictions(preds, top=5):
LABELS = None
if len(preds.shape) == 2:
if preds.shape[1] == 2622:
fpath = get_file('rcmalli_vggface_labels_v1.npy',
V1_LABELS_PATH,
cache_subdir=VGGFACE_DIR)
LABELS = np.load(fpath)
elif preds.shape[1] == 8631:
fpath = get_file('rcmalli_vggface_labels_v2.npy',
V2_LABELS_PATH,
cache_subdir=VGGFACE_DIR)
LABELS = np.load(fpath)
else:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 2622)) for V1 or '
'(samples, 8631) for V2.'
'Found array with shape: ' + str(preds.shape))
else:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 2622)) for V1 or '
'(samples, 8631) for V2.'
'Found array with shape: ' + str(preds.shape))
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [[str(LABELS[i].encode('utf8')), pred[i]] for i in top_indices]
result.sort(key=lambda x: x[1], reverse=True)
results.append(result)
return results
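A hedged sketch of how the VGG-Face variant above is driven; preds would normally come from model.predict on a VGGFace v1 model (2622 identities), and V1_LABELS_PATH / VGGFACE_DIR are module constants of keras-vggface assumed to be in scope. Random scores are used here just to exercise the decoder:

import numpy as np

preds = np.random.rand(1, 2622).astype('float32')   # shape (samples, 2622) matches the V1 branch
preds /= preds.sum(axis=1, keepdims=True)

for name, score in decode_predictions(preds, top=3)[0]:
    print(name, score)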
def decode_predictions(preds, top=5):
"""Decodes the prediction of an ImageNet model.
# Arguments
preds: Numpy tensor encoding a batch of predictions.
top: integer, how many top-guesses to return.
# Returns
A list of lists of top class prediction tuples
`(class_name, class_description, score)`.
One list of tuples per sample in batch input.
# Raises
ValueError: in case of invalid shape of the `pred` array
(must be 2D).
"""
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = get_file('imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models')
CLASS_INDEX = json.load(open(fpath))
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
result.sort(key=lambda x: x[2], reverse=True)
results.append(result)
return results
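For completeness, a hedged end-to-end sketch of how this canonical Keras version is normally used; ResNet50 and its preprocessing helpers come from keras.applications, the weights themselves are fetched through get_file internally, and 'elephant.jpg' is a placeholder path:

import numpy as np
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.preprocessing import image

model = ResNet50(weights='imagenet')

img = image.load_img('elephant.jpg', target_size=(224, 224))    # placeholder image path
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))

preds = model.predict(x)                                        # shape (1, 1000)
for class_id, class_name, score in decode_predictions(preds, top=5)[0]:
    print(class_id, class_name, score)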