# Imports assumed by the snippets below (not shown in the original excerpts):
import numpy as np
from PIL import Image
from keras.preprocessing.image import load_img, img_to_array
from keras.applications import vgg16


def preprocess_image_crop(image_path, img_size):
    '''
    Preprocess the image, scaling it so that its smaller side is img_size.
    The larger side is then cropped in order to produce a square image.
    '''
    img = load_img(image_path)
    scale = float(img_size) / min(img.size)
    new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
    # print('old size: %s, new size: %s' % (str(img.size), str(new_size)))
    img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    # Crop the larger side; note this keeps the last img_size rows/columns,
    # not a centered crop.
    crop_h = img.shape[0] - img_size
    crop_v = img.shape[1] - img_size
    img = img[crop_h:img_size + crop_h, crop_v:img_size + crop_v, :]
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
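# Usage sketch for the function above (not part of the original page);
# 'example.jpg' is a placeholder path.
crop_batch = preprocess_image_crop('example.jpg', img_size=224)
print(crop_batch.shape)  # (1, 224, 224, 3), ready for a VGG16 forward pass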
# util function to open, resize and format pictures into appropriate tensors
def preprocess_image_scale(image_path, img_size=None):
    '''
    Preprocess the image, scaling it so that its larger side is img_size.
    This function preserves the aspect ratio.
    '''
    img = load_img(image_path)
    if img_size:
        scale = float(img_size) / max(img.size)
        new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
        img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
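# Usage sketch (placeholder path, not from the original page); with
# img_size=None the image is only converted and mean-centred, without resizing.
scaled_batch = preprocess_image_scale('example.jpg', img_size=512)
print(scaled_batch.shape)  # (1, H, W, 3) with max(H, W) of about 512, aspect ratio preserved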
# util function to convert a tensor into a valid image
def extract_vgg16_features(x):
    from keras.preprocessing.image import img_to_array, array_to_img
    from keras.applications.vgg16 import preprocess_input, VGG16
    from keras.models import Model

    # im_h = x.shape[1]
    im_h = 224
    model = VGG16(include_top=True, weights='imagenet', input_shape=(im_h, im_h, 3))
    # if flatten:
    #     add_layer = Flatten()
    # else:
    #     add_layer = GlobalMaxPool2D()
    # feature_model = Model(model.input, add_layer(model.output))
    feature_model = Model(model.input, model.get_layer('fc1').output)
    print('extracting features...')
    x = np.asarray([img_to_array(array_to_img(im, scale=False).resize((im_h, im_h))) for im in x])
    x = preprocess_input(x)  # data - 127. # data/255. #
    features = feature_model.predict(x)
    print('Features shape = ', features.shape)
    return features
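# Usage sketch (dummy data, not in the original): x is expected to be a batch
# of HxWx3 images; sizes other than 224 are resized inside the function.
dummy_images = np.random.randint(0, 256, size=(4, 100, 100, 3), dtype=np.uint8)
fc1_features = extract_vgg16_features(dummy_images)  # shape (4, 4096), VGG16 'fc1' activations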
def preprocess(img):
    img4d = img.copy()
    img4d = img4d.astype("float64")
    if K.image_dim_ordering() == "th":
        # (H, W, C) -> (C, H, W)
        img4d = img4d.transpose((2, 0, 1))
    img4d = np.expand_dims(img4d, axis=0)
    img4d = vgg16.preprocess_input(img4d)
    return img4d
# style-transfer.py (project: Deep-Learning-with-Keras, author: PacktPublishing)
def preprocess(img):
    img4d = img.copy()
    img4d = img4d.astype("float64")
    if K.image_dim_ordering() == "th":
        # (H, W, C) -> (C, H, W)
        img4d = img4d.transpose((2, 0, 1))
    img4d = np.expand_dims(img4d, axis=0)
    img4d = vgg16.preprocess_input(img4d)
    return img4d
def preprocessImage(imagePath):
    # Note: 244 is kept from the original; VGG16's standard input size is 224x224.
    img = load_img(imagePath, target_size=(244, 244))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    # Drop the batch dimension again, returning a (244, 244, 3) array.
    img = img.reshape(img.shape[1:])
    return img
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(im_height, im_width))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
def get_features(url):
    response = requests.get(url)
    img = Image.open(BytesIO(response.content)).convert('RGB')
    target_size = (224, 224)
    # Note: building the model inside the function reloads the weights on every call.
    model = VGG16(weights='imagenet', include_top=False, pooling='avg')
    if img.size != target_size:
        img = img.resize(target_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    features = model.predict(x).flatten()
    return features.tolist()
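# Usage sketch (the URL is a placeholder, not from the original project):
# returns a plain Python list of 512 globally average-pooled conv features.
vec = get_features('https://example.com/cat.jpg')
print(len(vec))  # 512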
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
# util function to convert a tensor into a valid image
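# A sketch of the conversion the comment above refers to, modelled on the
# standard Keras neural-style-transfer example (assumes the same img_nrows and
# img_ncols as the snippet above):
def deprocess_image(x):
    x = x.reshape((img_nrows, img_ncols, 3))
    # undo the mean-pixel centring applied by vgg16.preprocess_input (BGR means)
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # 'BGR' -> 'RGB'
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x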
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_width, img_height))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
# util function to convert a tensor into a valid image
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
# util function to convert a tensor into a valid image
def preprocess_input(x):
    return vgg16.preprocess_input(x.astype('float32'))
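# The explicit float32 cast above guards against integer inputs: some Keras
# versions perform the mean subtraction in place, which misbehaves on uint8
# arrays. Usage sketch with placeholder data:
wrapped = preprocess_input(np.zeros((1, 224, 224, 3), dtype='uint8'))
print(wrapped.dtype)  # float32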
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
# util function to convert a tensor into a valid image
def import_vgg16():
    global VGG16
    global Model
    global image
    global preprocess_input
    global K
    from keras.applications import VGG16
    from keras.models import Model
    from keras.preprocessing import image
    from keras.applications.vgg16 import preprocess_input
    from keras import backend as K
    if K.backend() == 'tensorflow':
        limit_mem()
def imread(self, path):
    if 'http' == path[:4]:
        # Python 2 style: urllib.urlopen / cStringIO for an in-memory download
        with contextlib.closing(urllib.urlopen(path)) as req:
            local_url = cStringIO.StringIO(req.read())
            img = image.load_img(local_url, target_size=(self.target_dim, self.target_dim))
    else:
        img = image.load_img(path, target_size=(self.target_dim, self.target_dim))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    return img
# improved_neural_doodle.py (project: Neural-Style-Transfer-Windows, author: titu1994)
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
def load_image(path):
    img_path = path
    # 299x299 is the InceptionV3/Xception input size; preprocess_input here
    # presumably comes from the matching application module.
    img = load_img(img_path, target_size=(299, 299))
    x = img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x
def load_data(path, size=224, mode=None):
    img = Image.open(path)
    w, h = img.size
    if w < h:
        if w < size:
            img = img.resize((size, size * h // w))
            w, h = img.size
    else:
        if h < size:
            img = img.resize((size * w // h, size))
            w, h = img.size
    # center crop to size x size
    img = img.crop((int((w - size) * 0.5), int((h - size) * 0.5), int((w + size) * 0.5), int((h + size) * 0.5)))
    if mode == "original":
        return img
    if mode == "label":
        y = np.array(img, dtype=np.int32)
        mask = y == 255
        y[mask] = 0
        y = binarylab(y, size, 21)
        y = np.expand_dims(y, axis=0)
        return y
    if mode == "data":
        X = image.img_to_array(img)
        X = np.expand_dims(X, axis=0)
        X = preprocess_input(X)
        return X
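# Usage sketch (placeholder paths; binarylab is assumed to one-hot encode the
# 21 Pascal VOC classes, as suggested by the snippet above):
X = load_data('image.jpg', size=224, mode='data')     # (1, 224, 224, 3), VGG-preprocessed
# y = load_data('label.png', size=224, mode='label')  # one-hot mask, shape depends on binarylab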
def preprocess_image(image_path, desired_dims):
    img = load_img(image_path, target_size=desired_dims)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
# util function to convert a tensor into a valid image
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
# util function to convert a tensor into a valid image
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_width, img_height))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
# util function to convert a tensor into a valid image
# extract_cnn_vgg16_keras.py (project: flask-keras-cnn-image-retrieval, author: willard-yuan)
def extract_feat(img_path):
    # weights: 'imagenet'
    # pooling: 'max' or 'avg'
    # input_shape: (width, height, 3), width and height should be >= 48
    input_shape = (224, 224, 3)
    model = VGG16(weights='imagenet', input_shape=(input_shape[0], input_shape[1], input_shape[2]), pooling='max', include_top=False)
    img = image.load_img(img_path, target_size=(input_shape[0], input_shape[1]))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    feat = model.predict(img)
    # L2-normalize the pooled feature vector
    norm_feat = feat[0] / LA.norm(feat[0])
    return norm_feat
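# Retrieval sketch (file names are placeholders): because extract_feat returns
# L2-normalised vectors, a dot product gives the cosine similarity directly.
query_feat = extract_feat('query.jpg')
db_feat = extract_feat('database_image.jpg')
similarity = np.dot(query_feat, db_feat)  # cosine similarity in [-1, 1]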
def extract_image_features(img_path):
    model = models.VGG_16('weights/vgg16_weights.h5')
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    # Build a backend function that returns the input to the last layer
    # (the activations feeding the final classifier), evaluated in test mode.
    last_layer_output = K.function([model.layers[0].input, K.learning_phase()],
                                   [model.layers[-1].input])
    features = last_layer_output([x, 0])[0]
    return features
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
# util function to convert a tensor into a valid image
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_height, img_width))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
# util function to convert a tensor into a valid image
def preprocess(img):
    img4d = img.copy()
    img4d = img4d.astype("float64")
    if K.image_dim_ordering() == "th":
        # (H, W, C) -> (C, H, W)
        img4d = img4d.transpose((2, 0, 1))
    img4d = np.expand_dims(img4d, axis=0)
    img4d = vgg16.preprocess_input(img4d)
    return img4d
def preprocess(img):
    img4d = img.copy()
    img4d = img4d.astype("float64")
    if K.image_dim_ordering() == "th":
        # (H, W, C) -> (C, H, W)
        img4d = img4d.transpose((2, 0, 1))
    img4d = np.expand_dims(img4d, axis=0)
    img4d = vgg16.preprocess_input(img4d)
    return img4d
def preprocess_image(filename, target_size):
    img = image.load_img(filename, target_size=target_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    x = np.squeeze(x)
    return x
def drawbbox(self, img, xx, testmodel, confid_thresh, w, h, c):
    ttimg, x0_list, y0_list, x1_list, y1_list, classprob_list, class_id_list, confid_value_list = utils.predict(
        preprocess_input(np.asarray([xx])), testmodel, confid_thresh, w, h, c)
    for x0, y0, x1, y1, classprob, class_id, confid_value in zip(
            x0_list, y0_list, x1_list, y1_list, classprob_list, class_id_list, confid_value_list):
        # draw bounding box
        cv2.rectangle(img, (x0, y0), (x1, y1), (255, 255, 255), 2)
        # load the class thumbnail image and clamp its paste position to the frame
        classimg = cv2.imread(cfgconst.label_names[class_id])
        if y0 - classimg.shape[0] <= 0:
            yst = 0
            yend = classimg.shape[0]
        elif y0 >= img.shape[0]:
            yst = img.shape[0] - classimg.shape[0] - 1
            yend = img.shape[0] - 1
        else:
            yst = y0 - classimg.shape[0]
            yend = y0
        if x0 + classimg.shape[1] >= img.shape[1]:
            xst = img.shape[1] - classimg.shape[1] - 1
            xend = img.shape[1] - 1
        elif x0 <= 0:
            xst = 0
            xend = classimg.shape[1]
        else:
            xst = x0
            xend = x0 + classimg.shape[1]
        # paste the class thumbnail into the frame
        img[yst:yend, xst:xend] = classimg
        # draw text: class probability and confidence
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, str(classprob), (x0, y0 + classimg.shape[0] - 1), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(img, str(confid_value), (x0, y1), font, 0.5, (128, 255, 255), 1, cv2.LINE_AA)
    # show the annotated frame
    cv2.imshow('frame', img)
    cv2.waitKey(1)