def display_img_and_representation(x, y, pathimage, y_etichetta):
    """Show an image alongside its feature-vector representation."""
    print(y[y_etichetta])
    img = sio.imread(pathimage)
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 2, 1)
    plt.imshow(img)
    plt.subplot(1, 2, 2)
    plt.plot(x)
    plt.show()
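The snippets on this page omit their import statements; a minimal set for the plotting and description snippets above and below, with aliases inferred from how the names are used (an assumption, since the original files are not shown in full):

import numpy as np                # np.array(...)
import matplotlib.pyplot as plt   # plt.figure, plt.imshow, plt.plot
from skimage import io as sio     # sio.imread(...)
from time import time             # t1 = time()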
Python imread() usage examples (source code)
def describe_dataset(dataset, kmeans):
    y = list()
    X = list()
    paths = list()
    classes = dataset.getClasses()
    ni = 0
    t1 = time()
    for cl in classes:
        for path in dataset.paths[cl]:
            img = sio.imread(path, as_grey=True)
            feat = extract_and_describe(img, kmeans)
            X.append(feat)
            y.append(classes.index(cl))
            paths.append(path)
            ni += 1
    X = np.array(X)
    y = np.array(y)
    t2 = time()
    print("Elapsed time {0:0.2f}".format(t2 - t1))
    return X, y, paths
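A hedged sketch of how describe_dataset might be driven: build a KMeans visual vocabulary with scikit-learn and fit a classifier on the resulting feature matrix. The dataset objects, the sampled local features, and extract_and_describe come from the surrounding project, so those calls are left as comments.

from sklearn.cluster import KMeans
from sklearn.svm import LinearSVC

# Placeholders: sampled_features, training_set and test_set are provided by the
# surrounding project and are not defined here.
# kmeans = KMeans(n_clusters=500).fit(sampled_features)          # visual vocabulary
# X_train, y_train, _ = describe_dataset(training_set, kmeans)
# X_test, y_test, _ = describe_dataset(test_set, kmeans)
# clf = LinearSVC().fit(X_train, y_train)
# print("Test accuracy: {0:0.2f}".format(clf.score(X_test, y_test)))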
Source file: run03_model_train_v3.py (project: FCN_MSCOCO_Food_Segmentation, author: gakarak)
def readDataMasked(pidx):
    with open(pidx, 'r') as f:
        wdir = os.path.dirname(pidx)
        lstpath = f.read().splitlines()
        lstpath = [os.path.join(wdir, xx) for xx in lstpath]
    numPath = len(lstpath)
    dataX = None
    dataY = None
    for ii, pp in enumerate(lstpath):
        img4 = skio.imread(pp)
        img = img4[:, :, :3].astype(np.float)
        img -= img.mean()
        img /= img.std()
        msk = (img4[:, :, 3] > 0).astype(np.float)
        msk = np_utils.to_categorical(msk.reshape(-1), 2)
        # msk = msk.reshape(-1)
        if dataX is None:
            dataX = np.zeros([numPath] + list(img.shape))
            dataY = np.zeros([numPath] + list(msk.shape))
        dataX[ii] = img
        dataY[ii] = msk
        if (ii % 100) == 0:
            print('[%d/%d]' % (ii, numPath))
    return (dataX, dataY)
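For reference, a self-contained sketch of the same per-image standardization and one-hot mask encoding on a synthetic RGBA array; it assumes a Keras version that still ships keras.utils.np_utils, as the snippet above does.

import numpy as np
from keras.utils import np_utils  # assumption: an older Keras that still provides np_utils

img4 = np.random.randint(0, 256, size=(8, 8, 4)).astype(np.float64)  # fake RGBA image
img = img4[:, :, :3]
img = (img - img.mean()) / img.std()               # zero mean, unit variance per image
msk = (img4[:, :, 3] > 0).astype(np.float64)       # binary mask from the alpha channel
msk = np_utils.to_categorical(msk.reshape(-1), 2)  # one-hot, shape (H*W, 2)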
Source file: run04_model_inference.py (project: FCN_MSCOCO_Food_Segmentation, author: gakarak)
def readDataMasked(pidx):
    with open(pidx, 'r') as f:
        wdir = os.path.dirname(pidx)
        lstpath = f.read().splitlines()
        lstpath = [os.path.join(wdir, xx) for xx in lstpath]
    numPath = len(lstpath)
    dataX = None
    dataY = None
    for ii, pp in enumerate(lstpath):
        img4 = skio.imread(pp)
        img = img4[:, :, :3].astype(np.float)
        img -= img.mean()
        img /= img.std()
        msk = (img4[:, :, 3] > 0).astype(np.float)
        msk = np_utils.to_categorical(msk.reshape(-1), 2)
        # msk = msk.reshape(-1)
        if dataX is None:
            dataX = np.zeros([numPath] + list(img.shape))
            dataY = np.zeros([numPath] + list(msk.shape))
        dataX[ii] = img
        dataY[ii] = msk
        if (ii % 100) == 0:
            print('[%d/%d]' % (ii, numPath))
    return (dataX, dataY)
Source file: run10_common_onimage.py (project: FCN_MSCOCO_Food_Segmentation, author: gakarak)
def getBatchDataByIdx(self, parBatchIdx):
    rndIdx = parBatchIdx
    parBatchSize = len(rndIdx)
    dataX = np.zeros([parBatchSize] + list(self.shapeImg), dtype=np.float)
    dataY = np.zeros([parBatchSize] + list(self.shapeMsk), dtype=np.float)
    for ii, tidx in enumerate(rndIdx):
        if self.isDataInMemory:
            dataX[ii] = self.dataImg[tidx]
            dataY[ii] = self.dataMskCls[tidx]
        else:
            tpathImg = self.arrPathDataImg[tidx]
            tpathMsk = self.arrPathDataMsk[tidx]
            tdataImg = self.adjustImage(skio.imread(tpathImg))
            tdataMsk = skio.imread(tpathMsk)
            tdataImg = self.transformImageFromOriginal(tdataImg)
            tdataMsk = self.transformImageFromOriginal(tdataMsk)
            tdataMskCls = self.convertMskToOneHot(tdataMsk)
            dataX[ii] = tdataImg
            dataY[ii] = tdataMskCls
    if self.isTheanoShape:
        tshp = dataY.shape
        dataY = dataY.reshape([tshp[0], tshp[1], np.prod(tshp[-2:])]).transpose((0, 2, 1))
        # print (tshp)
    return (dataX, dataY)
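The Theano-ordering reshape at the end can be checked in isolation; a minimal sketch on a synthetic mask batch of shape (batch, classes, H, W):

import numpy as np

dataY = np.zeros((2, 2, 4, 4))  # (batch, classes, H, W), synthetic one-hot masks
tshp = dataY.shape
dataY = dataY.reshape([tshp[0], tshp[1], np.prod(tshp[-2:])]).transpose((0, 2, 1))
print(dataY.shape)              # (2, 16, 2): one class distribution per pixel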
def get(self, uri):
    i = imread(uri)
    if len(i.shape) == 2:
        i = gray2rgb(i)
    else:
        i = i[:, :, :3]
    c = self._image_to_color.get(i)
    dbg = self._settings['debug']
    if dbg is None:
        return c
    c, imgs = c
    b = splitext(basename(uri))[0]
    imsave(join(dbg, b + '-resized.jpg'), imgs['resized'])
    imsave(join(dbg, b + '-back.jpg'), img_as_float(imgs['back']))
    imsave(join(dbg, b + '-skin.jpg'), img_as_float(imgs['skin']))
    imsave(join(dbg, b + '-clusters.jpg'), imgs['clusters'])
    return c, {
        'resized': join(dbg, b + '-resized.jpg'),
        'back': join(dbg, b + '-back.jpg'),
        'skin': join(dbg, b + '-skin.jpg'),
        'clusters': join(dbg, b + '-clusters.jpg'),
    }
def evaluate_sliding_window(img_filename, crops):
    img = io.imread(img_filename).astype(np.float32) / 255
    if img.ndim == 2:  # handle B/W images
        img = np.expand_dims(img, axis=-1)
        img = np.repeat(img, 3, 2)
    img_crops = np.zeros((batch_size, 227, 227, 3))
    for i in xrange(len(crops)):
        crop = crops[i]
        img_crop = transform.resize(
            img[crop[1]:crop[1] + crop[3], crop[0]:crop[0] + crop[2]], (227, 227)) - 0.5
        img_crop = np.expand_dims(img_crop, axis=0)
        img_crops[i, :, :, :] = img_crop
    # compute ranking scores
    scores = sess.run([score_func], feed_dict={image_placeholder: img_crops})
    # find the optimal crop
    idx = np.argmax(scores[:len(crops)])
    best_window = crops[idx]
    # return the best crop
    return (best_window[0], best_window[1], best_window[2], best_window[3])
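The function expects crops as (x, y, w, h) tuples and relies on sess, score_func, image_placeholder, and batch_size being defined elsewhere in the original script. A hypothetical helper for generating such crop tuples (the scales and stride are illustrative, not taken from the original project):

def generate_sliding_windows(im_w, im_h, scales=(0.5, 0.75), stride=32):
    """Return a list of (x, y, w, h) crop tuples covering the image."""
    crops = []
    for s in scales:
        w, h = int(im_w * s), int(im_h * s)
        for y in range(0, im_h - h + 1, stride):
            for x in range(0, im_w - w + 1, stride):
                crops.append((x, y, w, h))
    return crops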
def draw_blur_levels():
    import matplotlib.pyplot as plt
    from skimage import io
    image = io.imread('out/66.png')  # 36 for $, 79 for O
    fig, axes = plt.subplots(nrows=2, ncols=3,
                             subplot_kw={'adjustable': 'box-forced'})
    ax = axes.ravel()
    for blur_level in range(6):
        blurred = uniform_filter(image, 3.0 * blur_level, mode='reflect', cval=0)
        ax[blur_level].imshow(blurred, cmap='gray', interpolation='nearest')
        ax[blur_level].set_title(str(blur_level), fontsize=20)
    plt.show()
def load_data(src, shuffle=True):
    """Load data from directories."""
    imgs = [img for img in glob.glob(os.path.join(src, '*.png'))]
    x = np.zeros((len(imgs), 100, 100), dtype=np.float32)
    y = np.zeros(len(imgs), dtype=np.int64)
    for idx, img in enumerate(imgs):
        im = io.imread(img, 1)
        im = img_as_float(im)  # rescale from [0, 255] to [0, 1]
        label = int(img.split('/')[-1].split('.')[0].split('_')[-1])
        x[idx] = im
        y[idx] = label
    x = np.expand_dims(x, 3)
    data = list(zip(x, y))  # list() so random.shuffle also works on Python 3
    if shuffle:
        random.shuffle(data)
    return data
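A hedged usage sketch (the names are illustrative, not from the original project): unpack the shuffled (image, label) pairs back into batch arrays.

import numpy as np

def to_arrays(data):
    """Turn a list of (image, label) pairs into stacked arrays."""
    xs, ys = zip(*data)
    return np.stack(xs), np.asarray(ys, dtype=np.int64)

# x_batch, y_batch = to_arrays(load_data('digits/train'))  # hypothetical directory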
def PreprocessContentImage(path, long_edge):
    img = io.imread(path)
    logging.info("load the content image, size = %s", img.shape[:2])
    factor = float(long_edge) / max(img.shape[:2])
    new_size = (int(img.shape[0] * factor), int(img.shape[1] * factor))
    resized_img = transform.resize(img, new_size)
    sample = np.asarray(resized_img) * 256
    # swap axes to make image from (224, 224, 3) to (3, 224, 224)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # subtract the per-channel mean
    sample[0, :] -= 123.68
    sample[1, :] -= 116.779
    sample[2, :] -= 103.939
    logging.info("resize the content image to %s", new_size)
    return np.resize(sample, (1, 3, sample.shape[1], sample.shape[2]))
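A hedged sketch of the inverse transform (not part of the excerpt above): add the channel means back and restore height-width-channel ordering so the result can be saved as an image.

import numpy as np

def PostprocessImage(sample):
    """Undo the mean subtraction and axis swaps of PreprocessContentImage (sketch)."""
    img = sample.copy().reshape((3, sample.shape[2], sample.shape[3]))
    img[0, :] += 123.68
    img[1, :] += 116.779
    img[2, :] += 103.939
    img = np.swapaxes(img, 1, 2)  # (3, H, W) -> (3, W, H)
    img = np.swapaxes(img, 0, 2)  # (3, W, H) -> (H, W, 3)
    return np.clip(img, 0, 255).astype(np.uint8)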
Source file: preprocessing.py (project: kaggle-yelp-restaurant-photo-classification, author: u1234x1234)
def PreprocessImage(path, show_img=True):
    # load image
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # we crop image from center
    short_egde = min(img.shape[:2])
    yy = int((img.shape[0] - short_egde) / 2)
    xx = int((img.shape[1] - short_egde) / 2)
    crop_img = img[yy: yy + short_egde, xx: xx + short_egde]
    # resize to 299, 299
    resized_img = transform.resize(crop_img, (299, 299))
    if show_img:
        io.imshow(resized_img)
    # convert to numpy.ndarray
    sample = np.asarray(resized_img) * 256
    # swap axes to make image from (299, 299, 3) to (3, 299, 299)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # subtract and divide by the mean to normalize roughly to [-1, 1]
    normed_img = sample - 128.
    normed_img /= 128.
    return np.reshape(normed_img, (1, 3, 299, 299))
Source file: predict.py (project: kaggle-yelp-restaurant-photo-classification, author: u1234x1234)
def PreprocessImage(path, show_img=True):
    # load image
    img = io.imread(path)
    # print("Original Image Shape: ", img.shape)
    # we crop image from center
    short_egde = min(img.shape[:2])
    yy = int((img.shape[0] - short_egde) / 2)
    xx = int((img.shape[1] - short_egde) / 2)
    crop_img = img[yy: yy + short_egde, xx: xx + short_egde]
    # resize to 299, 299
    resized_img = transform.resize(crop_img, (299, 299))
    if show_img:
        io.imshow(resized_img)
    # convert to numpy.ndarray
    sample = np.asarray(resized_img) * 256
    # swap axes to make image from (299, 299, 3) to (3, 299, 299)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # subtract and divide by the mean to normalize roughly to [-1, 1]
    normed_img = sample - 128.
    normed_img /= 128.
    return np.reshape(normed_img, (1, 3, 299, 299))
def cropframes(clip_dir, image_files, clip_path):
    clip = clip_path.split('/')[-1]
    clip_name = clip.split('.')[0]
    crop_dir = clip_dir + 'cropped/'
    # crop_dir = '/home/sxg755/dataset/train/all_frames/cropped/'
    if not os.path.exists(crop_dir):
        os.makedirs(crop_dir)
    cropped_files = []
    for idx, image in enumerate(image_files):
        img = io.imread(image)
        h = img.shape[0]
        w = img.shape[1]
        img_cropped = img[0:4 * h // 5, 0:w]  # keep the top 4/5 of the frame (integer division)
        out_path = crop_dir + clip_name + '_keyframe' + "{0:0>4}".format(idx + 1) + '.jpg'
        io.imsave(out_path, img_cropped)
        cropped_files.append(out_path)
    return cropped_files
def load_images(self, test_list):
    """
    test_list : list of users to use for testing,
    e.g. ["user_1", "user_2", "user_3"]
    """
    self.image_list = []
    for user in test_list:
        csv = "%s%s/%s_loc.csv" % (self.data_directory, user, user)
        with open(csv) as fh:
            data = [line.strip().split(',') for line in fh]
        for line in data[1:]:
            img_path, x1, y1, x2, y2 = line
            pos = tuple(map(int, (x1, y1, x2, y2)))
            letter = img_path[-6]
            img = io.imread("%s%s" % (self.data_directory, img_path))
            self.image_list.append((img, pos, letter))
Source file: read_localization.py (project: sign-detection-and-localization, author: rajat503)
def train(user_list, path):
    train_data = []
    train_boxes = []
    for user in user_list:
        with open(path + user + '/' + user + '_loc.csv', 'rb') as csvfile:
            x = csv.reader(csvfile)
            for row in x:
                if row[0] == 'image':
                    continue
                image = io.imread(path + row[0])
                data_vector = image
                # data_vector = np.array(image.flatten()).tolist()
                # sys.exit(0)
                ground_truth = [int(row[1]), int(row[2]), int(row[3]), int(row[4])]
                user_id = int(user.split('_')[1])
                train_data.append(data_vector)
                train_boxes.append(ground_truth)
    localization.train(train_data, train_boxes)

# train(['user_3','user_4','user_5','user_6','user_7','user_9','user_10','user_11','user_12','user_13','user_14','user_15','user_16','user_17','user_18','user_19'])
Source file: ImageNet.py (project: Representation-Learning-by-Learning-to-Count, author: gitlimlab)
def __init__(self, ids, name='default',
             max_examples=None, is_train=True):
    self._ids = list(ids)
    self.name = name
    self.is_train = is_train
    if max_examples is not None:
        self._ids = self._ids[:max_examples]
    file = os.path.join(__IMAGENET_IMG_PATH__, self._ids[0])
    try:
        imread(file)
    except:
        raise IOError('Dataset not found. Please make sure the dataset was downloaded.')
    log.info("Reading Done: %s", file)
def PreprocessImage(path, show_img=False):
    # load image
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # we crop image from center
    short_egde = min(img.shape[:2])
    yy = int((img.shape[0] - short_egde) / 2)
    xx = int((img.shape[1] - short_egde) / 2)
    crop_img = img[yy: yy + short_egde, xx: xx + short_egde]
    # resize to 224, 224
    resized_img = transform.resize(crop_img, (224, 224))
    if show_img:
        io.imshow(resized_img)
    # convert to numpy.ndarray
    sample = np.asarray(resized_img) * 255
    # swap axes to make image from (224, 224, 3) to (3, 224, 224)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # subtract the mean image
    normed_img = sample - mean_img
    normed_img.resize(1, 3, 224, 224)
    return normed_img

# Get preprocessed batch (single image batch)
def PreprocessContentImage(path, long_edge):
    img = io.imread(path)
    logging.info("load the content image, size = %s", img.shape[:2])
    factor = float(long_edge) / max(img.shape[:2])
    new_size = (int(img.shape[0] * factor), int(img.shape[1] * factor))
    resized_img = transform.resize(img, new_size)
    sample = np.asarray(resized_img) * 256
    # swap axes to make image from (224, 224, 3) to (3, 224, 224)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # subtract the per-channel mean
    sample[0, :] -= 123.68
    sample[1, :] -= 116.779
    sample[2, :] -= 103.939
    logging.info("resize the content image to %s", new_size)
    return np.resize(sample, (1, 3, sample.shape[1], sample.shape[2]))
def get_real_images(paths):
    real_images = []
    for path in paths:
        # Binarize with an Otsu threshold: each pixel becomes 0 (white) or 1 (black)
        camera = io.imread(path)
        val = filters.threshold_otsu(camera)
        result = (camera < val) * 1.0
        real_images.append(result)
    np_images = numpy.array(real_images)
    np_images = np_images.reshape(np_images.shape[0], np_images.shape[1] * np_images.shape[2])
    return np_images
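The same Otsu binarization can be tried on skimage's bundled sample image; a self-contained sketch:

from skimage import data, filters

camera = data.camera()         # built-in grayscale test image
val = filters.threshold_otsu(camera)
binary = (camera < val) * 1.0  # 1.0 where the pixel is darker than the threshold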
def read_coco_image(data_split, coco_id, root_dir=osp.join(DATA_ROOT, 'mscoco')):
    file_name = 'COCO_{}2014_'.format(data_split) + str(coco_id).zfill(12) + '.jpg'
    im = imread(osp.join(root_dir, data_split + '2014', file_name))
    return im