def min_side(_, pos):
    """
    Given an object's pixel positions, return the minimum side length of its
    bounding box
    :param _: pixel values (unused)
    :param pos: pixel positions (1-D flat indices)
    :return: minimum bounding box side length
    """
    xs = np.array([i // SSIZE for i in pos])  # row index (floor division)
    ys = np.array([i % SSIZE for i in pos])   # column index
    minx = np.amin(xs)
    miny = np.amin(ys)
    maxx = np.amax(xs)
    maxy = np.amax(ys)
    ct1 = compute_line(np.array([minx, miny]), np.array([minx, maxy]))
    ct2 = compute_line(np.array([minx, miny]), np.array([maxx, miny]))
    return min(ct1, ct2)
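# A minimal, self-contained sketch (not from the original project) of the same
# bounding-box idea: flat 1-D pixel indices are split into (row, col) with floor
# division and modulo against an assumed grid side length SSIZE, then
# np.amin/np.amax give the box corners.
import numpy as np

SSIZE = 8  # assumed grid side length for this toy example
pos = np.array([9, 10, 18, 27])          # flat indices on an 8x8 grid
rows, cols = pos // SSIZE, pos % SSIZE   # (1,1), (1,2), (2,2), (3,3)
top, left = np.amin(rows), np.amin(cols)
bottom, right = np.amax(rows), np.amax(cols)
print(top, left, bottom, right)          # 1 1 3 3 -> both sides have length 2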
def generate_patch_probs(path, patch_locations, patch_size, im_size):
    x, y, z = patch_locations
    seg = nib.load(glob.glob(os.path.join(path, '*_seg.nii.gz'))[0]).get_data().astype(np.float32)
    p = []
    for i in range(len(x)):
        for j in range(len(y)):
            for k in range(len(z)):
                patch = seg[int(x[i] - patch_size / 2) : int(x[i] + patch_size / 2),
                            int(y[j] - patch_size / 2) : int(y[j] + patch_size / 2),
                            int(z[k] - patch_size / 2) : int(z[k] + patch_size / 2)]
                patch = (patch > 0).astype(np.float32)
                percent = np.sum(patch) / (patch_size * patch_size * patch_size)
                p.append((1 - np.abs(percent - 0.5)) * percent)
    p = np.asarray(p, dtype=np.float32)
    p[p == 0] = np.amin(p[np.nonzero(p)])
    p = p / np.sum(p)
    return p
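# Hedged usage sketch (not part of the original code): the returned vector p is a
# normalized sampling distribution over all (i, j, k) patch-centre combinations, so
# it can be fed straight into np.random.choice to pick training patches that favour
# roughly 50% lesion coverage. The flat index is unravelled back to (i, j, k) here.
import numpy as np

def sample_patch_centers(p, patch_locations, n_samples=4, rng=None):
    rng = np.random.default_rng(rng)
    x, y, z = patch_locations
    flat = rng.choice(len(p), size=n_samples, p=p)
    ijk = np.unravel_index(flat, (len(x), len(y), len(z)))
    return list(zip(x[ijk[0]], y[ijk[1]], z[ijk[2]]))

# toy example with made-up locations and a uniform p
locs = (np.array([40, 80]), np.array([40, 80]), np.array([40, 80]))
p_toy = np.full(8, 1.0 / 8.0, dtype=np.float32)
print(sample_patch_centers(p_toy, locs, n_samples=3, rng=0))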
def view_trigger_snippets_bis(trigger_snippets, elec_index, save=None):
    fig = pylab.figure()
    ax = fig.add_subplot(1, 1, 1)
    for n in range(0, trigger_snippets.shape[2]):
        y = trigger_snippets[:, elec_index, n]
        x = numpy.arange(- (y.size - 1) / 2, (y.size - 1) / 2 + 1)
        b = 0.5 + 0.5 * numpy.random.rand()
        ax.plot(x, y, color=(0.0, 0.0, b), linestyle='solid')
    ax.grid(True)
    ax.set_xlim([numpy.amin(x), numpy.amax(x)])
    ax.set_xlabel("time")
    ax.set_ylabel("amplitude")
    if save is None:
        pylab.show()
    else:
        pylab.savefig(save)
        pylab.close(fig)
    return
def to_rgb(img):
    """
    Converts the given array into a RGB image. If the number of channels is not
    3 the array is tiled such that it has 3 channels. Finally, the values are
    rescaled to [0,255)
    :param img: the array to convert [nx, ny, channels]
    :returns img: the rgb image [nx, ny, 3]
    """
    img = np.atleast_3d(img)
    channels = img.shape[2]
    if channels < 3:
        img = np.tile(img, 3)
    img[np.isnan(img)] = 0
    img -= np.amin(img)
    img /= np.amax(img)
    img *= 255
    return img
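# Hedged usage sketch for the to_rgb helper above: a single-channel float image is
# tiled to three channels and min/max-scaled. The in-place operations assume a float
# input; an integer array would need casting first (an assumption, not something the
# original snippet handles).
import numpy as np

gray = np.random.rand(4, 5).astype(np.float32)   # toy [nx, ny] image
rgb = to_rgb(gray.copy())
print(rgb.shape, np.amin(rgb), np.amax(rgb))     # (4, 5, 3) 0.0 255.0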
def add(self, x, y=None):
    self.X = np.memmap(
        self.path+"/X.npy", self.X.dtype,
        shape = (self.nrows + x.shape[0], x.shape[1])
    )
    self.X[self.nrows:self.nrows + x.shape[0], :] = x
    if y is not None:
        if x.shape != y.shape:
            raise ValueError("x and y should have the same shape")
        self.Y = np.memmap(
            self.path+"/Y.npy", self.Y.dtype,
            shape = (self.nrows + y.shape[0], y.shape[1])
        )
        self.Y[self.nrows:self.nrows + y.shape[0], :] = y
    delta = x - self.running_mean
    n = self.X.shape[0] + np.arange(x.shape[0]) + 1
    self.running_dev += np.sum(delta * (x - self.running_mean), 0)
    self.running_mean += np.sum(delta / n[:, np.newaxis], 0)
    self.running_max = np.amax(np.vstack((self.running_max, x)), 0)
    self.running_min = np.amin(np.vstack((self.running_min, x)), 0)
    self.nrows += x.shape[0]
def _make_grid(dim=(11,4)):
    """
    this function generates the structure for an asymmetrical circle grid
    domain (0-1)
    """
    x, y = range(dim[0]), range(dim[1])
    p = np.array([[[s,i] for s in x] for i in y], dtype=np.float32)
    p[:,1::2,1] += 0.5
    p = np.reshape(p, (-1,2), 'F')
    # scale height = 1
    x_scale = 1./(np.amax(p[:,0])-np.amin(p[:,0]))
    y_scale = 1./(np.amax(p[:,1])-np.amin(p[:,1]))
    p *= x_scale, x_scale/.5
    return p
def analytic_infinite_wire(obsloc, wireloc, orientation, I=1.):
    """
    Compute the response of an infinite wire with orientation 'orientation'
    and current I at the observation locations obsloc
    Output:
    B: magnetic field [Bx,By,Bz]
    """
    n, d = obsloc.shape
    t, d = wireloc.shape
    d = np.sqrt(np.dot(obsloc**2., np.ones([d,t])) + np.dot(np.ones([n,d]), (wireloc.T)**2.)
                - 2.*np.dot(obsloc, wireloc.T))
    distr = np.amin(d, axis=1, keepdims=True)
    idxmind = d.argmin(axis=1)
    r = obsloc - wireloc[idxmind]
    orient = np.c_[[orientation for i in range(obsloc.shape[0])]]
    B = (mu_0*I)/(2*np.pi*(distr**2.))*np.cross(orientation, r)
    return B
def calculate_feature_statistics(feature_id):
    feature = Feature.objects.get(pk=feature_id)
    dataframe = _get_dataframe(feature.dataset.id)
    feature_col = dataframe[feature.name]
    feature.min = np.amin(feature_col).item()
    feature.max = np.amax(feature_col).item()
    feature.mean = np.mean(feature_col).item()
    feature.variance = np.nanvar(feature_col).item()
    unique_values = np.unique(feature_col)
    integer_check = (np.mod(unique_values, 1) == 0).all()
    feature.is_categorical = integer_check and (unique_values.size < 10)
    if feature.is_categorical:
        feature.categories = list(unique_values)
    feature.save(update_fields=['min', 'max', 'variance', 'mean', 'is_categorical', 'categories'])
    del unique_values, feature
def find_min_max(scp_file):
    minimum = float("inf")
    maximum = -float("inf")
    uid = 0
    offset = 0
    ark_dict, uid = read_mats(uid, offset, scp_file)
    while ark_dict:
        for key in ark_dict.keys():
            mat_max = np.amax(ark_dict[key])
            mat_min = np.amin(ark_dict[key])
            if mat_max > maximum:
                maximum = mat_max
            if mat_min < minimum:
                minimum = mat_min
        ark_dict, uid = read_mats(uid, offset, scp_file)
    print("min:", minimum, "max:", maximum)
    return minimum, maximum
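# A compact, self-contained alternative sketch of the same accumulation: the global
# min/max over any iterable of matrices can be tracked with np.amin/np.amax and the
# built-in min/max, which keeps the update logic in one place.
import numpy as np

def streaming_min_max(mats):
    minimum, maximum = float("inf"), -float("inf")
    for mat in mats:
        minimum = min(minimum, float(np.amin(mat)))
        maximum = max(maximum, float(np.amax(mat)))
    return minimum, maximum

print(streaming_min_max([np.array([[1., 5.]]), np.array([[-2., 3.]])]))  # (-2.0, 5.0)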
def _gini(self, array):
    """Calculate the Gini coefficient of a numpy array."""
    # https://github.com/oliviaguest/gini
    # based on bottom eq:
    # http://www.statsdirect.com/help/generatedimages/equations/equation154.svg
    # from:
    # http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
    # All values are treated equally, arrays must be 1d:
    array = array.flatten()
    if np.amin(array) < 0:
        # Values cannot be negative:
        array -= np.amin(array)
    # Values cannot be 0:
    array += 0.0000001
    # Values must be sorted:
    array = np.sort(array)
    # Index per array element:
    index = np.arange(1, array.shape[0] + 1)
    # Number of array elements:
    n = array.shape[0]
    # Gini coefficient:
    return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array)))
def displayDataset(self, dataset):
    eps = 0.00001
    linewidth = dataset.linewidth
    if np.var(dataset.values) < eps:
        linewidth += 2
        mean = np.mean(dataset.values)
        x = np.arange(0, 1, 0.1)
        x = np.sort(np.append(x, [mean, mean-eps, mean+eps]))
        density = [1 if v == mean else 0 for v in x]
    else:
        self.kde.fit(np.asarray([[x] for x in dataset.values]))
        ## Computes the x axis
        x_max = np.amax(dataset.values)
        x_min = np.amin(dataset.values)
        delta = x_max - x_min
        density_delta = 1.1 * delta
        x = np.arange(x_min, x_max, density_delta / self.num_points)
        x_density = [[y] for y in x]
        ## kde.score_samples returns the 'log' of the density
        log_density = self.kde.score_samples(x_density).tolist()
        density = [math.exp(d) for d in log_density]
    self.ax.plot(x, density, label=dataset.label, color=dataset.color,
                 linewidth=linewidth, linestyle=dataset.linestyle)
def display(self, output_filename):
    fig, (ax) = plt.subplots(1, 1)
    data = [d.values for d in self.datasets]
    labels = [d.label for d in self.datasets]
    bp = ax.boxplot(data, labels=labels, notch=0, sym='+', vert=True, whis=1.5)
    plt.setp(bp['boxes'], color='black')
    plt.setp(bp['whiskers'], color='black')
    plt.setp(bp['fliers'], color='black', marker='+')
    for i in range(len(self.datasets)):
        box = bp['boxes'][i]
        box_x = []
        box_y = []
        for j in range(5):
            box_x.append(box.get_xdata()[j])
            box_y.append(box.get_ydata()[j])
        box_coords = list(zip(box_x, box_y))
        box_polygon = Polygon(box_coords, facecolor=self.datasets[i].color)
        ax.add_patch(box_polygon)
    if self.title is not None:
        ax.set_title(self.title)
    x_min = np.amin([np.amin(d.values) for d in self.datasets])
    x_max = np.amax([np.amax(d.values) for d in self.datasets])
    ax.set_ylim(x_min - 0.05*(x_max - x_min), x_max + 0.05*(x_max - x_min))
    fig.savefig(output_filename)
    plt.close(fig)
def build_img_pair(img_batch):
    input_cast = img_batch[:,:,:,0:6].astype(dtype=np.float32)
    input_min = np.amin(input_cast, axis=(1,2,3))
    input_max = np.amax(input_cast, axis=(1,2,3))
    for i in range(3):
        input_min = np.expand_dims(input_min, i+1)
        input_max = np.expand_dims(input_max, i+1)
    input_norm = (input_cast - input_min) / (input_max - input_min)
    gt_cast = img_batch[:,:,:,6].astype(dtype=np.float32)
    gt_cast = np.expand_dims(gt_cast, 3)
    gt_min = np.amin(gt_cast, axis=(1,2,3))
    gt_max = np.amax(gt_cast, axis=(1,2,3))
    for i in range(3):
        gt_min = np.expand_dims(gt_min, i+1)
        gt_max = np.expand_dims(gt_max, i+1)
    gt_norm = (gt_cast - gt_min) / (gt_max - gt_min)
    return input_norm, gt_norm
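# Design note, as a sketch: the expand_dims loop above only restores the broadcasting
# axes it reduced over. Passing keepdims=True to np.amin/np.amax achieves the same
# per-sample min-max normalization in one step (toy batch below; shapes are assumptions).
import numpy as np

batch = np.random.rand(2, 4, 4, 7).astype(np.float32)    # toy [N, H, W, C] batch
inp = batch[..., 0:6]
inp_min = np.amin(inp, axis=(1, 2, 3), keepdims=True)     # shape (N, 1, 1, 1)
inp_max = np.amax(inp, axis=(1, 2, 3), keepdims=True)
inp_norm = (inp - inp_min) / (inp_max - inp_min)
print(inp_norm.shape, float(np.amin(inp_norm)), float(np.amax(inp_norm)))  # (2, 4, 4, 6) 0.0 1.0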
def gini(array):
    """Calculate the Gini coefficient of a numpy array."""
    # based on bottom eq:
    # http://www.statsdirect.com/help/generatedimages/equations/equation154.svg
    # from:
    # http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
    # All values are treated equally, arrays must be 1d:
    array = array.flatten()
    if np.amin(array) < 0:
        # Values cannot be negative:
        array -= np.amin(array)
    # Values cannot be 0:
    array += 0.0000001
    # Values must be sorted:
    array = np.sort(array)
    # Index per array element:
    index = np.arange(1, array.shape[0] + 1)
    # Number of array elements:
    n = array.shape[0]
    # Gini coefficient:
    return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array)))
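# Worked toy example for the gini helper above: an equal distribution gives a
# coefficient near 0, while a fully concentrated one approaches (n - 1) / n.
import numpy as np

print(round(gini(np.array([1.0, 1.0, 1.0, 1.0])), 4))    # ~0.0
print(round(gini(np.array([0.0, 0.0, 0.0, 100.0])), 4))  # ~0.75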
# Source: single_File_For_ColorizationModel_For_Not_OOP_Fan.py, from the project
# Deep-learning-Colorization-for-visual-media (author: OmarSayedMostafa)
def Get_Batch_Chrominance():
    '''Convert every image in the batch to LAB colorspace and normalize each value of it between [0,1]
    Return:
        AbColores_values array [batch_size,224,224,2] 0-> A value, 1-> B value color
    '''
    global AbColores_values
    global ColorImages_Batch
    AbColores_values = np.empty((Batch_size,224,224,2), "float32")
    for indx in range(Batch_size):
        lab = color.rgb2lab(ColorImages_Batch[indx])
        Min_valueA = np.amin(lab[:,:,1])
        Max_valueA = np.amax(lab[:,:,1])
        Min_valueB = np.amin(lab[:,:,2])
        Max_valueB = np.amax(lab[:,:,2])
        AbColores_values[indx,:,:,0] = Normalize(lab[:,:,1], -128, 127)
        AbColores_values[indx,:,:,1] = Normalize(lab[:,:,2], -128, 127)
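# The Normalize helper is not shown in this snippet; below is a plausible sketch,
# assuming it does a plain min-max rescale of the a/b channel from the given range
# into [0, 1] as the docstring describes. The signature and body are assumptions,
# not the project's actual code.
import numpy as np

def Normalize(channel, min_value, max_value):
    # map values from [min_value, max_value] (here the LAB a/b range -128..127) to [0, 1]
    return (np.asarray(channel, dtype=np.float32) - min_value) / float(max_value - min_value)

print(Normalize(np.array([-128.0, 0.0, 127.0]), -128, 127))  # [0.0, ~0.502, 1.0]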
def match_set_with_pts(db_set_feats, query_set_feats, dist_type,
                       pt_set_dist_mode):
    print('start matching sets using points...')
    if query_set_feats is None:
        query_set_feats = db_set_feats
    dist_mat = np.empty(
        (len(query_set_feats), len(db_set_feats)), dtype=np.float64)
    for i in range(len(query_set_feats)):
        for j in range(len(db_set_feats)):
            if dist_type == DistType.Hamming:
                tmp_dist_mat = scipy.spatial.distance.cdist(query_set_feats[i],
                                                            db_set_feats[j], 'hamming')
            if dist_type == DistType.L2:
                tmp_dist_mat = scipy.spatial.distance.cdist(
                    query_set_feats[i], db_set_feats[j], 'euclidean')
            if pt_set_dist_mode == PtSetDist.Min:
                dist_mat[i, j] = np.amin(tmp_dist_mat)
            if pt_set_dist_mode == PtSetDist.Avg:
                dist_mat[i, j] = np.mean(tmp_dist_mat)
            if pt_set_dist_mode == PtSetDist.MeanMin:
                dist_mat[i, j] = np.mean(np.amin(tmp_dist_mat, axis=1))
    return dist_mat
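# Toy sketch of the three point-set distances used above (Min, Avg, MeanMin), with the
# same scipy.spatial.distance.cdist call on two made-up 2-D point sets:
import numpy as np
import scipy.spatial.distance

query = np.array([[0.0, 0.0], [1.0, 0.0]])
db = np.array([[0.0, 1.0], [5.0, 0.0]])
d = scipy.spatial.distance.cdist(query, db, 'euclidean')
print(np.amin(d))                    # Min: closest pair of points, 1.0
print(np.mean(d))                    # Avg: mean over all pairs
print(np.mean(np.amin(d, axis=1)))   # MeanMin: mean of each query point's nearest neighbour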
def __init__(self, data, leafsize=10):
    """Construct a kd-tree.

    Parameters:
    ===========
    data : array-like, shape (n,k)
        The data points to be indexed. This array is not copied, and
        so modifying this data will result in bogus results.
    leafsize : positive integer
        The number of points at which the algorithm switches over to
        brute-force.
    """
    self.data = np.asarray(data)
    self.n, self.m = np.shape(self.data)
    self.leafsize = int(leafsize)
    if self.leafsize < 1:
        raise ValueError("leafsize must be at least 1")
    self.maxes = np.amax(self.data, axis=0)
    self.mins = np.amin(self.data, axis=0)
    self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)
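# Quick sketch of the bounding-box computation used above: np.amax/np.amin with
# axis=0 give the per-dimension upper and lower corners of the point cloud that the
# kd-tree's root node covers.
import numpy as np

pts = np.array([[2.0, 7.0], [5.0, 1.0], [3.0, 4.0]])   # toy (n, k) data
maxes, mins = np.amax(pts, axis=0), np.amin(pts, axis=0)
print(maxes, mins)                                     # [5. 7.] [2. 1.]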
def get_classification(self, idx):
    """Return the classification labels of the active song at the given sample indices."""
    img_rows = self.params.get('img_rows', self.Sxx.shape[0])
    img_cols = self.params.get('img_cols', 1)
    if self.Sxx is None or self.active_song.classification is None:
        raise TypeError('No active song from which to get data')
    if np.amax(idx) > self.Sxx.shape[1]:
        raise IndexError('Data index of sample out of bounds, only {0} '
                         'samples in the dataset'.format(self.Sxx.shape[1] - img_cols))
    if np.amin(idx) < 0:
        raise IndexError('Data index of sample out of bounds, '
                         'negative index requested')
    # index out the data
    classification = self.active_song.classification[idx]
    return classification
def standard_resize(image, max_side):
    if image is None:
        return None, None, None
    original_h, original_w, _ = image.shape
    if all(side < max_side for side in [original_h, original_w]):
        return image, original_h, original_w
    aspect_ratio = float(np.amax((original_w, original_h)) / float(np.amin((original_h, original_w))))
    if original_w >= original_h:
        new_w = max_side
        new_h = max_side / aspect_ratio
    else:
        new_h = max_side
        new_w = max_side / aspect_ratio
    new_h = int(new_h)
    new_w = int(new_w)
    resized_image = cv2.resize(image, (new_w, new_h))
    return resized_image, new_w, new_h  # note: the early return above yields (h, w) order instead
def update_time_to_recover_map(self):
    max_distance = 30
    self.recover_map = numpy.zeros((max_distance + 1, self.width, self.height))
    self.recover_map_smooth = numpy.zeros((max_distance + 1, self.width, self.height))
    self.recover_map[0] = numpy.divide(self.strength_map, numpy.maximum(self.production_map, 0.001))
    self.recover_map[0] = numpy.multiply(self.recover_map[0], 1 - (self.is_owner_map[self.game_map.my_id] + self.is_enemy_map))
    self.recover_map[0] += (self.is_owner_map[self.game_map.my_id] + self.is_enemy_map) * 999
    distance = 1
    while distance <= max_distance:
        dir_map = numpy.zeros((4, self.width, self.height))
        dir_map[0] = roll_xy(self.recover_map[distance - 1], 0, 1)
        dir_map[1] = roll_xy(self.recover_map[distance - 1], 0, -1)
        dir_map[2] = roll_xy(self.recover_map[distance - 1], 1, 0)
        dir_map[3] = roll_xy(self.recover_map[distance - 1], -1, 0)
        self.recover_map[distance] = numpy.add(self.recover_map[distance - 1], numpy.amin(dir_map, 0))
        distance += 1
    for d in range(2, max_distance):
        self.recover_map[d] = self.recover_map[d] / d
        self.recover_map_smooth[d] = spread_n(self.recover_map[d], 4)
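# The roll_xy and spread_n helpers are not shown here; assuming roll_xy is a
# wrap-around shift along the map axes (comparable to numpy.roll), the core of the
# loop above is a per-step relaxation: each cell adds the minimum of its four
# neighbours' previous-step costs. A self-contained sketch of one such step:
import numpy as np

def relax_step(cost):
    neighbours = np.stack([
        np.roll(cost, 1, axis=0), np.roll(cost, -1, axis=0),   # shift along one map axis
        np.roll(cost, 1, axis=1), np.roll(cost, -1, axis=1),   # shift along the other
    ])
    return cost + np.amin(neighbours, axis=0)

toy = np.array([[1.0, 9.0], [9.0, 9.0]])
print(relax_step(toy))   # every cell adds its cheapest (wrapped) neighbour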
def update_recover_map(self):
    max_distance = 50
    self.recover_map = numpy.zeros((max_distance + 1, self.width, self.height))
    self.recover_map[0] = numpy.divide(self.strength_map, numpy.maximum(self.production_map, 0.01))
    self.recover_map[0] = numpy.multiply(self.recover_map[0], self.is_neutral_map)
    self.recover_map[0] += (self.is_owned_map + self.is_enemy_map) * 999
    for distance in range(1, max_distance + 1):
        dir_map = numpy.zeros((4, self.width, self.height))
        dir_map[0] = roll_xy(self.recover_map[distance - 1], 0, 1)
        dir_map[1] = roll_xy(self.recover_map[distance - 1], 0, -1)
        dir_map[2] = roll_xy(self.recover_map[distance - 1], 1, 0)
        dir_map[3] = roll_xy(self.recover_map[distance - 1], -1, 0)
        self.recover_map[distance] = numpy.add(self.recover_map[distance - 1], numpy.amin(dir_map, 0))
    for d in range(2, max_distance + 1):
        self.recover_map[d] = self.recover_map[d] / d
def diagonal(_, pos):
    """
    Given an object's pixel positions, return the diagonal length of its
    bounding box
    :param _: pixel values (unused)
    :param pos: pixel positions (1-D flat indices)
    :return: diagonal of bounding box
    """
    xs = np.array([i // SSIZE for i in pos])  # row index (floor division)
    ys = np.array([i % SSIZE for i in pos])   # column index
    minx = np.amin(xs)
    miny = np.amin(ys)
    maxx = np.amax(xs)
    maxy = np.amax(ys)
    return compute_line(np.array([minx, miny]), np.array([maxx, maxy]))
def binarization(array):
    '''Takes a binary-class datafile and turns the max value (positive class) into 1 and the min into 0'''
    array = np.array(array, dtype=float)  # conversion needed to use np.inf afterwards
    if len(np.unique(array)) > 2:
        raise ValueError("The argument must be a binary-class datafile. {} classes detected".format(len(np.unique(array))))
    # manipulation which aims at avoiding errors in data with, for example, classes '1' and '2'.
    array[array == np.amax(array)] = np.inf
    array[array == np.amin(array)] = 0
    array[array == np.inf] = 1
    return np.array(array, dtype=int)
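# Worked example for binarization above: a two-class column with labels 1 and 2
# comes back as 0/1 integers.
import numpy as np

print(binarization(np.array([1, 2, 2, 1])))   # [0 1 1 0]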
def bench_on(runner, sym, Ns, trials, dtype=None):
    global args, kernel, out, mkl_layer
    prepare = globals().get("prepare_"+sym, prepare_default)
    kernel = globals().get("kernel_"+sym, None)
    if not kernel:
        kernel = getattr(np.linalg, sym)
    out_lvl = runner.__doc__.split('.')[0].strip()
    func_s = kernel.__doc__.split('.')[0].strip()
    log.debug('Preparing input data for %s (%s).. ' % (sym, func_s))
    args = [prepare(int(i)) for i in Ns]
    it = range(len(Ns))
    # pprint(Ns)
    out = np.empty(shape=(len(Ns), trials))
    b = body(trials)
    tic, toc = (0, 0)
    log.debug('Warming up %s (%s).. ' % (sym, func_s))
    runner(range(1000), empty_work)
    kernel(*args[0])
    runner(range(1000), empty_work)
    log.debug('Benchmarking %s on %s: ' % (func_s, out_lvl))
    gc_old = gc.isenabled()
    # gc.disable()
    tic = time.time()
    runner(it, b)
    toc = time.time() - tic
    if gc_old:
        gc.enable()
    if 'reused_pool' in globals():
        del globals()['reused_pool']
    # calculate the average time and min time, and also keep track of outliers (max time in the loop)
    min_time = np.amin(out)
    max_time = np.amax(out)
    mean_time = np.mean(out)
    stdev_time = np.std(out)
    # print("Min = %.5f, Max = %.5f, Mean = %.5f, stdev = %.5f " % (min_time, max_time, mean_time, stdev_time))
    # final_times = [min_time, max_time, mean_time, stdev_time]
    print('## %s: Outer:%s, Inner:%s, Wall seconds:%f\n' % (sym, out_lvl, mkl_layer, float(toc)))
    return out