def getCrop(self, dpt, xstart, xend, ystart, yend, zstart, zend, thresh_z=True, background=0):
"""
Crop patch from image
:param dpt: depth image to crop from
:param xstart: start x
:param xend: end x
:param ystart: start y
:param yend: end y
:param zstart: start z
:param zend: end z
    :param thresh_z: whether to threshold depth values: depths nearer than zstart are clamped to zstart, depths beyond zend are zeroed
    :param background: value used for padded out-of-image pixels
    :return: cropped image
"""
if len(dpt.shape) == 2:
cropped = dpt[max(ystart, 0):min(yend, dpt.shape[0]), max(xstart, 0):min(xend, dpt.shape[1])].copy()
# add pixels that are out of the image in order to keep aspect ratio
cropped = numpy.pad(cropped, ((abs(ystart)-max(ystart, 0),
abs(yend)-min(yend, dpt.shape[0])),
(abs(xstart)-max(xstart, 0),
abs(xend)-min(xend, dpt.shape[1]))), mode='constant', constant_values=background)
elif len(dpt.shape) == 3:
cropped = dpt[max(ystart, 0):min(yend, dpt.shape[0]), max(xstart, 0):min(xend, dpt.shape[1]), :].copy()
# add pixels that are out of the image in order to keep aspect ratio
cropped = numpy.pad(cropped, ((abs(ystart)-max(ystart, 0),
abs(yend)-min(yend, dpt.shape[0])),
(abs(xstart)-max(xstart, 0),
abs(xend)-min(xend, dpt.shape[1])),
(0, 0)), mode='constant', constant_values=background)
else:
raise NotImplementedError()
    if thresh_z:
msk1 = numpy.logical_and(cropped < zstart, cropped != 0)
msk2 = numpy.logical_and(cropped > zend, cropped != 0)
cropped[msk1] = zstart
cropped[msk2] = 0. # backface is at 0, it is set later
return cropped
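For illustration, a minimal standalone sketch of the same crop-and-pad idea (the toy image and window below are made up): a window that extends past the image border is clipped first, then padded back to its nominal size so the aspect ratio is preserved.

import numpy

dpt = numpy.arange(16, dtype=float).reshape(4, 4)  # toy 4x4 depth image
xstart, xend, ystart, yend = -1, 3, 1, 5           # window sticks out on two sides
cropped = dpt[max(ystart, 0):min(yend, dpt.shape[0]),
              max(xstart, 0):min(xend, dpt.shape[1])].copy()
cropped = numpy.pad(cropped,
                    ((abs(ystart) - max(ystart, 0), abs(yend) - min(yend, dpt.shape[0])),
                     (abs(xstart) - max(xstart, 0), abs(xend) - min(xend, dpt.shape[1]))),
                    mode='constant', constant_values=0)
print(cropped.shape)  # (4, 4): the nominal (yend - ystart) x (xend - xstart) window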
Python mode() example source code
def majority_vote(votes):
    # column-wise majority vote; `mode` here is scipy.stats.mode
    # (its ModeResult exposes the .mode attribute used below)
    mode_result = mode(votes, axis=0)
    return mode_result.mode[0]
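A quick usage sketch (made-up votes; this assumes SciPy older than 1.11, where scipy.stats.mode keeps the reduced axis so mode_result.mode[0] yields the per-column result):

import numpy as np
from scipy.stats import mode

votes = np.array([[0, 1, 1],
                  [0, 2, 1],
                  [1, 2, 1]])  # rows: voters, columns: samples
print(majority_vote(votes))   # -> [0 2 1], the per-column majority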
def mode(x, axis=None, keepdims=False):
    import numpy as np  # local imports keep this excerpt self-contained
    from scipy.stats import mode as sp_mode
    mode_x, _ = sp_mode(x, axis=axis)
    if not keepdims:
        # drop the reduced axis, mirroring the keepdims semantics of np.mean
        mode_x = np.take(mode_x, 0, axis=axis)
    return mode_x
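For example, on a 2-d array the wrapper mirrors the keepdims semantics of np.mean and friends (again assuming a pre-1.11 SciPy return shape):

import numpy as np

x = np.array([[1, 2, 2],
              [3, 3, 1]])
print(mode(x, axis=1))                 # -> [2 3]
print(mode(x, axis=1, keepdims=True))  # -> [[2] [3]], reduced axis kept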
def set_mode(self, mode):
self.mode = mode
for layer in self.middle_layers:
layer.set_mode(mode)
self.cost_layer.set_mode(mode)
def get_cost(self, data_iterator):
ret = 0
old_mode = self.mode
self.set_mode('predict')
data_iterator.begin(do_shuffle=False)
while True:
ret += self.cost_func(*(data_iterator.get_batch()))
data_iterator.next()
if data_iterator.no_batch_left():
break
self.set_mode(old_mode)
return ret / data_iterator.total()
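get_cost only assumes an iterator with begin/get_batch/next/no_batch_left/total; the project's iterator class is not shown here, so the stub below is a hypothetical stand-in that just illustrates the protocol the loop relies on.

class ListIterator(object):
    # hypothetical stand-in for the project's data iterator
    def __init__(self, batches):
        self.batches = batches
        self.pos = 0
    def begin(self, do_shuffle=False):
        self.pos = 0
    def get_batch(self):
        return self.batches[self.pos]
    def next(self):
        self.pos += 1
    def no_batch_left(self):
        return self.pos >= len(self.batches)
    def total(self):
        return len(self.batches)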
def get_error_dict(self, data_iterator):
if len(self.error_func_dict) > 0:
l = {}
for key in self.error_func_dict:
ret = 0
old_mode = self.mode
self.set_mode('predict')
data_iterator.begin(do_shuffle=False)
while True:
ret += self.error_func_dict[key](*(data_iterator.get_batch()))
data_iterator.next()
if data_iterator.no_batch_left():
break
self.set_mode(old_mode)
            l[key] = ret / data_iterator.total()
return l
#else: # disable, since only for binary predictions
#error = 0
#old_mode = self.mode
#self.set_mode('predict')
#data_iterator.begin(do_shuffle=False)
#while True:
# output = self.output_func_dict[0](*data_iterator.input_batch())
# target = data_iterator.output_batch()[0]
# pred = output.reshape((output.shape[0])) > 0.5
# target = target.reshape(target.shape[0]).astype("bool")
# error += (pred == target).sum()
# data_iterator.next()
# if data_iterator.no_batch_left():
# break
#error = 1 - (error / numpy_floatX(data_iterator.total()))
#self.set_mode(old_mode)
#return [error]
def set_mode(self, mode):
self.mode = mode
for layer in self.middle_layers:
layer.set_mode(mode)
self.cost_layer.set_mode(mode)
def get_cost(self, data_iterator):
ret = 0
old_mode = self.mode
self.set_mode('predict')
data_iterator.begin(do_shuffle=False)
while True:
ret += self.cost_func(*(data_iterator.get_batch()))
data_iterator.next()
if data_iterator.no_batch_left():
break
self.set_mode(old_mode)
return ret / (data_iterator.total()*data_iterator.num_segments)
def get_error_dict(self, data_iterator):
if len(self.error_func_dict) > 0:
l = {}
for key in self.error_func_dict:
ret = 0
old_mode = self.mode
self.set_mode('predict')
data_iterator.begin(do_shuffle=False)
while True:
ret += self.error_func_dict[key](*(data_iterator.get_batch()))
data_iterator.next()
if data_iterator.no_batch_left():
break
self.set_mode(old_mode)
            l[key] = ret / (data_iterator.total() * data_iterator.num_segments)
return l
#else: # disable, since only for binary predictions
#error = 0
#old_mode = self.mode
#self.set_mode('predict')
#data_iterator.begin(do_shuffle=False)
#while True:
# output = self.output_func_dict[0](*data_iterator.input_batch())
# target = data_iterator.output_batch()[0]
# pred = output.reshape((output.shape[0])) > 0.5
# target = target.reshape(target.shape[0]).astype("bool")
# error += (pred == target).sum()
# data_iterator.next()
# if data_iterator.no_batch_left():
# break
#error = 1 - (error / numpy_floatX(data_iterator.total()*data_iterator.num_segments))
#self.set_mode(old_mode)
#return [error]
def json_graph(model, categories=None, scales=None):
json_dict = {}
# build links json representation
json_dict["links"] = []
for s_node, t_node, n_common in model.links_:
link_dict = {"source": str(s_node),
"target": str(t_node),
"n_common": n_common}
json_dict["links"].append(link_dict)
# node json representation
json_dict["nodes"] = []
for (p_n, p) in enumerate(model.nodes_):
for (c_n, c) in enumerate(p):
node_dict = {"id": str((p_n, c_n)),
"int_id": model.nodes_to_int_[(p_n, c_n)],
"color": 1,
"n_members": c.shape[0]}
if categories is not None:
for name, arr in categories.items():
node_dict[name] = int(mode(arr[c], axis=None)[0])
if scales is not None:
for name, arr in scales.items():
node_dict[name] = np.mean(arr[c], axis=None)
json_dict["nodes"].append(node_dict)
# list of categories and scales
e_name = "categories_and_scales"
json_dict[e_name] = []
for c in (["int_id"] + (list(categories.keys()) if categories
is not None else [])):
json_dict[e_name].append({"name": c, "type": "category"})
for s in (list(scales.keys()) if scales is not None else []):
json_dict[e_name].append({"name": s, "type": "scale"})
return json.dumps(json_dict, indent=4)
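A toy invocation with a hypothetical two-node model stub shows the shape of the emitted JSON; the attribute names follow the function above, and a pre-1.11 scipy.stats.mode return value is assumed.

import json
import numpy as np
from types import SimpleNamespace
from scipy.stats import mode

model = SimpleNamespace(
    links_=[((0, 0), (1, 0), 1)],                        # one link, 1 shared member
    nodes_=[[np.array([0, 1, 2])], [np.array([2, 3])]],  # member indices per node
    nodes_to_int_={(0, 0): 0, (1, 0): 1},
)
labels = np.array([0, 0, 1, 1])
print(json_graph(model, categories={"label": labels}))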
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
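With non-uniform weights the result can differ from the plain mode; this is the example from the weighted_mode docstring in scikit-learn:

import numpy as np
from sklearn.utils.extmath import weighted_mode

x = np.array([4, 1, 4, 2, 4, 2])
weights = np.array([1, 3, 0.5, 1.5, 1, 2])  # deweight the 4's
print(weighted_mode(x, weights))            # (array([2.]), array([3.5]))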
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
mode = stats.mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
    elif most_frequent_count == n_repeat:
        # Tie: break it by copying the behaviour of scipy.stats.mode,
        # which returns the smaller of the tied values
if most_frequent_value < extra_value:
return most_frequent_value
else:
return extra_value
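A few worked cases (made-up inputs) cover each branch, including the tie rule:

import numpy as np
from scipy import stats  # used by _most_frequent

print(_most_frequent(np.array([1., 2., 2.]), np.nan, 0))  # 2.0: plain mode
print(_most_frequent(np.array([1., 1.]), 3., 2))          # 1.0: tie, smaller value wins
print(_most_frequent(np.array([1., 1.]), 3., 5))          # 3.0: extra_value dominates
print(_most_frequent(np.array([]), 0., 0))                # nan: nothing to count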
def transform(self, y, replace_unique=False):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
        if replace_unique:
            mode = stats.mode(y)[0][0]
            for i in range(len(y)):
                if y[i] in diff:
                    y[i] = mode
else:
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def mode_function(df):
counts = mode(df)
return counts[0][0]
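Applied to a pandas DataFrame (a guess at the original call site), scipy's mode works column-wise and counts[0][0] pulls out the row of per-column modes (pre-1.11 SciPy return shape assumed):

import pandas as pd
from scipy.stats import mode

df = pd.DataFrame({"a": [1, 1, 2], "b": [3, 4, 4]})
print(mode_function(df))  # -> [1 4], the per-column modes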
def freq_from_autocorr(signal, sampling_rate):
corr = fftconvolve(signal, signal[::-1], mode='full')
corr = corr[len(corr)//2:]
d = np.diff(corr)
start = find_index_by_true(d > 0)[0]
peak = np.argmax(corr[start:]) + start
px, py = parabolic(corr, peak)
return sampling_rate / px
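The helpers parabolic and find_index_by_true come from elsewhere in the original source; the minimal stand-ins below (a standard parabolic-interpolation formula and a flatnonzero wrapper) are assumptions, added only so the function can be exercised end to end.

import numpy as np
from scipy.signal import fftconvolve

def find_index_by_true(mask):
    # indices where mask is True (stand-in for the project's helper)
    return np.flatnonzero(mask)

def parabolic(f, x):
    # vertex of the parabola through (x-1, f[x-1]), (x, f[x]), (x+1, f[x+1])
    xv = 0.5 * (f[x-1] - f[x+1]) / (f[x-1] - 2*f[x] + f[x+1]) + x
    yv = f[x] - 0.25 * (f[x-1] - f[x+1]) * (xv - x)
    return xv, yv

sampling_rate = 8000
t = np.arange(sampling_rate) / sampling_rate
signal = np.sin(2 * np.pi * 440 * t)              # one second of a 440 Hz tone
print(freq_from_autocorr(signal, sampling_rate))  # ~440.0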
def predict(self, X):
"""
Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values.
"""
dim = len(self._classifiers)
ensemble_output = np.zeros((len(X),dim))
# Z-score
X = (X-self._med)/(self._std+self._noise)
for i in range(0,dim):
xrot_z = X.dot(self._inforotar[i])
ensemble_output[:,i] = self._classifiers[i].predict(xrot_z)
y_pred = mode(ensemble_output, axis=1)[0]
return y_pred
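The final vote is row-wise; a small example (older SciPy return shape assumed) shows why y_pred comes out as an (n_samples, 1) column:

import numpy as np
from scipy.stats import mode

ensemble_output = np.array([[0., 1., 1.],
                            [2., 2., 0.]])  # rows: samples, cols: classifiers
print(mode(ensemble_output, axis=1)[0])     # -> [[1.] [2.]], one label per sample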
def pr_object(detect, truth, overlap=10):
# we assume that both truth and detect volumes are separate objects
from scipy import stats
# TODO: 64-bit support
# manual relabel (could be slow!)
utruth = np.unique(truth)
utruth = utruth[utruth > 0]
udetect = np.unique(detect)
udetect = udetect[udetect > 0]
tp = 0.0
fp = 0.0
fn = 0.0
# TODO: removing only greatest match
# for each truth object find a detection
for t in utruth: # background is ignored
match = detect[truth == t]
match = match[match > 0] # get rid of spurious values
match = stats.mode(match)
if match[1] >= overlap:
tp += 1
# any detected objects can only be used once, so remove them here.
# detect = mahotas.labeled.remove_regions(detect, match[0])
detect[detect == match[0]] = 0
else:
fn += 1
# detect_left, fp = mahotas.labeled.relabel(detect)
fp = np.unique(detect)
fp = fp[fp > 0]
fp = len(fp)
precision = 0
recall = 0
if tp + fp > 0:
precision = tp/(tp+fp)
if tp + fn > 0:
recall = tp/(tp+fn)
if (precision == 0) or (recall == 0):
f1 = 0
else:
f1 = (2*precision*recall)/(precision+recall)
print(precision)
print(recall)
print(f1)
return precision, recall, f1
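A toy sanity check on two 2x3 label volumes (the label values are arbitrary; the overlap threshold is lowered to 1 so the tiny objects can match). Note that the function zeroes out matched labels in detect, hence the copy.

import numpy as np

truth = np.array([[1, 1, 0],
                  [0, 2, 2]])
detect = np.array([[5, 5, 0],
                   [0, 0, 7]])
pr_object(detect.copy(), truth, overlap=1)  # prints 1.0, 1.0, 1.0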
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features).
Test samples.
Returns
-------
y : array of shape [n_samples]
Class labels for each data sample.
"""
    # TODO: Make classification of multiple samples a bit more efficient...
if X.ndim > 1 and X.shape[1] != 1:
out = []
for x in X:
out += self.predict(x)
return out
X = X.flatten()
if self.metric == 'minkowski':
dists = np.sum(np.abs(self._data - X) ** self.p, axis=1)
else:
# TODO: Implement other metrics.
raise ValueError("Only Minkowski distance metric implemented...")
argument = np.argsort(dists)
labels = self._labels[argument[:self.n_neighbors]]
if self.weights == 'distance':
weights = 1 / dists[argument[:self.n_neighbors]]
out = np.zeros((len(self._classes), ), 'float')
for i, c in enumerate(self._classes):
out[i] = np.sum(weights[labels == c])
out /= np.sum(out)
y_pred = self._labels[np.argmax(out)]
else:
y_pred, _ = mode(labels)
return y_pred.tolist()
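In the unweighted branch, scipy.stats.mode on the neighbour labels returns an array, which is why the method ends with tolist(); for example (pre-1.11 SciPy):

import numpy as np
from scipy.stats import mode

labels = np.array([2, 0, 2, 1, 2])  # labels of the k nearest neighbours
y_pred, _ = mode(labels)
print(y_pred.tolist())              # -> [2]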
def _dense_fit(self, X, strategy, missing_values, axis):
"""Fit the transformer on dense data."""
X = check_array(X, force_all_finite=False)
mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=mask)
# Mean
if strategy == "mean":
mean_masked = np.ma.mean(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = np.nan
return mean
# Median
elif strategy == "median":
if tuple(int(v) for v in np.__version__.split('.')[:2]) < (1, 5):
            # In old versions of numpy, calling a median on an array
            # containing nans returns nan. This is different in
            # recent versions of numpy, which we want to mimic
masked_X.mask = np.logical_or(masked_X.mask,
np.isnan(X))
median_masked = np.ma.median(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = np.nan
return median
# Most frequent
elif strategy == "most_frequent":
        # scipy.stats.mstats.mode cannot be used because it does not work
        # properly if the first element is masked and its frequency
        # is equal to the frequency of the most frequent valid element
        # See https://github.com/scipy/scipy/issues/2636
        # To be able to access the elements by columns
if axis == 0:
X = X.transpose()
mask = mask.transpose()
most_frequent = np.empty(X.shape[0])
for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
            row_mask = np.logical_not(row_mask).astype(bool)
row = row[row_mask]
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
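A quick illustration of the most_frequent path for axis=0, reusing the _most_frequent helper shown earlier and assuming np.nan marks the missing values (the check_array/_get_mask plumbing is omitted):

import numpy as np
from scipy import stats  # used by _most_frequent

X = np.array([[1., np.nan, 2.],
              [1., 3.,     2.],
              [4., 3.,     np.nan]])
mask = np.isnan(X)
imputed = [_most_frequent(row[~row_mask], np.nan, 0)
           for row, row_mask in zip(X.T, mask.T)]
print(imputed)  # -> [1.0, 3.0, 2.0], the per-column most frequent values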
def predict(model, training_cnf, predict_dir, weights_from, dataset_name, convert, image_size, sync,
predict_type):
images = data.get_image_files(predict_dir)
    # For now the models, cnfs, and weights are hard-coded;
    # they should come from program inputs or an ensembling config file
print('Creating predictor 1')
weights_from1 = 'weights.sa/model-epoch-97.ckpt'
model1 = 'examples/mnist_model_sa.py'
training_cnf1 = 'examples/mnist_cnf.py'
model_def1 = util.load_module(model1)
model1 = model_def1.model
cnf1 = util.load_module(training_cnf1).cnf
standardizer = cnf1.get('standardizer', NoOpStandardizer())
preprocessor = convert_preprocessor(model_def1.image_size[0]) if convert else None
prediction_iterator1 = create_prediction_iter(cnf1, standardizer, model_def1.crop_size, preprocessor, sync)
# predictor1 = QuasiCropPredictor(model1, cnf1, weights_from1, prediction_iterator1, 20)
predictor1 = OneCropPredictor(model1, cnf1, weights_from1, prediction_iterator1)
print('Creating predictor 2')
weights_from2 = 'weights.rv/model-epoch-31.ckpt'
model2 = 'examples/mnist_model.py'
training_cnf2 = 'examples/mnist_cnf.py'
model_def2 = util.load_module(model2)
model2 = model_def2.model
cnf2 = util.load_module(training_cnf2).cnf
standardizer = cnf2.get('standardizer', NoOpStandardizer())
preprocessor = convert_preprocessor(model_def2.image_size[0]) if convert else None
prediction_iterator2 = create_prediction_iter(cnf2, standardizer, model_def2.crop_size, preprocessor, sync)
# predictor2 = QuasiCropPredictor(model2, cnf2, weights_from2, prediction_iterator2, 20)
predictor2 = OneCropPredictor(model2, cnf2, weights_from2, prediction_iterator2)
predictor = EnsemblePredictor([predictor1, predictor2])
def softmax_result_to_vote(predictions):
return predictions.argmax(axis=1)
def vote_combiner(votes):
return mode(votes, axis=0)[0].reshape(-1)
class_predictions = predictor.predict_with_voting(
images,
[softmax_result_to_vote, softmax_result_to_vote],
vote_combiner
)
if not os.path.exists(os.path.join(predict_dir, '..', 'results')):
os.mkdir(os.path.join(predict_dir, '..', 'results'))
if not os.path.exists(os.path.join(predict_dir, '..', 'results', dataset_name)):
os.mkdir(os.path.join(predict_dir, '..', 'results', dataset_name))
names = data.get_names(images)
image_class_predictions = np.column_stack([names, class_predictions])
title = np.array(['image', 'label'])
image_class_predictions = np.vstack([title, image_class_predictions])
prediction_class_file = os.path.abspath(
os.path.join(predict_dir, '..', 'results', dataset_name, 'predictions_class.csv'))
np.savetxt(prediction_class_file, image_class_predictions, delimiter=",", fmt="%s")
print('Class predictions saved to: %s' % prediction_class_file)