def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
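# A quick, minimal illustration (not part of the test suite above): without an
# axis argument, ndarray.compress selects from the flattened array, which is
# why the final assertion above compares against the single element 1.
import numpy as np

a = np.arange(10).reshape(2, 5)
print(a.compress([0, 1], axis=0))  # selects row 1 -> [[5 6 7 8 9]]
print(a.compress([0, 1], axis=1))  # selects column 1 -> [[1] [6]]
print(a.compress([0, 1]))          # flattened selection -> [1]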
def get_cloud_colors(data):
""" Get colors from the cloud """
dtype = np.dtype('float32')
dtype = dtype.newbyteorder('<')
buf = np.frombuffer(data.data, dtype)
buf = np.resize(buf, (data.width * data.height, 8))
buf = np.compress([True, True, True, False, True, False, False,
False], buf, axis=1)
cond = np.isnan(buf).any(1)
buf[cond] = [0.0, 0.0, 0.0, 0.0]
buf = np.compress([False, False, False, True], buf, axis=1)
    # tobytes/frombuffer replace the deprecated tostring/fromstring; each
    # packed float32 color value is reinterpreted as four uint8 channels.
    nstr = buf.tobytes()
    rgb = np.frombuffer(nstr, dtype='uint8')
    rgb = rgb.reshape(data.height * data.width, 4)
rgb = np.compress([True, True, True, False], rgb, axis=1)
return np.array([rgb])
def _getWavesetIntersection(self):
minw = refs._default_waveset[0]
maxw = refs._default_waveset[-1]
for component in self.components[1:]:
        if component.emissivity is not None:
wave = component.emissivity.GetWaveSet()
minw = max(minw, wave[0])
maxw = min(maxw, wave[-1])
result = self._mergeEmissivityWavesets()
result = N.compress(result > minw, result)
result = N.compress(result < maxw, result)
# intersection with vega spectrum (why???)
vegasp = spectrum.TabularSourceSpectrum(locations.VegaFile)
vegaws = vegasp.GetWaveSet()
result = N.compress(result > vegaws[0], result)
result = N.compress(result < vegaws[-1], result)
return result
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
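# A minimal sketch of the masking step above (mine, not scikit-learn's code):
# the same boolean mask is passed to np.compress twice so the surviving
# indices and their reduced values stay aligned.
import numpy as np

major_index = np.array([0, 1, 2, 3])
value = np.array([0.0, -3.0, 2.5, 0.0])
mask = value != 0
print(np.compress(mask, major_index))  # [1 2]
print(np.compress(mask, value))        # [-3.   2.5]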
def computejobcpus(self):
""" stats for the cores on the nodes that were assigend to the job (if available) """
proc = self._job.getdata('proc')
    if proc is None:
return {"error": ProcessingError.CPUSET_UNKNOWN}, {"error": ProcessingError.CPUSET_UNKNOWN}
cpusallowed = self._job.getdata('proc')['cpusallowed']
ratios = numpy.empty((self._ncpumetrics, self._totalcores), numpy.double)
coreindex = 0
    for host, last in self._last.items():  # .items() for Python 3
elapsed = last - self._first[host]
if host in cpusallowed and 'error' not in cpusallowed[host]:
elapsed = elapsed[:, cpusallowed[host]]
else:
return {"error": ProcessingError.CPUSET_UNKNOWN}, {"error": ProcessingError.CPUSET_UNKNOWN}
coresperhost = len(elapsed[0, :])
ratios[:, coreindex:(coreindex+coresperhost)] = 1.0 * elapsed / numpy.sum(elapsed, 0)
coreindex += coresperhost
allowedcores = numpy.array(ratios[:, :coreindex])
results = {}
for i, name in enumerate(self._outnames):
results[name] = calculate_stats(allowedcores[i, :])
results['all'] = {"cnt": coreindex}
    effective = numpy.compress(allowedcores[1, :] < 0.95, allowedcores, axis=1)
    # count the retained cores directly instead of reusing the leftover loop index i
    effectiveresults = {
        'all': effective.shape[1]
    }
if effectiveresults['all'] > 0:
for i, name in enumerate(self._outnames):
effectiveresults[name] = calculate_stats(effective[i, :])
return results, effectiveresults
def break_info(self, range=None):
"""
Return break information for the axis
The range, major breaks & minor_breaks are
in transformed space. The labels for the major
breaks depict data space values.
"""
if range is None:
range = self.dimension()
major = self.get_breaks(range)
if major is None or len(major) == 0:
major = minor = labels = np.array([])
else:
major = major.compress(np.isfinite(major))
minor = self.get_minor_breaks(major, range)
major = major.compress(
(range[0] <= major) & (major <= range[1]))
labels = self.get_labels(major)
return {'range': range,
'labels': labels,
'major': major,
'minor': minor}
def test_compress(self):
arr = [[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]]
tgt = [[5, 6, 7, 8, 9]]
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
def whiskers(self, whis: float = 1.5) -> t.Tuple[float, float]:
"""
Calculates the upper and the lower whisker for a boxplot.
    I.e. the minimum and the maximum value of the data set
    that lie in the range (Q1 - whis * IQR, Q3 + whis * IQR),
    IQR being the interquartile range, Q1 the lower and Q3 the upper quartile.
Adapted from http://stackoverflow.com/a/20096945
"""
q1, q2, q3 = self.quartiles()
iqr = self.iqr()
    hi_val = q3 + whis * iqr  # upper fence uses Q3 (see docstring), not Q1
whisk_hi = np.compress(self.array <= hi_val, self.array)
if len(whisk_hi) == 0 or np.max(whisk_hi) < q3:
whisk_hi = q3
else:
whisk_hi = max(whisk_hi)
# get low extreme
lo_val = q1 - whis * iqr
whisk_lo = np.compress(self.array >= lo_val, self.array)
if len(whisk_lo) == 0 or np.min(whisk_lo) > q1:
whisk_lo = q1
else:
whisk_lo = min(whisk_lo)
return whisk_lo, whisk_hi
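# Hedged usage sketch of the whisker rule above on a plain array; np.percentile
# stands in for the class's quartiles()/iqr() helpers and the data is invented.
import numpy as np

data = np.array([1.0, 2.0, 2.5, 3.0, 3.5, 4.0, 50.0])
q1, q3 = np.percentile(data, [25, 75])
iqr = q3 - q1
hi_val = q3 + 1.5 * iqr
lo_val = q1 - 1.5 * iqr
whisk_hi = np.compress(data <= hi_val, data)  # values below the upper fence
whisk_lo = np.compress(data >= lo_val, data)  # values above the lower fence
print(whisk_lo.min(), whisk_hi.max())         # lower and upper whiskers: 1.0 4.0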
def PCR_preprocess(file_path, log_mode = False, pseudotime_mode = False,
pcv_method = 'Rprincurve', anchor_gene = None,
exclude_marker_names = None):
low_gene_fraction_max = 0.8
data_tag, output_directory = create_output_directory(file_path)
cell_IDs, cell_stages, data = get_PCR_or_RNASeq_data(file_path, pseudotime_mode)
with open(file_path, 'r') as f:
markers = np.loadtxt(f, dtype = str, delimiter = '\t',
skiprows = 1 if pseudotime_mode else 2, usecols = [0])
    markers = markers.reshape(markers.size)  # reshape returns a new array; assign it back
if exclude_marker_names:
indices = np.zeros(0, dtype = int)
for name in exclude_marker_names:
indices = np.append(indices, np.where(markers == name)[0])
data = np.delete(data, indices, axis = 1)
markers = np.delete(markers, indices)
if pseudotime_mode:
cell_stages = infer_pseudotime(data, output_directory, data_tag, pcv_method,
anchor_gene, markers)
condition = np.mean(data == 0, axis = 0) < low_gene_fraction_max
data = np.compress(condition, data, 1)
markers = np.compress(condition, markers)
write_preprocessed_data(output_directory, cell_IDs, cell_stages, data, markers)
return cell_IDs, data, markers, cell_stages.astype(float), data_tag, output_directory
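# Minimal sketch (invented toy data) of the low-expression filter above: one
# boolean condition prunes the expression matrix along axis 1 and the marker
# list in parallel, so columns and marker names stay aligned.
import numpy as np

data = np.array([[0.0, 1.2, 0.0],
                 [0.0, 0.8, 2.4],
                 [0.0, 0.0, 1.1]])
markers = np.array(['geneA', 'geneB', 'geneC'])
low_gene_fraction_max = 0.8
condition = np.mean(data == 0, axis=0) < low_gene_fraction_max
print(np.compress(condition, data, 1))  # drops the all-zero first column
print(np.compress(condition, markers))  # ['geneB' 'geneC']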
def get_cloud_data(data):
""" Get the data out of a cloud as a numpy array """
dtype = np.dtype('float32')
dtype = dtype.newbyteorder('<')
buf = np.frombuffer(data.data, dtype)
buf = np.resize(buf, (data.width * data.height, 8))
return np.compress([True, True, True, False, True, False, False,
False], buf, axis=1)
def get_cloud_image(self, data):
""" Get an image from the cloud """
dta = np.zeros((data.height, data.width), dtype="float32")
dtype = np.dtype('float32')
dtype = dtype.newbyteorder('<')
buf = np.frombuffer(data.data, dtype)
buf = np.resize(buf, (data.width * data.height, 8))
buf = np.compress([True, True, True, True, True, False, False, False],
buf, axis=1)
buf = buf[~np.isnan(buf).any(1)]
for point in buf:
point[3] = 1.0
src = np.asmatrix(point[:4])
src = np.reshape(src, (4, 1))
dst = np.dot(self.p_left, src)
pnt_w = dst[2, 0]
if pnt_w != 0:
            # pixel indices must be integers for array indexing
            img_x = int(dst[0, 0] / pnt_w)
            img_y = int(dst[1, 0] / pnt_w)
            dta[img_y, img_x] = point[4]
    # tobytes/frombuffer replace the deprecated tostring/fromstring; the
    # float32 image is reinterpreted as four uint8 channels per pixel.
    nstr = dta.tobytes()
    img = np.frombuffer(nstr, dtype='uint8')
    img = img.reshape(data.height, data.width, 4)
img = np.compress([True, True, True, False], img, axis=2)
return img
def _find_door(self):
""" Find the door, The most distant point in our cloud """
cloud = self.fc.zarj.eyes.get_stereo_cloud()
image, details = self.fc.zarj.eyes.get_cloud_image_with_details(cloud)
# we only want the center of the image
shape = image.shape
    print(shape)
    # integer division keeps the slice bounds as ints (Python 3)
    cloud = details[0:2 * shape[0] // 3, shape[1] // 3:2 * shape[1] // 3]
cloud = np.compress([False, False, True, False], cloud, axis=2)
cloud = cloud.flatten()
return np.nanmax(cloud)
def log10(self, data, ind):
data = np.compress(data[:, ind] > 0, data, 0)
data[:, ind] = np.log10(data[:, ind])
return data
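# Small self-contained sketch (not from the original class) of the filter
# above: keep only rows whose value in column `ind` is positive before
# taking log10, avoiding -inf and NaN results.
import numpy as np

data = np.array([[1.0, 10.0], [2.0, 0.0], [3.0, 100.0]])
ind = 1
data = np.compress(data[:, ind] > 0, data, 0)  # row with a zero in column 1 is dropped
data[:, ind] = np.log10(data[:, ind])
print(data)  # [[1. 1.] [3. 2.]]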
# converter.py, from the project PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda (author: SignalMedia)
def _get_default_locs(self, vmin, vmax):
"Returns the default locations of ticks."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
locator = self.plot_obj.date_axis_info
if self.isminor:
return np.compress(locator['min'], locator['val'])
return np.compress(locator['maj'], locator['val'])
def _set_default_format(self, vmin, vmax):
"Returns the default ticks spacing."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
info = self.plot_obj.date_axis_info
if self.isminor:
format = np.compress(info['min'] & np.logical_not(info['maj']),
info)
else:
format = np.compress(info['maj'], info)
self.formatdict = dict([(x, f) for (x, _, _, f) in format])
return self.formatdict
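# Illustrative sketch only: the two pandas converter methods above filter a
# structured array of tick candidates by its boolean 'maj'/'min' fields. The
# dtype here is a simplified stand-in for what finder() actually returns.
import numpy as np

info = np.array([(0, True, False), (1, False, True), (2, True, True)],
                dtype=[('val', int), ('maj', bool), ('min', bool)])
print(np.compress(info['maj'], info['val']))                         # major tick positions: [0 2]
print(np.compress(info['min'] & np.logical_not(info['maj']), info))  # minor-only entries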
def test_small_large(self):
# test the small and large code paths, current cutoff 400 elements
for s in [5, 20, 51, 200, 1000]:
d = np.random.randn(4, s)
# Randomly set some elements to NaN:
w = np.random.randint(0, d.size, size=d.size // 5)
d.ravel()[w] = np.nan
d[:,0] = 1. # ensure at least one good value
# use normal median without nans to compare
tgt = []
for x in d:
nonan = np.compress(~np.isnan(x), x)
tgt.append(np.median(nonan, overwrite_input=True))
assert_array_equal(np.nanmedian(d, axis=-1), tgt)
def trimSpectrum(sp, minw, maxw):
"""Create a new spectrum with trimmed upper and lower ranges.
Parameters
----------
sp : `SourceSpectrum`
Spectrum to trim.
minw, maxw : number
Lower and upper limits (inclusive) for the wavelength set
in the trimmed spectrum.
Returns
-------
result : `TabularSourceSpectrum`
Trimmed spectrum.
"""
wave = sp.GetWaveSet()
flux = sp(wave)
new_wave = N.compress(wave >= minw, wave)
new_flux = N.compress(wave >= minw, flux)
    # Apply the upper-limit mask before trimming either array so the
    # wavelength and flux samples stay aligned.
    keep = new_wave <= maxw
    new_flux = N.compress(keep, new_flux)
    new_wave = N.compress(keep, new_wave)
result = TabularSourceSpectrum()
result._wavetable = new_wave
result._fluxtable = new_flux
result.waveunits = units.Units(sp.waveunits.name)
result.fluxunits = units.Units(sp.fluxunits.name)
return result
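# Hedged sketch (plain numpy, synthetic wavelengths) of the trim above: the
# same mask filters the wavelength and flux arrays so they stay in step.
import numpy as np

wave = np.linspace(1000.0, 2000.0, 11)
flux = np.ones_like(wave)
minw, maxw = 1200.0, 1800.0
keep = (wave >= minw) & (wave <= maxw)
new_wave = np.compress(keep, wave)
new_flux = np.compress(keep, flux)
print(new_wave[0], new_wave[-1])  # 1200.0 1800.0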