def xyz_array_to_pointcloud2(points, stamp=None, frame_id=None):
    '''
    Create a sensor_msgs.PointCloud2 from an array
    of points.
    '''
    msg = PointCloud2()
    if stamp:
        msg.header.stamp = stamp
    if frame_id:
        msg.header.frame_id = frame_id
    if len(points.shape) == 3:
        msg.height = points.shape[1]
        msg.width = points.shape[0]
    else:
        msg.height = 1
        msg.width = len(points)
    msg.fields = [
        PointField('x', 0, PointField.FLOAT32, 1),
        PointField('y', 4, PointField.FLOAT32, 1),
        PointField('z', 8, PointField.FLOAT32, 1)]
    msg.is_bigendian = False
    msg.point_step = 12
    msg.row_step = 12 * points.shape[0]
    msg.is_dense = int(np.isfinite(points).all())
    msg.data = np.asarray(points, np.float32).tostring()
    return msg
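# A minimal numpy-only sketch of the packing logic above (no ROS required):
# an (N, 3) float32 array serializes to N * 12 bytes, and the cloud counts as
# "dense" only when every coordinate is finite.
import numpy as np

pts = np.array([[0.0, 1.0, 2.0],
                [np.nan, 4.0, 5.0]], dtype=np.float32)
data = pts.tobytes()                      # 2 points * 12 bytes/point = 24 bytes
is_dense = bool(np.isfinite(pts).all())   # False here because of the NaN
print(len(data), is_dense)                # -> 24 False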
def check_stoplimit_prices(price, label):
    """
    Check to make sure the stop/limit prices are reasonable and raise
    a BadOrderParameters exception if not.
    """
    try:
        if not isfinite(price):
            raise BadOrderParameters(
                msg="Attempted to place an order with a {} price "
                    "of {}.".format(label, price)
            )
    # This catches arbitrary objects
    except TypeError:
        raise BadOrderParameters(
            msg="Attempted to place an order with a {} price "
                "of {}.".format(label, type(price))
        )

    if price < 0:
        raise BadOrderParameters(
            msg="Can't place a {} order with a negative price.".format(label)
        )
def get_xyz_points(cloud_array, remove_nans=True):
    '''
    Pulls out x, y, and z columns from the cloud recordarray, and returns a 3xN matrix.
    '''
    # Remove invalid (non-finite) points
    if remove_nans:
        mask = np.isfinite(cloud_array['x']) & np.isfinite(cloud_array['y']) & np.isfinite(cloud_array['z'])
        cloud_array = cloud_array[mask]
    # Pull out x, y, and z values
    points = np.zeros(list(cloud_array.shape) + [3], dtype=np.float64)
    points[..., 0] = cloud_array['x']
    points[..., 1] = cloud_array['y']
    points[..., 2] = cloud_array['z']
    return points
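# A small self-contained sketch of the masking step above: build a structured
# array with one NaN coordinate and keep only rows where x, y and z are all
# finite.
import numpy as np

cloud = np.array([(1.0, 2.0, 3.0), (np.nan, 0.0, 0.0)],
                 dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
mask = np.isfinite(cloud['x']) & np.isfinite(cloud['y']) & np.isfinite(cloud['z'])
print(cloud[mask])   # only the first point survives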
def _build_gmm(self, data):
    """
    Build gmm from data
    """
    st = time.time()
    self.gmm = GMM(n_components=self.K, covariance_type='diag')
    self.gmm.fit(data)
    # Setup codebook for closest center lookup
    self.codebook = self.gmm.means_
    print('Vocab construction from data %s (%s KB, %s) => GMM %s took %5.3f s' %
          (data.shape, data.nbytes / 1024, data.dtype, self.gmm.means_.shape, time.time() - st))
    print('GMM: %s' % ('GOOD' if np.isfinite(self.gmm.means_).all() else 'BAD'))
    # Save codebook, and index
    self.index_codebook()
def add(self, pts, ids=None, prune=True):
    # Add only if valid and non-zero
    if not len(pts):
        return
    # Retain valid points
    valid = np.isfinite(pts).all(axis=1)
    pts = pts[valid]
    # ID valid points
    max_id = np.max(self.ids) + 1 if len(self.ids) else 0
    tids = np.arange(len(pts), dtype=np.int64) + max_id if ids is None else ids[valid].astype(np.int64)
    # Add pts to track
    for tid, pt in zip(tids, pts):
        self.tracks_[tid].append(self.index_, pt)
    # If features are propagated
    if prune:
        self.prune()
    # Frame counter
    self.index_ += 1
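# A minimal sketch of the row-validity filter used above: keep the points
# whose coordinates are all finite, then hand out consecutive integer ids.
import numpy as np

pts = np.array([[1.0, 2.0], [np.inf, 3.0], [4.0, 5.0]])
valid = np.isfinite(pts).all(axis=1)          # [True, False, True]
pts = pts[valid]
tids = np.arange(len(pts), dtype=np.int64)    # ids 0 and 1
print(pts, tids)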
def initialize(self, length=None):
    """see ``__init__``"""
    if length is None:
        length = len(self.bounds)
    max_i = min((len(self.bounds) - 1, length - 1))
    self._lb = array([self.bounds[min((i, max_i))][0]
                      if self.bounds[min((i, max_i))][0] is not None
                      else -np.Inf
                      for i in range(length)], copy=False)
    self._ub = array([self.bounds[min((i, max_i))][1]
                      if self.bounds[min((i, max_i))][1] is not None
                      else np.Inf
                      for i in range(length)], copy=False)
    lb = self._lb
    ub = self._ub
    # define added values for lower and upper bound
    self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
                      if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
    self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
                      if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)
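# A numpy-only sketch of the bound-margin rule above: for a finite bound use
# min(half the bound range, (1 + |bound|) / 20), otherwise fall back to 1.
# (``array``, ``isfinite`` and ``rglen`` come from the surrounding cma module
# and are not needed here.)
import numpy as np

lb = np.array([-1.0, -np.inf])
ub = np.array([3.0, np.inf])
al = np.array([min((ub[i] - lb[i]) / 2, (1 + abs(lb[i])) / 20)
               if np.isfinite(lb[i]) else 1 for i in range(len(lb))])
print(al)   # -> [0.1 1. ]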
def __getitem__(self, item: str) -> Any:
    if self._query_values or item in self._values:
        return self._values.get(item)
    hyperparameter = self.configuration_space._hyperparameters[item]
    item_idx = self.configuration_space._hyperparameter_idx[item]
    if not np.isfinite(self._vector[item_idx]):
        raise KeyError()
    value = hyperparameter._transform(self._vector[item_idx])
    # Truncate the representation of the float to be of constant
    # length for a python version
    if isinstance(hyperparameter, FloatHyperparameter):
        value = float(repr(value))
    # TODO make everything faster, then it'll be possible to init all values
    # at the same time and use an OrderedDict instead of only a dict here to
    # support iterating that dict in the same order as the actual order of
    # hyperparameters
    self._values[item] = value
    return self._values[item]
def test_posterior_zeros(self):
    p = np.asarray([.5, 0., 0.]).reshape((1, 3))
    posterior = self.eval(self.posterior, p)
    print('posterior', posterior)
    posterior_grad = self.eval(self.posterior_grad, p)
    print('posterior grad', posterior_grad)
    kl = self.eval(self.posterior_kl, p)
    print(kl)
    self.assertGreater(kl.sum(), 0)
    self.assertFalse(np.isnan(kl).any())
    self.assertTrue(np.isfinite(kl).all())
    grad = self.eval(self.posterior_kl_grad, p)
    print(grad)
    self.assertFalse(np.isnan(grad).any())
    self.assertTrue(np.isfinite(grad).all())
def to_cartesian(r_dev, pos, normal):
    """ Transform radial deviations from an ellipsoidal grid to Cartesian
    Parameters
    ----------
    r_dev : ndarray, shape (N, )
        Array containing the N radial deviations from the ellipse. r < 0 means
        inside the ellipse.
    pos : ndarray, shape (2, N)
        The N (y, x) positions of the ellipse (as given by ``ellipse_grid``)
    normal : ndarray, shape (2, N)
        The N (y, x) unit normals of the ellipse (as given by ``ellipse_grid``)
    """
    coord_new = pos + r_dev * normal
    coord_new = coord_new[:, np.isfinite(coord_new).all(0)]
    return coord_new
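# A minimal sketch of the column filter used above: drop any (y, x) column
# that contains a non-finite value after the radial displacement is applied.
import numpy as np

pos = np.array([[0.0, 0.0, 1.0],
                [1.0, 2.0, 3.0]])            # shape (2, N)
normal = np.array([[1.0, 0.0, np.nan],
                   [0.0, 1.0, 0.0]])
r_dev = np.array([0.5, -0.5, 0.1])
coord_new = pos + r_dev * normal
coord_new = coord_new[:, np.isfinite(coord_new).all(0)]
print(coord_new.shape)                        # -> (2, 2); the NaN column is gone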
def estimate_theta(self, samples):
    '''
    Estimates the theta parameters from the given samples.
    Parameters
    ----------
    samples : array_like
        n-by-2 matrix of samples where n is the number of samples.
    '''
    if self.theta is not None:
        bnds = self.theta_bounds()

        def cost(theta):
            '''
            Calculates the cost of a given `theta` parameter.
            '''
            self.theta = np.asarray(theta)
            vals = self.logpdf(samples)
            # For optimization, filter out infinity values
            return -np.sum(vals[np.isfinite(vals)])

        result = minimize(cost, self.theta, method='TNC', bounds=bnds)
        self.theta = result.x
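# A small sketch of the objective trick above: ignore non-finite log-density
# values (e.g. log(0) = -inf) so the optimizer only ever sees a finite cost.
import numpy as np

vals = np.array([-0.5, -np.inf, -1.2, np.nan])
cost = -np.sum(vals[np.isfinite(vals)])
print(cost)   # -> 1.7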
def test_complex_nan_comparisons():
    nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)]
    fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1),
            complex(1, 1), complex(-1, -1), complex(0, 0)]
    with np.errstate(invalid='ignore'):
        for x in nans + fins:
            x = np.array([x])
            for y in nans + fins:
                y = np.array([y])
                if np.isfinite(x) and np.isfinite(y):
                    continue
                assert_equal(x < y, False, err_msg="%r < %r" % (x, y))
                assert_equal(x > y, False, err_msg="%r > %r" % (x, y))
                assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y))
                assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y))
                assert_equal(x == y, False, err_msg="%r == %r" % (x, y))
def __ipow__(self, other):
    """
    Raise self to the power other, in place.
    """
    other_data = getdata(other)
    other_mask = getmask(other)
    with np.errstate(divide='ignore', invalid='ignore'):
        self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    invalid = np.logical_not(np.isfinite(self._data))
    if invalid.any():
        if self._mask is not nomask:
            self._mask |= invalid
        else:
            self._mask = invalid
        np.copyto(self._data, self.fill_value, where=invalid)
    new_mask = mask_or(other_mask, invalid)
    self._mask = mask_or(self._mask, new_mask)
    return self
def step(self, action):
    self.forward_dynamics(action)
    comvel = self.get_body_comvel("torso")
    forward_reward = comvel[0]
    lb, ub = self.action_bounds
    scaling = (ub - lb) * 0.5
    ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
    contact_cost = 0.5 * 1e-3 * np.sum(
        np.square(np.clip(self.model.data.cfrc_ext, -1, 1)))
    survive_reward = 0.05
    reward = forward_reward - ctrl_cost - contact_cost + survive_reward
    state = self._state
    notdone = np.isfinite(state).all() \
        and state[2] >= 0.2 and state[2] <= 1.0
    done = not notdone
    ob = self.get_current_obs()
    return Step(ob, float(reward), done)
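# A minimal sketch of the termination check above: the episode continues only
# while every state entry is finite and the height (state[2]) stays in
# [0.2, 1.0].
import numpy as np

state = np.array([0.0, 0.0, 0.5, np.nan])
notdone = np.isfinite(state).all() and 0.2 <= state[2] <= 1.0
print(not notdone)   # -> True: the NaN ends the episode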
def _preprocess(t, v):
    """ Raises an exception if any of the inputs are not valid.
    Otherwise, returns a list of Points, ordered by t.
    """
    # Validate the inputs.
    if len(t) != len(v):
        raise ValueError('`t` and `v` must have the same length.')
    t_arr, v_arr = np.array(t), np.array(v)
    if not np.all(np.isfinite(t)):
        raise ValueError('All values in `t` must be finite.')
    finite_mask = np.isfinite(v_arr)
    if np.sum(finite_mask) < 2:
        raise ValueError('`v` must have at least 2 finite values.')
    t_arr, v_arr = t_arr[finite_mask], v_arr[finite_mask]
    if len(np.unique(t_arr)) != len(t_arr):
        raise ValueError('All `t` values must be unique.')
    # Order both arrays by t-values.
    sort_order = np.argsort(t_arr)
    t_arr, v_arr = t_arr[sort_order], v_arr[sort_order]
    return t_arr, v_arr
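# A minimal sketch of the validation-and-sort pattern above: drop non-finite
# values, then reorder both arrays by the time axis.
import numpy as np

t = np.array([3.0, 1.0, 2.0])
v = np.array([30.0, np.nan, 20.0])
mask = np.isfinite(v)
t, v = t[mask], v[mask]
order = np.argsort(t)
print(t[order], v[order])   # -> [2. 3.] [20. 30.]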
def _zscore(a):
    """ Calculating z-score of data on the first axis.
    If the numbers in any column are all equal, scipy.stats.zscore
    will return NaN for this column. We shall correct them all to
    be zeros.
    Parameters
    ----------
    a: numpy array
    Returns
    -------
    zscore: numpy array
        The z-scores of input "a", with any columns including non-finite
        numbers replaced by all zeros.
    """
    assert a.ndim > 1, 'a must have more than one dimension'
    zscore = scipy.stats.zscore(a, axis=0)
    zscore[:, np.logical_not(np.all(np.isfinite(zscore), axis=0))] = 0
    return zscore
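# A small sketch of the constant-column fix above (assumes scipy is
# installed): the z-score of a constant column is NaN, so that column is
# replaced by zeros.
import numpy as np
import scipy.stats

a = np.array([[1.0, 5.0],
              [2.0, 5.0],
              [3.0, 5.0]])
z = scipy.stats.zscore(a, axis=0)
z[:, ~np.all(np.isfinite(z), axis=0)] = 0
print(z[:, 1])   # -> [0. 0. 0.]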
def test_funcs(self):
    data = self.data['binary']
    truth = self.truths['binary']
    nlp = self.truths_to_nlp(truth)
    params = self.joker_params['binary']
    p = np.concatenate((nlp, [truth['K'].value], [self.fd.v0.value]))
    mcmc_p = to_mcmc_params(p)
    p2 = from_mcmc_params(mcmc_p)
    assert np.allclose(p, p2.reshape(p.shape))  # test roundtrip
    lp = ln_prior(p, params)
    assert np.isfinite(lp)
    ll = ln_likelihood(p, params, data)
    assert np.isfinite(ll).all()
    # remove jitter from params passed in to mcmc_p
    mcmc_p = list(mcmc_p)
    mcmc_p.pop(5)  # log-jitter is 5th index in mcmc packed
    lnpost = ln_posterior(mcmc_p, params, data)
    assert np.isfinite(lnpost)
    assert np.allclose(lnpost, lp + ll.sum())
def test_sample_prior(self):
    rnd1 = np.random.RandomState(42)
    joker1 = TheJoker(self.joker_params['binary'], random_state=rnd1)
    rnd2 = np.random.RandomState(42)
    joker2 = TheJoker(self.joker_params['triple'], random_state=rnd2)
    samples1 = joker1.sample_prior(8)
    samples2 = joker2.sample_prior(8)
    for key in samples1.keys():
        assert quantity_allclose(samples1[key], samples2[key])
    samples, ln_vals = joker2.sample_prior(8, return_logprobs=True)
    assert np.isfinite(ln_vals).all()
def get_blazar_redshifts(blazar_type):
    table = Table.read(filename, hdu='LAT_Point_Source_Catalog')
    known_redshift_mask = np.isfinite(table['Redshift'])
    known_redshift_table = table[known_redshift_mask]
    if blazar_type == "bll":
        class_1 = known_redshift_table['CLASS'] == "bll "
        class_2 = known_redshift_table['CLASS'] == "BLL "
    if blazar_type == "fsrq":
        class_1 = known_redshift_table['CLASS'] == "fsrq "
        class_2 = known_redshift_table['CLASS'] == "FSRQ "
    if blazar_type == "bcu":
        class_1 = known_redshift_table['CLASS'] == "bcu "
        class_2 = known_redshift_table['CLASS'] == "BCU "
    class_type_mask = np.logical_or.reduce((class_1, class_2))
    sub_table = known_redshift_table[class_type_mask]
    return sub_table["Redshift"]
def get_data(self, element, ranges, style):
    if self.geographic:
        vdim = element.vdims[0] if element.vdims else None
        value = element.level
        if vdim is not None and (value is not None and np.isfinite(value)):
            self._norm_kwargs(element, ranges, style, vdim)
            style['clim'] = style.pop('vmin'), style.pop('vmax')
            style['array'] = np.array([value])
        return ([element.data], element.crs), style, {}
    else:
        raise SkipRendering('Shape can only be plotted on geographic plot, '
                            'supply a coordinate reference system.')

########################################
#  Geographic features and annotations #
########################################
def get_extents(self, element, ranges):
    """
    Subclasses the get_extents method using the GeoAxes
    set_extent method to project the extents to the
    Elements coordinate reference system.
    """
    extents = super(GeoPlot, self).get_extents(element, ranges)
    if not getattr(element, 'crs', None) or not self.geographic:
        return extents
    elif any(e is None or not np.isfinite(e) for e in extents):
        extents = None
    else:
        try:
            extents = project_extents(extents, element.crs, DEFAULT_PROJ)
        except:
            extents = None
    return (np.NaN,)*4 if not extents else extents
def test_convert_xy(x, y):
    assume(x != 0 and y != 0)
    assume(np.isfinite(x) and np.isfinite(y))
    assume(abs(x) < 1E6 and abs(y) < 1E6)
    assume(abs(x) > 0.01 and abs(y) > 0.01)
    # Test radians
    r, theta = to_polar(x, y)
    x_new, y_new = to_cartesian(r, theta)
    assert np.allclose(x, x_new)
    assert np.allclose(y, y_new)
    # Test degrees
    r, theta = to_polar(x, y, theta_units="degrees")
    x_new, y_new = to_cartesian(r, theta, theta_units="degrees")
    assert np.allclose(x, x_new)
    assert np.allclose(y, y_new)
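# A self-contained sketch of the round trip being tested above, written with
# plain numpy in place of the module's to_polar/to_cartesian helpers (which
# are assumed to behave like this).
import numpy as np

x, y = 3.0, 4.0
r, theta = np.hypot(x, y), np.arctan2(y, x)           # to polar (radians)
x_new, y_new = r * np.cos(theta), r * np.sin(theta)   # back to Cartesian
assert np.allclose([x, y], [x_new, y_new])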
def _get_viewpoint_estimation_labels(viewpoint_data, clss, num_classes):
    """Bounding-box regression targets are stored in a compact form in the
    roidb.
    This function expands those targets into the 3-of-3*K representation used
    by the network (i.e. only one class has non-zero targets). The loss weights
    are similarly expanded.
    Returns:
        view_target_data (ndarray): N x 3K blob of regression targets
        view_loss_weights (ndarray): N x 3K blob of loss weights
    """
    view_targets = np.zeros((clss.size, 3 * num_classes), dtype=np.float32)
    view_loss_weights = np.zeros(view_targets.shape, dtype=np.float32)
    inds = np.where((clss > 0) &
                    np.isfinite(viewpoint_data[:, 0]) &
                    np.isfinite(viewpoint_data[:, 1]) &
                    np.isfinite(viewpoint_data[:, 2]))[0]
    for ind in inds:
        cls = clss[ind]
        start = 3 * cls
        end = start + 3
        view_targets[ind, start:end] = viewpoint_data[ind, :]
        view_loss_weights[ind, start:end] = [1., 1., 1.]
    assert not np.isinf(view_targets).any(), 'viewpoint undefined'
    return view_targets, view_loss_weights
def correlations(A, B, pc_n=100):
    p = (1 - distance.correlation(A.flatten(), B.flatten()))
    spear = spearmanr(A.flatten(), B.flatten())
    dist_genes = np.zeros(A.shape[0])
    for i in range(A.shape[0]):
        dist_genes[i] = 1 - distance.correlation(A[i], B[i])
    pg = (np.average(dist_genes[np.isfinite(dist_genes)]))
    dist_sample = np.zeros(A.shape[1])
    for i in range(A.shape[1]):
        dist_sample[i] = 1 - distance.correlation(A[:, i], B[:, i])
    ps = (np.average(dist_sample[np.isfinite(dist_sample)]))
    pc_dist = []
    if pc_n > 0:
        u0, s0, vt0 = np.linalg.svd(A)
        u, s, vt = np.linalg.svd(B)
        for i in range(pc_n):
            pc_dist.append(abs(1 - distance.cosine(u0[:, i], u[:, i])))
        pc_dist = np.array(pc_dist)
    return p, spear[0], pg, ps, pc_dist
def sanitize_array(array):
    """
    Replace NaN and Inf (there should not be any!)
    :param array:
    :return:
    """
    a = np.ravel(array)
    # maxi = np.nanmax(filter(lambda x: x != float('inf'), a))   # Max except NaN and Inf
    # mini = np.nanmin(filter(lambda x: x != float('-inf'), a))  # Min except NaN and Inf
    maxi = np.nanmax(a[np.isfinite(a)])
    mini = np.nanmin(a[np.isfinite(a)])
    array[array == float('inf')] = maxi
    array[array == float('-inf')] = mini
    mid = (maxi + mini) / 2
    array[np.isnan(array)] = mid
    return array
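# A quick sketch of the sanitisation above: +inf becomes the finite maximum,
# -inf the finite minimum, and NaN the midpoint of the two.
import numpy as np

arr = np.array([1.0, np.inf, -np.inf, np.nan, 3.0])
finite = arr[np.isfinite(arr)]
maxi, mini = finite.max(), finite.min()
arr[arr == np.inf] = maxi
arr[arr == -np.inf] = mini
arr[np.isnan(arr)] = (maxi + mini) / 2
print(arr)   # -> [1. 3. 1. 2. 3.]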
def _calculate(self, X, y, categorical, metafeatures, helpers):
    skews = helpers.get_value("Skewnesses")
    std = np.nanstd(skews) if len(skews) > 0 else 0
    return std if np.isfinite(std) else 0

# @metafeatures.define("cancor1")
# def cancor1(X, y):
#     pass

# @metafeatures.define("cancor2")
# def cancor2(X, y):
#     pass

################################################################################
# Information-theoretic metafeatures