def softassign(self):
"""
Run the softassign algorithm until convergence.
"""
# TODO add possibility of slack
for i, indices in enumerate(self.element_type_subset_indices):
M = self.match_matrix[indices]
old_M = M.copy()
for it in xrange(self.max_softassign_iterations):
# normalize across rows (except slack)
M /= np.sum(M,axis=1)[:,None]
# normalize across columns (except slack)
M /= np.sum(M,axis=0)
max_row_normalization_error = np.max(abs(np.sum(M, axis = 1)-1))
# break if converged
if max_row_normalization_error < self.softassign_convergence_threshold:
oprint(5, "Softassign algorithm for subset %d converged in iteration %d" % (i, it+1))
break
            # break if the match matrix has stopped changing
            max_absolute_change = np.max(abs(old_M - M))
            if max_absolute_change < self.softassign_convergence_threshold2:
oprint(5, "Softassign algorithm for subset %d converged in iteration %d" % (i, it+1))
break
if it == (self.max_softassign_iterations - 1):
eprint(3, "WARNING: Softassign algorithm for subset %d did not converge to %.2g (reached %.2g) in %d iterations" % (i, self.softassign_convergence_threshold, max_row_normalization_error, self.max_softassign_iterations))
np.copyto(old_M, M)
# M is NOT a view, but a copy
self.match_matrix[indices] = M
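# A minimal standalone sketch of the alternating row/column normalization that
# softassign() applies to each subset of the match matrix (Sinkhorn-style
# balancing). The threshold and iteration count below are illustrative
# assumptions, not the values used by the class above.
import numpy as np

def sinkhorn_normalize(M, max_iterations=50, threshold=1e-6):
    """Alternately normalize rows and columns of a positive matrix until the
    row sums are approximately 1 again after the column normalization."""
    M = M.copy()
    for _ in range(max_iterations):
        M /= np.sum(M, axis=1)[:, None]   # row normalization
        M /= np.sum(M, axis=0)            # column normalization
        if np.max(np.abs(np.sum(M, axis=1) - 1)) < threshold:
            break
    return M

M = sinkhorn_normalize(np.random.rand(4, 4) + 0.1)
print(np.sum(M, axis=0), np.sum(M, axis=1))   # both close to 1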
def backup_match_matrix(self):
np.copyto(self.old_old_match_matrix,self.old_match_matrix)
np.copyto(self.old_match_matrix,self.match_matrix)
def test_object_array_self_copy(self):
        # An object array being copied into itself was DECREF'ed before being
        # INCREF'ed, causing segmentation faults (gh-3787)
a = np.array(object(), dtype=object)
np.copyto(a, a)
assert_equal(sys.getrefcount(a[()]), 2)
a[()].__class__ # will segfault if object was deleted
def test_copyto():
a = np.arange(6, dtype='i4').reshape(2, 3)
# Simple copy
np.copyto(a, [[3, 1, 5], [6, 2, 1]])
assert_equal(a, [[3, 1, 5], [6, 2, 1]])
# Overlapping copy should work
np.copyto(a[:, :2], a[::-1, 1::-1])
assert_equal(a, [[2, 6, 5], [1, 3, 1]])
# Defaults to 'same_kind' casting
assert_raises(TypeError, np.copyto, a, 1.5)
# Force a copy with 'unsafe' casting, truncating 1.5 to 1
np.copyto(a, 1.5, casting='unsafe')
assert_equal(a, 1)
# Copying with a mask
np.copyto(a, 3, where=[True, False, True])
assert_equal(a, [[3, 1, 3], [3, 1, 3]])
# Casting rule still applies with a mask
assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])
    # Lists of integer 0's and 1's are OK too
np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
assert_equal(a, [[3, 4, 4], [4, 1, 3]])
# Overlapping copy with mask should work
np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
assert_equal(a, [[3, 4, 4], [4, 3, 3]])
# 'dst' must be an array
assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
the `val` value, and return the copy together with a boolean mask
marking the locations where NaNs were present. If `a` is not of
inexact type, do nothing and return `a` together with a mask of None.
Note that scalars will end up as array scalars, which is important
for using the result as the value of the out argument in some
operations.
Parameters
----------
a : array-like
Input array.
val : float
NaN values are set to val before doing the operation.
Returns
-------
y : ndarray
If `a` is of inexact type, return a copy of `a` with the NaNs
replaced by the fill value, otherwise return `a`.
mask: {bool, None}
If `a` is of inexact type, return a boolean mask marking locations of
NaNs, otherwise return None.
"""
is_new = not isinstance(a, np.ndarray)
if is_new:
a = np.array(a)
if not issubclass(a.dtype.type, np.inexact):
return a, None
if not is_new:
# need copy
a = np.array(a, subok=True)
mask = np.isnan(a)
np.copyto(a, val, where=mask)
return a, mask
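# Hedged usage sketch: the same replace-NaNs-then-mask pattern that
# _replace_nan() implements, written inline with public numpy calls so it runs
# on its own. The array values are arbitrary.
import numpy as np

a = np.array([1.0, np.nan, 3.0, np.nan])
mask = np.isnan(a)
filled = np.array(a, subok=True)        # take a copy, as _replace_nan does
np.copyto(filled, 0.0, where=mask)
# e.g. a NaN-ignoring mean: sum of filled values over the count of non-NaNs
print(filled.sum() / (~mask).sum())     # 2.0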
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : ndarray
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
        N is the number of True values in `mask`. If `vals` is smaller
        than N, it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
if not isinstance(arr, np.ndarray):
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(arr).__name__))
return _insert(arr, mask, vals)
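# Hedged comparison sketch of the difference described in the docstring above:
# np.place cycles through `vals`, while np.copyto(..., where=mask) broadcasts a
# single value (or an array of the destination's shape). Values are arbitrary.
import numpy as np

arr1 = np.arange(6).reshape(2, 3)
np.place(arr1, arr1 > 2, [44, 55])           # first N of [44, 55], repeated
print(arr1)                                  # [[ 0  1  2] [44 55 44]]

arr2 = np.arange(6).reshape(2, 3)
np.copyto(arr2, 99, where=arr2 > 2)          # same positions, one broadcast value
print(arr2)                                  # [[ 0  1  2] [99 99 99]]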
def __call__(self, a, b, *args, **kwargs):
"Execute the call behavior."
# Get the data
(da, db) = (getdata(a), getdata(b))
# Get the result
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(da, db, *args, **kwargs)
# Get the mask as a combination of the source masks and invalid
m = ~umath.isfinite(result)
m |= getmask(a)
m |= getmask(b)
# Apply the domain
domain = ufunc_domain.get(self.f, None)
if domain is not None:
m |= filled(domain(da, db), True)
# Take care of the scalar case first
if (not m.ndim):
if m:
return masked
else:
return result
# When the mask is True, put back da if possible
# any errors, just abort; impossible to guarantee masked values
try:
np.copyto(result, 0, casting='unsafe', where=m)
# avoid using "*" since this may be overlaid
masked_da = umath.multiply(m, da)
# only add back if it can be cast safely
if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):
result += masked_da
except:
pass
# Transforms to a (subclass of) MaskedArray
masked_result = result.view(get_masked_subclass(a, b))
masked_result._mask = m
if isinstance(a, MaskedArray):
masked_result._update_from(a)
elif isinstance(b, MaskedArray):
masked_result._update_from(b)
return masked_result
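# Hedged illustration of the behaviour the domained binary operation above
# produces: results that fall outside the operation's domain (here, division by
# zero) come back masked instead of raising or propagating inf/nan. Uses only
# the public numpy.ma API; the values are arbitrary.
import numpy as np
import numpy.ma as ma

x = ma.array([1.0, 2.0, 3.0], mask=[False, False, True])
y = ma.array([1.0, 0.0, 2.0])
print(ma.divide(x, y))   # [1.0 -- --]: the zero divisor and the masked input both end up masked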
def _recursive_printoption(result, mask, printopt):
"""
Puts printoptions in result where mask is True.
Private function allowing for recursion
"""
names = result.dtype.names
for name in names:
(curdata, curmask) = (result[name], mask[name])
if curdata.dtype.names:
_recursive_printoption(curdata, curmask, printopt)
else:
np.copyto(curdata, printopt, where=curmask)
return
def _recursive_filled(a, mask, fill_value):
"""
Recursively fill `a` with `fill_value`.
"""
names = a.dtype.names
for name in names:
current = a[name]
if current.dtype.names:
_recursive_filled(current, mask[name], fill_value[name])
else:
np.copyto(current, fill_value[name], where=mask[name])
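# Hedged sketch of the per-field fill that _recursive_filled() performs on a
# structured (record) array: np.copyto writes the field's fill value only where
# that field's mask is True. The dtype, mask, and values are arbitrary, and the
# nested-dtype recursion is omitted.
import numpy as np

a = np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
mask = np.array([(False, True), (True, False)], dtype=[('x', bool), ('y', bool)])
fill_value = np.array((999, -1.0), dtype=a.dtype)
for name in a.dtype.names:
    np.copyto(a[name], fill_value[name], where=mask[name])
print(a)   # a['x'] -> [1, 999], a['y'] -> [-1.0, 4.0]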
def putmask(a, mask, values): # , mode='raise'):
"""
Changes elements of an array based on conditional and input values.
This is the masked array version of `numpy.putmask`, for details see
`numpy.putmask`.
See Also
--------
numpy.putmask
Notes
-----
Using a masked array as `values` will **not** transform a `ndarray` into
a `MaskedArray`.
"""
# We can't use 'frommethod', the order of arguments is different
if not isinstance(a, MaskedArray):
a = a.view(MaskedArray)
(valdata, valmask) = (getdata(values), getmask(values))
if getmask(a) is nomask:
if valmask is not nomask:
a._sharedmask = True
a._mask = make_mask_none(a.shape, a.dtype)
np.copyto(a._mask, valmask, where=mask)
elif a._hardmask:
if valmask is not nomask:
m = a._mask.copy()
np.copyto(m, valmask, where=mask)
a.mask |= m
else:
if valmask is nomask:
valmask = getmaskarray(values)
np.copyto(a._mask, valmask, where=mask)
np.copyto(a._data, valdata, where=mask)
return
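# Hedged usage sketch of the masked-array putmask defined above, via the public
# numpy.ma API: both the data and the mask of `a` are updated where `mask` is
# True. The arrays and masks are arbitrary.
import numpy as np
import numpy.ma as ma

a = ma.array([1, 2, 3, 4], mask=[False, False, True, False])
values = ma.array([10, 20, 30, 40], mask=[False, True, False, False])
ma.putmask(a, [True, True, True, False], values)
print(a)   # [10 -- 30 4]: element 1 becomes masked, element 2 becomes unmasked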
def __getitem__(self, i):
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
np.copyto(a, self.buffer[self.data_offsets[i]:self.data_offsets[i + 1]])
return torch.from_numpy(a)
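# Hedged sketch of the flat-buffer lookup pattern used by __getitem__ above:
# copy one item's slice of a flat buffer into a freshly allocated array (the
# torch.from_numpy conversion is omitted to keep the sketch numpy-only).
# The offsets, sizes, and dtype are illustrative assumptions.
import numpy as np

buffer = np.arange(10, dtype=np.int64)     # flat storage for all items
data_offsets = [0, 4, 10]                  # element offset of each item
sizes = [[4], [6]]                         # per-item tensor shapes (1-D here)

i = 1
a = np.empty(sizes[i], dtype=buffer.dtype)
np.copyto(a, buffer[data_offsets[i]:data_offsets[i + 1]])
print(a)                                   # [4 5 6 7 8 9]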
def test_copyto(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.empty((2, 3, 4), dtype=dtype)
xp.copyto(b, a)
return b
def test_copyto_dtype(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype='?')
b = xp.empty((2, 3, 4), dtype=dtype)
xp.copyto(b, a)
return b
def test_copyto_broadcast(self, xp, dtype):
a = testing.shaped_arange((3, 1), xp, dtype)
b = xp.empty((2, 3, 4), dtype=dtype)
xp.copyto(b, a)
return b
def test_copyto_where(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = testing.shaped_reverse_arange((2, 3, 4), xp, dtype)
c = testing.shaped_arange((2, 3, 4), xp, '?')
xp.copyto(a, b, where=c)
return a
def test_copyto_multigpu(self, xp, dtype):
with cuda.Device(0):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
with cuda.Device(1):
b = xp.empty((2, 3, 4), dtype=dtype)
xp.copyto(b, a)
return b
def test_copyto_multigpu_noncontiguous(self, dtype):
with cuda.Device(0):
src = testing.shaped_arange((2, 3, 4), cupy, dtype)
src = src.swapaxes(0, 1)
with cuda.Device(1):
dst = cupy.empty_like(src)
cupy.copyto(dst, src)
expected = testing.shaped_arange((2, 3, 4), numpy, dtype)
expected = expected.swapaxes(0, 1)
testing.assert_array_equal(expected, src.get())
testing.assert_array_equal(expected, dst.get())
def test_copyto(self, xp, dtype):
dst = xp.ones(self.dst_shape, dtype=dtype)
xp.copyto(dst, self.src)
return dst
def combine_constraints(self, constraints):
if constraints is not None: #[hack]
# print('combine strokes')
[im_c, mask_c, im_e, mask_e] = constraints
if self.prev_im_c is None:
mask_c_f = mask_c
else:
mask_c_f = np.maximum(self.prev_mask_c, mask_c)
if self.prev_im_e is None:
mask_e_f = mask_e
else:
mask_e_f = np.maximum(self.prev_mask_e, mask_e)
if self.prev_im_c is None:
im_c_f = im_c
else:
im_c_f = self.prev_im_c.copy()
mask_c3 = np.tile(mask_c, [1,1, im_c.shape[2]])
            np.copyto(im_c_f, im_c, where=mask_c3.astype(bool)) #[hack]
if self.prev_im_e is None:
im_e_f = im_e
else:
im_e_f = self.prev_im_e.copy()
mask_e3 = np.tile(mask_e, [1,1,im_e.shape[2]])
            np.copyto(im_e_f, im_e, where=mask_e3.astype(bool))
return [im_c_f, mask_c_f, im_e_f, mask_e_f]
else:
return [self.prev_im_c, self.prev_mask_c, self.prev_im_e, self.prev_mask_e]
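# Hedged standalone sketch of the merge that combine_constraints() performs:
# keep the previous image and overwrite only the pixels covered by the new
# stroke mask, with the single-channel mask tiled across the colour channels.
# The shapes and pixel values here are illustrative assumptions.
import numpy as np

prev_im = np.zeros((4, 4, 3), dtype=np.uint8)
new_im = np.full((4, 4, 3), 255, dtype=np.uint8)
new_mask = np.zeros((4, 4, 1), dtype=np.uint8)
new_mask[1:3, 1:3] = 1                        # a small square stroke

merged = prev_im.copy()
mask3 = np.tile(new_mask, [1, 1, new_im.shape[2]])
np.copyto(merged, new_im, where=mask3.astype(bool))
print(merged[:, :, 0])                        # 255 only inside the stroke square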
def align_fill_down(l, u,
long_indexed_df,
long_array):
'''Data align current values to all future months
(short array segment aligned to long array)
This function is used to set the values from the last standalone month as
the initial data for integrated dataset computation when a delayed
implementation exists.
uses pandas df auto align - relatively slow
TODO (for developer) - consider an all numpy solution
inputs
l, u (integers)
current month slice indexes (from long df)
long_indexed_df (dataframe)
empty long dataframe with empkey indexes
long_array (array)
long array of multiple month data
(orig_job, fur_codes, etc)
declare long indexed df outside of function (input).
grab current month slice for array insertion (copy).
chop long df to begin with current month (copy).
assign array to short df.
data align short df to long df (chopped to current month and future).
copy chopped df column as array to long_array
return long_array
'''
short_df = long_indexed_df[l:u].copy()
short_df['x'] = long_array[l:u]
    # chopped_df begins with a defined index (row), normally the beginning of
    # a delayed implementation month
chopped_df = long_indexed_df[l:].copy()
# data align short_df to chopped_df
chopped_df['x'] = short_df['x']
result_array = chopped_df.x.values
result_size = result_array.size
np.copyto(long_array[-result_size:], result_array)
return long_array
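# Hedged standalone sketch of the index-alignment trick used above: assign a
# short, indexed Series into a longer DataFrame so pandas aligns on the index
# labels (repeating each value for every future month that shares the label),
# then copy the aligned column into the tail of a numpy array with np.copyto.
# The index labels, slice bounds, and values are illustrative assumptions.
import numpy as np
import pandas as pd

long_df = pd.DataFrame(index=['a', 'b', 'c', 'a', 'b', 'c'])   # empkey repeated per month
long_array = np.zeros(6)
l, u = 0, 3

short_df = long_df[l:u].copy()
short_df['x'] = [10.0, 20.0, 30.0]            # current month values

chopped_df = long_df[l:].copy()
chopped_df['x'] = short_df['x']               # auto-aligns on the index labels
result_array = chopped_df.x.values
np.copyto(long_array[-result_array.size:], result_array)
print(long_array)                             # [10. 20. 30. 10. 20. 30.]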
# ALIGN NEXT (month)