def test_filled_like(self):
self.check_like_function(np.full_like, 0, True)
self.check_like_function(np.full_like, 1, True)
self.check_like_function(np.full_like, 1000, True)
self.check_like_function(np.full_like, 123.456, True)
self.check_like_function(np.full_like, np.inf, True)
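# A minimal standalone sketch (not part of the test suite) of the property
# these checks assert: np.full_like(a, v) should match allocating a new
# array with a's shape and dtype and filling it with v.
import numpy as np

a = np.arange(6, dtype=np.float64).reshape(2, 3)
for value in (0, 1, 1000, 123.456, np.inf):
    assert np.array_equal(np.full_like(a, value),
                          np.full(a.shape, value, dtype=a.dtype))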
def generate_rrab_lightcurve(
times,
mags=None,
errs=None,
paramdists={
'period':sps.uniform(loc=0.45,scale=0.35),
'fourierorder':[8,11],
'amplitude':sps.uniform(loc=0.4,scale=0.5),
'phioffset':np.pi,
},
magsarefluxes=False
):
    '''This generates fake RRab light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is None,
    np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    model params, in order:

    {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:

    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.
    '''
modeldict = generate_sinusoidal_lightcurve(times,
mags=mags,
errs=errs,
paramdists=paramdists,
magsarefluxes=magsarefluxes)
modeldict['vartype'] = 'RRab'
return modeldict
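# Usage sketch for the generators in this module (an assumption: that
# generate_rrab_lightcurve is importable from its host module, along with
# its generate_sinusoidal_lightcurve dependency). The same call pattern
# applies to the RRc, HADS, rotator, and LPV generators below.
import numpy as np

times = np.sort(np.random.uniform(0.0, 100.0, size=1000))  # days
modeldict = generate_rrab_lightcurve(times)
print(modeldict['vartype'])  # -> 'RRab'
# mags and errs were omitted, so np.full_like(times, 0.0) stands in for both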
def generate_rrc_lightcurve(
times,
mags=None,
errs=None,
paramdists={
'period':sps.uniform(loc=0.10,scale=0.30),
'fourierorder':[2,3],
'amplitude':sps.uniform(loc=0.1,scale=0.3),
'phioffset':1.5*np.pi,
},
magsarefluxes=False
):
    '''This generates fake RRc light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is None,
    np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    model params, in order:

    {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:

    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.
    '''
modeldict = generate_sinusoidal_lightcurve(times,
mags=mags,
errs=errs,
paramdists=paramdists,
magsarefluxes=magsarefluxes)
modeldict['vartype'] = 'RRc'
return modeldict
def generate_hads_lightcurve(
times,
mags=None,
errs=None,
paramdists={
'period':sps.uniform(loc=0.04,scale=0.06),
'fourierorder':[5,10],
'amplitude':sps.uniform(loc=0.1,scale=0.6),
'phioffset':np.pi,
},
magsarefluxes=False
):
    '''This generates fake HADS light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is None,
    np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    model params, in order:

    {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:

    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.
    '''
modeldict = generate_sinusoidal_lightcurve(times,
mags=mags,
errs=errs,
paramdists=paramdists,
magsarefluxes=magsarefluxes)
modeldict['vartype'] = 'HADS'
return modeldict
def generate_rotator_lightcurve(
times,
mags=None,
errs=None,
paramdists={
'period':sps.uniform(loc=0.80,scale=119.20),
'fourierorder':[2,3],
'amplitude':sps.uniform(loc=0.01,scale=0.7),
'phioffset':1.5*np.pi,
},
magsarefluxes=False
):
    '''This generates fake rotator light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is None,
    np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    model params, in order:

    {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:

    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.
    '''
modeldict = generate_sinusoidal_lightcurve(times,
mags=mags,
errs=errs,
paramdists=paramdists,
magsarefluxes=magsarefluxes)
modeldict['vartype'] = 'rotator'
return modeldict
def generate_lpv_lightcurve(
times,
mags=None,
errs=None,
paramdists={
'period':sps.uniform(loc=250.0,scale=250.0),
'fourierorder':[2,3],
'amplitude':sps.uniform(loc=0.1,scale=0.8),
'phioffset':1.5*np.pi,
},
magsarefluxes=False
):
    '''This generates fake LPV light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is None,
    np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    model params, in order:

    {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:

    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.
    '''
modeldict = generate_sinusoidal_lightcurve(times,
mags=mags,
errs=errs,
paramdists=paramdists,
magsarefluxes=magsarefluxes)
modeldict['vartype'] = 'LPV'
return modeldict
def process(self, **kwargs):
"""Process module."""
old_observations = tuple(
getattr(self, '_' + x) for x in self._obs_keys)
if (kwargs.get('root', 'output') == 'output' and
'extra_times' in kwargs):
obslist = (list(
zip(*([kwargs.get(k) for k in self._okeys] +
[[True for x in range(len(kwargs['times']))]]))
) + list(
zip(*([kwargs.get('extra_' + k) for k in self._okeys] +
[[False for x in range(len(kwargs['extra_times']))]]))))
obslist.sort()
self._all_observations = np.concatenate([
np.atleast_2d(np.array(x, dtype=object))
for x in obslist], axis=0).T
for ki, key in enumerate(self._obs_keys):
setattr(self, '_' + key, self._all_observations[ki])
else:
for key in list(
set(self._obs_keys) - set([
'frequencies', 'observed'])):
setattr(self, '_' + key, kwargs[key])
self._frequencies = np.array([
x / frequency_unit(y) if x is not None else None
for x, y in zip(kwargs['frequencies'], kwargs['u_frequencies'])
])
self._observed = np.full_like(kwargs['times'], True, dtype=bool)
self._all_observations = tuple(
getattr(self, '_' + x) for x in self._obs_keys)
outputs = OrderedDict(
[('all_' + x, getattr(self, '_' + x))
for x in list(set(self._obs_keys) - set(['observed']))])
if any(not np.array_equal(x, y) for x, y in zip(
old_observations, self._all_observations)):
self._all_band_indices = np.array([
(self._photometry.find_band_index(
b, telescope=t, instrument=i, mode=m, bandset=bs, system=s)
if f is None else -1)
for ti, b, t, s, i, m, bs, f, uf, o
in zip(*self._all_observations)
])
self._observation_types = np.array([
self._photometry._band_kinds[bi] if bi >= 0 else
'fluxdensity' for bi in self._all_band_indices
], dtype=object)
outputs['all_band_indices'] = self._all_band_indices
outputs['observation_types'] = self._observation_types
outputs['observed'] = np.array(self._observed, dtype=bool)
return outputs
def __init__(self, seed=42):
np.random.seed(seed)
EPOCH = np.random.uniform(0., 40)
self.data = OrderedDict()
self.joker_params = OrderedDict()
self.truths = OrderedDict()
P = np.random.uniform(40, 80) * u.day
mjd = np.random.uniform(0, 300., 8)
_genmjd = mjd + (EPOCH % P.value)
# First just a binary
truth = dict()
truth['P'] = P
truth['K'] = np.random.uniform(5, 15) * u.km/u.s
truth['phi0'] = np.random.uniform(0., 2*np.pi) * u.radian
truth['omega'] = np.random.uniform(0., 2*np.pi) * u.radian
truth['ecc'] = np.random.uniform()
self.v0 = np.random.uniform(-100, 100) * u.km/u.s
orbit = RVOrbit(**truth)
rv = orbit.generate_rv_curve(mjd) + self.v0
err = np.full_like(rv.value, 0.01) * u.km/u.s
data = RVData(mjd, rv, stddev=err)
self.data['binary'] = data
self.joker_params['binary'] = JokerParams(P_min=8*u.day, P_max=1024*u.day)
self.truths['binary'] = truth.copy()
self.truths['binary']['phi0'] = self.truths['binary']['phi0'] - ((2*np.pi*data.t_offset/P.value))*u.radian
# hierarchical triple - long term velocity trend
self.v1 = np.random.uniform(-1, 1) * u.km/u.s/u.day
orbit = RVOrbit(**truth)
rv = orbit.generate_rv_curve(mjd) + self.v0 + self.v1*(mjd-mjd.min())*u.day
err = np.full_like(rv.value, 0.01) * u.km/u.s
data = RVData(mjd, rv, stddev=err, t_offset=mjd.min())
self.data['triple'] = data
self.joker_params['triple'] = JokerParams(P_min=8*u.day, P_max=1024*u.day,
trend_cls=VelocityTrend2)
self.truths['triple'] = truth.copy()
self.truths['triple']['phi0'] = self.truths['triple']['phi0'] - ((2*np.pi*data.t_offset/P.value))*u.radian
# Binary on circular orbit
truth = dict()
truth['P'] = P
truth['K'] = np.random.uniform(5, 15) * u.km/u.s
truth['phi0'] = np.random.uniform(0., 2*np.pi) * u.radian
truth['omega'] = 0*u.radian
truth['ecc'] = 0.
orbit = RVOrbit(**truth)
rv = orbit.generate_rv_curve(_genmjd) + self.v0
err = np.full_like(rv.value, 0.1) * u.km/u.s
data = RVData(mjd+EPOCH, rv, stddev=err)
self.data['circ_binary'] = data
self.joker_params['circ_binary'] = JokerParams(P_min=8*u.day, P_max=1024*u.day)
self.truths['circ_binary'] = truth.copy()
self.truths['circ_binary']['phi0'] = self.truths['circ_binary']['phi0'] - (2*np.pi*data.t_offset/P.value)*u.radian
def test_compare_to_str_array(self, missing_value):
strs = self.strs
shape = strs.shape
arr = LabelArray(strs, missing_value=missing_value)
if missing_value is None:
# As of numpy 1.9.2, object array != None returns just False
# instead of an array, with a deprecation warning saying the
# behavior will change in the future. Work around that by just
# using the ufunc.
notmissing = np.not_equal(strs, missing_value)
else:
notmissing = (strs != missing_value)
check_arrays(arr.not_missing(), notmissing)
check_arrays(arr.is_missing(), ~notmissing)
# The arrays are equal everywhere, but comparisons against the
# missing_value should always produce False
check_arrays(strs == arr, notmissing)
check_arrays(strs != arr, np.zeros_like(strs, dtype=bool))
def broadcastable_row(value, dtype):
return np.full((shape[0], 1), value, dtype=strs.dtype)
def broadcastable_col(value, dtype):
return np.full((1, shape[1]), value, dtype=strs.dtype)
        # Test comparison between arr and a like-shaped 2D array, a column
        # vector, and a row vector.
for comparator, dtype, value in product((eq, ne),
(bytes, unicode, object),
set(self.rowvalues)):
check_arrays(
comparator(arr, np.full_like(strs, value)),
comparator(strs, value) & notmissing,
)
check_arrays(
comparator(arr, broadcastable_row(value, dtype=dtype)),
comparator(strs, value) & notmissing,
)
check_arrays(
comparator(arr, broadcastable_col(value, dtype=dtype)),
comparator(strs, value) & notmissing,
)
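# Standalone illustration of the broadcasting the loop above relies on:
# comparing against a full same-shape array, an (nrows, 1) column, and a
# (1, ncols) row all yield the same elementwise result.
import numpy as np

strs = np.array([['a', 'b'], ['a', 'c']], dtype=object)
value = 'a'
expected = strs == value
full_arr = np.full_like(strs, value)                        # same shape
col = np.full((strs.shape[0], 1), value, dtype=strs.dtype)  # broadcasts across columns
row = np.full((1, strs.shape[1]), value, dtype=strs.dtype)  # broadcasts across rows
assert np.array_equal(strs == full_arr, expected)
assert np.array_equal(strs == col, expected)
assert np.array_equal(strs == row, expected)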
def _gen_init_reduce(self, reduce_var, reduce_op):
"""generate code to initialize reduction variables on non-root
processors.
"""
red_var_typ = self.typemap[reduce_var.name]
el_typ = red_var_typ
if self._isarray(reduce_var.name):
el_typ = red_var_typ.dtype
init_val = None
pre_init_val = ""
if reduce_op == Reduce_Type.Sum:
init_val = str(el_typ(0))
if reduce_op == Reduce_Type.Prod:
init_val = str(el_typ(1))
if reduce_op == Reduce_Type.Min:
init_val = "numba.targets.builtins.get_type_max_value(np.ones(1,dtype=np.{}).dtype)".format(el_typ)
if reduce_op == Reduce_Type.Max:
init_val = "numba.targets.builtins.get_type_min_value(np.ones(1,dtype=np.{}).dtype)".format(el_typ)
if reduce_op in [Reduce_Type.Argmin, Reduce_Type.Argmax]:
# don't generate initialization for argmin/argmax since they are not
# initialized by user and correct initialization is already there
return []
assert init_val is not None
#import pdb; pdb.set_trace()
if self._isarray(reduce_var.name):
pre_init_val = "v = np.full_like(s, {}, s.dtype)".format(init_val)
init_val = "v"
f_text = "def f(s):\n {}\n s = hpat.distributed_lower._root_rank_select(s, {})".format(pre_init_val, init_val)
loc_vars = {}
exec(f_text, {}, loc_vars)
f = loc_vars['f']
f_block = compile_to_numba_ir(f, {'hpat': hpat, 'numba': numba, 'np': np},
self.typingctx, (red_var_typ,), self.typemap, self.calltypes).blocks.popitem()[1]
replace_arg_nodes(f_block, [reduce_var])
nodes = f_block.body[:-3]
nodes[-1].target = reduce_var
return nodes
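# Pure-numpy sketch of the identity-element initialization that
# _gen_init_reduce generates: non-root processors start from the
# reduction's neutral element, so a later global reduce leaves the
# root's values unchanged.
import numpy as np

def reduce_identity_like(arr, reduce_op):
    info = (np.finfo if np.issubdtype(arr.dtype, np.floating) else np.iinfo)(arr.dtype)
    identities = {'sum': 0, 'prod': 1, 'min': info.max, 'max': info.min}
    return np.full_like(arr, identities[reduce_op])

s = np.ones(4, dtype=np.float64)
assert (reduce_identity_like(s, 'sum') == 0).all()
assert (reduce_identity_like(s, 'min') == np.finfo(np.float64).max).all()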
def __init__(self, img, bg=None, maxDev=1e-4, maxIter=10, remove_border_size=0,
# feature_size=5,
cameraMatrix=None, distortionCoeffs=None): # 20
"""
Args:
img (path or array): Reference image
Kwargs:
bg (path or array): background image - same for all given images
            maxDev (float): Relative deviation between the last two iteration
                steps; iterative refinement stops once the deviation is smaller
            maxIter (int): Stop iterative refinement after maxIter steps
"""
self.lens = None
if cameraMatrix is not None:
self.lens = LensDistortion()
self.lens._coeffs['distortionCoeffs'] = distortionCoeffs
self.lens._coeffs['cameraMatrix'] = cameraMatrix
self.maxDev = maxDev
self.maxIter = maxIter
self.remove_border_size = remove_border_size
#self.feature_size = feature_size
img = imread(img, 'gray')
self.bg = bg
if bg is not None:
self.bg = getBackground(bg)
if not isinstance(self.bg, np.ndarray):
self.bg = np.full_like(img, self.bg, dtype=img.dtype)
else:
self.bg = self.bg.astype(img.dtype)
img = cv2.subtract(img, self.bg)
if self.lens is not None:
img = self.lens.correct(img, keepSize=True)
# CREATE TEMPLATE FOR PATTERN COMPARISON:
pos = self._findObject(img)
self.obj_shape = img[pos].shape
PatternRecognition.__init__(self, img[pos])
self._ff_mma = MaskedMovingAverage(shape=img.shape,
dtype=np.float64)
self.object = None
self.Hs = [] # Homography matrices of all fitted images
self.Hinvs = [] # same, but inverse
self.fits = [] # all imaged, fitted to reference
self._fit_masks = []
self._refined = False
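# Minimal sketch of the background normalization in the constructor above:
# a scalar background is broadcast to a full image via np.full_like so the
# cv2.subtract step can treat scalar and image backgrounds uniformly.
import numpy as np

def as_background_image(img, bg):
    if not isinstance(bg, np.ndarray):
        return np.full_like(img, bg, dtype=img.dtype)
    return bg.astype(img.dtype)

img = np.random.randint(0, 255, (4, 4)).astype(np.uint8)
assert as_background_image(img, 10).shape == img.shape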
# TODO: remove that property?
def quantile_mapping(input_data, data_to_match, mask=None,
alpha=0.4, beta=0.4):
'''quantile mapping'''
assert input_data.get_axis_num('time') == 0
assert data_to_match.get_axis_num('time') == 0
assert input_data.shape[1:] == data_to_match.shape[1:]
    # Make a mask if one was not provided
if mask is None:
d0 = input_data.isel(time=0, drop=True)
mask = xr.Variable(d0.dims, ~da.isnull(d0))
# quantiles for the input data
n = len(input_data['time'])
x1 = (np.arange(1, n + 1) - alpha) / (n + 1. - alpha - beta)
# quantiles for the obs
n = len(data_to_match['time'])
x0 = (np.arange(1, n + 1) - alpha) / (n + 1. - alpha - beta)
def qmap(data, like, mask):
# Use numpy to sort these arrays before we loop through each variable
sort_inds_all = np.argsort(data, axis=0)
sorted_all = np.sort(like, axis=0)
ii, jj = mask.nonzero()
new = np.full_like(data, np.nan)
for i, j in zip(ii, jj):
# Sorted Observations
y0 = sorted_all[:, i, j]
            # Indices that would sort the input data
sort_inds = sort_inds_all[:, i, j]
new[sort_inds, i, j] = np.interp(x1, x0, y0) # TODO: handle edges
return new
if isinstance(input_data.data, da.Array):
# dask arrays
new = da.map_blocks(qmap, input_data.data, data_to_match.data,
mask.data, chunks=input_data.data.chunks,
name='qmap')
else:
# numpy arrays
new = qmap(input_data.data, data_to_match.data, mask.data)
return xr.DataArray(new, dims=input_data.dims, coords=input_data.coords,
attrs=input_data.attrs, name=input_data.name)
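# Self-contained 1-D version of the per-pixel mapping done in qmap():
# rank the input data, then evaluate the observations' empirical quantile
# function at the input's plotting positions x1.
import numpy as np

def quantile_map_1d(data, like, alpha=0.4, beta=0.4):
    n1 = len(data)
    x1 = (np.arange(1, n1 + 1) - alpha) / (n1 + 1. - alpha - beta)
    n0 = len(like)
    x0 = (np.arange(1, n0 + 1) - alpha) / (n0 + 1. - alpha - beta)
    new = np.full_like(data, np.nan, dtype=float)
    new[np.argsort(data)] = np.interp(x1, x0, np.sort(like))
    return new

rng = np.random.default_rng(0)
mapped = quantile_map_1d(rng.normal(5, 2, 100), rng.gamma(2.0, size=200))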
def _round_hitcounts(self, accuracy, count_miss=None):
"""Round the accuracy to the nearest hit counts.
Parameters
----------
accuracy : np.ndarray[float]
The accuracy to round in the range [0, 1]
        count_miss : np.ndarray[int], optional
The number of misses to fix.
Returns
-------
count_300 : np.ndarray[int]
The number of 300s.
count_100 : np.ndarray[int]
The number of 100s.
count_50 : np.ndarray[int]
The number of 50s.
count_miss : np.ndarray[int]
The number of misses.
"""
if count_miss is None:
count_miss = np.full_like(accuracy, 0)
max_300 = len(self.hit_objects) - count_miss
accuracy = np.maximum(
0.0,
np.minimum(
calculate_accuracy(max_300, 0, 0, count_miss) * 100.0,
accuracy * 100,
),
)
count_50 = np.full_like(accuracy, 0)
count_100 = np.round(
-3.0 *
((accuracy * 0.01 - 1.0) * len(self.hit_objects) + count_miss) *
0.5,
)
mask = count_100 > len(self.hit_objects) - count_miss
count_100[mask] = 0
count_50[mask] = np.round(
-6.0 *
((accuracy[mask] * 0.01 - 1.0) * len(self.hit_objects) +
count_miss[mask]) *
0.2,
)
count_50[mask] = np.minimum(max_300[mask], count_50[mask])
count_100[~mask] = np.minimum(max_300[~mask], count_100[~mask])
count_300 = (
len(self.hit_objects) -
count_100 -
count_50 -
count_miss
)
return count_300, count_100, count_50, count_miss
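# Numeric check of the -3/2 factor in _round_hitcounts: with count_50 == 0,
# osu! accuracy is (300*n300 + 100*n100) / (300*n), so solving for n100
# gives n100 = -1.5 * ((acc - 1)*n + miss).
import numpy as np

n, miss, n100 = 1000, 7, 42
n300 = n - n100 - miss
acc = (300*n300 + 100*n100) / (300.0 * n)
recovered = np.round(-3.0 * ((acc - 1.0) * n + miss) * 0.5)
assert recovered == n100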
def full(shape, fill_value, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar
Fill value.
dtype : data-type, optional
        The desired data-type for the array. The default, `None`, means
        `np.array(fill_value).dtype`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
full_like : Fill an array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> np.full((2, 2), np.inf)
array([[ inf, inf],
[ inf, inf]])
>>> np.full((2, 2), 10)
array([[10, 10],
[10, 10]])
"""
if dtype is None:
dtype = array(fill_value).dtype
a = empty(shape, dtype, order)
multiarray.copyto(a, fill_value, casting='unsafe')
return a
def full_like(a, fill_value, dtype=None, order='K', subok=True):
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
full : Fill a new array.
Examples
--------
>>> x = np.arange(6, dtype=np.int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1])
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0])
>>> np.full_like(x, 0.1, dtype=np.double)
array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
>>> np.full_like(x, np.nan, dtype=np.double)
array([ nan, nan, nan, nan, nan, nan])
>>> y = np.arange(6, dtype=np.double)
>>> np.full_like(y, 0.1)
array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok)
multiarray.copyto(res, fill_value, casting='unsafe')
return res
def back_transform(self, scores):
"transform nore score back to orginal data"
values = np.full_like(scores, np.nan)
lo_value = self.transform_table['value'][0]
up_value = self.transform_table['value'][-1]
lo_score = self.transform_table['score'][0]
up_score = self.transform_table['score'][-1]
# scores in normal range
normal_mask = np.logical_and(scores <= up_score, scores >= lo_score)
normal_scores = scores[normal_mask]
values[normal_mask] = self.back_func(normal_scores)
# scores in lower tail: 1=linear, 2=power
lower_mask = scores < lo_score
lower_scores = scores[lower_mask]
temp = list()
for sc in lower_scores:
backtr = lo_value
cdflo = gcum(lo_score)
cdfbt = gcum(sc)
if self.ltail == 1: # linear
backtr = powint(0, cdflo, self.zmin, lo_value, cdfbt, 1)
temp.append(backtr)
elif self.ltail == 2: # power
cpow = 1.0 / self.ltpar
backtr = powint(0, cdflo, self.zmin, lo_value, cdfbt, cpow)
temp.append(backtr)
values[lower_mask] = temp
# scores in upper tail: 1=linear, 2=power, 4=hyperbolic
upper_mask = scores > up_score
upper_scores = scores[upper_mask]
temp = list()
        for sc in upper_scores:
backtr = up_value
cdfhi = gcum(up_score)
cdfbt = gcum(sc) # cdf value of the score to be back-transformed
if self.utail == 1: # linear
backtr = powint(cdfhi, 1.0, up_value, self.zmax, cdfbt, 1)
temp.append(backtr)
elif self.utail == 2: # power
cpow = 1.0 / self.utpar
backtr = powint(cdfhi, 1.0, up_value, self.zmax, cdfbt, cpow)
temp.append(backtr)
elif self.utail == 4: # hyperbolic
l = (up_value**self.utpar) * (1 - gcum(up_score))
backtr = (l / (1 - gcum(sc)))**(1 / self.utpar)
temp.append(backtr)
values[upper_mask] = temp
return values
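# Hedged sketch of the linear lower-tail case above. gcum and powint are
# not defined in this snippet; here they are assumed to be the standard
# normal CDF and GSLIB-style power interpolation, respectively.
import numpy as np
from scipy.stats import norm

def gcum(x):
    # assumption: standard normal CDF
    return norm.cdf(x)

def powint(xlo, xhi, ylo, yhi, x, power):
    # assumption: GSLIB-style power interpolation between (xlo, ylo) and (xhi, yhi)
    return ylo + (yhi - ylo) * ((x - xlo) / (xhi - xlo)) ** power

lo_score, lo_value, zmin = -2.5, 0.1, 0.0
sc = -3.0  # a score below the lower bound of the transform table
backtr = powint(0, gcum(lo_score), zmin, lo_value, gcum(sc), 1)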
def _evaluate_rollout(self, state, limit):
# _, player, legal_moves = Game.possible_moves(state)
winner = 0
# old_board = Board()
# old_board.stones = state
player = None
for i in range(limit):
legal_states, p, legal_moves = Game.possible_moves(state)
if player is None:
player = p
if len(legal_states) == 0:
break
probs = self._rollout(state, legal_moves)
mask = np.full_like(probs, -0.01)
mask[:, legal_moves] = probs[:, legal_moves]
probs = mask
best_move = np.argmax(probs, 1)[0]
idx = np.where(legal_moves == best_move)[0]
# if idx.size == 0:
# print(i, idx)
# print(best_move)
# print(probs.shape)
# print(legal_moves)
# print(probs)
assert idx.size == 1
idx = idx[0]
st1 = legal_states[idx]
over, winner, last_loc = st1.is_over(state)
if over:
break
state = st1
else:
# If no break from the loop, issue a warning.
print("WARNING: rollout reached move limit")
if winner == 0:
return 0
else:
return 1 if winner == player else -1
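# Standalone sketch of the legal-move masking used in the rollout above:
# filling with a small negative value guarantees np.argmax never selects
# an illegal move, since the rollout probabilities are non-negative.
import numpy as np

probs = np.array([[0.1, 0.3, 0.2, 0.4]])
legal_moves = np.array([0, 2])
mask = np.full_like(probs, -0.01)
mask[:, legal_moves] = probs[:, legal_moves]
best_move = np.argmax(mask, 1)[0]
assert best_move in legal_moves  # -> 2 here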
# dnn_test.py (project: DeepLearning_VirtualReality_BigData_Project)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib._multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
# dnn_linear_combined_test.py (project: DeepLearning_VirtualReality_BigData_Project)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib._multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))