def generate_patch_probs(path, patch_locations, patch_size, im_size):
    x, y, z = patch_locations
    seg = nib.load(glob.glob(os.path.join(path, '*_seg.nii.gz'))[0]).get_data().astype(np.float32)
    p = []
    for i in range(len(x)):
        for j in range(len(y)):
            for k in range(len(z)):
                patch = seg[int(x[i] - patch_size / 2) : int(x[i] + patch_size / 2),
                            int(y[j] - patch_size / 2) : int(y[j] + patch_size / 2),
                            int(z[k] - patch_size / 2) : int(z[k] + patch_size / 2)]
                patch = (patch > 0).astype(np.float32)
                percent = np.sum(patch) / (patch_size * patch_size * patch_size)
                p.append((1 - np.abs(percent - 0.5)) * percent)
    p = np.asarray(p, dtype=np.float32)
    p[p == 0] = np.amin(p[np.nonzero(p)])
    p = p / np.sum(p)
    return p
Python nonzero() example source code
def callback_rect(self, eclick, erelease):
    xmin, xmax, ymin, ymax = eclick.xdata, erelease.xdata, eclick.ydata, erelease.ydata
    if xmin > xmax:
        xmin, xmax = xmax, xmin
    if ymin > ymax:
        ymin, ymax = ymax, ymin
    x, y = self.x_position, self.y_position
    in_selection = ((x >= xmin) & (x <= xmax) &
                    (y >= ymin) & (y <= ymax))
    indices = np.nonzero(in_selection)[0]
    add_or_remove = None
    if erelease.key == 'shift':
        add_or_remove = 'add'
    elif erelease.key == 'control':
        add_or_remove = 'remove'
    self.update_inspect(indices, add_or_remove)
def get_bc_counts(genomes, genes, molecule_counter):
    genome_ids = molecule_counter.get_column('genome')
    genome_index = cr_reference.get_genome_index(genomes)
    conf_mapped_reads = molecule_counter.get_column('reads')
    barcodes = molecule_counter.get_column('barcode')
    bc_counts = {}
    for genome in genomes:
        genome_id = cr_reference.get_genome_id(genome, genome_index)
        genome_indices = genome_ids == genome_id
        if genome_indices.sum() == 0:
            # edge case - there's no data for this genome (e.g. empty sample,
            # false barnyard sample, or nothing confidently mapped)
            continue
        bcs_for_genome = barcodes[genome_indices]
        # only count UMIs with at least one conf mapped read
        umi_conf_mapped_to_genome = conf_mapped_reads[genome_indices] > 0
        # run-length trick: a nonzero first difference marks the start of a new barcode block
        bc_breaks = bcs_for_genome[1:] - bcs_for_genome[:-1]
        bc_breaks = np.concatenate(([1], bc_breaks))  # first row is always a break
        bc_break_indices = np.nonzero(bc_breaks)[0]
        unique_bcs = bcs_for_genome[bc_break_indices]
        umis_per_bc = np.add.reduceat(umi_conf_mapped_to_genome, bc_break_indices)
        cmb_reads_per_bc = np.add.reduceat(conf_mapped_reads[genome_indices], bc_break_indices)
        bc_counts[genome] = (unique_bcs, umis_per_bc, cmb_reads_per_bc)
    return bc_counts
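# The function above groups per-barcode counts without a Python loop: on a
# sorted barcode array, np.nonzero of the first differences gives the start of
# each run, and np.add.reduceat sums each run. A minimal, self-contained sketch
# of the same trick on toy data (not the original molecule_counter API):
import numpy as np

barcodes = np.array([3, 3, 3, 7, 7, 9])            # already sorted
reads = np.array([2, 0, 1, 4, 0, 5])
breaks = np.concatenate(([1], np.diff(barcodes)))  # nonzero where a new barcode starts
starts = np.nonzero(breaks)[0]                     # -> [0, 3, 5]
print(barcodes[starts])                            # unique barcodes: [3 7 9]
print(np.add.reduceat(reads, starts))              # reads per barcode: [3 4 5]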
def primes_2_to_n(n):
    """
    Efficient algorithm to find and list primes from 2 to `n`.

    Args:
        n (int): highest number up to which to search for primes

    Returns:
        np array of all primes from 2 to n

    References:
        Robert William Hanks,
        https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n/
    """
    sieve = np.ones(int(n / 3 + (n % 6 == 2)), dtype=bool)  # plain bool: np.bool is removed in modern NumPy
    for i in range(1, int((n ** 0.5) / 3 + 1)):
        if sieve[i]:
            k = 3 * i + 1 | 1
            sieve[int(k * k / 3)::2 * k] = False
            sieve[int(k * (k - 2 * (i & 1) + 4) / 3)::2 * k] = False
    return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]
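# Quick check of the sieve above (assuming numpy is imported as np):
# the primes up to 30 are 2, 3, 5, 7, 11, 13, 17, 19, 23, 29.
print(primes_2_to_n(30))   # -> [ 2  3  5  7 11 13 17 19 23 29]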
def top_uncer_items(adata, pp, n, flag=None):
    """
    Return the indices of the top n most uncertain items that are not flagged.
    """
    uncertain = np.abs(pp[:, 0] - 0.5)
    if flag is not None:
        # flagged items should not be considered, so push their uncertainty value up
        addition = np.asarray(flag, dtype=int) * 10
        uncertain = uncertain + addition
    if len(uncertain) <= n:
        return np.nonzero(uncertain <= 10000000)[0]
    sorted_uncertain = np.sort(uncertain)
    thresh = sorted_uncertain[n]
    return np.nonzero(uncertain <= thresh)[0]
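# Hypothetical usage sketch: pp holds per-item class probabilities (column 0 is
# P(class 0)), so items whose probability is closest to 0.5 are the most
# uncertain; adata is not used by the function itself. Note that because
# thresh = sorted_uncertain[n] is the (n+1)-th smallest value, the returned
# index list can contain n+1 items.
pp = np.array([[0.9, 0.1], [0.55, 0.45], [0.2, 0.8], [0.48, 0.52]])
print(top_uncer_items(None, pp, n=2))   # -> [1 2 3], all items with uncertainty <= 0.3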
def items_for_expert(adata, pp, n, flag):
    """
    Take n items for the expert to consider.
    """
    combined_prob = 0.8 * np.asarray(adata.taken_crowd_prob) + 0.2 * pp[:, 1]
    uncertain = np.abs(combined_prob - 0.5)
    if flag is not None:
        # flagged items should not be considered, so push their uncertainty value up
        addition = np.asarray(flag, dtype=int) * 10
        uncertain = uncertain + addition
    if len(uncertain) <= n:
        return np.nonzero(uncertain <= 10000000)[0]
    sorted_uncertain = np.sort(uncertain)
    thresh = sorted_uncertain[n]
    return np.nonzero(uncertain <= thresh)[0]
def balance_dataset(dataset_0, labels_0, dataset_1, labels_1, ratio=1):
    """Balance dataset_0 with samples from dataset_1 up to the given ratio.

    Args:
        dataset_0: array of text samples
        labels_0: array of labels for dataset_0
        dataset_1: array of text samples
        labels_1: array of labels for dataset_1
        ratio: ratio of samples of class 1 to samples of class 0 (default 1.0)

    Returns:
        balanced array of text samples, corresponding array of labels
    """
    initial_train_size = dataset_0.shape[0]
    insult_inds = np.nonzero(labels_1)[0]
    num_insults_0 = len(np.nonzero(labels_0)[0])
    num_insults_1 = len(np.nonzero(labels_1)[0])
    insult_inds_to_add = insult_inds[np.random.randint(
        low=0, high=num_insults_1,
        size=(ratio * (initial_train_size - num_insults_0) - num_insults_0))]
    result = dataset_0.append(dataset_1.iloc[insult_inds_to_add])
    result_labels = labels_0.append(labels_1.iloc[insult_inds_to_add])
    return result, result_labels
def retrieve_features(best_estimator):
    """Retrieve selected features from any estimator.

    In case it has the 'get_support' method, use it.
    Else, if it has a 'coef_' attribute, assume it's a linear model and the
    features correspond to the indices of the coefficients != 0.
    """
    if hasattr(best_estimator, 'get_support'):
        return np.nonzero(best_estimator.get_support())[0]
    elif hasattr(best_estimator, 'coef_'):
        # print best_estimator.coef_
        if best_estimator.coef_.ndim > 1 and 1 not in best_estimator.coef_.shape:
            sel_feats = []
            for dim in range(best_estimator.coef_.ndim):
                sel_feats += np.nonzero(
                    best_estimator.coef_[dim])[0].ravel().tolist()
            return np.unique(sel_feats)
        return np.nonzero(best_estimator.coef_.flatten())[0]
    else:
        raise AttributeError('The best_estimator object has neither the '
                             '`coef_` attribute nor the `get_support` method')
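# Hedged usage sketch with scikit-learn (assumes sklearn is installed; the data
# here is made up): a Lasso model exposes coef_, while SelectKBest exposes
# get_support, so both branches of retrieve_features above are exercised.
import numpy as np
from sklearn.linear_model import Lasso
from sklearn.feature_selection import SelectKBest, f_regression

X = np.random.RandomState(0).randn(50, 10)
y = X[:, 0] * 3.0 + X[:, 4] * 2.0

lasso = Lasso(alpha=0.1).fit(X, y)
print(retrieve_features(lasso))   # indices of nonzero coefficients

skb = SelectKBest(f_regression, k=2).fit(X, y)
print(retrieve_features(skb))     # indices chosen via get_support()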
def _get_missing_m_trend(self, pad='DEFAULT_PAD', **kwargs):
    """Get a single second of missing data."""
    logging.debug('Fetching missing m-trend: {}'.format(self))
    missing_buf = self.fetch()  # explicitly fetch from NDS2
    trend = self.channel.split('.')[1].split(',')[0]
    # make m-trend value for this minute based on trend extension
    if len(np.nonzero(missing_buf == -1)[0]) != 0:
        # this won't actually check for anything at the moment because
        # gwpy.timeseries.TimeSeries.fetch() does not have a padding option yet
        logging.warn('Still missing data in {}'.format(self))
    elif trend == 'mean':
        buf_trend = missing_buf.mean()
    elif trend == 'min':
        buf_trend = missing_buf.min()
    elif trend == 'max':
        buf_trend = missing_buf.max()
    elif trend == 'rms':
        buf_trend = missing_buf.rms(60)[0]
    elif trend == 'n':
        buf_trend = missing_buf.sum()
    else:
        raise ValueError('Unrecognized trend type: {}'.format(trend))
    return buf_trend
def plot_timeseries(self, ax, **kwargs):
    """Scale up by 10^9 since plots are in ns, not seconds.
    Remove any indices considered bad in ``plot_properties``."""
    # define the variables for our plots
    y = np.delete(self.plot_vars.means - self.trend,
                  self.bad_indices.means) / SEC_PER['ns']
    t = np.delete(self.t_axis, self.bad_indices.means)
    yerr = np.delete(self.plot_vars.stds,
                     self.bad_indices.means) / SEC_PER['ns']
    mint = np.delete(self.t_axis, self.bad_indices.mins)
    miny = np.delete(self.plot_vars.mins - self.trend,
                     self.bad_indices.mins) / SEC_PER['ns']
    maxt = np.delete(self.t_axis, self.bad_indices.maxs)
    maxy = np.delete(self.plot_vars.maxs - self.trend,
                     self.bad_indices.maxs) / SEC_PER['ns']
    # plot everything, but only if the plotted data has nonzero length,
    # in order to avoid an annoying matplotlib bug when adding legends
    if len(t) != 0:
        ax.errorbar(t, y, marker="o", color="green", linestyle='none',
                    yerr=yerr, label="Means +/- Std. Dev.")
    if len(mint) != 0:
        ax.scatter(mint, miny, marker="^", color="blue", label="Minima")
    if len(maxt) != 0:
        ax.scatter(maxt, maxy, marker="v", color="red", label="Maxima")
def plot_timeseries(self, ax, **kwargs):
    """Scale up by 10^9 since plots are in ns, not seconds.
    Remove any indices considered bad in ``plot_properties``."""
    # define the variables for our plots
    t = np.delete(self.t_axis, self.bad_indices.means)
    y = np.delete(self.plot_vars.means - self.trend,
                  self.bad_indices.means) / SEC_PER['ns']
    yerr = np.delete(self.plot_vars.stds,
                     self.bad_indices.means) / SEC_PER['ns']
    mint = np.delete(self.t_axis, self.bad_indices.absmins)
    miny = np.delete(self.plot_vars.absmins - self.trend,
                     self.bad_indices.absmins) / SEC_PER['ns']
    maxt = np.delete(self.t_axis, self.bad_indices.absmaxs)
    maxy = np.delete(self.plot_vars.absmaxs - self.trend,
                     self.bad_indices.absmaxs) / SEC_PER['ns']
    # plot everything, but only if the plotted data has nonzero length,
    # in order to avoid an annoying matplotlib bug when adding legends
    if len(t) != 0:
        ax.errorbar(t, y, marker="o", color="green", linestyle='none',
                    yerr=yerr, label="Means +/- Std. Dev.")
    if len(mint) != 0:
        ax.scatter(mint, miny, marker="^", color="blue", label="Abs. Minima")
    if len(maxt) != 0:
        ax.scatter(maxt, maxy, marker="v", color="red", label="Abs. Maxima")
def __act_manual(self, state_meas):
    if len(self.__measure_for_manual):
        # measurements: [AMMO2, AMMO3, AMMO4, AMMO5, AMMO6, AMMO7,
        #                WEAPON2, WEAPON3, WEAPON4, WEAPON5, WEAPON6, WEAPON7, SELECTED_WEAPON]
        assert len(self.__measure_for_manual) == 13
        # actions: [SELECT_WEAPON2, SELECT_WEAPON3, SELECT_WEAPON4,
        #           SELECT_WEAPON5, SELECT_WEAPON6, SELECT_WEAPON7]
        curr_action = np.zeros((state_meas.shape[0], self.__num_manual_controls), dtype=int)
        for ns in range(state_meas.shape[0]):
            curr_ammo = state_meas[ns, self.__measure_for_manual[:6]]
            curr_weapons = state_meas[ns, self.__measure_for_manual[6:12]]
            if self.verbose:
                print('current ammo:', curr_ammo)
                print('current weapons:', curr_weapons)
            # a weapon is available if it is owned and has at least the minimum ammo it needs
            available_weapons = np.logical_and(curr_ammo >= np.array([1, 2, 1, 1, 1, 40]), curr_weapons)
            if any(available_weapons):
                # select the highest-numbered available weapon, unless it is already selected
                best_weapon = np.nonzero(available_weapons)[0][-1]
                if state_meas[ns, self.__measure_for_manual[12]] != best_weapon + 2:
                    curr_action[ns, best_weapon] = 1
        return curr_action
    else:
        return []
def test_bool_flat_indexing_invalid_nr_elements(self, level=rlevel):
    s = np.ones(10, dtype=float)
    x = np.array((15,), dtype=float)

    def ia(x, s, v):
        x[(s > 0)] = v

    # After removing deprecation, the following are ValueErrors.
    # This might seem odd as compared to the value error below. This
    # is due to the fact that the new code always uses "nonzero" logic
    # and the boolean special case is not taken.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', DeprecationWarning)
        warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
        self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))
        self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))

    # Old special case (different code path):
    self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
    self.assertRaises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
def test_nonzero_twodim(self):
    x = np.array([[0, 1, 0], [2, 0, 3]])
    assert_equal(np.count_nonzero(x), 3)
    assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))

    x = np.eye(3)
    assert_equal(np.count_nonzero(x), 3)
    assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))

    x = np.array([[(0, 1), (0, 0), (1, 11)],
                  [(1, 1), (1, 0), (0, 0)],
                  [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
    assert_equal(np.count_nonzero(x['a']), 4)
    assert_equal(np.count_nonzero(x['b']), 5)
    assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
    assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))

    assert_(not x['a'].T.flags.aligned)
    assert_equal(np.count_nonzero(x['a'].T), 4)
    assert_equal(np.count_nonzero(x['b'].T), 5)
    assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
    assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))
def test_half_ordering(self):
    """Make sure comparisons are working right"""
    # All non-NaN float16 values in reverse order
    a = self.nonan_f16[::-1].copy()
    # 32-bit float copy
    b = np.array(a, dtype=float32)

    # Should sort the same
    a.sort()
    b.sort()
    assert_equal(a, b)

    # Comparisons should work
    assert_((a[:-1] <= a[1:]).all())
    assert_(not (a[:-1] > a[1:]).any())
    assert_((a[1:] >= a[:-1]).all())
    assert_(not (a[1:] < a[:-1]).any())

    # All != except for +/-0
    assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size - 2)
    assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size - 2)
def denseToSparseAbvCutoff(self, denseMatrix, cutoff):
    """
    Remove entries in denseMatrix that are below cutoff and convert the
    remaining data into a sparse matrix.

    Parameters
    ----------------------
    denseMatrix: dense numpy matrix
    cutoff: int or float

    Returns
    ----------------------
    Scipy csr_matrix
    """
    maskArray = denseMatrix >= cutoff
    sparseMatrix = csr_matrix((np.asarray(denseMatrix[maskArray]).reshape(-1),
                               np.nonzero(maskArray)),
                              shape=denseMatrix.shape)
    return sparseMatrix
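# The same construction can be exercised outside the class; a small sketch
# (assumes numpy and scipy are installed): keep only entries >= 5 and store
# them with the (row, col) coordinates returned by np.nonzero.
import numpy as np
from scipy.sparse import csr_matrix

dense = np.array([[1, 6, 0],
                  [7, 2, 9]])
mask = dense >= 5
sparse = csr_matrix((dense[mask].reshape(-1), np.nonzero(mask)), shape=dense.shape)
print(sparse.toarray())   # [[0 6 0]
                          #  [7 0 9]]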
def get_starting_location(self):
    rows_with_points = self.hits.max(axis=1)
    max_row = np.nonzero(rows_with_points)[0].max()
    # TODO: if max_row == 215, we should exit
    row = self.hits[max_row, :]
    idx = np.nonzero(row)[0]
    col = np.random.choice(idx)
    col = np.random.randint(col - 2, col + 2)
    loc = [min(STATE.layout.rows - 1, max_row + 10), col]
    assert self.max_row <= max_row
    self.max_row = max_row
    return loc

#def get_starting_location(self):
#    rows_with_points = self.hits.max(axis=1)
#    max_row = np.nonzero(rows_with_points)[0].max()
#    loc = [min(STATE.layout.rows - 1, max_row + 10), np.random.randint(STATE.layout.columns)]
#    #assert self.max_row <= max_row
#    #self.max_row = max_row
#    #return loc
def bestMap(L1, L2):
    if L1.__len__() != L2.__len__():
        print('size(L1) must == size(L2)')
    Label1 = np.unique(L1)
    nClass1 = Label1.__len__()
    Label2 = np.unique(L2)
    nClass2 = Label2.__len__()
    nClass = max(nClass1, nClass2)
    # co-occurrence matrix between true labels (L1) and predicted labels (L2)
    G = np.zeros((nClass, nClass))
    for i in range(nClass1):
        for j in range(nClass2):
            G[i][j] = np.nonzero((L1 == Label1[i]) * (L2 == Label2[j]))[0].__len__()
    # Hungarian assignment: map each predicted label to its best-matching true label
    c = linear_assignment_.linear_assignment(-G.T)[:, 1]
    newL2 = np.zeros(L2.__len__())
    for i in range(nClass2):
        for j in np.nonzero(L2 == Label2[i])[0]:
            if len(Label1) > c[i]:
                newL2[j] = Label1[c[i]]
    return accuracy_score(L1, newL2)
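# Hypothetical usage sketch (assumes the snippet's own imports are available:
# numpy, sklearn's accuracy_score and the deprecated linear_assignment_ helper).
# bestMap permutes the predicted cluster labels L2 so that they best match the
# ground-truth labels L1 and then reports accuracy; with ground truth [0,0,1,1]
# and clusters [1,1,0,0] the optimal relabelling gives perfect agreement.
L1 = np.array([0, 0, 1, 1])
L2 = np.array([1, 1, 0, 0])
print(bestMap(L1, L2))   # -> 1.0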
def _vlines(lines, ctrs=None, lengths=None, vecs=None, angle_lo=20, angle_hi=160,
            ransac_options=RANSAC_OPTIONS):
    ctrs = ctrs if ctrs is not None else lines.mean(1)
    vecs = vecs if vecs is not None else lines[:, 1, :] - lines[:, 0, :]
    lengths = lengths if lengths is not None else np.hypot(vecs[:, 0], vecs[:, 1])
    angles = np.degrees(np.arccos(vecs[:, 0] / lengths))
    points = np.column_stack([ctrs[:, 0], angles])
    point_indices, = np.nonzero((angles > angle_lo) & (angles < angle_hi))
    points = points[point_indices]
    if len(points) > 2:
        model_ransac = linear_model.RANSACRegressor(**ransac_options)
        model_ransac.fit(points[:, 0].reshape(-1, 1), points[:, 1].reshape(-1, 1))
        inlier_mask = model_ransac.inlier_mask_
        valid_lines = lines[point_indices[inlier_mask], :, :]
    else:
        valid_lines = []
    return valid_lines
def _hlines(lines, ctrs=None, lengths=None, vecs=None, angle_lo=20, angle_hi=160,
            ransac_options=RANSAC_OPTIONS):
    ctrs = ctrs if ctrs is not None else lines.mean(1)
    vecs = vecs if vecs is not None else lines[:, 1, :] - lines[:, 0, :]
    lengths = lengths if lengths is not None else np.hypot(vecs[:, 0], vecs[:, 1])
    angles = np.degrees(np.arccos(vecs[:, 1] / lengths))
    points = np.column_stack([ctrs[:, 1], angles])
    point_indices, = np.nonzero((angles > angle_lo) & (angles < angle_hi))
    points = points[point_indices]
    if len(points) > 2:
        model_ransac = linear_model.RANSACRegressor(**ransac_options)
        model_ransac.fit(points[:, 0].reshape(-1, 1), points[:, 1].reshape(-1, 1))
        inlier_mask = model_ransac.inlier_mask_
        valid_lines = lines[point_indices[inlier_mask], :, :]
    else:
        valid_lines = []
    return valid_lines
def _tipping_point_update(self, tmp, consump, peak_temp_interval=30.0):
    """Determine whether a tipping point has occurred; if so, reduce consumption for
    all periods after this date.
    """
    draws = tmp.shape[0]
    disaster = self._disaster_simulation()
    disaster_cons = self._disaster_cons_simulation()
    period_lengths = self.tree.decision_times[1:] - self.tree.decision_times[:-1]
    tmp_scale = np.maximum(self.peak_temp, tmp)
    ave_prob_of_survival = 1.0 - np.square(tmp / tmp_scale)
    prob_of_survival = ave_prob_of_survival ** (period_lengths / peak_temp_interval)
    # this part may be done better; this takes a long time to loop over
    res = prob_of_survival < disaster
    rows, cols = np.nonzero(res)
    row, count = np.unique(rows, return_counts=True)
    # for each row (simulation draw), the column of its first tipping point
    first_occurance = zip(row, cols[np.insert(count.cumsum()[:-1], 0, 0)])
    for pos in first_occurance:
        consump[pos[0], pos[1]:] *= np.exp(-disaster_cons[pos[0]])
    return consump
def contributions(in_length, out_length, scale, kernel, k_width):
    if scale < 1:
        h = lambda x: scale * kernel(scale * x)
        kernel_width = 1.0 * k_width / scale
    else:
        h = kernel
        kernel_width = k_width
    x = np.arange(1, out_length + 1).astype(np.float64)
    u = x / scale + 0.5 * (1 - 1 / scale)
    left = np.floor(u - kernel_width / 2)
    P = int(ceil(kernel_width)) + 2
    ind = np.expand_dims(left, axis=1) + np.arange(P) - 1  # -1 because indexing from 0
    indices = ind.astype(np.int32)
    weights = h(np.expand_dims(u, axis=1) - indices - 1)  # -1 because indexing from 0
    weights = np.divide(weights, np.expand_dims(np.sum(weights, axis=1), axis=1))
    # mirror out-of-range indices back into the valid range
    aux = np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))).astype(np.int32)
    indices = aux[np.mod(indices, aux.size)]
    # drop weight columns that are zero everywhere
    ind2store = np.nonzero(np.any(weights, axis=0))
    weights = weights[:, ind2store]
    indices = indices[:, ind2store]
    return weights, indices
def decode(self, sentence, src=True):
    '''
    Given an encoded (one-hot) sentence matrix,
    return the represented sentence string (tokenized).
    '''
    words = []
    for word in sentence:
        idxs = np.nonzero(word)[0]
        if len(idxs) > 1:
            raise Exception("Multiple hot bits on word vec")
        elif len(idxs) == 0:
            continue
        if src:
            words.append(self.words_src[0][idxs[0]])
        else:
            words.append(self.words_dst[0][idxs[0]])
    return ' '.join(words)
def move_ellipses(self, coll, cyl=False):
    xz = self.x[:, ::2] if not cyl else np.column_stack(
        [np.sqrt(np.sum(self.x[:, :2]**2, 1)), self.x[:, 2]])
    coll.set_offsets(xz)
    #inside = self.inside_wall()
    #margin = np.nonzero(self.alive)[0][self.inside_wall(2.)]
    colors = np.full((self.N,), "b", dtype=str)
    #colors[margin] = "r"
    colors[self.success] = "k"
    colors[self.fail] = "k"
    colors[self.alive & ~self.can_bind] = "r"
    #colors = [("r" if inside[i] else "g") if margin[i] else "b" for i in range(self.N)]
    coll.set_facecolors(colors)
    #y = self.x[:, 1]
    #d = 50.
    #sizes = self.params.rMolecule*(1. + y/d)
    #coll.set(widths=sizes, heights=sizes)
def set_free(self, uid=None):
    '''Reset the status of occupied points to zero.'''
    self.read()
    data = self.data
    # check for a valid id
    if uid in [0, 1]:
        print("Error: %s is not a valid ID, returning." % uid)
        return None
    if uid is not None:
        # get indices of vertices by id
        vertices = np.nonzero(data["status"] == uid)[0]
    else:
        vertices = np.nonzero(np.logical_and(data["status"] != 0, data["status"] != 1))[0]
    # reset vertices to 0
    data["status"][vertices] = 0
    self.write()
def _split_data(self):
    counts = np.zeros(self._num_classes)
    labeled_indices = list()
    num_per_class = int(self._num_labels / self._num_classes)
    for i, l in enumerate(self._labels):
        index = np.nonzero(l)[0][0]   # class index from the one-hot label
        if counts[index] < num_per_class:
            counts[index] += 1
            labeled_indices.append(i)
        elif counts.sum() == self._num_labels:
            break
        else:
            continue
    all_indices = set(range(self._num_train_images))
    unlabeled_indices = list(all_indices - set(labeled_indices))
    images_labeled = self._images[labeled_indices]
    images_unlabeled = self._images[unlabeled_indices]
    labels = self._labels[labeled_indices]
    return images_labeled, images_unlabeled, labels
def jaccard(v1, v2):
    '''
    Due to the idiosyncrasies of my code the Jaccard index is a bit
    altered. The theory is the same but the implementation might be a bit
    weird. I do not have two vectors containing the words of both documents;
    instead I have two equally sized vectors. The columns of the vectors
    are the same and represent the words in the whole corpus. If an entry
    is 1 then the word is present in the document; if it is 0 then it is not present.
    So first we find the indices of the words in each document, and then the
    Jaccard distance is calculated based on those indices.
    '''
    indices1 = numpy.nonzero(v1)[0].tolist()
    indices2 = numpy.nonzero(v2)[0].tolist()
    inter = len(set(indices1) & set(indices2))
    un = len(set(indices1) | set(indices2))
    dist = 1 - inter / float(un)
    return dist
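# Worked example (assumes numpy is imported as in the snippet above): over a
# 5-word vocabulary the two documents share 1 of the 3 distinct words that
# appear in either, so the Jaccard distance is 1 - 1/3.
import numpy

v1 = numpy.array([1, 0, 1, 0, 0])   # words 0 and 2 present
v2 = numpy.array([0, 0, 1, 1, 0])   # words 2 and 3 present
print(jaccard(v1, v2))              # -> 0.666...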
def get_signature_genes(X, n, lda=10):
    W = np.zeros((X.shape[0], X.shape[0]))
    # coarse search from the bottom
    while (abs(W).sum(1) > 0).sum() < n:
        lda /= 10.
        model = MultiTaskLasso(alpha=lda, max_iter=100, tol=.001, selection='random', warm_start=True)
        model.fit(X.T, X.T)
        W = model.coef_.T
        #print len(np.nonzero(abs(W).sum(1))[0]), model.score(X.T, X.T)
    # fine search from the top
    while (abs(W).sum(1) > 0).sum() > n * 1.2:
        lda *= 2.
        model.set_params(alpha=lda)
        model.fit(X.T, X.T)
        W = model.coef_.T
        #print len(np.nonzero(abs(W).sum(1))[0]), model.score(X.T, X.T)
    # finer search
    while (abs(W).sum(1) > 0).sum() > n:
        lda *= 1.1
        model.set_params(alpha=lda)
        model.fit(X.T, X.T)
        W = model.coef_.T
        #print len(np.nonzero(abs(W).sum(1))[0]), model.score(X.T, X.T)
    return np.nonzero(abs(W).sum(1))[0]
def rargmax(vector):
    # random argmax: break ties between maximal entries uniformly at random
    m = np.max(vector)
    indices = np.nonzero(vector == m)[0]
    return pr.choice(indices)

# Reward Update Q
# Algorithm:
#   For each s, a initialize the table entry Q(s, a) <- 0
#   Observe the current state s
#   Do forever:
#       select an action a and execute it
#       receive the immediate reward r
#       observe the new state s'
#       update the table entry for Q(s, a)
#       update the state
# Non-deterministic environment
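# The comment block above sketches tabular Q-learning. Below is a minimal,
# hedged implementation of that loop as a function; `env` is assumed to be a
# classic gym-style discrete environment (reset() returns the state, step()
# returns (state, reward, done, info), observation_space.n / action_space.n),
# which is not part of the original snippet.
import numpy as np

def q_learn(env, num_episodes=2000, gamma=0.99):
    Q = np.zeros([env.observation_space.n, env.action_space.n])
    for _ in range(num_episodes):
        state = env.reset()
        done = False
        while not done:
            action = rargmax(Q[state, :])                  # greedy action, random tie-break
            new_state, reward, done, _ = env.step(action)
            # deterministic-environment update: Q(s, a) <- r + gamma * max_a' Q(s', a');
            # a non-deterministic environment would blend this with a learning rate instead
            Q[state, action] = reward + gamma * np.max(Q[new_state, :])
            state = new_state
    return Q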