import sys

import numpy as np
from numpy.testing import assert_almost_equal, assert_raises

def div0(a, b):
    """ Ignore division by zero: div0([-1, 0, 1], 0) -> [0, 0, 0]. """
    with np.errstate(divide='ignore', invalid='ignore'):
        c = np.true_divide(a, b)
        c[~np.isfinite(c)] = 0  # replaces -inf, inf, and NaN
    return c
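A quick check of the behaviour promised in the docstring (this usage example is mine, not part of the original snippet):

print(div0(np.array([-1., 0., 1.]), 0.))             # -> [0. 0. 0.]
print(div0(np.array([4., 9.]), np.array([2., 3.])))  # -> [2. 3.]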
Python true_divide() example source code
def fullness(self):
    potential_leaves = np.prod(np.ceil(np.true_divide(self.bounds[1] - self.bounds[0], self.leaf_shape)))
    return self.root_node.count_leaves() / float(potential_leaves)
def test_true_divide(self):
    # true_divide has a non-uniform signature, see gh-3484.
    # This also tests type_tuple_type_resolver.
    a = np.full(5, 12.5)
    b = np.full(5, 10.0)
    tgt = np.full(5, 1.25)
    assert_almost_equal(np.true_divide(a, b, dtype=np.float64), tgt)
    assert_almost_equal(np.true_divide(a, b, dtype=np.float32), tgt)
    assert_raises(TypeError, np.true_divide, a, b, dtype=int)
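The dtype keyword exercised by this test selects the precision of the division loop; a standalone illustration (the values are arbitrary):

a = np.full(5, 12.5)
b = np.full(5, 10.0)
print(np.true_divide(a, b, dtype=np.float32).dtype)  # float32
try:
    np.true_divide(a, b, dtype=int)  # true division cannot produce an integer result
except TypeError as e:
    print('rejected as expected:', e)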
def _hist_bin_doane(x):
    """
    Doane's histogram bin estimator.

    Improved version of Sturges' formula which works better for
    non-normal data. See
    http://stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    if x.size > 2:
        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
        sigma = np.std(x)
        if sigma > 0.0:
            # These three operations add up to
            #   g1 = np.mean(((x - np.mean(x)) / sigma)**3)
            # but use only one temp array instead of three
            temp = x - np.mean(x)
            np.true_divide(temp, sigma, temp)
            np.power(temp, 3, temp)
            g1 = np.mean(temp)
            return np.ptp(x) / (1.0 + np.log2(x.size) +
                                np.log2(1.0 + np.absolute(g1) / sg1))
    return 0.0
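A small usage sketch for the estimator above (the skewed sample is invented for illustration):

rng = np.random.default_rng(0)
x = rng.exponential(size=1000)  # non-normal data, where Doane improves on Sturges
h = _hist_bin_doane(x)
print('bin width: %f, bins: %d' % (h, int(np.ceil(np.ptp(x) / h))))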
def __truediv__(self, other):
    """
    Divide other into self, and return a new masked array.
    """
    if self._delegate_binop(other):
        return NotImplemented
    return true_divide(self, other)
def __rtruediv__(self, other):
    """
    Divide self into other, and return a new masked array.
    """
    return true_divide(other, self)
def __itruediv__(self, other):
    """ See __div__. """
    oth = sanitize_units_mul(self, other)
    np.true_divide(self, oth, out=self)
    return self
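All three operators delegate to true_divide; the in-place variant works because true_divide can write into its out argument. The same pattern on a plain array (a minimal sketch, independent of the masked-array and unit-handling machinery above):

a = np.array([2.0, 4.0, 8.0])
np.true_divide(a, 2.0, out=a)  # divides in place, allocating no temporary
print(a)  # [1. 2. 4.]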
def one_shot_classification(test_data, num_shots, num_classes, compute_similarities,
                            k_neighbours=1, num_episodes=10000):
    data_shape = np.prod(test_data[0][0].shape)
    episode_length = num_shots * num_classes + 1
    batch = np.zeros([num_classes, episode_length, data_shape], dtype=np.float32)
    accuracy = 0.
    votes = np.zeros(num_classes)
    for episode in range(num_episodes):
        classes = np.random.choice(test_data.shape[0], num_classes, False)
        classes_idx = np.repeat(classes[:, np.newaxis], num_shots, 1).flatten()
        idx = []
        for k in range(num_classes):
            idx.append(np.random.choice(test_data.shape[1], num_shots + 1, False))
        idx = np.vstack(idx)
        y = np.repeat(np.arange(num_classes)[:, np.newaxis], num_shots, 1).flatten()
        # print(batch[:, :-1, :].shape, idx[:, :-1].flatten().shape)
        batch[:, :-1, :] = test_data[classes_idx, idx[:, :-1].flatten(), :]
        batch[:, -1, :] = test_data[classes, idx[:, -1].flatten(), :]
        # np.true_divide(batch, 255., out=batch, casting='unsafe')
        # sim[i, j] -- similarity between batch[i, -1] and batch[i, j]
        sim = compute_similarities(batch)
        for k in range(num_classes):
            votes[:] = 0.
            nearest = sim[k].argsort()[-k_neighbours:]
            for j in nearest:
                votes[y[j]] += sim[k, j]
            y_hat = votes.argmax()
            if y_hat == k:
                accuracy += 1
        status = 'episode: %d, accuracy: %f' % (episode, accuracy / num_classes / (episode + 1))
        sys.stdout.write('\r' + status)
        sys.stdout.flush()
    return accuracy / num_episodes / num_classes
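A runnable sketch of the evaluation loop with synthetic data and a cosine-similarity stand-in for compute_similarities (both the data and the similarity function are assumptions for illustration, not the original model):

def cosine_similarities(batch):
    # batch: (num_classes, episode_length, dim); compare each query batch[i, -1]
    # against every support item batch[i, j] for j < episode_length - 1
    b = batch / (np.linalg.norm(batch, axis=-1, keepdims=True) + 1e-8)
    return np.einsum('ijk,ik->ij', b[:, :-1, :], b[:, -1, :])

test_data = np.random.rand(10, 20, 16).astype(np.float32)  # 10 classes, 20 examples each
acc = one_shot_classification(test_data, num_shots=1, num_classes=5,
                              compute_similarities=cosine_similarities, num_episodes=200)
print('\nfinal accuracy: %f' % acc)  # roughly 0.2 for random data with 5 classes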
def load_data(path):
    raw_data = np.load(path)
    data = []
    min_size = min([raw_data[f].shape[0] for f in raw_data.files])
    max_value = max([raw_data[f].max() for f in raw_data.files])
    for cl in raw_data.files:
        class_data = raw_data[cl][:min_size]
        class_data = class_data.reshape(min_size, np.prod(class_data.shape[1:]))
        np.true_divide(class_data, max_value, out=class_data, casting='unsafe')
        # reverse_data = class_data.copy()
        # reverse_data[class_data > 0.] = 0.
        # reverse_data[class_data <= 0.95] = 1.
        # data.append(reverse_data[None, :, :])
        data.append(class_data[None, :, :])
    return np.concatenate(data, axis=0)
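A minimal round-trip for load_data (the file name and contents are invented; float arrays are used so the casting='unsafe' division above does not truncate, as it would for integer-typed image data):

np.savez('classes.npz',
         a=np.random.rand(12, 4, 4).astype(np.float32),
         b=np.random.rand(15, 4, 4).astype(np.float32))
data = load_data('classes.npz')
print(data.shape)  # (2, 12, 16): classes x min_size x flattened features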
def _prepare_network_input(self, states):
    """ Normalizes the states from one minibatch.

    Args:
        states (numpy.ndarray): Mini-batch of states, shape=(batch_size, sequence_length, frame_width, frame_height)

    Returns:
        normalized_states (numpy.ndarray): State values divided by the maximum state value, shape=(batch_size, sequence_length, frame_width, frame_height)
    """
    _logger.debug("Normalizing input")
    return np.true_divide(states, self.grayscales)
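Outside the class, the normalization is a plain broadcast division; self.grayscales is presumably the maximum pixel value (255 here is my assumption):

states = np.random.randint(0, 256, size=(32, 4, 84, 84), dtype=np.uint8)
normalized = np.true_divide(states, 255.)  # promotes to float64 in [0, 1]
print(normalized.dtype, float(normalized.max()))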
def prod_all_but_j(vector):
    """ Returns a vector where the jth term is the product of all the entries except the jth one. """
    zeros = np.where(vector == 0)[0]
    if len(zeros) > 1:
        return np.zeros(len(vector))
    if len(zeros) == 1:
        result = np.zeros(len(vector))
        j = zeros[0]
        result[j] = np.prod(vector[np.arange(len(vector)) != j])
        return result
    joint = np.prod(vector)
    return np.true_divide(joint, vector)
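A quick check of the three branches (the example vectors are mine):

print(prod_all_but_j(np.array([2., 3., 4.])))  # [12.  8.  6.]
print(prod_all_but_j(np.array([2., 0., 4.])))  # [0.  8.  0.]
print(prod_all_but_j(np.array([0., 0., 4.])))  # [0.  0.  0.]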
def R(self, pa, eta):
    """ Returns the ratio of the probability of the given assignment under each action
    to the probability under the eta-weighted sum of actions. """
    Q = (eta * pa).sum()
    ratio = np.true_divide(pa, Q)
    ratio[np.isnan(ratio)] = 0  # we get nan for 0/0, which should just be 0 in this case
    return ratio
def V(self, eta):
    """ Returns a vector of length K with the expected value of R (over x sampled from p(x|a)) for each action a. """
    # with np.errstate(divide='ignore'):
    u = np.true_divide(1.0, np.dot(self.A, eta))
    u = np.nan_to_num(u)  # converts infinities to very large numbers such that multiplying by 0 gives 0
    v = np.dot(self.A2T, u)
    return v
def P(self, x):
    """ Calculate the vector of P_a for each action a. """
    indx = np.arange(len(x))
    ps = self.pX[x, indx]  # probability P(X_i = x_i) for each i given do()
    joint = ps.prod()  # probability of x given do()
    pi = np.true_divide(joint, ps)  # will be nan for elements where ps is 0
    for j in np.where(np.isnan(pi))[0]:
        pi[j] = np.prod(ps[indx != j])
    pij = np.vstack((pi, pi))
    pij[1 - x, indx] = 0  # now this is the probability of x given do(x_i = j)
    pij = pij.reshape((len(x) * 2,))  # flatten: the first N entries are for x_i = 0, the next N for x_i = 1
    result = np.hstack((pij, joint))
    return result
def estimate_infrequent(self, h):
    qij_hat = np.true_divide(self.trials, h)
    s_indx = np.argsort(qij_hat)  # indexes of the elements of qij_hat in sorted order
    m_hat = Parallel.calculate_m(qij_hat[s_indx])
    infrequent = s_indx[0:m_hat]
    return infrequent
def run(self, T, model):
    self.trials = np.full(model.K, 2, dtype=int)
    self.success = np.full(model.K, 1, dtype=int)
    for t in range(T):
        fails = self.trials - self.success
        theta = np.random.beta(self.success, fails)
        arm = argmax_rand(theta)
        self.trials[arm] += 1
        self.success[arm] += model.sample_multiple(arm, 1)
    mu = np.true_divide(self.success, self.trials)
    self.best_action = argmax_rand(mu)
    return max(model.expected_rewards) - model.expected_rewards[self.best_action]
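Initializing trials=2, success=1 encodes a uniform Beta(1, 1) prior on each arm. The posterior-sampling step at the heart of the loop, shown standalone (the counts are invented):

success = np.array([1, 5, 3])
trials = np.array([2, 10, 4])
theta = np.random.beta(success, trials - success)  # one draw from each arm's Beta posterior
print(theta, theta.argmax())  # play the arm with the largest sampled mean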
def upper_bound(self, t):
    mu = np.true_divide(self.success, self.trials)
    interval = np.sqrt(self.alpha * np.log(t) / (2.0 * self.trials))
    return mu + interval
def allocate(self, T, K):
    logK = .5 + np.true_divide(1, range(2, K + 1)).sum()
    n = np.zeros((K), dtype=int)
    n[1:] = np.ceil((1.0 / logK) * np.true_divide((T - K), range(K, 1, -1)))
    allocations = np.diff(n)
    return allocations
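Since self is unused, the schedule can be evaluated directly; the formula matches the successive-rejects allocation for best-arm identification (that identification is my reading, not stated in the source):

print(allocate(None, T=100, K=4))  # [16  5 10]: pulls per surviving arm in each of the K-1 phases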
def run(self, T, model):
    self.trials = np.zeros(model.K)
    self.success = np.zeros(model.K)
    for t in range(T):
        x, y = model.sample(model.K - 1)
        xij = np.hstack((1 - x, x, 1))  # first N actions represent x_i = 0, the next N x_i = 1, the last do()
        self.trials += xij
        self.success += y * xij
    self.u = np.true_divide(self.success, self.trials)
    self.best_action = argmax_rand(self.u)
    return max(model.expected_rewards) - model.expected_rewards[self.best_action]
def run(self, T, model):
    trials_per_action = T // model.K  # integer number of pulls per action
    success = model.sample_multiple(range(model.K), trials_per_action)
    self.u = np.true_divide(success, trials_per_action)
    self.best_action = argmax_rand(self.u)
    return max(model.expected_rewards) - model.expected_rewards[self.best_action]