def compHistDistance(h1, h2):
    """Distance measure between two histograms.

    Both histograms are normalized to sum to 1 (all-zero histograms are
    left untouched), then a smoothstep-weighted per-bin penalty is summed
    and clamped at zero from below.

    Parameters
    ----------
    h1, h2 : array-like
        Histograms of equal length.

    Returns
    -------
    float
        Non-negative distance value.
    """
    def _normalize(hist):
        total = np.sum(hist)
        # leave an all-zero histogram as-is to avoid division by zero
        return hist if total == 0 else hist / total

    def _smoothstep(x, x_min=0., x_max=1., k=2.):
        # linearly map [x_min, x_max] -> [0, 1], clip, then apply the
        # regularized incomplete beta function as a smooth ramp
        slope = 1. / (x_max - x_min)
        t = np.clip(slope * x - slope * x_min, 0., 1.)
        return betainc(k, k, t)

    def _fn(X, Y, k):
        # weight of Y relative to a Y-dependent soft threshold
        w_y = _smoothstep(Y, 0, (1 - Y) * X + Y + .1)
        # smoothed magnitude of X, saturating around 1/k
        w_x = np.sqrt(2 * X) * _smoothstep(X, 0., 1. / k, 2)
        return 4. * (1. - w_y) * w_x + 2. * w_y * (1. - 2. * w_x - 0.5)

    h1 = _normalize(h1)
    h2 = _normalize(h2)
    return max(0, np.sum(_fn(h2, h1, len(h1))))
Example source code for Python's `max()` (collection-page header).
def effective_sample_size(x, mu, var, logger):
    """
    Calculate the effective sample size of sequence generated by MCMC.

    :param x: samples with shape (batch, time, dimension)
    :param mu: mean of the variable
    :param var: variance of the variable
    :param logger: logger used to report the min/max ESS
    :return: effective sample size of the sequence, one value per dimension

    Make sure that `mu` and `var` are correct!
    """
    # batch size, time, dimension
    b, t, d = x.shape
    # per-dimension denominator: 1 + 2 * sum of significant autocorrelations
    ess_ = np.ones([d])
    for s in range(1, t):
        # `auto_correlation_time` is defined elsewhere in this module;
        # presumably returns the per-dimension autocorrelation at lag `s`
        p = auto_correlation_time(x, s, mu, var)
        if np.sum(p > 0.05) == 0:
            # no dimension has significant autocorrelation left -> stop early
            break
        else:
            for j in range(0, d):
                if p[j] > 0.05:
                    # triangular (1 - s/t) weighting of the lag-s term
                    ess_[j] += 2.0 * p[j] * (1.0 - float(s) / t)
    # the largest ESS corresponds to the smallest accumulated denominator
    logger.info('ESS: max [%f] min [%f] / [%d]' % (t / np.min(ess_), t / np.max(ess_), t))
    return t / ess_
def score_fun_first_term(vals_hist, a_mid):
    """First term of the score function evaluated at ``a_mid``.

    Computes ``sum_i vals_hist[i] * sum_{j=0}^{i-1} j / (1 + a_mid * j)``.

    :param vals_hist: mapping (dict/Counter) from integer value to count
    :param a_mid: current estimate of the exponent parameter
    :return: accumulated first term as a float (0.0 for an empty histogram)
    """
    total = 0.0  # renamed from `sum` to avoid shadowing the builtin
    # Bug fix: np.max(vals_hist.keys()) fails on Python 3 because a dict
    # view is not array-like; the builtin max works directly on the view.
    lim = int(max(vals_hist.keys())) if vals_hist else -1
    # The inner sum over j is a prefix sum: extend it incrementally
    # instead of recomputing it for every i (O(n) instead of O(n^2)).
    inner_sum = 0.0
    for i in range(0, lim + 1):
        # .get avoids a KeyError for integer values absent from the histogram
        count = vals_hist.get(i, 0)
        if count > 0:
            total += count * inner_sum
        inner_sum += i / (1.0 + a_mid * i)
    return total
##############################
## in-line functions
##############################
def estimate_clipping_rect(projector, size):
    """
    Project the four corners of an image of ``size`` and bound them.

    Return:
        rect -- NSRect style 2d-tuple: ((min_x, min_y), (width, height)).
        flipped (bool) -- Whether y-axis is flipped.
    """
    width, height = size
    # corner order: lt -> rt -> lb -> rb (the last corner is `size` itself)
    corners = [(0, 0), (width, 0), (0, height), (width, height)]
    projected = [tuple(map(int, projector.project_point(cx, cy)))
                 for cx, cy in corners]
    xs = [px for px, _ in projected]
    ys = [py for _, py in projected]
    origin = (min(xs), min(ys))
    extent = (max(xs) - min(xs), max(ys) - min(ys))
    # y-axis counts as flipped when the projected rb corner lands above 0
    flipped = ys[3] < 0
    return (origin, extent), flipped
def to_dim_times_two(self, bounds):
    """return boundaries in format ``[[lb0, ub0], [lb1, ub1], ...]``,
    as used by ``BoxConstraints...`` class.

    ``bounds`` must be ``[lower, upper]`` where each part is either a
    scalar or a sequence; scalars are wrapped (in place, visible to the
    caller) into one-element lists. A missing trailing entry on either
    side becomes ``None``. Empty/None ``bounds`` yields ``[[None, None]]``.
    """
    if not bounds:
        b = [[None, None]]
    else:
        l = [None, None]  # figure out lengths
        for i in [0, 1]:
            try:
                l[i] = len(bounds[i])
            except TypeError:
                # scalar: wrap it into a list so it can be indexed below
                bounds[i] = [bounds[i]]
                l[i] = 1
        b = []  # bounds in different format
        try:
            for i in range(max(l)):
                # pad the shorter side with None
                b.append([bounds[0][i] if i < l[0] else None,
                          bounds[1][i] if i < l[1] else None])
        except (TypeError, IndexError):
            # bug fix: message previously said "scalar_of_vector"
            print("boundaries must be provided in the form " +
                  "[scalar_or_vector, scalar_or_vector]")
            raise
    return b
def alleviate_conditioning_in_coordinates(self, condition=1e8):
    """pass scaling from `C` to `sigma_vec`.

    As a result, `C` is a correlation matrix, i.e., all diagonal
    entries of `C` are `1`.

    :param condition: rescaling happens only when the axis-parallel
        condition number ``max(self.dC) / min(self.dC)`` exceeds this.
    """
    # self.dC: diagonal of C (per the docstring's notion of C)
    if max(self.dC) / min(self.dC) > condition:
        # allows for much larger condition numbers, if axis-parallel
        if hasattr(self, 'sm') and isinstance(self.sm, sampler.GaussFullSampler):
            old_coordinate_condition = max(self.dC) / min(self.dC)
            old_condition = self.sm.condition_number
            # move the per-coordinate scales out of C ...
            factors = self.sm.to_correlation_matrix()
            # ... into sigma_vec; the evolution path pc is rescaled to match
            self.sigma_vec *= factors
            self.pc /= factors
            self._updateBDfromSM(self.sm)
            utils.print_message('\ncondition in coordinate system exceeded'
                                ' %.1e, rescaled to %.1e, '
                                '\ncondition changed from %.1e to %.1e'
                                % (old_coordinate_condition, max(self.dC) / min(self.dC),
                                   old_condition, self.sm.condition_number),
                                iteration=self.countiter)
def plot_axes_scaling(self, iabscissa=1):
    """Semi-log plot of the principle axes lengths (columns ``5:`` of
    ``self.D``) against abscissa column ``iabscissa``.

    :param iabscissa: column of ``self.D`` used as x-axis
    :return: ``self``, for call chaining
    """
    from matplotlib import pyplot
    if not hasattr(self, 'D'):
        # data not loaded yet
        self.load()
    dat = self
    if np.max(dat.D[:, 5:]) == np.min(dat.D[:, 5:]):
        # degenerate case: all axis lengths identical -> nothing to plot
        pyplot.text(0, dat.D[-1, 5],
                    'all axes scaling values equal to %s'
                    % str(dat.D[-1, 5]),
                    verticalalignment='center')
        return self  # nothing interesting to plot
    self._enter_plotting()
    pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
    # pyplot.hold(True)
    pyplot.grid(True)
    # re-apply the current axis limits (`array` presumably comes from a
    # module-level numpy import -- not visible in this chunk)
    ax = array(pyplot.axis())
    # ax[1] = max(minxend, ax[1])
    pyplot.axis(ax)
    pyplot.title('Principle Axes Lengths')
    # pyplot.xticks(xticklocs)
    self._xlabel(iabscissa)
    self._finalize_plotting()
    return self
def initwithsize(self, curshape, dim):
    """(Re-)initialize dimension- and shape-dependent state.

    :param curshape: shape of the current input batch
    :param dim: problem dimension
    """
    # DIM-dependent initialization
    if self.dim != dim:
        scale = max(1, dim ** .5 / 8.)
        # `compute_rotation` is defined elsewhere; `rseed` fixes the rotation
        self.linearTF = scale * compute_rotation(self.rseed, dim)
        # if self.zerox:
        #     self.xopt = zeros(dim)  # does not work here
        # else:
        # TODO: clean this line
        self.xopt = np.hstack(dot(self.linearTF, 0.5 * np.ones((dim, 1)) / scale ** 2))
    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        # tile/crop the optimum to match the batch shape
        self.arrxopt = resize(self.xopt, curshape)
def update_measure(self):
    """updated noise level measure using two fitness lists ``self.fit`` and
    ``self.fitre``, return ``self.noiseS, all_individual_measures``.

    Assumes that ``self.idx`` contains the indices where the fitness
    lists differ.
    """
    lam = len(self.fit)
    # joint ranking over the concatenation of both fitness lists
    idx = np.argsort(self.fit + self.fitre)
    ranks = np.argsort(idx).reshape((2, lam))
    # rank change per individual; the sign term removes the trivial
    # one-step offset between the two rankings
    rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])

    # compute rank change limits using both ranks[0] and ranks[1]
    r = np.arange(1, 2 * lam)  # 2 * lam - 2 elements
    # theta-percentile of achievable rank distances, averaged over the
    # two reference ranks (Mh.prctile is a project helper)
    limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))),
                                self.theta * 50) +
                     Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))),
                                self.theta * 50))
              for i in self.idx]

    # compute measurement
    # max: 1 rankchange in 2*lambda is always fine
    s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1)  # lives roughly in 0..2*lambda
    # exponentially smoothed noise measure with learning rate self.cum
    self.noiseS += self.cum * (np.mean(s) - self.noiseS)
    return self.noiseS, s
def logscale_img(img_array,
cap=255.0,
coeff=1000.0):
'''
This scales the image according to the relation:
logscale_img = np.log(coeff*(img/max(img))+1)/np.log(coeff)
Taken from the DS9 scaling algorithms page at:
http://hea-www.harvard.edu/RD/ds9/ref/how.html
According to that page:
coeff = 1000.0 works well for optical images
coeff = 100.0 works well for IR images
'''
logscaled_img = np.log(coeff*img_array/np.nanmax(img_array)+1)/np.log(coeff)
return cap*logscaled_img
def genplot(x, y, fit, xdata=None, ydata=None, maxpts=10000):
    """Plot binned medians, raw pixels and the fitted cosine curve of an
    aspect vs. dh/tan(slope) relationship (Nuth-style co-registration
    plot -- presumably; confirm against `nuth_func`).

    :param x: bin centers (aspect, degrees)
    :param y: bin median values
    :param fit: 3-tuple of cosine fit parameters for ``nuth_func``
    :param xdata: per-pixel aspect values. NOTE(review): despite the None
        default, this is effectively required -- ``.size`` is accessed
        unconditionally.
    :param ydata: per-pixel dh/tan(slope) values
    :param maxpts: cap on the number of raw pixels scattered
    :return: the matplotlib figure
    """
    bin_range = (0, 360)
    a = (np.arange(*bin_range))
    # `nuth_func` is defined elsewhere in this module
    f_a = nuth_func(a, fit[0], fit[1], fit[2])
    nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
    if xdata.size > maxpts:
        import random
        # Bug fix: the sample size was hard-coded to 10000, silently
        # ignoring the `maxpts` parameter.
        idx = random.sample(list(range(xdata.size)), maxpts)
    else:
        idx = np.arange(xdata.size)
    f, ax = plt.subplots()
    ax.set_xlabel('Aspect (deg)')
    ax.set_ylabel('dh/tan(slope) (m)')
    ax.plot(xdata[idx], ydata[idx], 'k.', label='Orig pixels')
    ax.plot(x, y, 'ro', label='Bin median')
    ax.axhline(color='k')
    ax.plot(a, f_a, 'b', label=nuth_func_str)
    ax.set_xlim(*bin_range)
    # pad the y-limits by 20% of the largest median magnitude
    pad = 0.2 * np.max([np.abs(y.min()), np.abs(y.max())])
    ax.set_ylim(y.min() - pad, y.max() + pad)
    ax.legend(prop={'size': 8})
    return f
#Function copied from from openPIV pyprocess
def center_clipping(x, percent=30):
    """
    Performs center clipping, a spectral whitening process.

    Samples whose magnitude does not exceed ``percent`` % of the peak
    amplitude are zeroed; the rest are shifted toward zero by the clip
    level. This flattens the spectrum so that the speech signal more
    closely approximates a periodic impulse train.

    Args:
        x (array): signal data
        percent (float): percent threshold to clip

    Returns:
        cc (array): center clipped signal
        clip_level (float): value of clipping
    """
    clip_level = np.max(np.abs(x)) * (percent / 100)
    cc = np.zeros(x.shape)
    above = x > clip_level
    below = x < -clip_level
    cc[above] = x[above] - clip_level
    cc[below] = x[below] + clip_level
    return cc, clip_level
def calc_tvd(sess, Generator, Data, N=50000, nbins=10):
    """Estimate total-variation and maximum-variation distance between
    generator samples and data samples on the 3-D unit cube.

    :param sess: session (TF-style) used to evaluate the tensors
    :param Generator: object exposing `step`, `X` tensors and feed key `N`
    :param Data: object exposing `X` tensor and feed key `N`
    :param N: number of samples drawn from each side
    :param nbins: histogram bins per axis
    :return: (step, tvd, mvd)
    """
    Xd = sess.run(Data.X, {Data.N: N})
    step, Xg = sess.run([Generator.step, Generator.X], {Generator.N: N})
    # Bug fix: `normed=True` was removed from np.histogramdd in NumPy 1.24;
    # `density=True` is the documented replacement.
    p_gen, _ = np.histogramdd(Xg, bins=nbins, range=[[0, 1], [0, 1], [0, 1]], density=True)
    p_dat, _ = np.histogramdd(Xd, bins=nbins, range=[[0, 1], [0, 1], [0, 1]], density=True)
    # convert densities to per-cell probabilities (cell volume = 1/nbins**3)
    p_gen /= nbins ** 3
    p_dat /= nbins ** 3
    tvd = 0.5 * np.sum(np.abs(p_gen - p_dat))  # total variation distance
    mvd = np.max(np.abs(p_gen - p_dat))        # largest single-cell deviation
    return step, tvd, mvd
    # Dead code removed: the original continued after this unconditional
    # return with `make_summary(...)` calls and a second return, which
    # could never execute.
def scatter2d(x, y, title='2dscatterplot', xlabel=None, ylabel=None):
    """Scatter-plot two [0, 1]-ranged vectors on fixed [0, 1] axes.

    :param x, y: data vectors; every value must lie in [0, 1]
    :param title: figure title
    :param xlabel, ylabel: optional axis labels
    :raises ValueError: if x or y contains values outside [0, 1]
    :return: the matplotlib figure
    """
    # Bug fix: validate BEFORE creating the figure so invalid input does
    # not leak an open matplotlib figure. The original also mangled its
    # error message with a backslash-continuation inside the string
    # literal (embedding a run of indentation spaces) and passed the
    # pieces as separate ValueError arguments.
    if not 0 <= np.min(x) <= np.max(x) <= 1:
        raise ValueError('summary_scatter2d title: %s input x exceeded [0,1] '
                         'range. min: %s max: %s' % (title, np.min(x), np.max(x)))
    if not 0 <= np.min(y) <= np.max(y) <= 1:
        raise ValueError('summary_scatter2d title: %s input y exceeded [0,1] '
                         'range. min: %s max: %s' % (title, np.min(y), np.max(y)))
    fig = plt.figure()
    plt.scatter(x, y)
    plt.title(title)
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    return fig
rl-network-train.py — source file
Project: Deep-Learning-with-Keras
Author: PacktPublishing
Project source / file source
Views: 22 | Bookmarks: 0 | Likes: 0 | Comments: 0
def get_next_batch(experience, model, num_actions, gamma, batch_size):
    """Sample a minibatch from the replay buffer and build Q-learning targets.

    Each experience entry is a tuple (s_t, a_t, r_t, s_tp1, game_over).
    The target for the taken action is r_t when the episode ended, else
    r_t + gamma * max_a Q(s_tp1, a).

    Returns:
        X: states, shape (batch_size, 80, 80, 4)
        Y: per-action targets, shape (batch_size, num_actions)
    """
    sampled = np.random.randint(low=0, high=len(experience),
                                size=batch_size)
    X = np.zeros((batch_size, 80, 80, 4))
    Y = np.zeros((batch_size, num_actions))
    for row, j in enumerate(sampled):
        s_t, a_t, r_t, s_tp1, game_over = experience[j]
        X[row] = s_t
        # start from the current Q estimates so untaken actions keep
        # their predicted values (only the taken action's target changes)
        Y[row] = model.predict(s_t)[0]
        best_next_q = np.max(model.predict(s_tp1)[0])
        Y[row, a_t] = r_t if game_over else r_t + gamma * best_next_q
    return X, Y
############################# main ###############################
# initialize parameters
def forward(self, input):
    """Rectified linear activation (ReLU).

    Inhibits every activation below the threshold 0; in other words it
    computes, point-wise,

    .. math:: y = max(0, x)

    Parameters
    ----------
    input : float32
        The activation (the summed, weighted input of a neuron).

    Returns
    -------
    float32
        The output of the rectify function applied to the activation.
    """
    # keep the pre-activation around; presumably used by a backward pass
    self.last_forward = input
    return np.maximum(0.0, input)
def forward(self, input):
    r"""Row-wise softmax activation.

    :math:`\varphi(\mathbf{x})_j =
    \frac{e^{\mathbf{x}_j}}{\sum_{k=1}^K e^{\mathbf{x}_k}}`
    where :math:`K` is the total number of neurons in the layer.

    Parameters
    ----------
    input : float32, 2-D
        The activations, one row per sample.

    Returns
    -------
    float32 where the sum of each row is 1 and each single value is in [0, 1]
        The output of the softmax function applied to the activation.
    """
    assert np.ndim(input) == 2
    # keep the pre-activation around; presumably used by a backward pass
    self.last_forward = input
    # subtracting the per-row maximum leaves the result unchanged but
    # prevents overflow in exp for large activations
    shifted = input - np.max(input, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)
def main(max_iter):
    """Train a small two-layer MLP on the digits dataset.

    `load_digits` is presumably ``sklearn.datasets.load_digits`` (the
    import is not visible in this chunk -- confirm).

    :param max_iter: number of training iterations for ``model.fit``
    """
    # prepare: fix the RNG seed for reproducible runs
    npdl.utils.random.set_seed(1234)
    # data
    digits = load_digits()
    X_train = digits.data
    X_train /= np.max(X_train)  # scale features into [0, 1]
    Y_train = digits.target
    n_classes = np.unique(Y_train).size
    # model: 64 inputs -> 500 (ReLU) -> n_classes (Softmax)
    model = npdl.model.Model()
    model.add(npdl.layers.Dense(n_out=500, n_in=64, activation=npdl.activations.ReLU()))
    model.add(npdl.layers.Dense(n_out=n_classes, activation=npdl.activations.Softmax()))
    model.compile(loss=npdl.objectives.SCCE(), optimizer=npdl.optimizers.SGD(lr=0.005))
    # train on one-hot targets, with 10% held out for validation
    model.fit(X_train, npdl.utils.data.one_hot(Y_train), max_iter=max_iter, validation_split=0.1)
def _updateMaxTextSize(self, x):
## Informs that the maximum tick size orthogonal to the axis has
## changed; we use this to decide whether the item needs to be resized
## to accomodate.
if self.orientation in ['left', 'right']:
mx = max(self.textWidth, x)
if mx > self.textWidth or mx < self.textWidth-10:
self.textWidth = mx
if self.style['autoExpandTextSpace'] is True:
self._updateWidth()
#return True ## size has changed
else:
mx = max(self.textHeight, x)
if mx > self.textHeight or mx < self.textHeight-10:
self.textHeight = mx
if self.style['autoExpandTextSpace'] is True:
self._updateHeight()
#return True ## size has changed
def _updateHeight(self):
    # Recompute this axis item's height and pin it (min == max) so the
    # layout cannot stretch it. Likely the top/bottom-orientation
    # counterpart of a _updateWidth method -- confirm against the class.
    if not self.isVisible():
        h = 0
    else:
        if self.fixedHeight is None:
            # base height: tick text space (auto-expanded or fixed style value)
            if not self.style['showValues']:
                h = 0
            elif self.style['autoExpandTextSpace'] is True:
                h = self.textHeight
            else:
                h = self.style['tickTextHeight']
            # add the text offset (only when values are shown) and tick length
            h += self.style['tickTextOffset'][1] if self.style['showValues'] else 0
            h += max(0, self.style['tickLength'])
            if self.label.isVisible():
                # 0.8 factor presumably compensates bounding-rect padding
                # -- not documented here
                h += self.label.boundingRect().height() * 0.8
        else:
            # explicit fixed height overrides everything
            h = self.fixedHeight
    self.setMaximumHeight(h)
    self.setMinimumHeight(h)
    # drop the cached picture so the item is redrawn at the new size
    self.picture = None