def sample():
    '''
    Draw a sample from the distribution of the polar angle of the angular
    momentum vector, :math:`\\theta`, computed using the Monte Carlo
    technique discussed in the paper.

    .. plot::
        :align: center

        from planetplanet.photo import theta
        import matplotlib.pyplot as pl
        x = [theta.sample() for i in range(10000)]
        pl.hist(x, bins = 50)
        pl.xlabel(r'$\\theta$ [deg]', fontweight = 'bold')
        pl.ylabel('Probability', fontweight = 'bold')
        pl.show()

    '''
    # Inverse-transform sampling: draw y ~ U(0, 1) and solve CDF(x) = y.
    y = np.random.random()
    f = lambda x: CDF(x) - y
    # Redraw y until a root is bracketed in [0, 1].
    while np.sign(f(0)) == np.sign(f(1)):
        y = np.random.random()
        f = lambda x: CDF(x) - y
    return brentq(f, 0, 1)
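# The pattern above is generic inverse-transform sampling via root finding.
# A minimal standalone sketch of the same idea, with a toy CDF(x) = x**2
# standing in for the paper's distribution (np, CDF and brentq are assumed
# module-level imports in the original):
import numpy as np
from scipy.optimize import brentq

def toy_CDF(x):
    # Any monotone map from [0, 1] onto [0, 1] works here.
    return x ** 2

def sample_toy():
    # Find x such that toy_CDF(x) = y for y ~ U(0, 1).
    y = np.random.random()
    return brentq(lambda x: toy_CDF(x) - y, 0, 1)

draws = [sample_toy() for _ in range(10000)]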
def calc_loss(self, states, actions, rewards, next_states, episode_ends):
    qv = self.agent.q(states)
    q_t = self.target(next_states)  # Q(s', *)
    max_q_prime = np.array(list(map(np.max, q_t.data)), dtype=np.float32)  # max_a Q(s', a)
    target = cuda.to_cpu(qv.data.copy())
    for i in range(self.replay_size):
        if episode_ends[i][0] is True:
            _r = np.sign(rewards[i])
        else:
            _r = np.sign(rewards[i]) + self.gamma * max_q_prime[i]
        target[i, actions[i]] = _r
    td = Variable(self.target.arr_to_gpu(target)) - qv
    td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # avoid division by zero
    # Clip the TD error to [-1, 1]: keep td where |td| <= 1, use sign(td) elsewhere.
    td_clip = td * (abs(td.data) <= 1) + td / abs(td_tmp) * (abs(td.data) > 1)
    zeros = Variable(self.target.arr_to_gpu(np.zeros((self.replay_size, self.target.n_action), dtype=np.float32)))
    loss = F.mean_squared_error(td_clip, zeros)
    self._loss = loss.data
    self._qv = np.max(qv.data)
    return loss
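# The target construction above follows the DQN recipe: rewards are squashed
# with np.sign and the TD error is clipped to [-1, 1] before the squared loss.
# A minimal numpy sketch of the td_clip expression in isolation:
import numpy as np

td = np.array([-2.5, -0.4, 0.0, 0.7, 3.0], dtype=np.float32)
td_tmp = td + 1000.0 * (np.abs(td) <= 1)  # keeps the divisor nonzero
td_clip = td * (np.abs(td) <= 1) + td / np.abs(td_tmp) * (np.abs(td) > 1)
print(td_clip)  # [-1.  -0.4  0.   0.7  1. ]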
def __call__(self, x):
    """
    Args:
        x (FloatTensor/LongTensor or ndarray)

    Returns:
        x_mu (LongTensor or ndarray)
    """
    mu = self.qc - 1.
    if isinstance(x, np.ndarray):
        x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
        x_mu = ((x_mu + 1) / 2 * mu + 0.5).astype(int)
    elif isinstance(x, (torch.Tensor, torch.LongTensor)):
        if isinstance(x, torch.LongTensor):
            x = x.float()
        mu = torch.FloatTensor([mu])
        x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
        x_mu = ((x_mu + 1) / 2 * mu + 0.5).long()
    return x_mu
def _step(self, action):
    obs, reward, done, info = self.env.step(action)
    obs = self.process_observation(obs)
    if self.squash_rewards:
        reward = float(np.sign(reward))
    else:
        reward = float(reward) / float(self.reward_scale)
    info["frame/lives"] = info["ale.lives"]
    if self.lives is None:
        self.lives = info["ale.lives"]
    else:
        current_lives = info["ale.lives"]
        lost = self.lives - current_lives
        self.lives = current_lives
        if lost > 0:
            reward -= lost * self.death_penalty
    return obs, reward, done, info
def reward(self, a, s1):
    """Immediate reward function."""
    reward = 0
    s0, s1 = self.data, s1.data
    # Rewards related to states.
    if any(proximity < ProximitySensor.COLLISION_THRESHOLD
           for proximity in s0[1:]):
        reward += self.IMMEDIATE_REWARD['collision']
    reward += (np.sign(s0[0] - s1[0]) *
               self.IMMEDIATE_REWARD['position-delta'])
    if s1[0] < s0[0]:
        reward_proximity = (self.IMMEDIATE_REWARD['close-to-goal'] *
                            (1 - self.data[0] / 28))
        reward += reward_proximity
        logger.info('distance: %.2f, reward-proximity: %.2f',
                    s0[0], reward_proximity)
    # Rewards related to actions.
    reward += self.IMMEDIATE_REWARD[a]
    logger.info('reward: %.2f', reward)
    return reward
def soft_threshold(X, thresh):
    """Proximal mapping of the l1-norm, i.e. soft-thresholding. It is required
    for the optimisation of the GFGL or IFGL.

    Parameters
    ----------
    X : ndarray
        Input data of arbitrary shape.
    thresh : float
        Threshold value.

    Returns
    -------
    ndarray
        Input with the soft threshold applied.
    """
    return (np.absolute(X) - thresh).clip(0) * np.sign(X)
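# A quick usage sketch of soft_threshold: values inside [-thresh, thresh] are
# zeroed, the rest are shrunk toward zero by thresh.
import numpy as np

x = np.array([-3.0, -0.5, 0.0, 0.2, 2.0])
print(soft_threshold(x, 1.0))  # [-2. -0.  0.  0.  1.]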
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = 0.5 * sign(unif(dim, self.rseed) - 0.5) * 4.2096874633
        self.scales = (self.condition ** .5) ** np.linspace(0, 1, dim)
    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(2 * np.abs(self.xopt), curshape)
        self.arrscales = resize(self.scales, curshape)
        self.arrsigns = resize(sign(self.xopt), curshape)
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = .5 * self._mu1 * sign(gauss(dim, self.rseed))
        self.rotation = compute_rotation(self.rseed + 1e6, dim)
        self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
        self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
        # decouple scaling from function definition
        self.linearTF = dot(self.linearTF, self.rotation)
    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        # self.arrxopt = resize(self.xopt, curshape)
        self.arrscales = resize(2. * sign(self.xopt), curshape)  # makes up for xopt
def update_measure(self):
    """Update the noise level measure using the two fitness lists ``self.fit``
    and ``self.fitre``; return ``self.noiseS, all_individual_measures``.

    Assumes that ``self.idx`` contains the indices where the fitness
    lists differ.
    """
    lam = len(self.fit)
    idx = np.argsort(self.fit + self.fitre)
    ranks = np.argsort(idx).reshape((2, lam))
    rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])
    # compute rank change limits using both ranks[0] and ranks[1]
    r = np.arange(1, 2 * lam)  # 2 * lam - 1 elements
    limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))),
                                self.theta * 50) +
                     Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))),
                                self.theta * 50))
              for i in self.idx]
    # compute measurement
    # max: 1 rank change in 2*lambda is always fine
    s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1)  # lives roughly in 0..2*lambda
    self.noiseS += self.cum * (np.mean(s) - self.noiseS)
    return self.noiseS, s
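# The rank bookkeeping is the subtle part: both fitness lists are ranked
# jointly, and the np.sign term removes the one-rank offset each solution
# receives from its own counterpart. A toy sketch of the joint ranking:
import numpy as np

fit = [0.3, 0.1, 0.9]    # original evaluations
fitre = [0.2, 0.4, 0.8]  # re-evaluations of the same solutions
lam = len(fit)
idx = np.argsort(fit + fitre)              # list + list concatenates both
ranks = np.argsort(idx).reshape((2, lam))  # joint rank of every entry
rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])
print(ranks)      # [[2 0 5] [1 3 4]]
print(rankDelta)  # [ 0 -2  0]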
def compute_pvalues_for_processes(self, U_matrix, change_prob, num_bootstrapped_stats=100):
    N = U_matrix.shape[0]
    bootstrapped_stats = np.zeros(num_bootstrapped_stats)
    # orsetinW = simulate(N, num_bootstrapped_stats, corr)
    for proc in range(num_bootstrapped_stats):
        # W = np.sign(orsetinW[:, proc])
        W = simulatepm(N, change_prob)
        WW = np.outer(W, W)
        st = np.mean(U_matrix * WW)
        bootstrapped_stats[proc] = N * st
    stat = N * np.mean(U_matrix)
    return float(np.sum(bootstrapped_stats > stat)) / num_bootstrapped_stats
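# simulatepm is not shown on this page; judging by its use, it draws a +/-1
# process whose sign flips with probability change_prob at each step, as in a
# wild-bootstrap scheme. A hypothetical stand-in under that assumption:
import numpy as np

def simulatepm(N, change_prob):
    # Hypothetical sketch: persistent +/-1 sequence with random sign flips.
    W = np.empty(N)
    W[0] = 1.0 if np.random.random() < 0.5 else -1.0
    for t in range(1, N):
        W[t] = -W[t - 1] if np.random.random() < change_prob else W[t - 1]
    return W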
def get_extrema(data):
    # find extrema by finding the indices where diff changes sign
    data_diff = np.diff(data)
    asign = np.sign(data_diff)
    signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)
    # the first and last values are always local extrema
    signchange[0] = 1
    # the last value is missing because the diff array is one value shorter
    # than the input array, so we have to add it again
    signchange = np.append(signchange, np.array([1]))
    calc_data = data[np.where(signchange != 0)]
    return calc_data
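# A quick usage sketch: the endpoints are always included, plus every
# turning point in between.
import numpy as np

data = np.array([0., 1., 2., 3., 2., 1., 2.])
print(get_extrema(data))  # [0. 3. 1. 2.] -> endpoints, the peak, the trough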
def compute_mingrad_l1(self, main_rdd, cinfo, K):
    R = cinfo

    def maxmin_l1(tpl1, tpl2):
        (z1, x1, lam1, i1) = tpl1
        (z2, x2, lam2, i2) = tpl2
        # keep the tuple whose gradient component has the larger magnitude
        if abs(z1) > abs(z2):
            out = (z1, x1, lam1, i1)
        else:
            out = (z2, x2, lam2, i2)
        return out

    def CompMingrad(tpl):
        p = []
        for ((tx, lam), index) in tpl:
            p.append(((np.matrix(tx) * R)[0, 0], tx, lam, index))
        return p

    (mingrad, xmin, lambdaMin, iStar) = main_rdd.flatMapValues(CompMingrad) \
        .map(lambda kv: kv[1]).reduce(maxmin_l1)
    s_star = -np.sign(mingrad)
    return (mingrad, xmin, lambdaMin, iStar, s_star)
def calc_PrimaryRegion(self, X, Z):
    """Predict the magnitude and direction of the primary field in the region."""
    # Calculates the inducing field within the region and returns it at the
    # requested locations.
    # Initiate variables from the object
    I = self.I
    a1 = self.a1
    eps = 1e-6
    mu0 = 4*np.pi*1e-7  # 1e9*mu0
    s = np.abs(X)  # radial distance
    k = 4*a1*s/(Z**2 + (a1+s)**2)
    Bpx = mu0*np.sign(X)*(Z*I/(2*np.pi*s + eps))*(1/np.sqrt(Z**2 + (a1+s)**2))*(-sp.ellipk(k) + ((a1**2 + Z**2 + s**2)/(Z**2 + (s-a1)**2))*sp.ellipe(k))
    Bpz = mu0*(I/(2*np.pi))*(1/np.sqrt(Z**2 + (a1+s)**2))*(sp.ellipk(k) + ((a1**2 - Z**2 - s**2)/(Z**2 + (s-a1)**2))*sp.ellipe(k))
    # Zero the field in small windows around the loop wire at x = +/- a1.
    Bpx[(X > -1.025*a1) & (X < -0.975*a1) & (Z > -0.025*a1) & (Z < 0.025*a1)] = 0.
    Bpx[(X < 1.025*a1) & (X > 0.975*a1) & (Z > -0.025*a1) & (Z < 0.025*a1)] = 0.
    Bpz[(X > -1.025*a1) & (X < -0.975*a1) & (Z > -0.025*a1) & (Z < 0.025*a1)] = 0.
    Bpz[(X < 1.025*a1) & (X > 0.975*a1) & (Z > -0.025*a1) & (Z < 0.025*a1)] = 0.
    Babs = np.sqrt(Bpx**2 + Bpz**2)
    return Bpx, Bpz, Babs
def make_classifier(estimator, params=None):
    """Make a classifier for a possible regressor.

    .. deprecated:: 0.5

    Parameters
    ----------
    estimator : sklearn-like class
        It must contain at least a fit and predict method.
    params : dict, optional
        Parameters of the classifier.

    Returns
    -------
    generic_classifier : class
        sklearn-like class that is a subclass of estimator. The predict method
        has been overwritten in order to return only the sign of the results.
        Note: this assumes that labels are 1 and -1.
    """
    if params is None:
        params = {}
    params['predict'] = predict
    params.setdefault('score', accuracy_score)
    return type('GenericClassifier', (estimator,), params)()
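# The module-level predict assigned above is not shown here. A minimal sketch
# consistent with the docstring (labels in {-1, 1}, keep only the sign of the
# parent regressor's output) could look like:
import numpy as np

def predict(self, X):
    # Hypothetical helper: defer to the parent estimator, then take the sign.
    return np.sign(super(type(self), self).predict(X))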
def inv_mulaw(y, mu=256):
    """Inverse of mu-law companding (mu-law expansion)

    .. math::

        f^{-1}(y) = \\mathrm{sign}(y) (1 / \\mu) \\left( (1 + \\mu)^{|y|} - 1 \\right)

    Args:
        y (array-like): Compressed signal. Each value of the input signal must
          be in the range of [-1, 1].
        mu (number): Compression parameter ``mu``.

    Returns:
        array-like: Uncompressed signal (-1 <= x <= 1)

    See also:
        :func:`nnmnkwii.preprocessing.mulaw`
        :func:`nnmnkwii.preprocessing.mulaw_quantize`
        :func:`nnmnkwii.preprocessing.inv_mulaw_quantize`
    """
    return _sign(y) * (1.0 / mu) * ((1.0 + mu)**_abs(y) - 1.0)
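# With numpy inputs, _sign and _abs reduce to np.sign and np.abs. A roundtrip
# sketch under that assumption, with the forward transform written inline:
import numpy as np

def mulaw(x, mu=256):
    # Forward mu-law companding: f(x) = sign(x) log(1 + mu|x|) / log(1 + mu)
    return np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)

def inv_mulaw_np(y, mu=256):
    return np.sign(y) * (1.0 / mu) * ((1.0 + mu) ** np.abs(y) - 1.0)

x = np.linspace(-1, 1, 5)
assert np.allclose(inv_mulaw_np(mulaw(x)), x)  # exact inverse up to float error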
def vorEdges(vor, far):
    """
    Given a Voronoi tessellation, returns the set of Voronoi edges.
    far is the length of the "infinity" edges.
    """
    edges = []
    for simplex in vor.ridge_vertices:
        simplex = numpy.asarray(simplex)
        if numpy.all(simplex >= 0):
            edge = {}
            edge['p1'] = numpy.array([vor.vertices[simplex, 0][0], vor.vertices[simplex, 1][0]])
            edge['p2'] = numpy.array([vor.vertices[simplex, 0][1], vor.vertices[simplex, 1][1]])
            edge['t'] = (edge['p2'] - edge['p1']) / numpy.linalg.norm(edge['p2'] - edge['p1'])
            edges.append(edge)
    ptp_bound = vor.points.ptp(axis=0)
    center = vor.points.mean(axis=0)
    for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
        simplex = numpy.asarray(simplex)
        if numpy.any(simplex < 0):
            i = simplex[simplex >= 0][0]  # finite end Voronoi vertex
            t = vor.points[pointidx[1]] - vor.points[pointidx[0]]  # tangent
            t /= numpy.linalg.norm(t)
            n = numpy.array([-t[1], t[0]])  # normal
            midpoint = vor.points[pointidx].mean(axis=0)
            direction = numpy.sign(numpy.dot(midpoint - center, n)) * n
            far_point = vor.vertices[i] + direction * ptp_bound.max() * far
            edge = {}
            edge['p1'], edge['p2'] = vor.vertices[i, :], far_point
            edge['t'] = (edge['p2'] - edge['p1']) / numpy.linalg.norm(edge['p2'] - edge['p1'])
            edges.append(edge)
    return edges
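# A quick usage sketch with scipy's Voronoi:
import numpy
from scipy.spatial import Voronoi

points = numpy.random.rand(10, 2)
edges = vorEdges(Voronoi(points), far=2.0)
print(len(edges), edges[0]['p1'], edges[0]['t'])  # unit tangents on every edge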
def __call__(self, x_mu):
    """
    Args:
        x_mu (FloatTensor/LongTensor or ndarray)

    Returns:
        x (FloatTensor or ndarray)
    """
    mu = self.qc - 1.
    if isinstance(x_mu, np.ndarray):
        x = (x_mu / mu) * 2 - 1.
        x = np.sign(x) * (np.exp(np.abs(x) * np.log1p(mu)) - 1.) / mu
    elif isinstance(x_mu, (torch.Tensor, torch.LongTensor)):
        if isinstance(x_mu, torch.LongTensor):
            x_mu = x_mu.float()
        mu = torch.FloatTensor([mu])
        x = (x_mu / mu) * 2 - 1.
        x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.) / mu
    return x
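# The two __call__ methods above are the mu-law quantize/expand pair (qc is
# the number of quantization channels). The same arithmetic as a numpy
# roundtrip, outside the classes:
import numpy as np

qc = 256
mu = qc - 1.
x = np.linspace(-1, 1, 9)

# Encode: compress, then quantize to integers in [0, qc - 1].
x_comp = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
x_mu = ((x_comp + 1) / 2 * mu + 0.5).astype(int)

# Decode: map back to [-1, 1] and expand.
x_hat = (x_mu / mu) * 2 - 1.
x_hat = np.sign(x_hat) * (np.exp(np.abs(x_hat) * np.log1p(mu)) - 1.) / mu
print(np.max(np.abs(x - x_hat)))  # small quantization error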
def write_load_file(loadfilename, load_nodeID_amp, direction=-3,
                    header_comment="$Generated by GaussExc.py\n"):
    """Write a load file.

    :param loadfilename: output filename
    :param load_nodeID_amp: list of (int node ID, float amp)
    :param direction: default = -3 (orientation (1, 2, 3) and sign)
    :param header_comment: comment line written at the top of the file
    """
    from numpy import sign, abs
    d = abs(direction)
    dsign = sign(direction)
    lfile = open(loadfilename, 'w')
    lfile.write(header_comment)
    lfile.write("*LOAD_NODE_POINT\n")
    for i, j in load_nodeID_amp:
        lfile.write("%i,%i,1,%.4f\n" % (i, d, dsign * j))
    lfile.write("*END\n")
    lfile.close()
    return 0
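# A quick usage sketch: two nodal point loads of amplitude 0.5 along -z.
write_load_file("loads.dyn", [(101, 0.5), (102, 0.5)], direction=-3)
# Writes "101,3,1,-0.5000" and "102,3,1,-0.5000" between the two keywords.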
def get_output_p(self, path):  # this gives the p_dist for every step: the latent posterior wrt obs_act
    if self.recurrent:
        obs_actions = [np.concatenate([path["observations"][:, self.obs_regressed],
                                       path["actions"][:, self.act_regressed]],
                                      axis=1)]  # is this the same??
    else:
        obs_actions = np.concatenate([path["observations"][:, self.obs_regressed],
                                      path["actions"][:, self.act_regressed]], axis=1)
    if self.noisify_traj_coef:
        obs_actions += np.random.normal(loc=0.0,
                                        scale=float(np.mean(np.abs(obs_actions))) * self.noisify_traj_coef,
                                        size=np.shape(obs_actions))
    if self.use_only_sign:
        obs_actions = np.sign(obs_actions)
    if self.policy.latent_name == 'bernoulli':
        return self._regressor._f_p(obs_actions).flatten()
    elif self.policy.latent_name == 'normal':
        return self._regressor._f_pdists(obs_actions).flatten()
def predict_log_likelihood(self, paths, latents):
    if self.recurrent:
        observations = np.array([p["observations"][:, self.obs_regressed] for p in paths])
        actions = np.array([p["actions"][:, self.act_regressed] for p in paths])
        obs_actions = np.concatenate([observations, actions], axis=2)  # latents must match first 2 dims: (batch, time)
    else:
        observations = np.concatenate([p["observations"][:, self.obs_regressed] for p in paths])
        actions = np.concatenate([p["actions"][:, self.act_regressed] for p in paths])
        obs_actions = np.concatenate([observations, actions], axis=1)
        latents = np.concatenate(latents, axis=0)
    if self.noisify_traj_coef:
        noise = np.random.multivariate_normal(mean=np.zeros_like(np.mean(obs_actions, axis=0)),
                                              cov=np.diag(np.mean(np.abs(obs_actions),
                                                                  axis=0) * self.noisify_traj_coef),
                                              size=np.shape(obs_actions)[0])
        obs_actions += noise
    if self.use_only_sign:
        obs_actions = np.sign(obs_actions)
    return self._regressor.predict_log_likelihood(obs_actions, latents)  # see difference with fit above...
def lowb_mutual(self, paths, times=(0, None)):
    if self.recurrent:
        observations = np.array([p["observations"][times[0]:times[1], self.obs_regressed] for p in paths])
        actions = np.array([p["actions"][times[0]:times[1], self.act_regressed] for p in paths])
        obs_actions = np.concatenate([observations, actions], axis=2)
        latents = np.array([p['agent_infos']['latents'][times[0]:times[1]] for p in paths])
    else:
        observations = np.concatenate([p["observations"][times[0]:times[1], self.obs_regressed] for p in paths])
        actions = np.concatenate([p["actions"][times[0]:times[1], self.act_regressed] for p in paths])
        obs_actions = np.concatenate([observations, actions], axis=1)
        latents = np.concatenate([p['agent_infos']["latents"][times[0]:times[1]] for p in paths])
    if self.noisify_traj_coef:
        obs_actions += np.random.multivariate_normal(mean=np.zeros_like(np.mean(obs_actions, axis=0)),
                                                     cov=np.diag(np.mean(np.abs(obs_actions),
                                                                         axis=0) * self.noisify_traj_coef),
                                                     size=np.shape(obs_actions)[0])
    if self.use_only_sign:
        obs_actions = np.sign(obs_actions)
    H_latent = self.policy.latent_dist.entropy(self.policy.latent_dist_info)  # sum of entropies of the latents
    return H_latent + np.mean(self._regressor.predict_log_likelihood(obs_actions, latents))
def find_fermi_SPB(cbm_vbm, c, T, tolerance=0.001, tolerance_loose=0.03, alpha=0.02, max_iter=1000):
    tp = get_tp(c)
    sgn = np.sign(c)
    m_eff = np.prod(cbm_vbm[tp]["eff_mass_xx"]) ** (1.0 / 3.0)
    c *= sgn
    initial_energy = cbm_vbm[tp]["energy"]
    fermi = initial_energy + 0.02
    for iter in range(max_iter):
        calc_doping = 4 * pi * (2 * m_eff * m_e * k_B * T / hbar ** 2) ** 1.5 \
            * fermi_integral(0.5, fermi, T, initial_energy) * 1e-6 / e ** 1.5
        fermi += alpha * sgn * (calc_doping - c) / abs(c + calc_doping) * fermi
        relative_error = abs(calc_doping - c) / abs(c)
        if relative_error <= tolerance:
            # This assumes that the SPB generator set the VBM to 0.0 and CBM = gap + scissor.
            if sgn < 0:
                return fermi
            else:
                return -(fermi - initial_energy)
    if relative_error > tolerance:
        raise ValueError("could NOT find a corresponding SPB fermi level after {} iterations".format(max_iter))
def hex2vec(h, ell):
    """hex2vec(h, ell) generates a sign vector of length ell from the hex string h.
    ell must be <= 4*len(h) (excluding the optional leading "0x")
    """
    if h[0:2] in ['0x', '0X']:
        h = h[2:]
    nybble = numpy.array([
        [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 1, 1],
        [0, 1, 0, 0], [0, 1, 0, 1], [0, 1, 1, 0], [0, 1, 1, 1],
        [1, 0, 0, 0], [1, 0, 0, 1], [1, 0, 1, 0], [1, 0, 1, 1],
        [1, 1, 0, 0], [1, 1, 0, 1], [1, 1, 1, 0], [1, 1, 1, 1]])
    vec = numpy.ravel(numpy.array([nybble[int(x, 16)] for x in h]))
    if len(vec) < ell:
        raise ValueError('hex string too short')
    return vec[len(vec) - ell:]
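# A quick usage sketch: 'A' -> 1010, '5' -> 0101.
print(hex2vec('0xA5', 8))  # [1 0 1 0 0 1 0 1]
print(hex2vec('A5', 6))    # [1 0 0 1 0 1] -> keeps only the last 6 bits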
def native_pla(x_d, y_d, is_rand=False, repeat=1, eta=1.0):
    total_update = 0
    for rpt in range(0, repeat):
        w = np.zeros(len(x_d[0]))
        update_count = 0
        all_pass = False
        index = [i for i in range(len(x_d))]
        if is_rand:
            random.shuffle(index)
        while not all_pass:
            all_pass = True
            for t in index:
                if np.sign(np.inner(x_d[t], w)) != y_d[t]:
                    w += eta * y_d[t] * x_d[t]
                    all_pass = False
                    update_count += 1
        total_update += update_count
    return w, total_update / repeat
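# A quick usage sketch on a tiny linearly separable set (the first column is
# a bias feature; labels must be +/-1):
import numpy as np
import random

x_d = np.array([[1., 2., 2.], [1., 1., 3.], [1., -1., -2.], [1., -2., -1.]])
y_d = np.array([1, 1, -1, -1])
w, avg_updates = native_pla(x_d, y_d)
assert all(np.sign(x @ w) == y for x, y in zip(x_d, y_d))
print(w, avg_updates)  # [1. 2. 2.] 1.0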
def update(self, es, **kwargs):
    if es.countiter < 2:
        self.initialize(es)
        self.fit = es.fit.fit
    else:
        ft1, ft2 = self.fit[int(self.index_to_compare)], self.fit[int(np.ceil(self.index_to_compare))]
        ftt1, ftt2 = es.fit.fit[(es.popsize - 1) // 2], es.fit.fit[int(np.ceil((es.popsize - 1) / 2))]
        pt2 = self.index_to_compare - int(self.index_to_compare)
        # ptt2 = (es.popsize - 1) / 2 - (es.popsize - 1) // 2  # not in use
        s = 0
        if 1 < 3:
            s += pt2 * sum(es.fit.fit <= self.fit[int(np.ceil(self.index_to_compare))])
            s += (1 - pt2) * sum(es.fit.fit < self.fit[int(self.index_to_compare)])
            s -= es.popsize / 2.
            s *= 2. / es.popsize  # the range was popsize, is now 2
        self.s = (1 - self.c) * self.s + self.c * s
        es.sigma *= exp(self.s / self.damp)
        # es.more_to_write.append(10**(self.s))
        # es.more_to_write.append(10**((2 / es.popsize) * (sum(es.fit.fit < self.fit[int(self.index_to_compare)]) - (es.popsize + 1) / 2)))
        # es.more_to_write.append(10**(self.index_to_compare - sum(self.fit <= es.fit.fit[es.popsize // 2])))
        # es.more_to_write.append(10**(np.sign(self.fit[int(self.index_to_compare)] - es.fit.fit[es.popsize // 2])))
        self.fit = es.fit.fit
def update(self, es, function_values, **kwargs):
    """the first and second value in ``function_values``
    must reflect two mirrored solutions, sampled
    in direction / in opposite direction of
    the previous mean shift, respectively.
    """
    # TODO: on the linear function, the two mirrored samples lead
    # to a sharp increase of the condition number of the covariance matrix.
    # They should not be used to update the covariance matrix
    # if the step-size increases quickly. This should be fine with
    # negative updates though.
    if not self.initialized:
        self.initialize(es.N, es.opts)
    if 1 < 3:
        # use the ranking difference of the mirrors for adaptation
        # damp = 5 should be fine
        z = np.where(es.fit.idx == 1)[0][0] - np.where(es.fit.idx == 0)[0][0]
        z /= es.popsize - 1  # z in [-1, 1]
        self.s = (1 - self.sp.c) * self.s + self.sp.c * np.sign(z) * np.abs(z)**self.sp.z_exponent
        if self.s > 0:
            es.sigma *= exp(self.s / self.sp.dampup)
        else:
            es.sigma *= exp(self.s / self.sp.dampdown)
        # es.more_to_write.append(10**z)
def update_market_value(self, price):
    """Compute the current market value of the position. This is the current
    price multiplied by the direction of the trade (represented by the sign
    of the net number of shares bought and sold). The function also updates
    the unrealized and realized profits and losses.
    """
    # Compute the mean of the bid and ask price to compute the assumed value
    # of the position.
    #
    # N.B. The market value is akin to the amount of cash that would be
    # injected into the portfolio if the position were liquidated. This
    # means that if a position is short, then a negative amount will be
    # injected (i.e. paid out). On the other hand, the current value is the
    # profit-and-loss on a position relative to the cost basis.
    self.market_value = self.net * price
    self.unrealized_pnl = self.market_value - self.cost_basis
    self.realized_pnl = self.market_value + self.net_tot_incl_comm
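# A worked example of the sign convention, assuming net, cost_basis and
# net_tot_incl_comm carry the meanings described in the comments above.
# Short 100 shares sold at 10.00 (net = -100, cost_basis = -1000.0), price now 9.50:
#   market_value   = -100 * 9.50 = -950.0        (cash paid out to close the short)
#   unrealized_pnl = -950.0 - (-1000.0) = 50.0   (the short is in profit)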
def iterate(self, x, eps=32, alp=1.0):
    num_iter = min(eps + 4, 1.25 * eps)
    loss = 1.0
    x = np.copy(x)
    while loss > 0 and num_iter > 0:
        inp = x.reshape((1,) + inp_size)
        outs = self.f_outputs([inp, 0])
        loss = outs[0]
        print('Loss: ', loss)
        grads = np.array(outs[1:]).reshape(inp_size)
        s_grads = np.sign(grads)
        adv_x = x - alp * s_grads
        sub_x = np.minimum(x + eps, np.maximum(x - eps, adv_x))
        next_x = preprocess_img(np.clip(deprocess_img(sub_x), 0.0, 255.0))
        x = next_x
        confidence = self.mdl.predict(x.reshape((1,) + inp_size))[0][0]
        print('Current confidence value: ', confidence)  # 'minval =', min_val)
        yield (deprocess_img(x), confidence)
        num_iter -= 1
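# This is an iterative sign-gradient (FGSM-style) attack. The core update,
# isolated in numpy:
import numpy as np

x = np.array([100., 120., 140.])    # current pixel values
grads = np.array([0.3, -0.8, 0.0])  # gradient of the loss w.r.t. the input
eps, alp = 32, 1.0

adv_x = x - alp * np.sign(grads)                         # step against the gradient sign
sub_x = np.minimum(x + eps, np.maximum(x - eps, adv_x))  # stay inside the eps box
print(np.clip(sub_x, 0.0, 255.0))  # [ 99. 121. 140.]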