def __init__(self, session, ob_dim=None, n_epochs=10, stepsize=1e-3):
"""
They provide us with an ob_dim in the code so I assume we can use it;
makes it easy to define the layers anyway. This gets constructed upon
initialization so future calls to self.fit should remember this. I
actually use the pre-processed version, though.
"""
self.n_epochs = n_epochs
self.lrate = stepsize
self.sy_ytarg = tf.placeholder(shape=[None], name="nnvf_y", dtype=tf.float32)
self.sy_ob_no = tf.placeholder(shape=[None, ob_dim+1], name="nnvf_ob", dtype=tf.float32)
    # Two 32-unit hidden layers; with leak=0.0 the lrelu is an ordinary ReLU.
    self.sy_h1 = utils.lrelu(utils.dense(self.sy_ob_no, 32, "nnvf_h1", weight_init=utils.normc_initializer(1.0)), leak=0.0)
    self.sy_h2 = utils.lrelu(utils.dense(self.sy_h1, 32, "nnvf_h2", weight_init=utils.normc_initializer(1.0)), leak=0.0)
    self.sy_final_n = utils.dense(self.sy_h2, 1, "nnvf_final", weight_init=utils.normc_initializer(1.0))
self.sy_ypred = tf.reshape(self.sy_final_n, [-1])
self.sy_l2_error = tf.reduce_mean(tf.square(self.sy_ypred - self.sy_ytarg))
self.fit_op = tf.train.AdamOptimizer(stepsize).minimize(self.sy_l2_error)
self.sess = session
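# A minimal sketch of how this value function might be fit and queried,
# assuming hypothetical fit/predict methods on the same class (names and
# the full-batch training loop here are illustrative, not from the source):
def fit(self, X, y):
    """Run n_epochs of full-batch Adam steps on the L2 error."""
    for _ in range(self.n_epochs):
        self.sess.run(self.fit_op,
                      feed_dict={self.sy_ob_no: X, self.sy_ytarg: y})

def predict(self, X):
    """Predict values for a batch of pre-processed observations X."""
    return self.sess.run(self.sy_ypred, feed_dict={self.sy_ob_no: X})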
def row_cosine_similarities(self):
"""The squared row cosine similarities.
The row cosine similarities are obtained by calculating the cosine of the angle shaped by
the row principal coordinates and the row principal components. This is calculated by
squaring each row projection coordinate and dividing each squared coordinate by the sum of
the squared coordinates, which results in a ratio comprised between 0 and 1 representing the
squared cosine.
Returns:
pandas.DataFrame: A dataframe of shape (`n`, `k`) containing the squared row cosine
similarities.
"""
squared_coordinates = np.square(self.row_principal_coordinates)
total_squares = squared_coordinates.sum(axis='columns')
return squared_coordinates.div(total_squares, axis='rows')
def column_cosine_similarities(self):
"""The squared column cosine similarities.
The column cosine similarities are obtained by calculating the cosine of the angle shaped by
the column principal coordinates and the column principal components. This is calculated by
squaring each column projection coordinate and dividing each squared coordinate by the sum
of the squared coordinates, which results in a ratio comprised between 0 and 1 representing
the squared cosine.
Returns:
pandas.DataFrame: A dataframe of shape (`p`, `k`) containing the squared row cosine
similarities.
"""
squared_column_pc = np.square(self.column_principal_coordinates)
total_squares = squared_column_pc.sum(axis='rows')
return squared_column_pc.div(total_squares, axis='columns')
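# A toy numpy illustration of the squared-cosine computation used by both
# methods above (standalone sketch; `coords` is made-up data):
import numpy as np

coords = np.array([[3.0, 4.0],
                   [1.0, 1.0]])
sq = np.square(coords)
row_cos2 = sq / sq.sum(axis=1, keepdims=True)
# First row gives [0.36, 0.64]; each row of squared cosines sums to 1.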
def _tipping_point_update(self, tmp, consump, peak_temp_interval=30.0):
"""Determine whether a tipping point has occurred, if so reduce consumption for
all periods after this date.
"""
draws = tmp.shape[0]
disaster = self._disaster_simulation()
disaster_cons = self._disaster_cons_simulation()
period_lengths = self.tree.decision_times[1:] - self.tree.decision_times[:-1]
tmp_scale = np.maximum(self.peak_temp, tmp)
ave_prob_of_survival = 1.0 - np.square(tmp / tmp_scale)
prob_of_survival = ave_prob_of_survival**(period_lengths / peak_temp_interval)
    # Find the first period in which each simulation path (row) fails the
    # survival draw; this loop is the slow part and could be vectorized.
    res = prob_of_survival < disaster
    rows, cols = np.nonzero(res)
    row, count = np.unique(rows, return_counts=True)
    first_occurrence = zip(row, cols[np.insert(count.cumsum()[:-1], 0, 0)])
    for pos in first_occurrence:
        # Scale down consumption from the tipping point onward.
        consump[pos[0], pos[1]:] *= np.exp(-disaster_cons[pos[0]])
return consump
def compute_P(max_string_length, sigma_position):
"""
P is a matrix that contains all possible position
uncertainty values. This function pre-compute all
possible values since those values are independant of
the amino acids sequence.
"""
P = np.zeros((max_string_length, max_string_length))
    for i in range(max_string_length):
        for j in range(max_string_length):
            P[i, j] = i - j
P = np.square(P)
P /= -2.0 * (sigma_position ** 2.0)
P = np.exp(P)
return P
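# The double loop above can be replaced by broadcasting; a vectorized
# sketch that should produce the same matrix (assumes numpy only):
import numpy as np

def compute_P_vectorized(max_string_length, sigma_position):
    idx = np.arange(max_string_length)
    # (i - j)^2 for every index pair, then the Gaussian position kernel
    P = np.square(idx[:, None] - idx[None, :]).astype(float)
    return np.exp(P / (-2.0 * sigma_position ** 2))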
def compute_psi_dict(amino_acids, aa_descriptors):
"""
This function pre-compute the square Euclidean distance
between all amino acids descriptors and stock the distance
in an hash table for easy and fast access during the
GS kernel computation.
amino_acids -- List of all amino acids in aa_descriptors
aa_descriptors -- The i-th row of this matrix contain the
descriptors of the i-th amino acid of amino_acids list.
"""
# For every amino acids couple (a_1, a_2) psiDict is a hash table
# that contain the squared Euclidean distance between the descriptors
# of a_1 and a_2
psiDict = {}
# Fill the hash table psiDict
    for i in range(len(amino_acids)):
        for j in range(len(amino_acids)):
            c = aa_descriptors[i] - aa_descriptors[j]
            psiDict[amino_acids[i], amino_acids[j]] = np.dot(c, c)
return psiDict
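# If scipy is available, the same table can be filled from one pairwise
# distance call (a sketch; the dict layout matches the function above):
from scipy.spatial.distance import cdist

def compute_psi_dict_fast(amino_acids, aa_descriptors):
    D2 = cdist(aa_descriptors, aa_descriptors, metric='sqeuclidean')
    return {(a, b): D2[i, j]
            for i, a in enumerate(amino_acids)
            for j, b in enumerate(amino_acids)}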
def __call__(self, input_data, weights):
    '''
    input_data here is a numpy array with batch_size on axis 1,
    and weights is a matrix with a single column
    '''
if self.state is None:
self.state = np.ones_like(weights)
if self.velocity is None:
self.velocity = np.zeros_like(weights)
    # Gradient estimate: negative mean over the batch axis
    gradient = -input_data.mean(axis=1)
    # Exponential moving average of squared gradients (RMSProp accumulator)
    self.state[:] = self.decay_rate * self.state + \
        (1.0 - self.decay_rate) * np.square(gradient)
    # Momentum step with the RMS-scaled gradient plus L2 weight decay
    self.velocity = self.velocity * self.momentum + \
        self.learning_rate * gradient / np.sqrt(self.state + self.epsilon) + \
        self.learning_rate * self.wdecay * weights
    weights[:] = weights - self.velocity
return weights
def __call__(self, input_data, weights):
    '''
    input_data here is a numpy array with batch_size on axis 1,
    and weights is a matrix with a single column
    '''
if self.state is None:
self.state = np.zeros_like(weights)
    # Gradient estimate: negative mean over the batch axis
    gradient = -input_data.mean(axis=1)
    # AdaGrad: accumulate squared gradients over all steps, then scale
    # each weight's step by the accumulated magnitude
    self.state[:] = self.state + np.square(gradient)
    weights[:] = weights \
        - gradient * self.learning_rate / np.sqrt(self.state + self.epsilon)
return weights
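# Standalone sketch of one AdaGrad step matching the rule above (the
# hyperparameter values are purely illustrative):
import numpy as np

state = np.zeros(3)
weights = np.ones(3)
gradient = np.array([0.1, -0.2, 0.3])
state += np.square(gradient)                        # accumulate squared grads
weights -= 0.01 * gradient / np.sqrt(state + 1e-8)  # per-weight scaled step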
def n_even_fcn(f, o, w, l):
"""Even case."""
    # Variables :
    k = np.arange(0, int(l) + 1) + 0.5
    b = np.zeros(k.shape)
# # Run Loop :
for s in range(0, len(f), 2):
m = (o[s + 1] - o[s]) / (f[s + 1] - f[s])
b1 = o[s] - m * f[s]
b = b + (m / (4 * np.pi * np.pi) * (np.cos(2 * np.pi * k * f[
s + 1]) - np.cos(2 * np.pi * k * f[s])) / (
k * k)) * abs(np.square(w[round((s + 1) / 2)]))
b = b + (f[s + 1] * (m * f[s + 1] + b1) * np.sinc(2 * k * f[
s + 1]) - f[s] * (m * f[s] + b1) * np.sinc(2 * k * f[s])) * abs(
np.square(w[round((s + 1) / 2)]))
a = (np.square(w[0])) * 4 * b
h = 0.5 * np.concatenate((np.flipud(a), a))
return h
def NevenFcn(F, M, W, L): # N is even
    # Variables :
    k = np.arange(0, int(L) + 1) + 0.5
    b = np.zeros(k.shape)
# # Run Loop :
for s in range(0, len(F), 2):
m = (M[s + 1] - M[s]) / (F[s + 1] - F[s])
b1 = M[s] - m * F[s]
b = b + (m / (4 * np.pi * np.pi) * (np.cos(2 * np.pi * k * F[
s + 1]) - np.cos(2 * np.pi * k * F[s])) / (
k * k)) * abs(np.square(W[round((s + 1) / 2)]))
b = b + (F[s + 1] * (m * F[s + 1] + b1) * np.sinc(2 * k * F[
s + 1]) - F[s] * (m * F[s] + b1) * np.sinc(2 * k * F[s])) * abs(
np.square(W[round((s + 1) / 2)]))
a = (np.square(W[0])) * 4 * b
h = 0.5 * np.concatenate((np.flipud(a), a))
return h
####################################################################
# - Filter the signal :
####################################################################
def morlet(x, Fs, f, wavelet_width=7):
    """Morlet-wavelet amplitude of each epoch (column) of x at frequency f."""
    dt = 1 / Fs
    sf = f / wavelet_width       # spectral bandwidth of the wavelet
    st = 1 / (2 * np.pi * sf)    # temporal standard deviation
    N, nepoch = x.shape
    t = np.arange(-3.5 * st, 3.5 * st, dt)
    # Gaussian envelope times a complex exponential at the target frequency
    A = 1 / (st * np.sqrt(np.pi)) ** (1 / 2)
    m = A * np.exp(-np.square(t) / (2 * st ** 2)) * np.exp(1j * 2 * np.pi * f * t)
xMorlet = np.zeros((N, nepoch))
for k in range(0, nepoch):
y = 2*np.abs(np.convolve(x[:, k], m))/Fs
xMorlet[:, k] = y[int(np.ceil(len(m)/2))-1:int(len(y)-np.floor(
len(m)/2))]
return xMorlet
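# Quick smoke test of morlet() on a synthetic 10 Hz sine (assumes the
# function above is in scope; samples on axis 0, one epoch on axis 1):
import numpy as np

Fs = 256
t = np.arange(0, 2, 1 / Fs)
x = np.sin(2 * np.pi * 10 * t)[:, None]
amp = morlet(x, Fs, f=10)
# amp has the same shape as x and tracks the 10 Hz oscillation's amplitude.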
# euclidean_score.py, from the Python-Machine-Learning-Cookbook project (PacktPublishing)
def euclidean_score(dataset, user1, user2):
if user1 not in dataset:
raise TypeError('User ' + user1 + ' not present in the dataset')
if user2 not in dataset:
raise TypeError('User ' + user2 + ' not present in the dataset')
# Movies rated by both user1 and user2
rated_by_both = {}
for item in dataset[user1]:
if item in dataset[user2]:
rated_by_both[item] = 1
# If there are no common movies, the score is 0
if len(rated_by_both) == 0:
return 0
squared_differences = []
for item in dataset[user1]:
if item in dataset[user2]:
squared_differences.append(np.square(dataset[user1][item] - dataset[user2][item]))
return 1 / (1 + np.sqrt(np.sum(squared_differences)))
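# Example call with a tiny in-memory dataset (illustrative ratings):
ratings = {
    'Alice': {'Movie A': 4.0, 'Movie B': 3.0},
    'Bob': {'Movie A': 5.0, 'Movie B': 1.0},
}
# Squared differences: 1 + 4 = 5, so the score is 1 / (1 + sqrt(5)) ~ 0.309
print(euclidean_score(ratings, 'Alice', 'Bob'))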
def agent_reward(self, agent, world):
# Agents are negatively rewarded if caught by adversaries
rew = 0
shape = False
adversaries = self.adversaries(world)
if shape: # reward can optionally be shaped (increased reward for increased distance from adversary)
for adv in adversaries:
rew += 0.1 * np.sqrt(np.sum(np.square(agent.state.p_pos - adv.state.p_pos)))
if agent.collide:
for a in adversaries:
if self.is_collision(a, agent):
rew -= 10
# agents are penalized for exiting the screen, so that they can be caught by the adversaries
def bound(x):
if x < 0.9:
return 0
if x < 1.0:
return (x - 0.9) * 10
return min(np.exp(2 * x - 2), 10)
for p in range(world.dim_p):
x = abs(agent.state.p_pos[p])
rew -= bound(x)
return rew
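# Sanity check of the boundary penalty (values follow directly from the
# piecewise definition of bound above):
#   bound(0.5)  -> 0.0               inside the safe zone
#   bound(0.95) -> 0.5               linear ramp between 0.9 and 1.0
#   bound(1.2)  -> exp(0.4) ~ 1.49   exponential growth, capped at 10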
def benchmark_data(self, agent, world):
rew = 0
collisions = 0
occupied_landmarks = 0
min_dists = 0
for l in world.landmarks:
dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
min_dists += min(dists)
rew -= min(dists)
if min(dists) < 0.1:
occupied_landmarks += 1
if agent.collide:
for a in world.agents:
if self.is_collision(a, agent):
rew -= 1
collisions += 1
return (rew, collisions, min_dists, occupied_landmarks)
def get_collision_force(self, entity_a, entity_b):
if (not entity_a.collide) or (not entity_b.collide):
return [None, None] # not a collider
if (entity_a is entity_b):
return [None, None] # don't collide against itself
# compute actual distance between entities
delta_pos = entity_a.state.p_pos - entity_b.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
# minimum allowable distance
dist_min = entity_a.size + entity_b.size
# softmax penetration
k = self.contact_margin
penetration = np.logaddexp(0, -(dist - dist_min)/k)*k
force = self.contact_force * delta_pos / dist * penetration
force_a = +force if entity_a.movable else None
force_b = -force if entity_b.movable else None
return [force_a, force_b]
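# The softplus form k*log(1 + exp(-(dist - dist_min)/k)) used above smoothly
# approximates max(0, dist_min - dist); a numeric check with made-up sizes:
import numpy as np

k = 0.25
for dist, dist_min in [(1.0, 0.5), (0.4, 0.5)]:
    pen = np.logaddexp(0, -(dist - dist_min) / k) * k
    print(dist, round(pen, 3))  # ~0.032 when separated, ~0.228 when overlapping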
def squared_loss(data, predictions):
""" Calculates squared loss
Parameters
----------
data : np.ndarray
Univariate data
predictions : np.ndarray
Univariate predictions
    Returns
    ----------
    np.ndarray
        The element-wise squared loss
    """
return np.square(data-predictions)
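# Example: element-wise loss for a two-point series (illustrative values):
import numpy as np

print(squared_loss(np.array([1.0, 2.0]), np.array([0.5, 2.5])))  # [0.25 0.25]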
def step(self, action):
self.forward_dynamics(action)
comvel = self.get_body_comvel("torso")
forward_reward = comvel[0]
lb, ub = self.action_bounds
scaling = (ub - lb) * 0.5
ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
    contact_cost = 0.5 * 1e-3 * np.sum(
        np.square(np.clip(self.model.data.cfrc_ext, -1, 1)))
survive_reward = 0.05
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self._state
notdone = np.isfinite(state).all() \
and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
ob = self.get_current_obs()
return Step(ob, float(reward), done)
def step(self, action):
self.forward_dynamics(action)
next_obs = self.get_current_obs()
alive_bonus = self.alive_bonus
data = self.model.data
comvel = self.get_body_comvel("torso")
lin_vel_reward = comvel[0]
lb, ub = self.action_bounds
scaling = (ub - lb) * 0.5
ctrl_cost = .5 * self.ctrl_cost_coeff * np.sum(
np.square(action / scaling))
impact_cost = .5 * self.impact_cost_coeff * np.sum(
np.square(np.clip(data.cfrc_ext, -1, 1)))
vel_deviation_cost = 0.5 * self.vel_deviation_cost_coeff * np.sum(
np.square(comvel[1:]))
reward = lin_vel_reward + alive_bonus - ctrl_cost - \
impact_cost - vel_deviation_cost
done = data.qpos[2] < 0.8 or data.qpos[2] > 2.0
return Step(next_obs, reward, done)
def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
    """
    Compute the KL divergence between two multivariate Gaussian distributions
    with diagonal covariance matrices.
    """
    old_means = old_dist_info_vars["mean"]
    old_log_stds = old_dist_info_vars["log_std"]
    new_means = new_dist_info_vars["mean"]
    new_log_stds = new_dist_info_vars["log_std"]
    old_std = TT.exp(old_log_stds)
    new_std = TT.exp(new_log_stds)
# means: (N*A)
# std: (N*A)
# formula:
# { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
# ln(\sigma_2/\sigma_1)
numerator = TT.square(old_means - new_means) + \
TT.square(old_std) - TT.square(new_std)
denominator = 2 * TT.square(new_std) + 1e-8
return TT.sum(
numerator / denominator + new_log_stds - old_log_stds, axis=-1)
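# Numeric sanity check of the same diagonal-Gaussian KL in plain numpy
# (standalone; the example values are illustrative, not from the class):
import numpy as np

mu_old, mu_new = np.array([0.0]), np.array([1.0])
log_std_old, log_std_new = np.log([1.0]), np.log([2.0])
std_old, std_new = np.exp(log_std_old), np.exp(log_std_new)
kl = np.sum((np.square(mu_old - mu_new) + np.square(std_old) - np.square(std_new))
            / (2 * np.square(std_new) + 1e-8) + log_std_new - log_std_old)
print(kl)  # ~0.443, matching log(2) + (1 + 1 - 4)/8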