import numpy as np
from mpi4py import MPI

def mpi_moments(x, axis=0):
    x = np.asarray(x, dtype='float64')
    newshape = list(x.shape)
    newshape.pop(axis)
    n = np.prod(newshape, dtype=int)
    totalvec = np.zeros(n * 2 + 1, 'float64')
    addvec = np.concatenate([x.sum(axis=axis).ravel(),
                             np.square(x).sum(axis=axis).ravel(),
                             np.array([x.shape[axis]], dtype='float64')])
    MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
    sum = totalvec[:n]
    sumsq = totalvec[n:2 * n]
    count = totalvec[2 * n]
    if count == 0:
        mean = np.empty(newshape); mean[:] = np.nan
        std = np.empty(newshape); std[:] = np.nan
    else:
        mean = sum / count
        std = np.sqrt(np.maximum(sumsq / count - np.square(mean), 0))
    return mean, std, count
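The Allreduce above only sums per-rank (sum, sum-of-squares, count) vectors; mean and std then follow from the first two moments. A minimal single-process sketch of the same pooling, simulating two ranks with plain numpy (no MPI required):

import numpy as np

# Simulate the per-rank partial sums that Allreduce would combine.
rank_data = [np.random.randn(100, 3), np.random.randn(50, 3)]
total_sum = sum(d.sum(axis=0) for d in rank_data)
total_sumsq = sum(np.square(d).sum(axis=0) for d in rank_data)
total_count = sum(d.shape[0] for d in rank_data)

mean = total_sum / total_count
std = np.sqrt(np.maximum(total_sumsq / total_count - np.square(mean), 0))

# Should match the moments of the concatenated data.
all_data = np.concatenate(rank_data, axis=0)
assert np.allclose(mean, all_data.mean(axis=0))
assert np.allclose(std, all_data.std(axis=0))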
import tensorflow as tf

def batchnorm(x, name, phase, updates, gamma=0.96):
    # `mean` and `switch` are helpers defined elsewhere in this module.
    k = x.get_shape()[1]
    runningmean = tf.get_variable(name + "/mean", shape=[1, k],
                                  initializer=tf.constant_initializer(0.0), trainable=False)
    runningvar = tf.get_variable(name + "/var", shape=[1, k],
                                 initializer=tf.constant_initializer(1e-4), trainable=False)
    testy = (x - runningmean) / tf.sqrt(runningvar)
    mean_ = mean(x, axis=0, keepdims=True)
    var_ = mean(tf.square(x), axis=0, keepdims=True)  # note: raw second moment E[x^2], not the central variance
    std = tf.sqrt(var_)
    trainy = (x - mean_) / std
    updates.extend([
        tf.assign(runningmean, runningmean * gamma + mean_ * (1 - gamma)),
        tf.assign(runningvar, runningvar * gamma + var_ * (1 - gamma))
    ])
    y = switch(phase, trainy, testy)
    out = y * tf.get_variable(name + "/scaling", shape=[1, k],
                              initializer=tf.constant_initializer(1.0), trainable=True) \
        + tf.get_variable(name + "/translation", shape=[1, k],
                          initializer=tf.constant_initializer(0.0), trainable=True)
    return out
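For clarity, a framework-free numpy sketch of the same train/test switching; the dict-based state and the `batchnorm_np` name are illustrative, not from the source:

import numpy as np

def batchnorm_np(x, state, train, gamma=0.96, eps=0.0):
    """Minimal numpy analogue: normalize by batch stats when training,
    by the exponential running averages otherwise."""
    if train:
        m = x.mean(axis=0, keepdims=True)
        v = np.square(x).mean(axis=0, keepdims=True)  # second moment, as above
        state['mean'] = gamma * state['mean'] + (1 - gamma) * m
        state['var'] = gamma * state['var'] + (1 - gamma) * v
        return (x - m) / np.sqrt(v + eps)
    return (x - state['mean']) / np.sqrt(state['var'] + eps)

state = {'mean': np.zeros((1, 4)), 'var': np.full((1, 4), 1e-4)}
x = np.random.randn(32, 4)
y_train = batchnorm_np(x, state, train=True)
y_test = batchnorm_np(x, state, train=False)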
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
    """Reference: https://en.wikipedia.org/wiki/Huber_loss"""
    return tf.where(
        tf.abs(x) < delta,
        tf.square(x) * 0.5,
        delta * (tf.abs(x) - 0.5 * delta)
    )
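As a quick sanity check against the Wikipedia definition, the same piecewise form in plain numpy (`huber_np` is an illustrative name, not part of any library):

import numpy as np

def huber_np(x, delta=1.0):
    # Quadratic inside |x| < delta, linear with matched value and slope outside.
    return np.where(np.abs(x) < delta,
                    0.5 * np.square(x),
                    delta * (np.abs(x) - 0.5 * delta))

x = np.linspace(-3, 3, 7)
print(huber_np(x))  # [2.5 1.5 0.5 0.  0.5 1.5 2.5] for delta=1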
import numpy as np
import scipy.stats

def get_normalized_dispersion(mat_mean, mat_var, nbins=20):
    mat_disp = (mat_var - mat_mean) / np.square(mat_mean)
    quantiles = np.percentile(mat_mean, np.arange(0, 100, 100 / nbins))
    quantiles = np.append(quantiles, mat_mean.max())
    # merge bins with no difference in value
    quantiles = np.unique(quantiles)
    if len(quantiles) <= 1:
        # pathological case: the means are all identical. just return raw dispersion.
        return mat_disp
    # calc median dispersion per bin
    (disp_meds, _, disp_bins) = scipy.stats.binned_statistic(
        mat_mean, mat_disp, statistic='median', bins=quantiles)
    # calc median absolute deviation of dispersion per bin
    disp_meds_arr = disp_meds[disp_bins - 1]  # 0th bin is empty since our quantiles start from 0
    disp_abs_dev = abs(mat_disp - disp_meds_arr)
    (disp_mads, _, disp_bins) = scipy.stats.binned_statistic(
        mat_mean, disp_abs_dev, statistic='median', bins=quantiles)
    # calculate normalized dispersion
    disp_mads_arr = disp_mads[disp_bins - 1]
    disp_norm = (mat_disp - disp_meds_arr) / disp_mads_arr
    return disp_norm
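A hypothetical usage sketch, assuming the function above is in scope; the synthetic cells-by-genes counts are made up purely to show the expected shapes:

import numpy as np

counts = np.random.poisson(lam=5.0, size=(1000, 200)).astype(float)  # cells x genes
mat_mean = counts.mean(axis=0)
mat_var = counts.var(axis=0)
disp_norm = get_normalized_dispersion(mat_mean, mat_var)
print(disp_norm.shape)  # one normalized dispersion per gene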
def temporalize(x, smoothing_steps, distance='L1'):
    """
    :param x: An (n_samples, n_dims) dataset
    :param smoothing_steps: Size of the lookahead buffer of candidate next samples
    :param distance: 'L1' or 'L2'
    :return: A (n_samples, ) array of indexes that can be used to shuffle the input for temporal smoothness.
    """
    x_flat = x.reshape(x.shape[0], -1)
    index_buffer = np.arange(1, smoothing_steps + 1)
    next_sample_buffer = x_flat[1:smoothing_steps + 1].copy()
    # Technically, we could do this without a next_sample_buffer (and only an index_buffer), but it would require
    # repeatedly accessing a bunch of really scattered memory, so we do it this way.
    shuffling_indices = np.zeros(len(x), dtype=int)
    # `bad_value` and `ProgressIndicator` are helpers from the surrounding module.
    rectifier = np.abs if distance == 'L1' else np.square if distance == 'L2' else bad_value(distance)
    p = ProgressIndicator(len(x), name='Temporalize')
    current_index = 0
    for i in range(len(x)):
        shuffling_indices[i] = current_index
        closest = np.argmin(rectifier(x_flat[current_index] - next_sample_buffer).sum(axis=1))
        current_index = index_buffer[closest]
        not_done_yet = i + smoothing_steps + 1 < len(x)
        next_index = i + smoothing_steps + 1
        next_sample_buffer[closest] = x_flat[next_index] if not_done_yet else float('inf')
        index_buffer[closest] = next_index if not_done_yet else -1
        p()
    return shuffling_indices
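The loop above is a greedy nearest-neighbour walk over a sliding candidate buffer. A stripped-down quadratic sketch of the same idea without the buffer bookkeeping (`greedy_order` is illustrative only):

import numpy as np

def greedy_order(x):
    # Repeatedly jump to the closest unvisited sample (squared L2 distance).
    x = x.reshape(len(x), -1)
    remaining = list(range(1, len(x)))
    order = [0]
    while remaining:
        dists = [np.square(x[order[-1]] - x[j]).sum() for j in remaining]
        order.append(remaining.pop(int(np.argmin(dists))))
    return np.array(order)

x = np.random.randn(20, 3)
print(greedy_order(x))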
import math
import numpy
from scipy import stats

def calculate_features_for_VAD(sound_frames, frequencies_axis, spectrogram):
    features = numpy.empty((spectrogram.shape[0], 3))
    # smoothed_spectrogram, smoothed_frequencies_axis = smooth_spectrogram(spectrogram, frequencies_axis, 24)
    for time_ind in range(spectrogram.shape[0]):
        mean_spectrum = spectrogram[time_ind].mean()
        if mean_spectrum > 0.0:
            # spectral flatness: ratio of geometric to arithmetic mean, in dB
            sfm = -10.0 * math.log10(stats.gmean(spectrogram[time_ind]) / mean_spectrum)
        else:
            sfm = 0.0
        # max_freq = smoothed_frequencies_axis[smoothed_spectrogram[time_ind].argmax()]
        max_freq = frequencies_axis[spectrogram[time_ind].argmax()]
        features[time_ind][0] = numpy.square(sound_frames[time_ind]).mean()  # short-term energy
        features[time_ind][1] = sfm
        features[time_ind][2] = max_freq
    # medfilt_order = 3
    # for feature_ind in range(features.shape[0]):
    #     features[feature_ind] = signal.medfilt(features[feature_ind], medfilt_order)
    return features
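The flatness term is the spectral flatness measure, -10*log10(geometric mean / arithmetic mean) per frame. A hypothetical call on synthetic data, assuming the function above is in scope; the shapes are made up for illustration:

import numpy as np

spec = np.abs(np.random.randn(100, 129)) + 1e-6   # frames x frequency bins, strictly positive
freqs = np.linspace(0, 8000, 129)
frames = np.random.randn(100, 400)                # matching time-domain frames
feats = calculate_features_for_VAD(frames, freqs, spec)
print(feats.shape)  # (100, 3): energy, flatness, dominant frequency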
def score(self, X_test, y_test):
    """
    returns the score on the test set
    <PARAMETERS>
    X : input features of the testing set (numpy array, list)
    y : target values of the testing set (numpy array, list)
    <return type>
    returns the R^2 score (float)
    """
    y_test = np.reshape(y_test, (y_test.shape[0], 1))
    if self.normalize_ == True:
        X_test = self.normalize(X_test)
    X_test = self.add_bias(X_test)
    self.predict(X_test)
    u = np.square(y_test - self.predictions).sum()
    v = np.square(y_test - y_test.mean()).sum()
    # stored under a trailing-underscore name so it doesn't shadow this method
    self.score_ = 1 - u / v
    return self.score_
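The returned value is the coefficient of determination, R^2 = 1 - sum((y - y_hat)^2) / sum((y - y_bar)^2). A self-contained check of that formula on toy numbers:

import numpy as np

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])
u = np.square(y_true - y_pred).sum()        # residual sum of squares
v = np.square(y_true - y_true.mean()).sum() # total sum of squares
print(1 - u / v)  # ~0.9486, matching sklearn's r2_score on the same inputs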
def get_std(self, num_items, vars, expectation):
    num_pairs = 0
    std_sum = 0.0
    # If using self-distance, compute the std between the top and bottom halves of each item
    if self.use_self_distance:
        for i in range(num_items):
            var_half_1, var_half_2 = torch.chunk(vars[i], 2, dim=2)
            std_sum += np.square(self.as_np(self.distance(var_half_1, var_half_2)) - expectation)
        return np.sqrt(std_sum / num_items)
    # Otherwise compute the std over all pairs of items
    for i in range(num_items - 1):
        for j in range(i + 1, num_items):
            num_pairs += 1
            std_sum += np.square(self.as_np(self.distance(vars[i], vars[j])) - expectation)
    return np.sqrt(std_sum / num_pairs)
import numpy
from scipy import signal

def check_onset(audio):
    # Determine if there is an 'early' onset
    b_lpf, a_lpf = signal.butter(1, 200 / 44100.0, 'low')
    sim = numpy.square(audio)
    audio_lpf = signal.filtfilt(b_lpf, a_lpf, sim)
    thres = 0.015
    invert = 0
    found = 0
    for i in range(len(audio)):
        if (audio_lpf[i] > thres) and not found:
            if i < 4000:
                # `fil` is the filename, expected from the enclosing scope
                print("Detected onset for", fil, "at time", i / 44.1, "msecs")
                found = 1
                invert = 1
    return invert
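Squaring followed by a first-order low-pass is an energy-envelope detector; the onset is the first time the envelope crosses the threshold. A self-contained sketch (the 440 Hz tone and 50 ms onset are made up for illustration):

import numpy as np
from scipy import signal

fs = 44100.0
t = np.arange(int(0.1 * fs)) / fs
audio = np.sin(2 * np.pi * 440 * t) * (t > 0.05)  # tone starting at 50 ms
b, a = signal.butter(1, 200 / fs, 'low')
envelope = signal.filtfilt(b, a, np.square(audio))
onset_idx = np.argmax(envelope > 0.015)           # first threshold crossing
print(onset_idx / fs * 1000, "ms")                # roughly 50 ms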
def process(self, wave):
    wave.check_mono()
    if wave.sample_rate != self.sr:
        raise Exception("Wrong sample rate")
    n = int(np.ceil(2 * wave.num_frames / float(self.w_len)))
    m = (n + 1) * self.w_len // 2
    swindow = self.make_signal_window(n)
    # integer (//) division so the slice indices stay ints under Python 3
    win_ratios = [self.window / swindow[t * self.w_len // 2:
                                        t * self.w_len // 2 + self.w_len]
                  for t in range(n)]
    wave = wave.zero_pad(0, int(m - wave.num_frames))
    wave = audio.Wave(signal.hilbert(wave), wave.sample_rate)
    result = np.zeros((self.n_bins, n))
    for b in range(self.n_bins):
        w = self.widths[b]
        wc = 1 / np.square(w + 1)
        filter = self.filters[b]
        band = fftfilt(filter, wave.zero_pad(0, int(2 * w))[:, 0])
        band = band[int(w):int(w + m), np.newaxis]
        for t in range(n):
            frame = band[t * self.w_len // 2:
                         t * self.w_len // 2 + self.w_len, :] * win_ratios[t]
            result[b, t] = wc * np.real(np.conj(np.dot(frame.conj().T, frame)))
    return audio.Spectrogram(result, self.sr, self.w_len, self.w_len // 2)
def _step(self, a):
    pos_before = mass_center(self.model)
    self.do_simulation(a, self.frame_skip)
    pos_after = mass_center(self.model)
    pos_after_standup = self.model.data.qpos[2][0]
    down = bool((self.model.data.qpos[2] < 1.0) or (self.model.data.qpos[2] > 2.0))
    alive_bonus = 5.0 if not down else 1.0
    data = self.model.data
    uph_cost = (pos_after_standup - 0) / self.model.opt.timestep
    lin_vel_cost = 0.25 * (pos_after - pos_before) / self.model.opt.timestep
    quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
    quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
    quad_impact_cost = min(quad_impact_cost, 10)
    reward = lin_vel_cost + uph_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus
    qpos = self.model.data.qpos
    done = bool(False)
    return self._get_obs(), reward, done, dict(reward_linup=uph_cost, reward_quadctrl=-quad_ctrl_cost,
                                               reward_impact=-quad_impact_cost, reward_alive=alive_bonus)
def sandstroke_non_linear(self, xys, grains=10, left=True):
    # arctan2, column_stack, cos, sin, sqrt, square come from numpy;
    # random is numpy.random.random (module-level imports in the source).
    pix = self.pix
    rectangle = self.ctx.rectangle
    fill = self.ctx.fill
    dx = xys[:, 2] - xys[:, 0]
    dy = xys[:, 3] - xys[:, 1]
    aa = arctan2(dy, dx)
    directions = column_stack([cos(aa), sin(aa)])
    dd = sqrt(square(dx) + square(dy))
    for i, d in enumerate(dd):
        rnd = sqrt(random((grains, 1)))
        if left:
            rnd = 1.0 - rnd
        for x, y in xys[i, :2] + directions[i, :] * rnd * d:
            rectangle(x, y, pix, pix)
    fill()
def sandstroke(self, xys, grains=10):
    pix = self.pix
    rectangle = self.ctx.rectangle
    fill = self.ctx.fill
    dx = xys[:, 2] - xys[:, 0]
    dy = xys[:, 3] - xys[:, 1]
    aa = arctan2(dy, dx)
    directions = column_stack([cos(aa), sin(aa)])
    dd = sqrt(square(dx) + square(dy))
    for i, d in enumerate(dd):
        for x, y in xys[i, :2] + directions[i, :] * random((grains, 1)) * d:
            rectangle(x, y, pix, pix)
    fill()
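Both strokes scatter grains along each segment; the non-linear variant biases grain positions toward one endpoint by taking sqrt of a uniform draw. A quick numeric look at that bias:

import numpy as np

u = np.random.random((10000, 1))
uniform = u          # sandstroke: grain density is constant along the segment
biased = np.sqrt(u)  # sandstroke_non_linear: density grows toward the far end
print(uniform.mean(), biased.mean())  # ~0.5 vs ~0.667 (E[sqrt(U)] = 2/3)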
def __init__(self, channel, timeseries=None, stats=None, n=None):
    """If timeseries is None, then we are manually initializing with custom
    stat values. Otherwise, calculate stat values from the timeseries.
    Input is assumed to be a GWpy timeseries."""
    self.channel = channel
    if timeseries is None:
        self.n = n
        self.stats = stats
    else:
        self.n = 1
        # wrap the end of the second onto the start of that same second.
        # obviously this is only okay with quasi-periodic signals!
        # DUOTONE_START and DUOTONE_END are module-level sample-index constants.
        zero_crossing = np.concatenate((timeseries.value[DUOTONE_START:],
                                        timeseries.value[:DUOTONE_END]))
        self.stats = {
            "sum": zero_crossing,
            "sum_sq": np.square(zero_crossing),
            "mean": zero_crossing,
            "sigma": zero_crossing * 0.,  # i.e. zeros
            "max": zero_crossing,
            "min": zero_crossing
        }
def step(self, action):
    self.forward_dynamics(action)
    next_obs = self.get_current_obs()
    lb, ub = self.action_bounds
    scaling = (ub - lb) * 0.5
    ctrl_cost = 0.5 * self.ctrl_cost_coeff * np.sum(
        np.square(action / scaling))
    forward_reward = np.linalg.norm(self.get_body_comvel("torso"))  # swimmer has no problem of jumping reward
    reward = forward_reward - ctrl_cost
    done = False
    if self.sparse_rew:
        if abs(self.get_body_com("torso")[0]) > 100.0:
            reward = 1.0
            done = True
        else:
            reward = 0.
    com = np.concatenate([self.get_body_com("torso").flat]).reshape(-1)
    ori = self.get_ori()
    return Step(next_obs, reward, done, com=com, ori=ori)
def step(self, action):
    self.forward_dynamics(action)
    comvel = self.get_body_comvel("torso")
    forward_reward = comvel[0]
    lb, ub = self.action_bounds
    scaling = (ub - lb) * 0.5
    ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
    contact_cost = 0.5 * 1e-3 * np.sum(
        np.square(np.clip(self.model.data.cfrc_ext, -1, 1)))  # stray trailing comma removed: it made this a tuple
    survive_reward = 0.05
    reward = forward_reward - ctrl_cost - contact_cost + survive_reward
    state = self._state
    notdone = np.isfinite(state).all() \
        and state[2] >= 0.2 and state[2] <= 1.0
    done = not notdone
    ob = self.get_current_obs()
    return Step(ob, float(reward), done)
def distance_function(values, medians):
    """This function calculates the distance metric.
    N.B. Only uses the non-NaN values.

    dist = sum((s - m)^2)

    s is the vector of sample values
    m is the vector of probe medians

    Args:
        values (numpy array of floats)
        medians (numpy array of floats)
    Returns:
        dist (float)
    """
    non_nan_idx = ~np.isnan(values)
    # np.sum counts the True entries; np.size would count all entries and never trip
    assert np.sum(non_nan_idx) != 0, "All values in this sample are NaN!"
    non_nan_values = values[non_nan_idx]
    non_nan_medians = medians[non_nan_idx]
    dist = sum(np.square(non_nan_values - non_nan_medians))
    return dist
# tested #
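A toy check with a NaN present, assuming the function above is in scope:

import numpy as np

values = np.array([1.0, np.nan, 3.0, 5.0])
medians = np.array([1.5, 2.0, 2.5, 4.0])
# NaN entry is skipped: (1-1.5)^2 + (3-2.5)^2 + (5-4)^2 = 1.5
print(distance_function(values, medians))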
def step(self, action):
    self.forward_dynamics(action)
    next_obs = self.get_current_obs()
    alive_bonus = self.alive_bonus
    data = self.model.data
    comvel = self.get_body_comvel("torso")
    lin_vel_reward = comvel[0]
    lb, ub = self.action_bounds
    scaling = (ub - lb) * 0.5
    ctrl_cost = .5 * self.ctrl_cost_coeff * np.sum(
        np.square(action / scaling))
    impact_cost = .5 * self.impact_cost_coeff * np.sum(
        np.square(np.clip(data.cfrc_ext, -1, 1)))
    vel_deviation_cost = 0.5 * self.vel_deviation_cost_coeff * np.sum(
        np.square(comvel[1:]))
    reward = lin_vel_reward + alive_bonus - ctrl_cost - \
        impact_cost - vel_deviation_cost
    done = data.qpos[2] < 0.8 or data.qpos[2] > 2.0
    return Step(next_obs, reward, done)
def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
    """
    Compute the KL divergence of two multivariate Gaussian distributions with
    diagonal covariance matrices (symbolic, Theano).
    """
    old_means = old_dist_info_vars["mean"]
    old_log_stds = old_dist_info_vars["log_std"]
    new_means = new_dist_info_vars["mean"]
    new_log_stds = new_dist_info_vars["log_std"]
    old_std = TT.exp(old_log_stds)
    new_std = TT.exp(new_log_stds)
    # means: (N*A)
    # std: (N*A)
    # formula:
    # { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
    # ln(\sigma_2/\sigma_1)
    numerator = TT.square(old_means - new_means) + \
        TT.square(old_std) - TT.square(new_std)
    denominator = 2 * TT.square(new_std) + 1e-8
    return TT.sum(
        numerator / denominator + new_log_stds - old_log_stds, axis=-1)
def kl(self, old_dist_info, new_dist_info):
    """
    Compute the KL divergence of two multivariate Gaussian distributions with
    diagonal covariance matrices (numpy).
    """
    old_means = old_dist_info["mean"]
    old_log_stds = old_dist_info["log_std"]
    new_means = new_dist_info["mean"]
    new_log_stds = new_dist_info["log_std"]
    old_std = np.exp(old_log_stds)
    new_std = np.exp(new_log_stds)
    # means: (N*A)
    # std: (N*A)
    # formula:
    # { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
    # ln(\sigma_2/\sigma_1)
    numerator = np.square(old_means - new_means) + \
        np.square(old_std) - np.square(new_std)
    denominator = 2 * np.square(new_std) + 1e-8
    return np.sum(
        numerator / denominator + new_log_stds - old_log_stds, axis=-1)
    # more lossy version:
    # return TT.sum(
    #     numerator / denominator + TT.log(new_std) - TT.log(old_std), axis=-1)
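As a numeric sanity check, the diagonal closed form above should agree with the general Gaussian KL, KL(N0||N1) = 1/2 [tr(S1^-1 S0) + (m1-m0)^T S1^-1 (m1-m0) - k + ln(det S1 / det S0)], when the covariances are diagonal. A sketch (helper names are illustrative):

import numpy as np

def kl_diag(mu0, log_std0, mu1, log_std1):
    # Same per-dimension formula as in the snippets above.
    std0, std1 = np.exp(log_std0), np.exp(log_std1)
    num = np.square(mu0 - mu1) + np.square(std0) - np.square(std1)
    den = 2 * np.square(std1) + 1e-8
    return np.sum(num / den + log_std1 - log_std0, axis=-1)

def kl_full(mu0, cov0, mu1, cov1):
    # General closed form for multivariate Gaussians.
    k = len(mu0)
    inv1 = np.linalg.inv(cov1)
    d = mu1 - mu0
    return 0.5 * (np.trace(inv1 @ cov0) + d @ inv1 @ d - k
                  + np.log(np.linalg.det(cov1) / np.linalg.det(cov0)))

mu0, mu1 = np.array([0.0, 1.0]), np.array([0.5, -0.5])
ls0, ls1 = np.array([0.1, -0.2]), np.array([0.3, 0.0])
print(kl_diag(mu0, ls0, mu1, ls1))
print(kl_full(mu0, np.diag(np.exp(2 * ls0)), mu1, np.diag(np.exp(2 * ls1))))  # same value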
def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
    """
    Compute the KL divergence of two multivariate Gaussian distributions with
    diagonal covariance matrices (symbolic, TensorFlow).
    """
    old_means = old_dist_info_vars["mean"]
    old_log_stds = old_dist_info_vars["log_std"]
    new_means = new_dist_info_vars["mean"]
    new_log_stds = new_dist_info_vars["log_std"]
    old_std = tf.exp(old_log_stds)
    new_std = tf.exp(new_log_stds)
    # means: (N*A)
    # std: (N*A)
    # formula:
    # { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
    # ln(\sigma_2/\sigma_1)
    numerator = tf.square(old_means - new_means) + \
        tf.square(old_std) - tf.square(new_std)
    denominator = 2 * tf.square(new_std) + 1e-8
    return tf.reduce_sum(
        numerator / denominator + new_log_stds - old_log_stds, reduction_indices=-1)
def test_pmean(self):
    X = np.random.randn(10, 2, 3)
    pm = PosteriorMean()
    pm.addSample(X[0, :], average=False)
    pm.addSample(X[1, :], average=False)
    pm.addSample(X[2, :], average=True)
    pm.addSample(X[3, :], average=True)
    self.assertTrue(np.allclose(pm.sample_avg, X[2:4, :].mean(0)))
    self.assertTrue(pm.n == 2)
    for i in range(4, X.shape[0]):
        pm.addSample(X[i, :], average=True)
    self.assertTrue(np.allclose(pm.sample_avg, X[2:, :].mean(0)))
    self.assertTrue(pm.n == 8)
    Xvar = pm.getVar()
    Xsub = X[2:, :]
    Xvar_true = np.square(Xsub - Xsub.mean(0)).sum(0) / (Xsub.shape[0] - 1)
    self.assertTrue(np.allclose(Xvar, Xvar_true))
def update(self, x):
    # Chan et al.'s parallel algorithm for merging batch mean/variance
    # into the running statistics.
    batch_mean = np.mean(x, axis=0)
    batch_var = np.var(x, axis=0)
    batch_count = x.shape[0]

    delta = batch_mean - self.mean
    tot_count = self.count + batch_count

    new_mean = self.mean + delta * batch_count / tot_count
    m_a = self.var * self.count
    m_b = batch_var * batch_count
    M2 = m_a + m_b + np.square(delta) * self.count * batch_count / tot_count
    new_var = M2 / tot_count

    self.mean = new_mean
    self.var = new_var
    self.count = tot_count
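This is Chan et al.'s parallel merge of batch statistics: folding in batches one at a time should reproduce the moments of the concatenated data. A self-contained check (the count starts at zero here for exactness, whereas the classes above seed it with a small epsilon):

import numpy as np

mean, var, count = np.zeros(3), np.zeros(3), 0.0
data = [np.random.randn(64, 3), np.random.randn(32, 3) + 2.0]

for x in data:
    batch_mean, batch_var, batch_count = x.mean(axis=0), x.var(axis=0), x.shape[0]
    delta = batch_mean - mean
    tot = count + batch_count
    mean = mean + delta * batch_count / tot
    M2 = var * count + batch_var * batch_count + np.square(delta) * count * batch_count / tot
    var = M2 / tot
    count = tot

all_x = np.concatenate(data)
print(np.allclose(mean, all_x.mean(axis=0)),
      np.allclose(var, all_x.var(axis=0)))  # True True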
def __init__(self, epsilon=1e-2, shape=()):
    self._sum = tf.get_variable(
        dtype=tf.float64,
        shape=shape,
        initializer=tf.constant_initializer(0.0),
        name="runningsum", trainable=False)
    self._sumsq = tf.get_variable(
        dtype=tf.float64,
        shape=shape,
        initializer=tf.constant_initializer(epsilon),
        name="runningsumsq", trainable=False)
    self._count = tf.get_variable(
        dtype=tf.float64,
        shape=(),
        initializer=tf.constant_initializer(epsilon),
        name="count", trainable=False)
    self.shape = shape

    self.mean = tf.to_float(self._sum / self._count)
    # clamp the variance estimate away from zero so normalization stays well-conditioned
    self.std = tf.sqrt(tf.maximum(tf.to_float(self._sumsq / self._count) - tf.square(self.mean), 1e-2))

    newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
    newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var')
    newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
    # U is a baselines-style tf_util module; U.function builds a session-backed callable.
    self.incfiltparams = U.function([newsum, newsumsq, newcount], [],
                                    updates=[tf.assign_add(self._sum, newsum),
                                             tf.assign_add(self._sumsq, newsumsq),
                                             tf.assign_add(self._count, newcount)])