import numpy as np

def mse(ypredict, ytrue):
    """Mean squared error between a prediction and the ground truth.

    >>> mse(1.0, 3.0)
    4.0
    """
    diff = ypredict - ytrue
    return np.mean(diff**2)
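A minimal usage sketch (illustrative values): since np.mean reduces over all elements, the same function handles scalars and arrays.

assert mse(1.0, 3.0) == 4.0
# ((1-3)**2 + (2-2)**2) / 2 = 2.0
assert mse(np.array([1.0, 2.0]), np.array([3.0, 2.0])) == 2.0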
def compute_score(self, gts, res):
    """
    Computes ROUGE-L score given a set of reference and candidate sentences for the dataset
    Invoked by evaluate_captions.py
    :param gts: dict : reference MS-COCO sentences with "image name" key and "tokenized sentences" as values
    :param res: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
    :returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)
    """
    assert(gts.keys() == res.keys())
    imgIds = gts.keys()
    score = []
    for id in imgIds:
        hypo = res[id]
        ref = gts[id]
        score.append(self.calc_score(hypo, ref))
        # Sanity check: each hypothesis is a single-sentence list,
        # and each reference list is non-empty.
        assert(type(hypo) is list)
        assert(len(hypo) == 1)
        assert(type(ref) is list)
        assert(len(ref) > 0)
    average_score = np.mean(np.array(score))
    return average_score, np.array(score)
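A usage sketch with made-up image IDs and captions, assuming this method sits on the usual pycocoevalcap-style Rouge scorer that provides calc_score:

gts = {'img1': ['a dog runs in the park', 'a dog is running outside']}  # references
res = {'img1': ['a dog runs in a park']}                                # one-sentence hypothesis
# scorer = Rouge()                                  # hypothetical instantiation
# avg, per_image = scorer.compute_score(gts, res)
# avg: mean ROUGE-L over all images; per_image: np.array of per-image scores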
def analytic_convolution_gaussian(mu1, covar1, mu2, covar2):
    """
    The analytic convolution of two Gaussians is simply a Gaussian whose mean is the
    sum of the two mean vectors and whose covariance is the sum of the two covariance matrices
    --- INPUT ---
    mu1      The mean of the first Gaussian
    covar1   The covariance matrix of the first Gaussian
    mu2      The mean of the second Gaussian
    covar2   The covariance matrix of the second Gaussian
    """
    muconv = mu1 + mu2
    covarconv = covar1 + covar2
    return muconv, covarconv
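A quick Monte Carlo check of this identity with made-up parameters: the sum of two independent Gaussian variables is distributed as the convolution of their densities, so its sample moments should match muconv and covarconv.

import numpy as np

rng = np.random.RandomState(0)
mu1, covar1 = np.array([0.0, 1.0]), np.diag([1.0, 2.0])
mu2, covar2 = np.array([3.0, -1.0]), np.diag([0.5, 0.5])
x = rng.multivariate_normal(mu1, covar1, size=200000)
y = rng.multivariate_normal(mu2, covar2, size=200000)
z = x + y                  # distributed as the convolution of the two Gaussians
print(np.mean(z, axis=0))  # ~ mu1 + mu2       = [3.0, 0.0]
print(np.cov(z.T))         # ~ covar1 + covar2 = diag([1.5, 2.5])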
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def smooth_colors(src, dst, src_l):
    # Blur kernel size is proportional to the inter-eye distance on the source landmarks.
    blur_amount = BLUR_FRACTION * np.linalg.norm(np.mean(src_l[LEFT_EYE_IDX], axis=0) - np.mean(src_l[RIGHT_EYE_IDX], axis=0))
    blur_amount = int(blur_amount)
    if blur_amount % 2 == 0:
        blur_amount += 1  # GaussianBlur requires an odd kernel size
    src_blur = cv2.GaussianBlur(src, (blur_amount, blur_amount), 0)
    dst_blur = cv2.GaussianBlur(dst, (blur_amount, blur_amount), 0)
    # Avoid division by (near-)zero pixels in the blurred destination.
    dst_blur += (128 * (dst_blur <= 1.0)).astype(dst_blur.dtype)
    return (np.float64(dst) * np.float64(src_blur) / np.float64(dst_blur))
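The snippet relies on module-level constants that are not shown; a plausible configuration (hypothetical values, following the common 68-point dlib landmark layout) would be:

import cv2
import numpy as np

BLUR_FRACTION = 0.6                  # assumed: fraction of the inter-eye distance used as kernel size
LEFT_EYE_IDX = list(range(42, 48))   # assumed: dlib 68-point indices for the left eye
RIGHT_EYE_IDX = list(range(36, 42))  # assumed: dlib 68-point indices for the right eye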
def get_tm_opp(pts1, pts2):
    # Transformation matrix - (Translation + Scaling + Rotation)
    # using Procrustes analysis. pts1 and pts2 are expected to be np.matrix
    # instances, so that '*' denotes matrix multiplication.
    pts1 = np.float64(pts1)
    pts2 = np.float64(pts2)
    m1 = np.mean(pts1, axis=0)
    m2 = np.mean(pts2, axis=0)
    # Removing translation
    pts1 -= m1
    pts2 -= m2
    std1 = np.std(pts1)
    std2 = np.std(pts2)
    std_r = std2 / std1
    # Removing scaling
    pts1 /= std1
    pts2 /= std2
    U, S, V = np.linalg.svd(np.transpose(pts1) * pts2)
    # Finding the rotation matrix
    R = np.transpose(U * V)
    return np.vstack([np.hstack((std_r * R,
                                 np.transpose(m2) - std_r * R * np.transpose(m1))),
                      np.matrix([0.0, 0.0, 1.0])])
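A usage sketch with made-up landmark coordinates; note that plain ndarrays should be wrapped in np.matrix first, since the function uses matrix semantics throughout.

pts_src = np.matrix([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
pts_dst = np.matrix([[2.0, 2.0], [4.0, 2.0], [2.0, 4.0]])  # scaled and translated copy
M = get_tm_opp(pts_src, pts_dst)  # 3x3 matrix mapping homogeneous pts_src onto pts_dst
# The top two rows can then be passed to cv2.warpAffine as the affine transform.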
def fit(self, x):
    s = x.shape
    # Flatten each sample to a vector and center the data.
    x = x.copy().reshape((s[0], np.prod(s[1:])))
    m = np.mean(x, axis=0)
    x -= m
    sigma = np.dot(x.T, x) / x.shape[0]
    U, S, V = linalg.svd(sigma)  # linalg here is scipy.linalg or numpy.linalg
    # ZCA whitening matrix U diag(1/sqrt(S + eps)) U^T and its inverse.
    tmp = np.dot(U, np.diag(1. / np.sqrt(S + self.regularization)))
    tmp2 = np.dot(U, np.diag(np.sqrt(S + self.regularization)))
    self.ZCA_mat = th.shared(np.dot(tmp, U.T).astype(th.config.floatX))
    self.inv_ZCA_mat = th.shared(np.dot(tmp2, U.T).astype(th.config.floatX))
    self.mean = th.shared(m.astype(th.config.floatX))
def apply(self, x):
    s = x.shape
    if isinstance(x, np.ndarray):
        return np.dot(x.reshape((s[0], np.prod(s[1:]))) - self.mean.get_value(), self.ZCA_mat.get_value()).reshape(s)
    elif isinstance(x, T.TensorVariable):
        return T.dot(x.flatten(2) - self.mean.dimshuffle('x', 0), self.ZCA_mat).reshape(s)
    else:
        raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")

def invert(self, x):
    s = x.shape
    if isinstance(x, np.ndarray):
        return (np.dot(x.reshape((s[0], np.prod(s[1:]))), self.inv_ZCA_mat.get_value()) + self.mean.get_value()).reshape(s)
    elif isinstance(x, T.TensorVariable):
        return (T.dot(x.flatten(2), self.inv_ZCA_mat) + self.mean.dimshuffle('x', 0)).reshape(s)
    else:
        raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")
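A round-trip sketch, assuming the three methods above belong to a ZCA class whose constructor sets a `regularization` attribute (the constructor itself is not shown in this excerpt):

# whitener = ZCA(regularization=1e-6)  # hypothetical constructor
# whitener.fit(train_x)                # train_x: (N, ...) numpy array
# white = whitener.apply(train_x)      # whitened data, same shape as input
# restored = whitener.invert(white)    # recovers train_x up to regularization error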
# T.nnet.relu has some issues with very large inputs, this is more stable
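The definition this comment refers to is not included in the excerpt; a minimal sketch of such a stable ReLU in Theano would be:

def relu(x):
    # Elementwise max(x, 0) via T.maximum, which behaves well for very large inputs.
    return T.maximum(x, 0)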
def softmax_loss(p_true, output_before_softmax):
    # Subtract the per-row max so exp() cannot overflow.
    output_before_softmax -= T.max(output_before_softmax, axis=1, keepdims=True)
    if p_true.ndim == 2:
        # Soft targets: p_true is a full distribution per row.
        return T.mean(T.log(T.sum(T.exp(output_before_softmax), axis=1)) - T.sum(p_true * output_before_softmax, axis=1))
    else:
        # Hard targets: p_true holds integer class indices.
        return T.mean(T.log(T.sum(T.exp(output_before_softmax), axis=1)) - output_before_softmax[T.arange(p_true.shape[0]), p_true])
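The max subtraction is the standard log-sum-exp trick: the loss depends only on logit differences, so shifting prevents exp() overflow without changing the result. A small NumPy illustration with values that would overflow naively:

import numpy as np

o = np.array([1000.0, 1001.0])                         # np.exp(o) would overflow
stable = np.log(np.sum(np.exp(o - o.max()))) + o.max()
print(stable)                                          # ~1001.313, the exact logsumexp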
def get_output_for(self, input, **kwargs):
    # Global average pooling over the spatial dimensions.
    return T.mean(input, axis=(2, 3))
def run_epoch_doc(docs, labels, tags, tm, pad_id, cf):
    # sess and get_batch_doc come from the enclosing script scope.
    batches = int(math.ceil(float(len(docs)) / cf.batch_size))
    accs = []
    for b in xrange(batches):
        d, y, m, t, num_docs = get_batch_doc(docs, labels, tags, b, cf.doc_len, cf.tag_len, cf.batch_size, pad_id)
        prob = sess.run(tm.sup_probs, {tm.doc: d, tm.label: y, tm.sup_mask: m, tm.tag: t})
        pred = np.argmax(prob, axis=1)
        # Only the first num_docs entries of the batch are real documents; the rest is padding.
        accs.extend(pred[:num_docs] == y[:num_docs])
    print "\ntest classification accuracy = %.3f" % np.mean(accs)
def print_corpus_stats(name, sents, docs, stats):
    print name + ":"
    print "\tno. of docs =", len(docs[0])
    if len(sents[0]) > 0:
        print "\ttopic model no. of sequences =", len(sents[0])
        print "\ttopic model no. of tokens =", sum([len(item[2]) - 1 for item in sents[0]])
        print "\toriginal doc mean len =", stats[3]
        print "\toriginal doc max len =", stats[4]
        print "\toriginal doc min len =", stats[5]
    if len(sents[1]) > 0:
        print "\tlanguage model no. of sequences =", len(sents[1])
        print "\tlanguage model no. of tokens =", sum([len(item[2]) - 1 for item in sents[1]])
        print "\toriginal sent mean len =", stats[0]
        print "\toriginal sent max len =", stats[1]
        print "\toriginal sent min len =", stats[2]
def MSE(self, responses):
    # Mean squared deviation of the responses from their per-dimension mean.
    mean = np.mean(responses, axis=0)
    return np.mean((responses - mean) ** 2)
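This is the pooled per-dimension variance of the responses, as a quick check with made-up data confirms:

responses = np.array([[1.0, 2.0], [3.0, 6.0]])
mean = np.mean(responses, axis=0)
assert np.isclose(np.mean((responses - mean) ** 2),
                  np.var(responses, axis=0).mean())  # both equal 2.5 here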
def make_leaf(self, responses):
    # A leaf predicts the mean response of the samples that reached it.
    self.leaf = np.mean(responses, axis=0)

def predict(self, point):
    # Average the predictions of all trees in the ensemble.
    response = []
    for i in range(self.ntrees):
        response.append(self.trees[i].predict(point))
    return np.mean(response, axis=0)
def normaliza(self, X):
    correction = np.sqrt((len(X) - 1) / len(X))  # std factor correction
    mean_ = np.mean(X, 0)
    scale_ = np.std(X, 0)
    X = X - mean_
    X = X / (scale_ * correction)
    return X
def gof(self):
    # Goodness of fit: geometric mean of the block-size-weighted average AVE
    # and the mean R2 of the endogenous latent variables.
    r2mean = np.mean(self.r2.T[self.endoexo()[0]].values)
    AVEmean = self.AVE().copy()
    totalblock = 0
    for i in range(self.lenlatent):
        block = self.data_[self.Variables['measurement']
                           [self.Variables['latent'] == self.latent[i]]]
        block = len(block.columns.values)
        totalblock += block
        AVEmean[self.latent[i]] = AVEmean[self.latent[i]] * block
    AVEmean = np.sum(AVEmean) / totalblock
    return np.sqrt(AVEmean * r2mean)
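In formula terms, GoF = sqrt(mean(AVE) x mean(R2)), with the AVE average weighted by block size; a worked example with hypothetical values:

import numpy as np

AVE_mean, r2_mean = 0.64, 0.36      # hypothetical weighted mean AVE and mean R2
print(np.sqrt(AVE_mean * r2_mean))  # 0.48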
def srmr(self):
    # Standardized root mean square residual between the empirical and
    # model-implied correlation matrices.
    srmr = (self.empirical() - self.implied())
    srmr = np.sqrt(((srmr.values) ** 2).mean())
    return srmr
def dataInfo(self):
    sd_ = np.std(self.data, 0)
    mean_ = np.mean(self.data, 0)
    skew = scipy.stats.skew(self.data)
    kurtosis = scipy.stats.kurtosis(self.data)
    # Shapiro-Wilk W statistic per column as a normality check.
    w = [scipy.stats.shapiro(self.data.iloc[:, i])[0]
         for i in range(len(self.data.columns))]
    return [mean_, sd_, skew, kurtosis, w]
def htmt(self):
    # Heterotrait-monotrait ratio of correlations (HTMT).
    htmt_ = pd.DataFrame(pd.DataFrame.corr(self.data_),
                         index=self.manifests, columns=self.manifests)
    mean = []
    allBlocks = []
    for i in range(self.lenlatent):
        block_ = self.Variables['measurement'][
            self.Variables['latent'] == self.latent[i]]
        allBlocks.append(list(block_.values))
        block = htmt_.loc[block_, block_]
        # Mean of the off-diagonal (within-block) correlations.
        mean_ = (block - np.diag(np.diag(block))).values
        mean_[mean_ == 0] = np.nan
        mean.append(np.nanmean(mean_))
    comb = [[k, j] for k in range(self.lenlatent)
            for j in range(self.lenlatent)]
    comb_ = [np.sqrt(mean[comb[i][1]] * mean[comb[i][0]])
             for i in range(self.lenlatent ** 2)]
    comb__ = []
    for i in range(self.lenlatent ** 2):
        # Mean of the between-block correlations for each pair of blocks.
        block = (htmt_.loc[allBlocks[comb[i][1]],
                           allBlocks[comb[i][0]]]).values
        # block[block == 1] = np.nan
        comb__.append(np.nanmean(block))
    htmt__ = np.divide(comb__, comb_)
    where_are_NaNs = np.isnan(htmt__)
    htmt__[where_are_NaNs] = 0
    htmt = pd.DataFrame(np.tril(htmt__.reshape(
        (self.lenlatent, self.lenlatent)), k=-1), index=self.latent, columns=self.latent)
    return htmt
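For reference, the quantity computed here is the heterotrait-monotrait ratio of Henseler et al. (2015); in comment form:

# HTMT(i, j) = mean(correlations between the indicators of blocks i and j)
#              / sqrt(mean(within-block corr. of i) * mean(within-block corr. of j))
# Values above roughly 0.85-0.90 are commonly read as a lack of discriminant validity.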