def _init_hyperparameters(self, X, T):
    """Fill in default prior hyperparameters (mean, cov, beta) where unset.

    The mean/cov vectors get one entry per training sample plus one for
    the bias basis term, hence ``X.shape[0] + 1``.
    """
    size = X.shape[0] + 1
    if self.mean is None:
        self.mean = sp.zeros(size)
    if self.cov is None:
        self.cov = sp.ones(size)
    if self.beta is None:
        self.beta = 1
Python examples: source code snippets using mean()
def predict(self, X, T, X_new):
    """Return the posterior predictive mean and variance for ``X_new``.

    Training data ``(X, T)`` define the basis functions; only the rows
    selected by ``self.rv_indices`` (the relevance vectors) are kept.
    """
    n_tests = X_new.shape[0]
    # Design matrix with a leading row of ones for the bias basis x0.
    design = sp.r_[sp.ones(n_tests).reshape(1, -1),
                   self._compute_design_matrix(X_new, X)]
    design = design[self.rv_indices, :]
    mean_pred = sp.dot(self.mean, design)
    var_pred = 1 / self.beta + sp.dot(design.T, sp.dot(self.cov, design)).diagonal()
    return mean_pred, var_pred
def score(self, X_train, T_train, X_test, T_test):
    """Return the fraction of test targets matched (within ``isclose`` tolerance).

    Bug fix: ``predict`` returns a ``(mean, cov)`` tuple; the original code
    compared the whole tuple against ``T_test``, which mixed the predictive
    variances into the score.  Only the predictive mean is compared now.
    """
    predict_mean, _ = self.predict(X_train, T_train, X_test)
    return sp.mean(sp.isclose(predict_mean, T_test))
def check_domain(input_domain):
baseline, total_bigrams_settings = load_settings()
if os.path.isfile('data/database.json'):
with open('data/database.json', 'r') as f:
try:
bigram_dict = json.load(f)
# if the file is empty the ValueError will be thrown
except ValueError:
bigram_dict = {}
percentage = []
for bigram_position in xrange(len(input_domain) - 1): #Run through each bigram in the data
if input_domain[bigram_position:bigram_position + 2] in bigram_dict: #Check if bigram is in dictionary
percentage.append((bigram_dict[input_domain[bigram_position:bigram_position + 2]] / total_bigrams_settings) * 100) #Get bigram dictionary value and convert to percantage
else:
percentage.append(0) #Bigram value is 0 as it doesn't exist
if baseline >= scipy.mean(percentage):
print 67 * "*"
print 'Baseline:', baseline, 'Domain Average Bigram Percentage:',scipy.mean(percentage)
return 1
else:
return 0
percentage = [] #Clear percentage list
def transform_3d(self, X):
    """Resample each series in ``X`` to ``self.n_samples`` points, then warp
    every feature onto the reference series using a DTW alignment computed
    on feature column ``self.scaling_col_idx``.

    X: 3-D array, assumed (n_series, n_timesteps, n_features) -- TODO confirm.
    Returns an array of shape (n_series, self.n_samples, n_features).
    """
    X_resampled = sp.zeros((X.shape[0], self.n_samples, X.shape[2]))
    xnew = sp.linspace(0, 1, self.n_samples)
    for i in range(X.shape[0]):
        # last_index presumably marks the end of valid (non-padded) data -- verify
        end = last_index(X[i])
        for j in range(X.shape[2]):
            X_resampled[i, :, j] = resampled(X[i, :end, j], n_samples=self.n_samples, kind=self.interp_kind)
        # Compute indices based on alignment of dimension self.scaling_col_idx with the reference
        indices_xy = [[] for _ in range(self.n_samples)]
        # Class-level cache: reuse a previously saved DTW path when complete.
        if self.save_path and len(DTWSampler.saved_dtw_path)==(self.d+1): # verify if full dtw path already exists
            current_path = DTWSampler.saved_dtw_path[i]
        else:
            # append path
            current_path = dtw_path(X_resampled[i, :, self.scaling_col_idx], self.reference_series)
            if self.save_path: # save current path if asked
                DTWSampler.saved_dtw_path.append(current_path)
        # Collect, for every reference timestamp, the source timestamps mapped to it
        for t_current, t_ref in current_path:
            indices_xy[t_ref].append(t_current)
        for j in range(X.shape[2]):
            if False and j == self.scaling_col_idx:  # NOTE(review): branch deliberately disabled? confirm intent
                X_resampled[i, :, j] = xnew
            else:
                # Average all aligned source values per reference timestamp
                ynew = sp.array([sp.mean(X_resampled[i, indices, j]) for indices in indices_xy])
                X_resampled[i, :, j] = ynew
    return X_resampled
def __init__(self):
    """Create an empty model container; the attributes are filled during fitting."""
    self.ni = []        # per-class sample counts
    self.prop = []      # class proportions
    self.mean = []      # class mean vectors
    self.cov = []       # class covariance matrices
    self.Q = []
    self.L = []
    self.classnum = []  # to keep the right class labels
    self.tau = 0.0      # regularization parameter (default: none)
def BIC(self,x,y,tau=None):
    '''
    Computes the Bayesian Information Criterion of the model.

    Parameters
    ----------
    x : data matrix, one sample per row (n x d)
    y : class labels; samples of class index c are selected via y == c+1,
        so labels are assumed to run 1..C -- TODO confirm against caller
    tau : optional regularization override; defaults to self.tau

    Returns
    -------
    L + P, where P is the sp.log(n)-scaled parameter-count penalty and L
    accumulates per-sample Mahalanobis distances plus logdet - 2*log(prior).
    NOTE(review): smaller values therefore indicate a better model here,
    matching a (scaled) negative log-likelihood -- confirm the caller's
    convention.
    '''
    ## Get information from the data
    C,d = self.mean.shape
    n = x.shape[0]
    ## Initialization
    if tau is None:
        TAU=self.tau
    else:
        TAU=tau
    ## Penalization: d*(d+3)/2 free parameters per class (mean + symmetric
    ## covariance), plus C-1 free class proportions
    P = C*(d*(d+3)/2) + (C-1)
    P *= sp.log(n)
    ## Compute the log-likelihood
    L = 0
    for c in range(C):
        j = sp.where(y==(c+1))[0]
        # Fancy indexing copies, so the in-place subtraction below does
        # not modify the caller's x.
        xi = x[j,:]
        invCov,logdet = self.compute_inverse_logdet(c,TAU)
        cst = logdet - 2*sp.log(self.prop[c]) # Pre compute the constant
        xi -= self.mean[c,:]
        temp = sp.dot(invCov,xi.T).T
        K = sp.sum(xi*temp,axis=1)+cst
        L +=sp.sum(K)
        del K,xi
    return L + P
Source file: c9_30_utility_function_impact_Of_A.py
Project: Python-for-Finance-Second-Edition | Author: PacktPublishing
Views: 30 | Favorites: 0 | Likes: 0 | Comments: 0
def myUtilityFunction(ret,A=1):
    """Mean-variance utility of a series of daily returns.

    The mean is annualized geometrically ((1 + mean)**252) and the
    variance linearly (var * 252); ``A`` is the risk-aversion coefficient.
    """
    mu_daily = sp.mean(ret)
    sigma2_daily = sp.var(ret)
    mu_annual = (1 + mu_daily) ** 252
    sigma2_annual = sigma2_daily * 252
    return mu_annual - 0.5 * A * sigma2_annual
Source file: c9_21_optimal_portfolio_based_on_Sortino_ratio.py
Project: Python-for-Finance-Second-Edition | Author: PacktPublishing
Views: 21 | Favorites: 0 | Likes: 0 | Comments: 0
def treynor(R,w):
    """Treynor ratio: (portfolio return - risk-free rate) / portfolio beta.

    Relies on the module-level ``portfolioBeta``, ``betaGiven`` and ``rf``.
    """
    beta_p = portfolioBeta(betaGiven, w)
    avg_returns = sp.array(sp.mean(R, axis=0))
    return (sp.dot(w, avg_returns) - rf) / beta_p
# function 4: for given n-1 weights, return a negative sharpe ratio
Source file: c9_19_treynor_ratio.py
Project: Python-for-Finance-Second-Edition | Author: PacktPublishing
Views: 21 | Favorites: 0 | Likes: 0 | Comments: 0
def treynor(R,w):
    """Treynor ratio of a weighted portfolio: excess return over beta.

    Uses the module-level ``portfolioBeta``, ``betaGiven`` and ``rf``.
    """
    portfolio_beta = portfolioBeta(betaGiven, w)
    expected = sp.array(sp.mean(R, axis=0))
    excess_return = sp.dot(w, expected) - rf
    return excess_return / portfolio_beta
#
# function 4: for given n-1 weights, return a negative sharpe ratio
Source file: c9_44_impact_of_correlation_2stock_portfolio.py
Project: Python-for-Finance-Second-Edition | Author: PacktPublishing
Views: 20 | Favorites: 0 | Likes: 0 | Comments: 0
def portfolioRet(R,w):
    """Expected portfolio return: weights dotted with the mean asset returns."""
    avg_returns = sp.array(sp.mean(R, axis=0))
    return sp.dot(w, avg_returns)
Source file: c9_18_sharpe_ratio.py
Project: Python-for-Finance-Second-Edition | Author: PacktPublishing
Views: 22 | Favorites: 0 | Likes: 0 | Comments: 0
def sharpe(R,w):
    """Sharpe ratio: (portfolio return - rf) / portfolio standard deviation.

    Relies on the module-level ``portfolio_var`` and ``rf``.
    """
    sigma2 = portfolio_var(R, w)
    avg_returns = sp.array(sp.mean(R, axis=0))
    excess_return = sp.dot(w, avg_returns) - rf
    return excess_return / sp.sqrt(sigma2)
# function 4: for given n-1 weights, return a negative sharpe ratio
Source file: c9_32_mean_and_var.py
Project: Python-for-Finance-Second-Edition | Author: PacktPublishing
Views: 75 | Favorites: 0 | Likes: 0 | Comments: 0
def meanVarAnnual(ret):
    """Return (annualized mean, annualized variance) of a daily-return series."""
    mu = sp.mean(ret)
    sigma2 = sp.var(ret)
    return (1 + mu) ** 252, sigma2 * 252
def __MR_superpixel_mean_vector(self,img,labels):
    """Mean color vector of every superpixel in ``img``.

    labels: integer label map over the image; labels are assumed to run
    0..amax(labels).  Returns an array of shape (n_labels, 3).
    """
    n_labels = sp.amax(labels) + 1
    means = sp.zeros((n_labels, 3)).astype(float)
    for lab in range(n_labels):
        pixels = img[labels == lab].astype(float)
        means[lab] = sp.mean(pixels, 0)
    return means
def read_data(instruments):
    '''
    Data pre-processing.

    Loads each named file from ``datafiles/`` (columns: time, radial
    velocity, RV error, then optional stellar-activity columns), centres
    the RVs per instrument, stacks and time-sorts everything.

    Returns (fd, staract, starflag, totcornum): the merged data, the
    mean-centred activity indices per instrument, the matching index
    arrays, and the total activity-column count.

    Fix: the bare ``except:`` (which also swallowed SystemExit and
    KeyboardInterrupt) is narrowed to ``except Exception:``.
    '''
    nins = len(instruments)
    instruments = sp.array([sp.loadtxt('datafiles/'+x) for x in instruments])
    def data(data, ins_no):
        Time, Radial_Velocity, Err = data.T[:3]  # third column is the RV error
        Radial_Velocity -= sp.mean(Radial_Velocity)  # centre the RVs
        Flag = sp.ones(len(Time)) * ins_no  # tags which instrument each row belongs to
        Staract = data.T[3:]  # remaining columns: stellar activity indices
        return sp.array([Time, Radial_Velocity, Err, Flag, Staract])
    def sortstuff(tryin):
        # Sort every column by time
        t, rv, er, flag = tryin
        order = sp.argsort(t)
        return sp.array([x[order] for x in [t, rv, er, flag]])
    fd = sp.array([]), sp.array([]), sp.array([]), sp.array([])
    for k in range(len(instruments)):  # appends all the data into one merged array
        t, rv, er, flag, star = data(instruments[k], k)
        fd = sp.hstack((fd, [t, rv, er, flag] ))  # careful: this is a list, not an array
    fd[0] = fd[0] - min(fd[0])  # shift times so the series starts at zero
    alldat = sp.array([])
    try:
        staract = sp.array([data(instruments[i], i)[4] for i in range(nins)])
    except Exception:
        # best-effort fallback: no usable activity columns
        staract = sp.array([sp.array([]) for i in range(nins)])
    starflag = sp.array([sp.array([i for k in range(len(staract[i]))]) for i in range(len(staract))])
    tryin = sortstuff(fd)
    for i in range(len(starflag)):
        for j in range(len(starflag[i])):
            staract[i][j] -= sp.mean(staract[i][j])  # mean-centre each activity series
    totcornum = 0
    for correlations in starflag:
        if len(correlations) > 0:
            totcornum += len(correlations)
    return fd, staract, starflag, totcornum
def normal_pdf(x, mean, variance):
    """Unnormalized Gaussian log-density term: -(x - mean)**2 / (2 * variance).

    NOTE(review): despite the name, this is NOT a pdf -- it omits both the
    exp() and the 1/sqrt(2*pi*variance) normalization constant.
    """
    return -((x - mean) ** 2) / (2 * variance)
Source file: SLIC_new_cityscapes_training_server_1.py
Project: SLIC_cityscapes | Author: wpqmanu
Views: 21 | Favorites: 0 | Likes: 0 | Comments: 0
def update(self, centers):
    """SLIC M-step: recompute each cluster center as the mean feature
    vector of its assigned pixels (presumably (x, y, L, a, b) given the
    name ``xylab`` -- confirm), dropping clusters that became empty.

    Returns (new_centers, nan_record) where ``nan_record`` holds the
    original indices of the dropped (empty) clusters.
    """
    # sums = [scipy.zeros(5) for i in range(len(centers))]
    # nums = [0 for i in range(len(centers))]
    # width, height = self.img.shape[:2]
    print "E step"
    new_centers=[]
    nan_record=[]
    for i in xrange(len(centers)):
        current_region=self.xylab[self.assignedindex == i]
        if current_region.size>0: #non-empty region
            new_centers.append(scipy.mean(current_region, 0))
        else: # empty region: remember its index so labels can be compacted
            nan_record.append(i)
    # After the full nan_record list is known, renumber the assignment index
    # (eliminate the dropped indexes in reverse order to keep labels contiguous)
    for nan_value in nan_record[::-1]:
        self.assignedindex[self.assignedindex>nan_value]=self.assignedindex[self.assignedindex>nan_value]-1
    for new_center_index in range(len(new_centers)):
        # print new_center_index
        # Snap the center to the pixel grid and refresh its color from labimg.
        # NOTE(review): math.floor returns a float; using it as an index only
        # works under Python 2 -- would need int() on Python 3.
        new_centers[new_center_index][0] = math.floor(new_centers[new_center_index][0])
        new_centers[new_center_index][1] = math.floor(new_centers[new_center_index][1])
        new_centers[new_center_index][2:]=self.labimg[math.floor(new_centers[new_center_index][0])][math.floor(new_centers[new_center_index][1])]
    return new_centers,nan_record
def update(self, centers):
sums = [scipy.zeros(5) for i in range(len(centers))]
nums = [0 for i in range(len(centers))]
width, height = self.img.shape[:2]
print "E step"
return [scipy.mean(self.xylab[self.assignedindex == i], 0) for i in xrange(len(centers))]
Source file: SLIC_new_cityscapes_training_server_parallel_spark.py
Project: SLIC_cityscapes | Author: wpqmanu
Views: 49 | Favorites: 0 | Likes: 0 | Comments: 0
def update(self, centers):
    """SLIC M-step: recompute each cluster center as the mean feature
    vector of its assigned pixels (presumably (x, y, L, a, b) given the
    name ``xylab`` -- confirm), dropping clusters that became empty.

    Returns (new_centers, nan_record) where ``nan_record`` holds the
    original indices of the dropped (empty) clusters.
    """
    # sums = [scipy.zeros(5) for i in range(len(centers))]
    # nums = [0 for i in range(len(centers))]
    # width, height = self.img.shape[:2]
    print "E step"
    new_centers=[]
    nan_record=[]
    for i in xrange(len(centers)):
        current_region=self.xylab[self.assignedindex == i]
        if current_region.size>0: #non-empty region
            new_centers.append(scipy.mean(current_region, 0))
        else: # empty region: remember its index so labels can be compacted
            nan_record.append(i)
    # After the full nan_record list is known, renumber the assignment index
    # (eliminate the dropped indexes in reverse order to keep labels contiguous)
    for nan_value in nan_record[::-1]:
        self.assignedindex[self.assignedindex>nan_value]=self.assignedindex[self.assignedindex>nan_value]-1
    for new_center_index in range(len(new_centers)):
        # print new_center_index
        # Snap the center to the pixel grid and refresh its color from labimg.
        # NOTE(review): math.floor returns a float; using it as an index only
        # works under Python 2 -- would need int() on Python 3.
        new_centers[new_center_index][0] = math.floor(new_centers[new_center_index][0])
        new_centers[new_center_index][1] = math.floor(new_centers[new_center_index][1])
        new_centers[new_center_index][2:]=self.labimg[math.floor(new_centers[new_center_index][0])][math.floor(new_centers[new_center_index][1])]
    return new_centers,nan_record
Source file: SLIC_new_cityscapes_training_server_parallel.py
Project: SLIC_cityscapes | Author: wpqmanu
Views: 22 | Favorites: 0 | Likes: 0 | Comments: 0
def update(self, centers):
    """SLIC M-step: recompute each cluster center as the mean feature
    vector of its assigned pixels (presumably (x, y, L, a, b) given the
    name ``xylab`` -- confirm), dropping clusters that became empty.

    Returns (new_centers, nan_record) where ``nan_record`` holds the
    original indices of the dropped (empty) clusters.
    """
    # sums = [scipy.zeros(5) for i in range(len(centers))]
    # nums = [0 for i in range(len(centers))]
    # width, height = self.img.shape[:2]
    print "E step"
    new_centers=[]
    nan_record=[]
    for i in xrange(len(centers)):
        current_region=self.xylab[self.assignedindex == i]
        if current_region.size>0: #non-empty region
            new_centers.append(scipy.mean(current_region, 0))
        else: # empty region: remember its index so labels can be compacted
            nan_record.append(i)
    # After the full nan_record list is known, renumber the assignment index
    # (eliminate the dropped indexes in reverse order to keep labels contiguous)
    for nan_value in nan_record[::-1]:
        self.assignedindex[self.assignedindex>nan_value]=self.assignedindex[self.assignedindex>nan_value]-1
    for new_center_index in range(len(new_centers)):
        # print new_center_index
        # Snap the center to the pixel grid and refresh its color from labimg.
        # NOTE(review): math.floor returns a float; using it as an index only
        # works under Python 2 -- would need int() on Python 3.
        new_centers[new_center_index][0] = math.floor(new_centers[new_center_index][0])
        new_centers[new_center_index][1] = math.floor(new_centers[new_center_index][1])
        new_centers[new_center_index][2:]=self.labimg[math.floor(new_centers[new_center_index][0])][math.floor(new_centers[new_center_index][1])]
    return new_centers,nan_record