def update(self, new):
    """Feed one new sample into a rolling mean/variance over a window of N.

    Preload phase: store samples until the window holds N of them, then seed
    self.average / self.variance from the full window. Rolling phase: each new
    sample displaces the oldest one and the statistics are updated in O(1)
    using the sliding-window recurrences.

    NOTE(review): the scraped source had its indentation stripped; this
    reconstruction nests the seed step inside the preload branch and returns
    there, so the rolling update only runs once the window is full (as
    flattened, the rolling code could never execute).
    """
    # Preload: fill the window before any statistics are defined.
    if self.index < self.N:
        self.window[self.index] = new
        self.index += 1
        # Window just became full - seed the rolling statistics.
        if self.index == self.N:
            self.average = statistics.mean(self.window)
            self.variance = statistics.variance(self.window)
        return
    # Rolling: push the new element and drop the oldest.
    old = self.window.pop(0)  # O(N) on a list; acceptable for small windows
    self.window.append(new)
    oldavg = self.average
    newavg = oldavg + (new - old) / self.N
    self.average = newavg
    if self.N > 1:
        # Incremental sample-variance update for a sliding window.
        self.variance += (new - old) * (new - newavg + old - oldavg) / (self.N - 1)
# Python mean() usage examples (collected source snippets)
def getmetrics(self, peers=None):
    """
    Return a set of metrics based on the data in peers.
    If peers is None, use self.peers.
    """
    if peers is None:
        peers = self.peers
    metrics = {}
    for ptype in NTPPeers.peertypes:
        data = peers[ptype]
        offsets = data['offset']
        reach = data['reach']
        # How many peers of this type were seen.
        metrics[ptype] = len(data['address'])
        # Offset statistics for this peer type.
        offset_mean = NTPPeers.getmean(offsets)
        metrics[ptype + '-offset-mean'] = offset_mean
        metrics[ptype + '-offset-stdev'] = NTPPeers.getstdev(offsets, offset_mean)
        metrics[ptype + '-offset-rms'] = NTPPeers.rms(offsets)
        # Reachability statistics for this peer type.
        reach_mean = NTPPeers.getmean(reach)
        metrics[ptype + '-reach-mean'] = reach_mean
        metrics[ptype + '-reach-stdev'] = NTPPeers.getstdev(reach, reach_mean)
        # The rms of reachability is not very useful, because it's always
        # positive (so it should be very close to the mean), but we include it
        # for completeness.
        metrics[ptype + '-reach-rms'] = NTPPeers.rms(reach)
    return metrics
def statisticalNoiseReduction(values, std_factor_threshold=2):
    """
    Eliminates outlier values that go beyond a certain threshold.
    :param values: The list of elements that are being filtered.
    :param std_factor_threshold: Filtering aggressiveness. The bigger the value, the more it filters.
    :return: The filtered list.
    """
    if not values:
        return []
    data = np.array(values)
    center = data.mean()
    spread = data.std()
    # All values identical: nothing to filter, hand back the original list.
    if spread == 0:
        return values
    # Keep only values strictly inside mean +/- threshold*std.
    lower = center - std_factor_threshold * spread
    upper = center + std_factor_threshold * spread
    kept = data[(data > lower) & (data < upper)]
    return list(kept)
def set_gain_A(self, gain):
    """Select the amplifier gain for channel A.

    Only 128 and 64 are valid; anything else raises ValueError. Performs a
    throwaway read and waits so the chip can settle on the new gain.
    Returns True on success.
    """
    if gain not in (128, 64):
        raise ValueError('gain has to be 128 or 64.\nI have got: '
                         + str(gain))
    self._gain_channel_A = gain
    # after changing channel or gain it has to wait 50 ms to allow adjustment.
    # the data before is garbage and cannot be used.
    self._read()
    time.sleep(0.5)
    return True
############################################################
# zero is function which sets the current data as #
# an offset for a particular channel. It can be used for #
# subtracting the weight of the packaging. #
# max value of times parameter is 99. min 1. Default 10. #
# INPUTS: times # how many times do reading and then mean #
# OUTPUTS: BOOL # if True it is OK #
############################################################
def pooled_sample_variance(sample1, sample2):
    """Find the pooled sample variance for two samples.
    Args:
        sample1: one sample.
        sample2: the other sample.
    Returns:
        Pooled sample variance, as a float.
    """
    def _sum_sq_dev(sample):
        # Sum of squared deviations from this sample's own mean
        # (math.fsum keeps the float accumulation accurate).
        center = statistics.mean(sample)
        return math.fsum((value - center) ** 2 for value in sample)

    deg_freedom = len(sample1) + len(sample2) - 2
    return (_sum_sq_dev(sample1) + _sum_sq_dev(sample2)) / float(deg_freedom)
def export_csv(args, bench):
    """Write per-position means of the benchmark runs to args.csv_filename.

    Runs without values are skipped; values are aligned position-by-position
    across runs and each CSV row holds the mean for one position.
    """
    runs_values = [run.values for run in bench.get_runs() if run.values]
    rows = [[statistics.mean(column)] for column in zip(*runs_values)]
    # Python 3 needs newline='' so the csv module controls line endings itself.
    if six.PY3:
        fp = open(args.csv_filename, 'w', newline='', encoding='ascii')
    else:
        fp = open(args.csv_filename, 'w')
    with fp:
        csv.writer(fp).writerows(rows)
def get_student_stats(user):
    """Collect statistics visible to *user*, gated by their permissions.

    Returns a dict that may contain:
      - sum_money / mean_money: totals over all student account balances
      - created_students_len / created_staff_len: counts of transactions in
        the 'created' state by student / staff creators
    Sections the user lacks permission for are simply omitted.
    """
    stats = {}
    if user.has_perm(get_perm_name(Actions.see.value, UserGroups.student.value, "balance")):
        student_accounts = Account.objects.filter(user__groups__name__contains=UserGroups.student.value)
        balances = [a.balance for a in student_accounts]
        stats.update({
            'sum_money': int(sum(balances)),
            # statistics.mean raises StatisticsError on an empty list; report 0
            # when there are no student accounts instead of crashing.
            'mean_money': int(statistics.mean(balances)) if balances else 0,
        })
    if user.has_perm(get_perm_name(Actions.process.value, UserGroups.student.value, "created_transactions")):
        # .count() runs COUNT(*) in the database instead of fetching every row
        # and calling __len__ on the queryset.
        stats['created_students_len'] = Transaction.objects.filter(
            creator__groups__name__in=[UserGroups.student.value]).filter(
            state__name=States.created.value).count()
    if user.has_perm(get_perm_name(Actions.process.value, UserGroups.staff.value, "created_transactions")):
        stats['created_staff_len'] = Transaction.objects.filter(
            creator__groups__name__in=[UserGroups.staff.value]).filter(
            state__name=States.created.value).count()
    return stats
def run(args):
    """Benchmark VCF record-line parsing.

    Parses HEADER once, then parses LINE args.line_count times per repetition
    (args.repetitions total), printing the mean and stdev wall time of the
    repetitions to stderr.
    """
    # Setup parser
    p = parser.VCFParser(io.StringIO(HEADER), '<builtin>')
    # Parse header
    p.parse_header()
    # Parse line several times
    times = []
    # The original shadowed the repetition variable `r` with the parse result;
    # use `_` for the repetition counter instead.
    for _ in range(args.repetitions):
        # time.clock() was deprecated in 3.3 and removed in Python 3.8;
        # perf_counter() is the documented replacement for benchmarking.
        begin = time.perf_counter()
        for _ in range(args.line_count):
            r = p._record_parser.parse_line(LINE)  # noqa
            if args.debug:
                print(r, file=sys.stderr)
        times.append(time.perf_counter() - begin)
    print('Took {:.3} seconds (stdev {:.3})'.format(
        statistics.mean(times), statistics.stdev(times)), file=sys.stderr)
def demo():
    """Print the mean of each yearly series, then compare 1959 against 1960
    and 2014 with both the exhaustive and the randomized procedures."""
    for label, series in (("1959", y1959), ("1960", y1960), ("2014", y2014)):
        print("{} mean {:.2f}".format(label, mean(series)))
    # Run the exhaustive comparison first, then the randomized one, each on
    # the same two year pairings.
    for compare in (all_combos, randomized):
        print("1959 v. 1960")
        compare(y1959, y1960)
        print("\n\n1959 v. 2014")
        compare(y1959, y2014)
def steem_btc_ticker():
    """Return the mean STEEM/BTC price across Poloniex and Bittrex.

    Both exchanges are polled concurrently with a 2-second timeout; sources
    that fail, time out, or return a bad status are skipped. Raises
    RuntimeError when no source yields a price.
    """
    prices = {}
    urls = [
        "https://poloniex.com/public?command=returnTicker",
        "https://bittrex.com/api/v1.1/public/getticker?market=BTC-STEEM",
    ]
    pending = (grequests.get(url, timeout=2) for url in urls)
    responses = list(grequests.map(pending, exception_handler=lambda x, y: ""))
    usable = [r for r in responses
              if hasattr(r, "status_code") and r.status_code == 200 and r.json()]
    for response in usable:
        if "poloniex" in response.url:
            ticker = response.json()["BTC_STEEM"]
            prices['poloniex'] = {
                'price': float(ticker['last']),
                'volume': float(ticker['baseVolume']),
            }
        elif "bittrex" in response.url:
            ticker = response.json()["result"]
            # Bittrex's ticker has no last price here; use the bid/ask midpoint.
            midpoint = (ticker['Bid'] + ticker['Ask']) / 2
            prices['bittrex'] = {'price': midpoint, 'volume': 0}
    if not prices:
        raise RuntimeError("Obtaining STEEM/BTC prices has failed from all sources.")
    return mean([x['price'] for x in prices.values()])
# Exercises4.py — example source
# Project: Python-Programming-A-Concise-Introduction
# Author: abdullahaalam
# (scraped listing metadata: views 20, favorites 0, likes 0, comments 0)
def temp_stat(temps):
    """ prints the average, median, std dev, and variance of temps """
    import statistics
    print(temps)
    # Print each statistic with its label; stdev/variance need >= 2 values.
    for label, stat in (("Mean: ", statistics.mean),
                        ("Median: ", statistics.median),
                        ("Standard deviation: ", statistics.stdev),
                        ("Variance: ", statistics.variance)):
        print(label, stat(temps))
#%%
# Exercises4.py — example source (second variant, adds mode handling)
# Project: Python-Programming-A-Concise-Introduction
# Author: abdullahaalam
# (scraped listing metadata: views 20, favorites 0, likes 0, comments 0)
def temp_stat(temps):
    """ computes the average, median, std dev, and variance of temps """
    import statistics
    print(temps)
    for label, stat in (("Mean: ", statistics.mean),
                        ("Median: ", statistics.median),
                        ("Standard deviation: ", statistics.stdev),
                        ("Variance: ", statistics.variance)):
        print(label, stat(temps))
    # mode() can raise StatisticsError (e.g. on empty data); report rather
    # than crash.
    try:
        print("Mode: ", statistics.mode(temps))
    except statistics.StatisticsError as err:
        print("Mode error: ", err)
#%%
def get_average_eligible_score():
    """Return (mean, stdev) of the scores of all eligible teams.

    Fetches the team scores once; the original called get_all_team_scores()
    twice (once for the mean, once for the stdev), doubling the backend work.
    """
    scores = [team['score'] for team in get_all_team_scores()]
    return statistics.mean(scores), statistics.stdev(scores)
def get_average_problems_solved(eligible=True, scoring=True):
    """Return (mean, stdev) of solved-problem counts across teams.

    :param eligible: when True, restrict to eligible teams.
    :param scoring: when True, exclude teams that solved nothing.

    Computes each team's solved count once; the original called
    api.problem.get_solved_pids twice per team (once for the filter, once for
    the value).
    """
    teams = api.team.get_all_teams(show_ineligible=(not eligible))
    solved_counts = [len(api.problem.get_solved_pids(tid=t['tid'])) for t in teams]
    values = [count for count in solved_counts if not scoring or count > 0]
    return statistics.mean(values), statistics.stdev(values)
def get_average_achievement_number():
    """Return (mean, stdev) of achievements earned per eligible team,
    counting zero for teams that earned none."""
    frequency = defaultdict(int)
    for achievement in api.achievement.get_earned_achievement_instances():
        frequency[achievement['uid']] += 1
    # Teams absent from `frequency` earned nothing; they each contribute a 0.
    team_count = len(api.team.get_all_teams(show_ineligible=False))
    values = [0] * (team_count - len(frequency)) + list(frequency.values())
    return statistics.mean(values), statistics.stdev(values)
def mean(values_list):
    """Arithmetic mean of values_list, or 0.0 for an empty list."""
    if not values_list:
        return 0.0
    return sum(values_list) / len(values_list)
#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=
#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=# C L A S S E S =#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=
#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=#=
#--- State Variable Register class
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
def get_mean(self):
    """Mean of the shift register, or None when it is empty or does not hold
    numeric (float/int) data."""
    if not self._shift_register:
        return None
    if self._data_class not in (float, int):
        return None
    return statistics.mean(self._shift_register)
def get_mean_score(hdf5_file, basecall_location):
    """Mean Phred quality score of the FASTQ stored at basecall_location.

    The FASTQ blob's fourth line is the quality string; each byte encodes
    (qscore + 33).
    """
    # NOTE(review): h5py's `.value` accessor is removed in h5py >= 3
    # (replacement is `[()]`) — confirm the pinned h5py version.
    fastq = hdf5_file[basecall_location].value
    quality_line = fastq.split(b'\n')[3]
    return statistics.mean(char - 33 for char in quality_line)
def get_best_fastq_hdf5_location(hdf5_file, names):
    """
    This function returns the path in the FAST5 file to the best FASTQ. If there are multiple
    basecall locations, it returns the last one (hopefully from the most recent basecalling).
    """
    fastq_paths = sorted(x for x in names if x.upper().endswith('FASTQ'))

    def _with_tag(tag):
        # Paths whose (upper-cased) name contains the given strand tag.
        return [p for p in fastq_paths if tag in p.upper()]

    two_d = _with_tag('BASECALLED_2D')
    templates = _with_tag('TEMPLATE')
    complements = _with_tag('COMPLEMENT')
    # 2D basecalling wins outright when present.
    if two_d:
        return two_d[-1]
    # Both strands basecalled: keep whichever has the higher mean qscore.
    if templates and complements:
        best_template = templates[-1]
        best_complement = complements[-1]
        if get_mean_score(hdf5_file, best_template) >= get_mean_score(hdf5_file, best_complement):
            return best_template
        return best_complement
    # Only one strand basecalled (normal for 1D reads).
    if templates:
        return templates[-1]
    if complements:
        return complements[-1]
    # No recognized strand tag, but a FASTQ exists: odd, yet treat it as 1D.
    if fastq_paths:
        return fastq_paths[-1]
    return None
def get_mean_qscore(quals):
    """
    Returns the mean qscore over the entire length of the qscore string.
    An empty input yields 0.0.
    """
    if not quals:
        return 0.0
    # Each byte encodes (qscore + 33).
    return sum(q - 33 for q in quals) / len(quals)