def _print(self):
"""Print statistics and other informational text."""
mean = statistics.mean(self.prices)
median = statistics.median(self.prices)
stdev = statistics.stdev(self.prices)
    high = mean + stdev  # one standard deviation above the mean
    low = mean - stdev   # one standard deviation below the mean
print(dedent('''\
Sourced %d prices in %.3f seconds
Mean:\t$%.2f
Median:\t$%.2f
Hi/Lo:\t$%.2f/$%.2f
StDev:\t%.2f
''' % (len(self.prices), self.duration,
mean, median, high, low, stdev)))
Python statistics.stdev() usage examples
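Everything on this page leans on the same small piece of the standard library. As a baseline, here is a minimal standalone sketch of the calls involved (the sample data is made up):

import statistics

prices = [3.0, 5.0, 7.0, 9.0]
print(statistics.mean(prices))    # 6.0
print(statistics.median(prices))  # 6.0
print(statistics.stdev(prices))   # sample standard deviation, ~2.58

# stdev raises StatisticsError for fewer than two data points
try:
    statistics.stdev([42.0])
except statistics.StatisticsError as err:
    print(err)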
def get_stats(self, metrics, lang=UNSPECIFIED_TRANSLATION, limit=100):
    stats = super(NumField, self).get_stats(metrics, lang, limit)
    stats.update({
        'median': '*',
        'mean': '*',
        'mode': '*',
        'stdev': '*',
    })
    values = list(self.flatten_dataset(metrics))  # flatten once, reuse below
    try:
        # mean and median require a non-empty dataset
        stats['mean'] = statistics.mean(values)
        stats['median'] = statistics.median(values)
        # stdev requires at least 2 values; xbar reuses the computed mean
        stats['stdev'] = statistics.stdev(values, xbar=stats['mean'])
        # mode requires a non-empty dataset and a unique mode
        stats['mode'] = statistics.mode(values)
    except statistics.StatisticsError:
        pass
    return stats
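A note on the xbar argument used above: statistics.stdev accepts a precomputed mean so it can skip one pass over the data. A minimal sketch (sample values made up):

import statistics

values = [2.5, 3.0, 4.5, 5.0]
m = statistics.mean(values)
# same result as statistics.stdev(values), but the mean is not recomputed
print(statistics.stdev(values, xbar=m))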
async def async_update(self):
"""Get the latest data and updates the states."""
if not self.is_binary:
try:
self.mean = round(statistics.mean(self.states), 2)
self.median = round(statistics.median(self.states), 2)
self.stdev = round(statistics.stdev(self.states), 2)
self.variance = round(statistics.variance(self.states), 2)
except statistics.StatisticsError as err:
_LOGGER.warning(err)
self.mean = self.median = STATE_UNKNOWN
self.stdev = self.variance = STATE_UNKNOWN
if self.states:
self.total = round(sum(self.states), 2)
self.min = min(self.states)
self.max = max(self.states)
else:
self.min = self.max = self.total = STATE_UNKNOWN
def get_average_eligible_score():
    # fetch the scores once instead of querying twice
    scores = [x['score'] for x in get_all_team_scores()]
    return statistics.mean(scores), statistics.stdev(scores)
def get_average_problems_solved(eligible=True, scoring=True):
    teams = api.team.get_all_teams(show_ineligible=(not eligible))
    # count solved problems once per team, then optionally drop teams with none
    counts = [len(api.problem.get_solved_pids(tid=t['tid'])) for t in teams]
    values = [c for c in counts if not scoring or c > 0]
    return statistics.mean(values), statistics.stdev(values)
def get_average_achievement_number():
    earned_achievements = api.achievement.get_earned_achievement_instances()
    frequency = defaultdict(int)
    for achievement in earned_achievements:
        frequency[achievement['uid']] += 1
    # pad with zeros for teams that earned no achievements
    extra = len(api.team.get_all_teams(show_ineligible=False)) - len(frequency)
    values = [0] * extra + list(frequency.values())
    return statistics.mean(values), statistics.stdev(values)
def stdev(self):
    """Sample standard deviation of the recorded prices."""
    return statistics.stdev(self.price)
def print_stat(msg, times_taken):
print('{}: mean {:.2f} secs, median {:.2f} secs, stdev {:.2f}'.format(
msg, mean(times_taken), median(times_taken), stdev(times_taken)
))
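A hypothetical usage of print_stat (the timings are made up, and the bare names assume the caller did `from statistics import mean, median, stdev`):

from statistics import mean, median, stdev

times_taken = [1.92, 2.10, 2.35, 1.88]  # invented durations in seconds
print_stat('download', times_taken)
# -> download: mean 2.06 secs, median 2.01 secs, stdev 0.21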
def client_pool(func, entries_count, workers, additional_args=None):
    # avoid the mutable-default-argument pitfall
    if additional_args is None:
        additional_args = []
    with Pool(workers) as pool:
        start_time = timer()
        # every worker gets an equal share of the entries
        worker_args = [[entries_count // workers] + additional_args]
        finish_times = pool.starmap(func, worker_args * workers)
    return (statistics.stdev(finish_times),
            statistics.mean(finish_times) - start_time)
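A hypothetical driver for client_pool; insert_entries is an invented stand-in for a real client worker (any picklable module-level function that returns its finish timestamp would do):

from multiprocessing import Pool
from timeit import default_timer as timer
import statistics

def insert_entries(count):
    # pretend to process `count` entries, then report the finish time
    return timer()

if __name__ == '__main__':
    spread, elapsed = client_pool(insert_entries, entries_count=1000, workers=4)
    print(spread, elapsed)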
def parse_args():
global should_draw
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument("-e", "--perc_elim", nargs='?', type=int, const=-1, default=20, help='Percentage of contestants eliminated, set to negative number to specify number of contestants')
parser.add_argument("-t", "--num_gold", nargs='?', type=int, const=5, default=1, help='Number of contestants to place in gold highlighting')
parser.add_argument('-i', '--omit_image', action='store_false', help='Use this flag to not draw image')
args = parser.parse_args()
path = args.input
votes = convert(path)
    with open('./twows/{}/prompt.txt'.format(path), 'r') as f:
        prompt = f.read().split('\n')[0]  # first line is the prompt
scores = []
    twowers = set()
    with open('./twows/{}/responses.csv'.format(path), 'r', encoding=encoding) as csvfile:  # read responses
reader = csv.reader(csvfile)
for row in reader:
#scoredata format [twower, response, votes/mean, count, boost, final, stdev, votegraph]
name = simplify(row[0])
twowers.add(name)
            try:
                scores.append([name,row[1],[],0,int(row[2]),0,0,0,[0 for i in range(10)],[]])
            except (IndexError, ValueError):
                # missing or non-numeric boost column; default the boost to 0
                scores.append([name,row[1],[],0,0,0,0,0,[0 for i in range(10)],[]])
twowers = list(twowers)
twower_count = len(twowers)
    should_draw = args.omit_image
    top_number = args.num_gold  # chart coloring ranges
    if args.perc_elim < 0:
        # a negative value specifies an absolute number of contestants
        elim_number = -args.perc_elim
    else:
        elim_number = round(args.perc_elim * len(twowers) / 100)
return (path, prompt, scores, votes, twowers, twower_count, top_number, elim_number)
def getAabrhRawScoreSummmaryD(strainNamesL,aabrhL,scoresO,geneNames):
    '''Given raw scores and the sets of all-around best reciprocal hits
    (aabrhL), calculate the mean and standard deviation of scores for each
    pair of species and store them in a dictionary.'''
    # sort scores into a dict keyed by species pair
    # (the dict represents an upper triangular matrix)
    spScoreD = {}
    for i in range(len(strainNamesL)-1):
        strain1 = strainNamesL[i]
        for j in range(i+1,len(strainNamesL)):
            strain2 = strainNamesL[j]
            spScoreD[(strain1,strain2)] = []
# loop through aabrhL and populate
for orthoT in aabrhL:
spScoreD = addPairwiseScores(spScoreD,orthoT,scoresO,geneNames)
# get mean and standard deviation
summaryD = {}
for sp1,sp2 in spScoreD:
mean = statistics.mean(spScoreD[(sp1,sp2)])
std = statistics.stdev(spScoreD[(sp1,sp2)])
summaryD[(sp1,sp2)] = (mean,std)
summaryD[(sp2,sp1)] = (mean,std)
return summaryD
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument("-e", "--perc_elim", nargs='?', const=5, default=5)
parser.add_argument("-t", "--num_gold", nargs='?', const=5, default=5)
args = parser.parse_args()
path = args.input
votes = convert(path)
    with open('./twows/{}/prompt.txt'.format(path), 'r') as f:
        prompt = f.read().split('\n')[0]  # first line is the prompt
scores = []
    twowers = set()
    with open('./twows/{}/responses.csv'.format(path), 'r') as csvfile:  # read responses
reader = csv.reader(csvfile)
for row in reader:
#scoredata format [twower, response, votes/mean, count, boost, final, stdev, votegraph]
name = simplify(row[0])
twowers.add(name)
            try:
                scores.append([name,row[1],[],0,int(row[2]),0,0,0,[0 for i in range(10)]])
            except (IndexError, ValueError):
                # missing or non-numeric boost column; default the boost to 0
                scores.append([name,row[1],[],0,0,0,0,0,[0 for i in range(10)]])
twowers = list(twowers)
twower_count = len(twowers)
    top_number = args.num_gold  # chart coloring ranges
    if args.perc_elim < 0:
        # a negative value specifies an absolute number of contestants
        elim_number = -args.perc_elim
    else:
        elim_number = round(args.perc_elim * len(twowers) / 100)
return (path, prompt, scores, votes, twowers, twower_count, top_number, elim_number)
def compute_stats(self):
    result = {}
    for func_name, data_points in self.data.items():
        # build the key once; the original indexed result[func_name] below,
        # which raises KeyError whenever _prefix is non-empty
        key = self._prefix + func_name
        result[key] = {
            'avg': stats.mean(data_points),
            'min': min(data_points),
            'max': max(data_points),
            'num': len(data_points)
        }
        if len(data_points) >= 2:
            # stdev needs at least two data points
            result[key]['std'] = stats.stdev(data_points)
    return result
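A hypothetical harness for compute_stats; the Timings class and its attributes are invented to match what the method reads (self.data and self._prefix), and `stats` is assumed to be `import statistics as stats`:

import statistics as stats

class Timings:
    _prefix = 'timing.'  # invented prefix
    def __init__(self):
        self.data = {'fetch': [0.12, 0.18, 0.15]}  # invented timings

Timings.compute_stats = compute_stats  # reuse the function above as a method
print(Timings().compute_stats())
# -> {'timing.fetch': {'avg': 0.15, 'min': 0.12, 'max': 0.18, 'num': 3, 'std': 0.03}} (approx.)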
def eval_performance(env, agent, n_runs, max_episode_len=None,
explorer=None, logger=None):
"""Run multiple evaluation episodes and return statistics.
Args:
env (Environment): Environment used for evaluation
agent (Agent): Agent to evaluate.
n_runs (int): Number of evaluation runs.
max_episode_len (int or None): If specified, episodes longer than this
value will be truncated.
explorer (Explorer): If specified, the given Explorer will be used for
selecting actions.
logger (Logger or None): If specified, the given Logger object will be
used for logging results. If not specified, the default logger of
this module will be used.
Returns:
Dict of statistics.
"""
scores = run_evaluation_episodes(
env, agent, n_runs,
max_episode_len=max_episode_len,
explorer=explorer,
logger=logger)
stats = dict(
mean=statistics.mean(scores),
median=statistics.median(scores),
stdev=statistics.stdev(scores) if n_runs >= 2 else 0.0,
max=np.max(scores),
min=np.min(scores))
return stats