Python mode() usage examples (source code)
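For orientation before the individual snippets: statistics.mode() returns the single most common data point. On Python versions before 3.8 it raises statistics.StatisticsError when the data are empty or when several values tie for most common, which is why many of the examples below wrap the call in try/except or fall back to a hand-rolled mode. A minimal sketch of that behaviour, assuming only the standard library:

import statistics

print(statistics.mode([1, 1, 2, 3]))        # 1: a unique mode exists

try:
    print(statistics.mode([1, 1, 2, 2]))    # two values tie for most common
except statistics.StatisticsError as err:
    print("no unique mode:", err)           # raised on Python < 3.8

# On Python 3.8+ the multimodal case no longer raises; the first mode
# encountered (here 1) is returned instead, and only empty data raises.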
Exercises4.py (file source)
Project: Python-Programming-A-Concise-Introduction
Author: abdullahaalam
def my_stats(slis):
    import statistics
    print("Mean: ", statistics.mean(slis))
    print("Median: ", statistics.median(slis))
    # print("Mode: ", statistics.mode(slis))
    try:
        print("Mode: ", statistics.mode(slis))
    except statistics.StatisticsError as e:
        print("Mode error: ", e)
    print("Standard Deviation: ", statistics.stdev(slis))
    print("Variance: ", statistics.variance(slis))

#%%
feature2.py (file source)
Project: Sarcasm-Detection-on-Twitter
Author: priyanshu-bajpai
from collections import Counter

def mode(list):
    # Count occurrences of each element; most_common(1) returns the
    # single most frequent (element, count) pair.
    data = Counter(list)
    value = data.most_common(1)
    return value[0][0]
# This is the final function, which calculates the divergence of the current tweets from past tweets.
feature2.py (file source)
Project: Sarcasm-Detection-on-Twitter
Author: priyanshu-bajpai
def feature2Extractor(list):
    # `st` is assumed to be the statistics module (e.g. `import statistics as st`
    # at module level in the original file).
    ls = []
    ls.append(st.mean(list))
    ls.append(st.median(list))
    try:
        ls.append(st.mode(list))
    except Exception:
        # statistics.mode() raises when there is no unique mode;
        # fall back to the Counter-based mode() defined above.
        ls.append(mode(list))
    ls.append(st.stdev(list))
    ls.append(min(list))
    ls.append(max(list))
    featureList = ls
    return featureList
def prepare_data(self):
    """Overload method from UnivariateCommonMixin."""
    # Make sure test data has exactly one mode.
    return [1, 1, 1, 1, 3, 4, 7, 9, 0, 8, 2]
def test_nominal_data(self):
    # Test mode with nominal data.
    data = 'abcbdb'
    self.assertEqual(self.func(data), 'b')
    data = 'fe fi fo fum fi fi'.split()
    self.assertEqual(self.func(data), 'fi')
def test_discrete_data(self):
    # Test mode with discrete numeric data.
    data = list(range(10))
    for i in range(10):
        d = data + [i]
        random.shuffle(d)
        self.assertEqual(self.func(d), i)
def test_bimodal_data(self):
    # Test mode with bimodal data.
    data = [1, 1, 2, 2, 2, 2, 3, 4, 5, 6, 6, 6, 6, 7, 8, 9, 9]
    assert data.count(2) == data.count(6) == 4
    # Check for an exception.
    self.assertRaises(statistics.StatisticsError, self.func, data)
def test_unique_data_failure(self):
    # Test mode exception when data points are all unique.
    data = list(range(10))
    self.assertRaises(statistics.StatisticsError, self.func, data)
def mode(values):
    """Returns the mode of the values.

    If multiple values tie, one value is returned.

    Args:
        values: A list of values.

    Returns:
        The mode.
    """
    counts = {k: values.count(k) for k in set(values)}
    return sorted(counts, key=counts.__getitem__)[-1]
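A quick note on this helper: because sorted() is stable, ties are broken by the iteration order of set(values), so which tied value wins is effectively arbitrary, but unlike statistics.mode() on Python < 3.8 it never raises. A small usage sketch of the mode() defined just above, with made-up inputs for illustration:

print(mode([1, 1, 2, 2, 3]))           # prints 1 or 2 (tie broken arbitrarily), never raises
print(mode(['fe', 'fi', 'fi', 'fo']))  # 'fi'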
def calculate_dominance(cppn: FeedForwardNetwork, ca_config: CAConfig) -> float:
    alphabet = ca_config.alphabet
    neighbourhood = ca_config.neighbourhood
    # Enumerate every possible neighbourhood configuration.
    nbhs = list(product(alphabet, repeat=len(neighbourhood)))
    rules = create_state_normalization_rules(states=alphabet)
    quiescent = alphabet[0]

    def transition_f(inputs_discrete_values: Sequence[CELL_STATE_T]) -> CELL_STATE_T:
        if all((x == quiescent) for x in inputs_discrete_values):
            return quiescent
        inputs_float_values = tuple(rules[x] for x in inputs_discrete_values)
        outputs = cppn.serial_activate(inputs_float_values)
        return max(zip(alphabet, outputs), key=itemgetter(1))[0]

    heterogenous, homogenous = 0, 0
    for nbh in nbhs:
        try:
            output = transition_f(nbh)
        except OverflowError:
            continue
        # Only count configurations where the transition output equals the
        # modal (most common) state of the neighbourhood.
        m = mode(nbh)
        if output != m:
            continue
        elif all(x == m for x in nbh):
            homogenous += 1
        else:
            heterogenous += 1
    return 3 * homogenous + heterogenous
def mode(text):
    """
    Finds the mode of a space-separated list of numbers.

    Example::

        /mode 33 54 43 65 43 62
    """
    return format_output(statistics.mode(parse_numeric_list(text)))
def setup():
    commands.add(mean)
    commands.add(median)
    commands.add(median_low)
    commands.add(median_high)
    commands.add(median_grouped)
    commands.add(mode)
    commands.add(pstdev)
    commands.add(pvariance)
    commands.add(stdev)
    commands.add(variance)
def _try_compute_mode(objects):
    """
    Computes the mode of a set of objects, if a unique one exists.

    Args:
        objects (list[T]): the objects whose mode is to be computed

    Returns:
        T: the modal value, or None if a unique mode does not exist
    """
    try:
        numeric_value = statistics.mode(objects)  # This _is_ 'None' friendly
    except statistics.StatisticsError:  # No unique value, or empty data
        numeric_value = None
    return numeric_value
def select_mode(self):
    """ Select a mode: Easy or Hard.
    """
    self.tickcount += 1  # was a no-op `self.tickcount + 1` in the original
    if self.raspberry:
        self.tickcount += 1
    bgr_image = self.capture_frame()

    # Draw "Easy" and "Hard".
    # bgr_image = self.overlayUI(bgr_image)
    easy_coord = (self.screenwidth // 8, (self.screenheight * 3) // 4)
    draw_text(easy_coord, bgr_image, "Easy", font_scale=3)
    hard_coord = (self.screenwidth // 2, (self.screenheight * 3) // 4)
    draw_text(hard_coord, bgr_image, "Hard", font_scale=3)

    # Listen for mode selection.
    if self.currPosX and self.currPosX < self.screenwidth / 2:
        cv2.rectangle(self.overlay, (0, 0),
                      (self.screenwidth // 2, int(self.screenheight)),
                      (211, 211, 211), -1)
    else:
        cv2.rectangle(self.overlay, (self.screenwidth // 2, 0),
                      (self.screenwidth, self.screenheight),
                      (211, 211, 211), -1)

    if self.click_point_x:  # If user clicks left mouse button.
        # OPTIONAL: Positional mode selection
        # self.easy_mode = True if self.click_point_x < self.screenwidth / 2 else False
        self.easy_mode = True
        self.tickcount = 0
        self.curr_level = 1
        self.click_point_x = None
        self.click_point_right_x = None

    if self.click_point_right_x:
        self.easy_mode = False
        self.tickcount = 0
        self.curr_level = 1
        self.click_point_x = None
        self.click_point_right_x = None

    # Draw faces.
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    faces = detect_faces(face_detection, gray_image)
    cv2.addWeighted(self.overlay, OPACITY, bgr_image,
                    1 - OPACITY, 0, bgr_image)
    if self.debug:
        for face in faces:
            draw_bounding_box(face, bgr_image, (255, 0, 0))

    # Draw Christmas logo.
    self.draw_hats(bgr_image, faces)
    self.draw_christmas_logo(bgr_image)  # Only for christmas

    # Show image.
    cv2.imshow('PartyPi', bgr_image)
def main(with_csv=False):
    """Performs some simple data analysis.

    If with_csv is True, the csv module is used for loading the data.
    Otherwise, a simple custom solution is used.

    Args:
        with_csv: If True, uses the csv module.
    """
    if with_csv:
        data = read_with_csv(IRIS_FILE)
    else:
        data = read_without_csv(IRIS_FILE)
    data = make_data_numeric(data, SEPAL_LENGTH, SEPAL_WIDTH,
                             PETAL_LENGTH, PETAL_WIDTH)
    print('Total number of rows:', len(data))
    class_counts = count_occurences(data, CLASS)
    print('Instances:', class_counts)
    sepal_lengths = [d[SEPAL_LENGTH] for d in data]
    print('Mean sepal length (statistics):', statistics.mean(sepal_lengths))
    print('Mean sepal length (custom):', mean(sepal_lengths))
    sepal_l_setosa = [d[SEPAL_LENGTH] for d in data if 'setosa' in d[CLASS]]
    print('Mean sepal length (setosa, statistics):',
          statistics.mean(sepal_l_setosa))
    print('Mean sepal length (setosa, custom):', mean(sepal_l_setosa))
    sepal_widths = [d[SEPAL_WIDTH] for d in data]
    print('Median sepal width (statistics):', statistics.median(sepal_widths))
    print('Median sepal width (custom):', median(sepal_widths))
    sepal_w_virginica = [d[SEPAL_WIDTH] for d in data if 'vir' in d[CLASS]]
    print('Median sepal width (virginica, statistics):',
          statistics.median(sepal_w_virginica))
    print('Median sepal width (virginica, custom):', median(sepal_w_virginica))
    petal_l_versicolor = [d[PETAL_LENGTH] for d in data if 'ver' in d[CLASS]]
    print('Mode petal length (versicolor, statistics):',
          statistics.mode(petal_l_versicolor))
    print('Mode petal length (versicolor, custom):', mode(petal_l_versicolor))
def _retake_photos_until_valid_mode(self, target_number_cluster, mode_is_invalid=lambda m: m is None) -> None:
    """
    Take 0 or more extra photos at the average location of the target numbers to do what we can to ensure a
    valid modal numeric value exists.

    Args:
        target_number_cluster (GlobalNumberCluster):
            The different representations of a single real life number to be recognised. This is extended to
            include all extra photos taken during this method.
        mode_is_invalid:
            A function which accepts a given mode (int) and returns True if it is invalid. By default this simply
            returns True if a unique mode does not exist.
    """
    average_location = target_number_cluster.average_dot_location_yx
    numeric_value = target_number_cluster.modal_numeric_value
    jitters = np.array([[0, 0],
                        [10, 0],
                        [0, 10],
                        [-10, 0],
                        [0, -10]])
    retry_number = -1
    while mode_is_invalid(numeric_value) and retry_number + 1 < len(jitters):
        retry_number += 1
        # Take a new photo
        print('Could not determine number at location ({0[0]:.0f},{0[1]:.0f}), current value {1}\n'
              'Retrying...'.format(average_location, numeric_value))
        processing_job = self._take_photo_and_extract_numbers(average_location + jitters[retry_number])
        self._processing_station.join()
        new_global_numbers = processing_job.return_value
        new_global_numbers = [n for n in new_global_numbers
                              if np.linalg.norm(n.dot_location_yx_mm - average_location)
                              < self._min_millimetres_between_distinct_spots]
        number_recognition.print_recognised_global_numbers(new_global_numbers)
        target_number_cluster.extend(new_global_numbers)
        # Try again to get the mode values
        numeric_value = _try_compute_mode([n.numeric_value for n in target_number_cluster])
def get_disaggregated_stats(self, metrics, top_splitters,
                            lang=UNSPECIFIED_TRANSLATION, limit=100):
    parent = super(NumField, self)
    stats = parent.get_disaggregated_stats(metrics, top_splitters, lang,
                                           limit)
    substats = {}
    # transpose the metrics data structure to look like
    # {splitter1: [x, y, z], splitter2: ...}
    inversed_metrics = defaultdict(list)
    for val, counter in metrics.items():
        if val is None:
            continue
        for splitter, count in counter.items():
            inversed_metrics[splitter].extend([val] * count)
    for splitter, values in inversed_metrics.items():
        val_stats = substats[splitter] = {
            'median': '*',
            'mean': '*',
            'mode': '*',
            'stdev': '*'
        }
        try:
            # requires a non-empty dataset
            val_stats['mean'] = statistics.mean(values)
            val_stats['median'] = statistics.median(values)
            # requires at least 2 values in the dataset
            val_stats['stdev'] = statistics.stdev(values,
                                                  xbar=val_stats['mean'])
            # requires a non-empty dataset and a unique mode
            val_stats['mode'] = statistics.mode(values)
        except statistics.StatisticsError:
            pass
    stats.update({
        'values': tuple(substats.items())[:limit]
    })
    return stats
def update_state(self, blocks):
    block_version = None
    char_offset = None
    group_type = None
    curr_AB = {0: None, 2: None, None: None}
    last_AB = {0: None, 2: None, None: None}
    for block in blocks:
        blkid = block['ID']
        if blkid == "A":
            self.PIs.append(block['PI'])
            char_offset = None
        if blkid == "B":
            group_type = block['group_type']
            block_version = block['version_AB']
        if blkid == "B" and group_type == 0:
            curr_AB[group_type] = block['text_AB']
            char_offset = block['text_segment'] * 2
        if blkid == "B" and group_type == 2:
            char_offset = block['text_segment'] * 4
        if (curr_AB[group_type] is not None) and (block['text_AB'] != curr_AB[group_type]) and (char_offset == 0) and (block_version == 'A'):
            print("CLEARING")
            self.cur_state[curr_AB[group_type] ^ 1] = ['_'] * 64
            curr_AB[group_type] = block['text_AB']
        if (char_offset is not None) and (blkid == "C") and (group_type == 0) and (block_version == 'B'):
            self.PIs.append((ord(block['B1']) << 8) + ord(block['B0']))
        if (char_offset is not None) and (blkid == "C") and (group_type == 2):
            self.cur_state[curr_AB[group_type]][char_offset] = block['B0']
            self.cur_state[curr_AB[group_type]][char_offset + 1] = block['B1']
        if (char_offset is not None) and (blkid == "D") and (group_type == 2):
            self.cur_state[curr_AB[group_type]][char_offset + 2] = block['B0']
            self.cur_state[curr_AB[group_type]][char_offset + 3] = block['B1']
        if (char_offset is not None) and (blkid == "D") and (group_type == 0) and (block_version == 'B'):
            self.cur_state[curr_AB[group_type]][char_offset] = block['B0']
            self.cur_state[curr_AB[group_type]][char_offset + 1] = block['B1']
        if (char_offset is not None) and (blkid == "D") and (group_type == 0) and (block_version == 'A'):
            self.cur_state[curr_AB[group_type]][char_offset + 10] = block['B0']
            self.cur_state[curr_AB[group_type]][char_offset + 11] = block['B1']
        if group_type in (0, 2):
            # print(blkid, group_type, curr_AB[group_type], block_version)
            print(' '.join([str(x) for x in block.values()]))
            # print('\n'.join([''.join(x) for x in self.prog_name]))
            if blkid == "D":
                print('\n'.join([''.join(x) for x in self.cur_state]).replace('\r', '?'))
                group_type = None  # was a no-op comparison (`group_type == None`) in the original
                char_offset = None
    try:
        self.PI = hex(statistics.mode(self.PIs))[2:]
    except statistics.StatisticsError:
        # No unique PI code seen; fall back to the first one.
        self.PI = hex(self.PIs[0])[2:]
    self.callsign = picode.rdscall(self.PI)
    print(self.callsign)