def knn(self, test_X, k=3):
    # needs: from numpy import tile; import operator
    size = self.train_X.shape[0]
    # Euclidean distance from test_X to every training sample
    diff = tile(test_X, (size, 1)) - self.train_X
    dist_pow2 = diff ** 2
    dist_sum = dist_pow2.sum(axis=1)
    dist_sqrt = dist_sum ** 0.5
    nearest = dist_sqrt.argsort()
    # vote among the k nearest neighbors
    class_count = {}
    for i in range(k):
        vote_label = self.train_Y[nearest[i]]
        class_count[vote_label] = class_count.get(vote_label, 0) + 1
    sorts = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)
    return sorts[0][0]
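A minimal usage sketch for the method above; the wrapper class SimpleKNN is made up here purely to hold train_X/train_Y as numpy arrays, and the expected label is worked out by hand.

import operator
import numpy as np
from numpy import tile  # knn() above uses numpy's tile as a bare name

class SimpleKNN:
    """Hypothetical holder for the training data; not part of the original project."""
    def __init__(self, train_X, train_Y):
        self.train_X = np.asarray(train_X)
        self.train_Y = train_Y

SimpleKNN.knn = knn  # attach the function defined above as a method

clf = SimpleKNN([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]],
                ['A', 'A', 'B', 'B'])
print(clf.knn(test_X=[0.1, 0.2], k=3))  # -> 'B' (two of the three nearest neighbors are 'B')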
async def draw(self, layer, timestamp):
    """
    Draw the next layer
    """
    # Yield until the queue becomes active
    events = await self.get_input_events()
    if len(events) > 0:
        self._process_events(events)
        # paint circles in descending timestamp order (newest events first)
        events = sorted(events, key=operator.itemgetter(0), reverse=True)
        for event in events:
            distance = 1.0 - event.percent_complete
            if distance < 1.0:
                radius = self._max_distance * distance
                self._draw_circles(layer, radius, event)
        return True
    return False
def rank_phases(phases, weights, thresholds):
values = {}
scores = []
for attribute, weight in weights.items():
values[attribute] = [getattr(phase, attribute) for phase in phases]
    for phase in phases:
        # weighted average of the per-attribute scores (score() is defined elsewhere),
        # normalised by the total weight
        weighted = sum(weight * score([thresholds[attribute]] + values[attribute],
                                      getattr(phase, attribute))
                       for attribute, weight in weights.items())
        scores.append((weighted / sum(weights.values()), phase))
ranked_phases = []
for rank, phase in sorted(scores, key=itemgetter(0), reverse=True):
phase.attributes['rank'] = rank
ranked_phases.append(phase)
return ranked_phases
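The ranking step reduces to sorting (score, object) pairs on their first element; a standalone illustration of that idiom with toy values instead of real phase objects:

from operator import itemgetter

scores = [(0.42, 'phase-a'), (0.91, 'phase-b'), (0.67, 'phase-c')]
for rank, name in sorted(scores, key=itemgetter(0), reverse=True):
    print('%s: %.2f' % (name, rank))
# phase-b: 0.91
# phase-c: 0.67
# phase-a: 0.42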
def attempt_naive_pov(self):
p1 = self._find_naive_leaks()
p2 = self._find_naive_leaks()
leaked = dict()
for si in p1:
if si in p2:
li = list(set(p2[si]).intersection(set(p1[si])))
if len(li) > 0:
for lb in li:
leaked[lb] = si
    # find runs of contiguous leaked byte indices
    consecutive_groups = []
    for _, g in groupby(enumerate(sorted(leaked)), lambda pair: pair[0] - pair[1]):
        consecutive_groups.append(list(map(itemgetter(1), g)))
def get_largest_consecutive(self):
# extra work here because we need to be confident about the bytes
ss = self.state.copy()
ss.add_constraints(self.minimized_ast == ss.se.BVV(ss.se.eval(self.minimized_ast, cast_to=str)))
leaked_bytes = [ ]
for byte in self.possibly_leaked_bytes:
if self._confident_byte(ss, byte):
leaked_bytes.append(byte)
leaked_bytes = sorted(set(leaked_bytes))
consec_bytes = [ ]
# find consecutive leaked bytes
    for _, g in groupby(enumerate(leaked_bytes), lambda pair: pair[0] - pair[1]):
        consec_bytes.append(list(map(itemgetter(1), g)))
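Both snippets above use the same itertools trick: along a run of consecutive values, index minus value stays constant, so groupby splits the sequence into contiguous runs. A standalone sketch:

from itertools import groupby
from operator import itemgetter

values = [2, 3, 4, 8, 9, 15]
runs = [list(map(itemgetter(1), g))
        for _, g in groupby(enumerate(values), lambda pair: pair[0] - pair[1])]
print(runs)  # [[2, 3, 4], [8, 9], [15]]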
Source file: newest_dhcp_lease.py (project: kolla-kubernetes-personal, author: rthallisey)
def get_vir_network_dhcp_lease(conn, vm_name):
"""Libvirt since 1.2.6 version provides DHCPLeases method in virNetwork.
That's the current official way for getting DHCP leases and this
information isn't stored anywhere else anymore.
"""
network = conn.networkLookupByName('vagrant-private-dhcp')
dhcp_leases = libvirt.virNetwork.DHCPLeases(network)
vm_dhcp_leases = filter(lambda lease: lease['hostname'] == vm_name,
dhcp_leases)
newest_vm_dhcp_lease = sorted(vm_dhcp_leases,
key=operator.itemgetter('expirytime'),
reverse=True)[0]['ipaddr']
return newest_vm_dhcp_lease
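itemgetter accepts mapping keys as well as sequence indices, which is what the sort above relies on; a toy illustration with made-up lease dicts:

from operator import itemgetter

leases = [
    {'ipaddr': '192.168.121.10', 'expirytime': 1500000000},
    {'ipaddr': '192.168.121.11', 'expirytime': 1500003600},
]
newest = sorted(leases, key=itemgetter('expirytime'), reverse=True)[0]['ipaddr']
print(newest)  # 192.168.121.11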
def _main():
    # Main program for testing.
    import os
    import sys
    mod = sys.argv[1]
    if os.path.exists(mod):
        path = [os.path.dirname(mod)]
        mod = os.path.basename(mod)
        if mod.lower().endswith(".py"):
            mod = mod[:-3]
    else:
        path = []
    tree = readmodule_ex(mod, path)
    # sort the module's classes and functions by line number
    objs = sorted(tree.values(), key=lambda obj: getattr(obj, 'lineno', 0))
    for obj in objs:
        if isinstance(obj, Class):
            print("class", obj.name, obj.super, obj.lineno)
            methods = sorted(obj.methods.items(), key=itemgetter(1))
            for name, lineno in methods:
                if name != "__path__":
                    print("    def", name, lineno)
        elif isinstance(obj, Function):
            print("def", obj.name, obj.lineno)
def sort_xy(x, y):
''' Sorts a pair of x and y iterables, returning arrays in order of
ascending x.
Args:
x (`iterable`): a list, numpy ndarray, or other iterable to sort by.
y (`iterable`): a list, numpy ndarray, or other iterable that is y=f(x).
Returns:
tuple containing:
`iterable`: an iterable containing the sorted x elements.
`iterable`: an iterable containing the sorted y elements.
'''
# zip x and y, sort by the 0th element (x) of each tuple in zip()
_ = sorted(zip(x, y), key=itemgetter(0))
sorted_x, sorted_y = zip(*_)
return sorted_x, sorted_y
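A quick usage sketch, assuming from operator import itemgetter is in scope for sort_xy:

x = [3, 1, 2]
y = [9, 1, 4]  # y = x**2 for this toy data
sorted_x, sorted_y = sort_xy(x, y)
print(sorted_x)  # (1, 2, 3)
print(sorted_y)  # (1, 4, 9)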
def print(self, f, format='counts'):
for key, counts in self.count_dict.items():
if (self.source_tf[key] >= self.source_tf_filter) and \
(self.source_df[key] / float(self.count_docs) <= self.source_df_filter):
candidates = [(v, c) for v, c in counts.items() if not self._filtered_trans(v)]
candidates = sorted(candidates, key=itemgetter(1), reverse=True)
elif len(self.source_tf) == 0:
# no tf/df counts - dictionary read from file
candidates = sorted(counts.items(), key=itemgetter(1), reverse=True)
else:
continue
if self.top_n:
candidates = candidates[:self.top_n]
if candidates:
if format == 'counts':
f.write(u'%s\t%s\n' % (key, ' '.join([self._format(v, c) for v, c in candidates])))
elif format == 'solr':
f.write(u'%s => %s\n' % (key, candidates[0][0]))
def historigram(filename):
values = {}
mostpixels = []
im = Image.open(filename)
im = im.convert("P")
his = im.histogram()
for i in range(256):
values[i] = his[i]
print('Id ' + 'Number of pixels')
for j,k in sorted(values.items(), key=itemgetter(1), reverse=True)[:10]:
print(j,k)
mostpixels.append([j,k])
return mostpixels
def write_snp_summary(self, file="snp_summary.csv", summary_parameters=None, sort=False):
if summary_parameters is None:
summary_parameters = ["maf", "hwe", "rep", "call_rate"]
out_file = os.path.join(self.out_path, self.attributes["project"] + "_" + file)
out_data = [["id"] + summary_parameters]
snps = [[snp] + [data[parameter] for parameter in summary_parameters] for snp, data in self.data.items()]
if sort:
snps = sorted(snps, key=operator.itemgetter(*[i for i in range(1, len(summary_parameters)+1)]),
reverse=True)
out_data += snps
with open(out_file, "w") as snp_summary:
writer = csv.writer(snp_summary)
writer.writerows(out_data)
def _compare_entries(self, ids, selector="maf", selector_list=None):
"""
Gets data from dictionary for each duplicate SNP according to 'selector'
and returns the allele identification of the best entry.
Selector list currently sorts descending, that is all selector values must be ranked highest value ("best") -
this is the case for MAF, Call Rate, Rep, Read Counts ...
Later rank the data by QC Score.
"""
if selector_list is None:
entries_stats = [[i, self.data[i][selector]] for i in ids]
entries_ranked = sorted(entries_stats, key=operator.itemgetter(1), reverse=True)
else:
entries_stats = [[i] + [self.data[i][selector] for selector in selector_list] for i in ids]
entries_ranked = sorted(entries_stats, key=operator.itemgetter(*[i for i in range(1, len(selector_list)+1)]),
reverse=True)
return entries_ranked[0][0]
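The multi-column sort relies on itemgetter accepting several indices and returning a tuple key; a small illustration with made-up entries of the form [id, maf, call_rate]:

from operator import itemgetter

entries = [
    ['snp-1', 0.12, 0.95],
    ['snp-2', 0.30, 0.90],
    ['snp-3', 0.30, 0.99],
]
# sort descending on columns 1..n taken together; the best entry comes first
best = sorted(entries, key=itemgetter(1, 2), reverse=True)[0][0]
print(best)  # snp-3 (highest maf, call rate breaks the tie)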
########################################################################################################################
def unique_for_country_code(self, country_code):
shipping = self.filter(
Q(country_code=country_code) |
Q(country_code=ANY_COUNTRY))
shipping = shipping.order_by('shipping_method_id')
shipping = shipping.values_list('shipping_method_id', 'id', 'country_code')
grouped_shipping = groupby(shipping, itemgetter(0))
any_country = ANY_COUNTRY
ids = []
for shipping_method_id, method_values in grouped_shipping:
method_values = list(method_values)
        # if both an ANY_COUNTRY entry and a country-specific entry exist, prefer the specific one
if len(method_values) == 2:
method = [val for val in method_values if val[2] != any_country][0]
else:
method = method_values[0]
ids.append(method[1])
return self.filter(id__in=ids)
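The grouping above only works because the queryset is ordered by shipping_method_id first: itertools.groupby merges adjacent items with equal keys only. A standalone sketch with made-up (shipping_method_id, id, country_code) tuples:

from itertools import groupby
from operator import itemgetter

rows = [
    (1, 10, 'PL'),
    (1, 11, ''),   # '' standing in for ANY_COUNTRY
    (2, 20, ''),
]
rows.sort(key=itemgetter(0))  # groupby needs equal keys to be contiguous
for method_id, group in groupby(rows, itemgetter(0)):
    print(method_id, [row[1] for row in group])
# 1 [10, 11]
# 2 [20]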
def sortby(self, name_or_index):
name, index = None, None
if isinstance(name_or_index, int):
index = name_or_index
else:
name = name_or_index
if name is not None:
try:
colnum = self._colnames.index(name)
except ValueError:
raise ValueError('column {} not in {}'.format(name, self._colnames))
else:
if index < 0 or index >= self._width:
raise ValueError('index out of range 0..{:d}'.format(self._width - 1))
colnum = index
self._rows.sort(key=itemgetter(colnum))
def sort_rows(self, rows, section):
"""
Sort the rows, as appropriate for the section.
Args:
rows(list): List of tuples (all same length, same values in each position)
section(str): Name of section, should match const in Differ class
"""
#print("@@ SORT ROWS:\n{}".format(rows))
# Section-specific determination of sort key
if section.lower() == Differ.CHANGED.lower():
sort_key = Differ.CHANGED_DELTA
else:
sort_key = None
if sort_key is not None:
rows.sort(key=itemgetter(sort_key))
Source file: backuprestore.py (project: script.skin.helper.skinbackup, author: marcelveldt)
def clean_oldbackups(self):
'''auto clean old backups'''
backuppath = self.addon.getSetting("backup_path").decode("utf-8")
max_backups = self.addon.getSetting("max_old_backups")
if max_backups:
max_backups = int(max_backups)
all_files = []
for filename in xbmcvfs.listdir(backuppath)[1]:
if ".zip" in filename and "Skinbackup" in filename:
filename = filename.decode("utf-8")
filepath = backuppath + filename
filestat = xbmcvfs.Stat(filepath)
modified = filestat.st_mtime()
del filestat
log_msg(modified)
all_files.append((filepath, modified))
if len(all_files) > max_backups:
from operator import itemgetter
old_files = sorted(all_files, key=itemgetter(1), reverse=True)[max_backups - 1:]
for backupfile in old_files:
delete_file(backupfile[0])
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
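This snippet matches scikit-learn's DictVectorizer.restrict; a usage sketch along the lines of its docstring example (the feature-name accessor varies across scikit-learn versions, so treat the last line as approximate):

from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2

v = DictVectorizer()
D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
X = v.fit_transform(D)
# keep only the two best features according to a chi-squared test
support = SelectKBest(chi2, k=2).fit(X, [0, 1]).get_support()
v.restrict(support)
print(v.get_feature_names_out())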
def get_server_list(sort_by_load=False, sort_by_country=False):
try:
resp = requests.get(API_ADDR + '/server', timeout=TIMEOUT)
if resp.status_code == requests.codes.ok:
server_list = resp.json()
if sort_by_load:
return sorted(server_list, key=itemgetter('load'))
elif sort_by_country:
return sorted(server_list, key=itemgetter('country'))
else:
return server_list
else:
return None
    except Exception:
return None
Source file: urlrank2.py (project: Software-Architecture-with-Python, author: PacktPublishing)
def rank(self):
    """ Rank the URLs. A list of (url, #occurrences) tuples
    is returned in decreasing order of occurrences """

    occurs = []

    for url in self.urls:
        data = requests.get(url).text
        # lower-case and strip every word, dropping empty strings
        words = [w for w in (x.lower().strip() for x in data.split()) if w]
        count = words.count(self.word)
        occurs.append((url, count))

    # Return in sorted order
    return sorted(occurs, key=operator.itemgetter(1), reverse=True)
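A hypothetical driver for the method above; the class name URLRank and its constructor are assumptions, only the urls and word attributes that rank() reads are taken from the snippet:

import operator
import requests

class URLRank:
    """Made-up host class exposing just the attributes rank() needs."""
    rank = rank  # reuse the function defined above as a method

    def __init__(self, word, urls):
        self.word = word
        self.urls = urls

ranker = URLRank('python', ['https://www.python.org/', 'https://pypi.org/'])
for url, count in ranker.rank():
    print(url, count)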