def Zcorrections(self):
    '''Qubits on which to apply Z operators to fix the X stabilizers.'''
    L = self.L
    graph = self.Xwgraph()
    # NetworkX 1.x returns the matching as a dict with each edge in both
    # directions; keep each matched pair once, in sorted order.
    matches = {tuple(sorted(_)) for _ in
               nx.max_weight_matching(graph, maxcardinality=True).items()}
    qubits = set()
    for (y1, x1), (y2, x2) in matches:
        # Vertical span: if the direct path covers more than half the
        # torus, wrap around instead.
        ym, yM = 2*min(y1, y2), 2*max(y1, y2)
        if yM-ym > L:
            ym, yM = yM, ym+2*L
            horizontal = yM if (x2-x1)*(y2-y1)<0 else ym
        else:
            horizontal = ym if (x2-x1)*(y2-y1)<0 else yM
        # Horizontal span, with the same wrap-around rule.
        xm, xM = min(x1, x2), max(x1, x2)
        if xM-xm > L/2:
            xm, xM = xM, xm+L
            vertical = xM
        else:
            vertical = xm
        qubits.update((horizontal%(2*L), _%L) for _ in range(xm, xM))
        qubits.update(((_+1)%(2*L), vertical%L) for _ in range(ym, yM, 2))
    return matches, qubits
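# Several of the decoder snippets collected here reduce a minimum-weight
# matching problem to nx.max_weight_matching by negating edge weights. A
# minimal, self-contained sketch of that idiom (the helper name and inputs
# are illustrative, not taken from any of the sources):
import networkx as nx

def min_weight_perfect_matching(points, dist):
    """Pair up an even number of points so that total distance is minimized."""
    g = nx.Graph()
    for i, p in enumerate(points):
        for q in points[i+1:]:
            g.add_edge(p, q, weight=-dist(p, q))  # negate: max weight == min distance
    matching = nx.max_weight_matching(g, maxcardinality=True)
    # NetworkX 1.x returns a dict with each edge in both directions;
    # NetworkX >= 2.0 returns a set of (u, v) pairs. Normalize to sorted pairs.
    if isinstance(matching, dict):
        matching = {tuple(sorted(e)) for e in matching.items()}
    return matching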
def Xcorrections(self):
    '''Qubits on which to apply X operators to fix the Z stabilizers.'''
    L = self.L
    graph = self.Zwgraph()
    # Same wrap-around shortest-path logic as Zcorrections, with the qubit
    # coordinates shifted by one because the Z-stabilizer lattice is offset
    # from the X-stabilizer lattice.
    matches = {tuple(sorted(_)) for _ in
               nx.max_weight_matching(graph, maxcardinality=True).items()}
qubits = set()
for (y1, x1), (y2, x2) in matches:
ym, yM = 2*min(y1, y2), 2*max(y1, y2)
if yM-ym > L:
ym, yM = yM, ym+2*L
horizontal = yM if (x2-x1)*(y2-y1)<0 else ym
else:
horizontal = ym if (x2-x1)*(y2-y1)<0 else yM
xm, xM = min(x1, x2), max(x1, x2)
if xM-xm > L/2:
xm, xM = xM, xm+L
vertical = xM
else:
vertical = xm
qubits.update(((horizontal+1)%(2*L), (_+1)%L) for _ in range(xm, xM))
qubits.update(((_+2)%(2*L), vertical%L) for _ in range(ym, yM, 2))
return matches, qubits
def get_max_wt_matching(row_label, column_label, weight_matrix):
    """
    From clustering_on_transcript_compatibility_counts, see github for MIT license
    """
    # Create a bipartite graph where each group has |unique labels| nodes
    G = nx.complete_bipartite_graph(len(row_label), len(column_label))
    # Weight each edge by the corresponding entry of the weight matrix.
    for u, v in G.edges():
        G[u][v]["weight"] = weight_matrix[u, v - len(row_label)]
    # Find a maximum-weight matching (NetworkX uses Edmonds' blossom algorithm).
    H = nx.max_weight_matching(G)
    # Each matched edge appears twice in the NetworkX 1.x dict, hence the /2.
    max_wt = 0
    for u, v in H.items():
        max_wt += G[u][v]["weight"] / float(2)
    return max_wt
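# Toy check for get_max_wt_matching (data invented for illustration; assumes
# NetworkX 1.x, where max_weight_matching returns a dict):
import numpy as np
row_label, column_label = [0, 1], [0, 1]
weight_matrix = np.array([[1.0, 5.0],
                          [4.0, 2.0]])
# Best pairing: row 0 -> column 1 (5.0) and row 1 -> column 0 (4.0), so
# get_max_wt_matching(row_label, column_label, weight_matrix) returns 9.0.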
def match_objects_uniquely(candidates, targets, threshold=0.5):
g = nx.Graph()
for t in targets:
g.add_node(t)
for c in candidates:
g.add_node(c)
for t in targets:
tbb = BBox(*t.bbox)
for c in candidates:
cbb = BBox(*c.bbox)
intersection = tbb.intersection(cbb).area
union = tbb.area + cbb.area - intersection
            try:
                current_iou = intersection / float(union)
            except ZeroDivisionError:
                current_iou = float('inf')
if current_iou >= threshold:
g.add_edge(t, c, weight=current_iou)
target_set = set(targets)
    # NetworkX 1.x returns the matching as a dict holding both t->c and c->t;
    # filtering on target_set keeps each matched pair exactly once.
    matching = nx.max_weight_matching(g, maxcardinality=True)
    hits = [(t, c) for (t, c) in matching.items() if t in target_set]
return hits
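# match_objects_uniquely assumes objects carrying a .bbox attribute plus a
# BBox helper with .area and .intersection(); a minimal stand-in, assuming
# (x0, y0, x1, y1) corners (the original BBox class may differ):
from collections import namedtuple

class BBox(namedtuple('BBox', 'x0 y0 x1 y1')):
    @property
    def area(self):
        # Clamp negative extents so an empty intersection has zero area.
        return max(0.0, self.x1 - self.x0) * max(0.0, self.y1 - self.y0)

    def intersection(self, other):
        return BBox(max(self.x0, other.x0), max(self.y0, other.y0),
                    min(self.x1, other.x1), min(self.y1, other.y1))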
def construct_graph(user_ids, disallowed_meetings):
"""
We can use a maximum matching algorithm for this:
https://en.wikipedia.org/wiki/Blossom_algorithm
Yay graphs! Networkx will do all the work for us.
"""
    # Special weights can be put on the matching potential of each meeting,
    # depending on heuristics for what makes a good/bad potential meeting.
    meeting_to_weight = {}
    # Build the weighted edge list; the matching returned at the end simply
    # omits anyone who did not get matched.
    meetings = []
    possible_meetings = set(itertools.combinations(user_ids, 2))
allowed_meetings = possible_meetings - disallowed_meetings
for meeting in allowed_meetings:
weight = meeting_to_weight.get(meeting, 1.0)
meetings.append(meeting + ({'weight': weight},))
graph = nx.Graph()
graph.add_nodes_from(user_ids)
graph.add_edges_from(meetings)
return nx.max_weight_matching(graph)
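# Example call (user names invented; NetworkX 1.x dict return assumed). With
# uniform weights the matcher simply pairs off as many users as possible:
# users = ['alice', 'bob', 'carol', 'dave']
# construct_graph(users, {('alice', 'bob')})
# -> e.g. {'alice': 'carol', 'carol': 'alice', 'bob': 'dave', 'dave': 'bob'}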
def DSP_Matching(Syndrome, External, dim, alt_ext):
# Fully connect check operators
for check1 in Syndrome.nodes():
for check2 in Syndrome.nodes():
if check1 != check2:
                weight = -common.euclidean_dist(check1, check2) + .1
Syndrome.add_edge(*(check1, check2), weight=weight)
# Generate Boundary Graph
External_Graph = nx.Graph()
for m in Syndrome.nodes():
ext = DSP_AssociatedExternal(m, External)
External_Graph.add_node(ext)
weight = - common.euclidean_dist(m, ext)
Syndrome.add_edge(*(m, ext), weight=weight)
    # Ensure an even number of nodes in Syndrome
    # so that min-weight matching can proceed successfully
if len(Syndrome.nodes()) % 2 != 0:
Syndrome.add_node(alt_ext)
External_Graph.add_node(alt_ext)
    # Connect external nodes to each other at zero cost
    edges = itertools.combinations(External_Graph, 2)
    Syndrome.add_edges_from(edges, weight=0)
TempMatching = nx.max_weight_matching(Syndrome, maxcardinality=True)
    Matching = {}
    # Each edge appears twice in TempMatching (a NetworkX 1.x dict);
    # it should appear only once in Matching.
    for node, neighbor in TempMatching.items():
if neighbor not in Matching and node not in Matching:
if node not in External or neighbor not in External:
if node != alt_ext and neighbor != alt_ext:
Matching[node] = neighbor
return Matching
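# DSP_Matching (and MinWeightMatching below) rely on a project-specific
# common.euclidean_dist helper; a plausible stand-in, not the original:
import math

def euclidean_dist(p, q):
    """Euclidean distance between two equal-length coordinate tuples."""
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(p, q)))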
def max_weight_matching(Mrkt, Agents, verbose=False, maxcardinality=True):
"""
Computes max-weight matching of graph of inputted agents
based on the “blossom” method for finding augmenting paths
and the “primal-dual” method for finding a matching of maximum weight,
both methods invented by Jack Edmonds
Implemented by NetworkX
Runtime: O(n^3) for n nodes
Arguments
---------
Mrkt: mm.Market object
The market in which the matches are made
Agents: list
list of agents initiating matches
maxcardinality: bool
if True, compute the maximum-cardinality matching
with maximum weight among all maximum-cardinality matchings.
verbose: bool
Whether algorithm prints information on action
Returns
-------
dict { agent.name : agent.name } of matches
Reference:
“Efficient Algorithms for Finding Maximum Matching in Graphs”,
Zvi Galil, ACM Computing Surveys, 1986.
"""
if verbose:
print("\nMax Weight Match Algorithm\n")
print("Agents to match ", [a.name for a in Agents], "\n")
# If Agents not whole market, get subgraph
if len(Mrkt.Graph.nodes()) != len(Agents):
to_match = deepcopy(Mrkt.Graph.subgraph(Agents))
else:
to_match = Mrkt.Graph
    # Workaround for an AssertionError in NetworkX's verifyOptimum() function,
    # which can break on integer weights: cast all weights to float.
    for u, v, d in to_match.edges(data=True):
        d['weight'] = float(d['weight'])
mate = nx.max_weight_matching(to_match, maxcardinality=maxcardinality)
result = {a.name: mate[a].name for a in mate}
return result
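# Sketch of a call (mm.Market and its named agent objects come from the
# surrounding project and are assumptions here, not defined in this snippet):
# pairs = max_weight_matching(market, list(market.Graph.nodes()), verbose=True)
# for name_a, name_b in pairs.items():
#     print(name_a, "<->", name_b)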
def max_matching(total_distance):
graph = nx.Graph()
graph.add_weighted_edges_from(list_of_mergeable_trips)
matching_dictionary = nx.max_weight_matching(graph, maxcardinality=True)
trip_set = set()
distance_saved = 0
for key in matching_dictionary:
if (key in trip_set) and (matching_dictionary[key] in trip_set):
continue
else:
trip_set.add(key)
trip_set.add(matching_dictionary[key])
            match = matching_dictionary[key]
            # The merged-trip metadata may be keyed in either order.
            keyvalue = str(key) + "-" + str(match)
            keyvalue1 = str(match) + "-" + str(key)
            if keyvalue in trips_merged_metadata:
                found_key = keyvalue
            elif keyvalue1 in trips_merged_metadata:
                found_key = keyvalue1
            else:
                found_key = None
            if found_key is not None:
                trips = trips_merged_metadata[found_key]
                print(str(trips[0].trip_id) + "," + str(trips[0].passenger_count) + "," + str(trips[0].dropoff_lattitude) + "," + str(trips[0].dropoff_longitude))
                print(str(trips[1].trip_id) + "," + str(trips[1].passenger_count) + "," + str(trips[1].dropoff_lattitude) + "," + str(trips[1].dropoff_longitude))
                distance_saved += distance_dictionary[found_key]
            else:
                print("not there")
            print("---------------------------------------------------------------------------------------------")
print("-----------------------------------------------------------------------------------")
print("input_trips_for_algorithm ", str(len(input_trips_for_algorithm)))
print("input_trips_for_max_matching ", str(len(input_trips_for_max_matching)))
print("lone_trip ", input_trips_for_max_matching.difference(matching_dictionary.keys()))
    # Each matched pair appears twice in the NetworkX 1.x matching dict, hence // 2.
    no_trips_saved = len(input_trips_for_algorithm) - len(matching_dictionary) // 2 - len(input_trips_for_max_matching.difference(matching_dictionary.keys()))
print("no of trips saved " + str(no_trips_saved))
print("total_distance " + str(total_distance))
print("distance saved " + str(total_distance - distance_saved))
print("Total_trips_without_ridesharing " + str(len(input_trips_for_algorithm)))
unmergeable_trips = len(input_trips_for_algorithm) - len(input_trips_for_max_matching)
print("unmergeable trips " + str(unmergeable_trips))
    total_trips_due_to_ridesharing = len(matching_dictionary) // 2 + len(input_trips_for_max_matching.difference(matching_dictionary.keys())) + unmergeable_trips
print("Total_trips_due_to_ridesharing " + str(total_trips_due_to_ridesharing))
print("Merged_trips_only_after_ridesharing(without unmergeable trips and lone trips) " + str(len(matching_dictionary.keys()) / 2))
print("-----------------------------------------------------------------------------------")
def MinWeightMatching(code, Syndrome, type, charge_type):
dim = code.dimension
# Fully connect check operators
for check1 in Syndrome.nodes():
for check2 in Syndrome.nodes():
if check1 != check2:
# weight = - code.distance(type, check1, check2)
weight = - common.euclidean_dist(check1, check2)
Syndrome.add_edge(*(check1, check2), weight=weight)
# Generate Boundary Graph
External_Graph = nx.Graph()
for node in Syndrome.nodes():
charge = Syndrome.node[node]['charge']
external_node = AssociatedExternal(node, code.Dual[type], code.External[type])
External_Graph.add_node(external_node, charge=(-charge) % dim)
# weight = -code.distance(type, node, external_node)
weight = - common.euclidean_dist(node, external_node)
Syndrome.add_edge(*(node, external_node), weight=weight)
    # Ensure an even number of nodes in Syndrome so that a perfect
    # (maximum-cardinality) matching exists: drop the external node whose
    # boundary edge has the most negative weight, i.e. the largest distance.
    if len(Syndrome.nodes()) % 2 != 0:
        removed_external = External_Graph.nodes()[0]
        edge = Syndrome.edges(removed_external)[0]
        min_weight = Syndrome.get_edge_data(*edge)['weight']
for external_node in External_Graph.nodes():
edge = Syndrome.edges(external_node)[0]
weight = Syndrome.get_edge_data(*edge)['weight']
if weight < min_weight:
removed_external = external_node
min_weight = weight
External_Graph.remove_node(removed_external)
Syndrome.remove_node(removed_external)
# Connect External Nodes
for ext1 in External_Graph:
for ext2 in External_Graph:
if ext1 != ext2:
Syndrome.add_edge(*(ext1, ext2), weight=0)
TempMatching = nx.max_weight_matching(Syndrome, maxcardinality=True)
    Matching = {}
    # As in DSP_Matching: each edge appears twice in TempMatching, so keep
    # each pair only once, and only pairs touching the dual lattice.
    for node, neighbor in TempMatching.items():
if neighbor not in Matching:
if node in code.Dual[type].nodes() or neighbor in code.Dual[type].nodes():
Matching[node] = neighbor
return Matching
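# Portability note: MinWeightMatching leans on NetworkX 1.x behavior in several
# places - dict-style Syndrome.node[...] attribute access, list-returning
# .nodes() and .edges() (indexed with [0]), and a dict-valued matching. Under
# NetworkX >= 2.0 these would need G.nodes[node], list(G.edges(node))[0], and
# a set of (u, v) pairs from max_weight_matching.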
# Recovery Operations
# Generate recovery chains to correct for errors during code cycle.