def order_build(graph):
'''
Assumes that packages are in graph.
Builds a temporary graph of relevant nodes and returns its topological sort.
Relevant nodes are selected in a breadth-first traversal sourced at each pkg
in packages.
'''
reorder_cyclical_test_dependencies(graph)
try:
order = nx.topological_sort(graph, reverse=True)
except nx.exception.NetworkXUnfeasible:
raise ValueError("Cycles detected in graph: %s" % nx.find_cycle(graph,
orientation='reverse'))
return order
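# A minimal standalone sketch of the same pattern, separate from the snippet
# above: topologically sort a small dependency graph and raise a readable error
# if a cycle makes sorting impossible. The graph contents are invented for
# illustration; list(...) is used so the call works with both the list-returning
# (NetworkX 1.x) and generator-returning (NetworkX 2.x) APIs.
import networkx as nx

def _example_order_build():
    deps = nx.DiGraph()
    deps.add_edges_from([("libc", "openssl"), ("openssl", "curl"), ("curl", "app")])
    try:
        return list(nx.topological_sort(deps))
    except nx.exception.NetworkXUnfeasible:
        raise ValueError("Cycles detected in graph: %s" % nx.find_cycle(deps))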
def _get_calc_nodes(self, name):
g = nx.DiGraph()
g.add_nodes_from(self.dag.nodes())
g.add_edges_from(self.dag.edges())
for n in nx.ancestors(g, name):
node = self.dag.node[n]
state = node[_AN_STATE]
if state == States.UPTODATE or state == States.PINNED:
g.remove_node(n)
ancestors = nx.ancestors(g, name)
for n in ancestors:
state = self.dag.node[n][_AN_STATE]
if state == States.UNINITIALIZED and len(self.dag.pred[n]) == 0:
raise Exception("Cannot compute {} because {} is uninitialized".format(name, n))
if state == States.PLACEHOLDER:
raise Exception("Cannot compute {} because {} is a placeholder".format(name, n))
ancestors.add(name)
nodes_sorted = nx.topological_sort(g)
return [n for n in nodes_sorted if n in ancestors]
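# Hedged illustration of the idea above on a bare DiGraph, without the
# Computation object or node states: collect the ancestors of a target node and
# return them in dependency order. Graph and node names are made up.
import networkx as nx

def _example_calc_order(target="d"):
    g = nx.DiGraph([("a", "b"), ("b", "d"), ("c", "d")])
    needed = nx.ancestors(g, target) | {target}
    return [n for n in nx.topological_sort(g) if n in needed]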
def to_df(self):
"""
Get a dataframe containing the states and value of all nodes of computation
::
>>> comp = loman.Computation()
>>> comp.add_node('foo', value=1)
>>> comp.add_node('bar', value=2)
>>> comp.to_df()
state value is_expansion
bar States.UPTODATE 2 NaN
foo States.UPTODATE 1 NaN
"""
df = pd.DataFrame(index=nx.topological_sort(self.dag))
df[_AN_STATE] = pd.Series(nx.get_node_attributes(self.dag, _AN_STATE))
df[_AN_VALUE] = pd.Series(nx.get_node_attributes(self.dag, _AN_VALUE))
df_timing = pd.DataFrame.from_dict(nx.get_node_attributes(self.dag, 'timing'), orient='index')
df = pd.merge(df, df_timing, left_index=True, right_index=True, how='left')
return df
def get_topological_sorted_tasks(self):
""" Create the tasks Graph and perform topological sorting
A topological sort is a nonunique permutation of the nodes
such that an edge from u to v implies that u appears before
v in the topological sort order.
:return: Return a list of nodes in topological sort order.
"""
# First, map the tasks IDs to their original position
tasks_position = {}
for count_position, task in enumerate(self.workflow['tasks']):
tasks_position[task['id']] = count_position
sorted_tasks_id = nx.topological_sort(self.graph, reverse=False)
return sorted_tasks_id
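# Quick standalone check of the property stated in the docstring above: for
# every edge (u, v), u appears before v in the topological order. The task
# graph here is a toy example.
import networkx as nx

def _example_topological_property():
    g = nx.DiGraph([("read", "clean"), ("clean", "train"), ("read", "train")])
    order = list(nx.topological_sort(g))
    position = {task: i for i, task in enumerate(order)}
    assert all(position[u] < position[v] for u, v in g.edges())
    return order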
def workflow_execution_parcial(self):
topological_sort = self.get_topological_sorted_tasks()
for node_obj in topological_sort:
# print self.workflow_graph.node[node]
print (nx.ancestors(self.graph, node_obj),
self.graph.predecessors(node_obj),
node_obj,
self.graph.node[node_obj]['in_degree_required'],
self.graph.node[node_obj]['in_degree'],
self.graph.node[node_obj]['out_degree_required'],
self.graph.node[node_obj]['out_degree']
)
return True
# only to debug
def get_nodes(self, name, next_layer=False, last_layer=False, type=None):
if type is None:
name2node = {node.name: node for node in self.nodes()}
else:
name2node = {node.name: node for node in self.nodes() if node.type in type}
assert name in name2node.keys(), "Name must be unique"
node = name2node[name]
if next_layer:
if type is None:
return self.successors(node)
else:
poss_list, begin = [], False
for poss in nx.topological_sort(self):
if poss == node:
begin = True
continue
if begin and poss in name2node.values():
poss_list.append(poss)
return [poss_list[0]]
elif last_layer:
return self.predecessors(node)
else:
return [node]
def update(self):
self.type2ind = {}
for node in self.nodes():
import re
ind = int(re.findall(r'^\w+?(\d+)$', node.name)[0])
self.type2ind[node.type] = self.type2ind.get(node.type, []) + [ind]
for node in nx.topological_sort(self):
if node.type in ['Conv2D', 'Group', 'Conv2D_Pooling']:
plus = 1
else:
plus = 0
if len(self.predecessors(node)) == 0:
node.depth = 0
else:
pre_depth = [_node.depth for _node in self.predecessors(node)]
pre_depth = max(pre_depth)
node.depth = self.max_depth = pre_depth + plus
def longestWeightedPath(self, G):
"""
find longest path in a weighted DAG
:param G: dag (networkx graph object)
:return: longest_weighted_path (list)
"""
dist = {} # stores [node, distance] pair
# start with topological sorted order
for node in nx.topological_sort(G):
# pairs of dist,node for all incoming edges
pairs = [(dist[v][0] + G[v][node]['weight'], v) for v in G.pred[node]] # incoming pairs
if pairs:
dist[node] = max(pairs)
else:
dist[node] = (0, node)
node, (length,_) = max(dist.items(), key=lambda x:x[1])
path = []
while length > 0:
path.append(node)
length, node = dist[node]
return list(reversed(path))
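# Possible usage of the longest-path idea above, rewritten as a free function
# over a small weighted DAG so it can run on its own. Node names and weights are
# invented; the expected result for this toy graph is ['a', 'b', 'd'].
import networkx as nx

def _example_longest_weighted_path():
    g = nx.DiGraph()
    g.add_weighted_edges_from([("a", "b", 1), ("b", "d", 5), ("a", "c", 2), ("c", "d", 1)])
    dist = {}  # node -> (distance, predecessor)
    for node in nx.topological_sort(g):
        pairs = [(dist[v][0] + g[v][node]["weight"], v) for v in g.pred[node]]
        dist[node] = max(pairs) if pairs else (0, node)
    node, (length, _) = max(dist.items(), key=lambda x: x[1])
    path = []
    while length > 0:
        path.append(node)
        length, node = dist[node]
    return list(reversed(path))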
def _sort_deps_to_targets(self):
assert(self.state == TransactionState.INIT)
self.touches = list(reversed(list(nx.topological_sort(self.depend_G))))
self.installs = [
target for target in self.touches if not target.is_local
]
def exclude_subtree_from(self, package):
sub = dfs_tree(self.G, package)
root = nx.topological_sort(self.G)[0]
seen = {package}
for n in sub.nodes():
if n in seen:
continue
seen.add(n)
if local_node_connectivity(self.G, root, n) > 1:
sub.remove_nodes_from(InstalledGraph(sub).exclude_subtree_from(n))
return InstalledGraph(sub)
def root_node(self):
"""
Gives the root node of this graph.
:return: datamodel.base.node.Node instance
"""
return nx.topological_sort(self._nxgraph, reverse=True)[-1]
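# The same trick sketched on a plain DiGraph: with edges pointing from parent to
# child, the last element of the reversed topological order (equivalently the
# first element of the plain order) is the root. Toy graph for illustration.
import networkx as nx

def _example_root_node():
    g = nx.DiGraph([("root", "mid"), ("mid", "leaf1"), ("mid", "leaf2")])
    return list(nx.topological_sort(g))[0]  # 'root'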
def execute(self, result_label="result"):
"""
Starts from the leaf nodes, calculates their outputs and feeds them as
inputs to their parent ones. The loop stops once the root node is reached.
Optionally, you can assign a custom label to the output of the root node.
:param result_label: str (optional)
:return:
"""
# Cannot execute graphs with isles
if self.has_isles():
raise exceptions.GraphExecutionError("Cannot execute graphs with "
"isolated nodes")
# Sort post-order (leaf nodes first, root node at the end)
ordered_nodes = nx.topological_sort(self._nxgraph, reverse=True)
# Assign a label to the output of the very last node to be executed:
# the root node!
self.root_node.set_output_label(result_label)
# Output of node N is input for its parent
try:
for n in ordered_nodes:
output = n.execute()
predecessors = self._nxgraph.predecessors(n)
if not predecessors:
return output
for parent in predecessors:
parent.input(output)
except exceptions.StopGraphExecutionSignal as e:
console.info(e.message)
return None
except Exception as e:
console.error(traceback.format_exc())
raise exceptions.GraphExecutionError(e.message)
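# Minimal self-contained sketch of the execution pattern above, without the
# framework classes: walk the graph in reverse topological order (leaves first)
# and feed each node's result to its parent. The graph, the callables and the
# expected result (7) are all invented for illustration.
import networkx as nx

def _example_execute():
    g = nx.DiGraph([("sum", "double")])                # edge points parent -> child
    funcs = {"double": lambda inputs: 2 * 3,           # leaf node, produces 6
             "sum": lambda inputs: sum(inputs) + 1}    # root node, consumes children
    results = {}
    for n in reversed(list(nx.topological_sort(g))):   # leaves before the root
        child_outputs = [results[c] for c in g.successors(n)]
        results[n] = funcs[n](child_outputs)
    return results["sum"]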
def build_involved_dag(self, data_keys):
# get the nodes and edges that will be considered during the generation
involved_dag = self._dag.build_directed_graph(data_keys,
root_node_key='generate')
involved_dag.reverse(copy=False)
generation_order = nx.topological_sort(involved_dag)[:-1]
involved_dag.node['generate']['skipped'] = False
self._dag_prune_can_skip(involved_dag, generation_order)
return involved_dag, generation_order
def setup_pyramid(self, N):
# pyramid of normals with known covariances and latent means
assert(N > 1)
self.N = N # number of layers in the pyramid
lambdas = [1.1 * (k + 1) / N for k in range(N + 2)]
self.lambdas = list(map(lambda x: Variable(torch.Tensor([x])), lambdas))
# generate data
self.data = []
self.N_data = 3
bottom_layer_size = 2 ** (N - 1)
for i in range(bottom_layer_size):
data_i = []
for k in range(self.N_data):
data_i.append(Variable(torch.Tensor([0.25]) +
(0.1 + 0.4 * (i + 1) / bottom_layer_size) * torch.randn(1)))
self.data.append(data_i)
self.data_sums = [sum(self.data[i]) for i in range(bottom_layer_size)]
self.N_data = Variable(torch.Tensor([self.N_data]))
self.q_dag = self.construct_q_dag()
# compute the order in which guide samples are generated
self.q_topo_sort = list(networkx.topological_sort(self.q_dag))
self.which_nodes_reparam = self.setup_reparam_mask(len(self.q_topo_sort))
self.calculate_variational_targets()
self.set_model_permutations()
# for choosing which latents should be reparameterized
def forward(self, *inputs):
self.assert_graph_is_valid()
input_nodes = self.input_nodes
output_nodes = self.output_nodes
assert len(inputs) == len(input_nodes), "Was expecting {} " \
"arguments for as many input nodes, got {}."\
.format(len(input_nodes), len(inputs))
# Unpack inputs to input nodes
for input, input_node in zip(inputs, input_nodes):
self.forward_through_node(input_node, input=input)
# Toposort the graph
toposorted = topological_sort(self.graph)
# Remove all input and output nodes
toposorted = [name for name in toposorted
if name not in input_nodes and name not in output_nodes]
# Forward
for node in toposorted:
self.forward_through_node(node)
# Read outputs from output nodes
outputs = []
for output_node in output_nodes:
# Get all incoming edges to output node
outputs_from_node = [self.graph[incoming][this]['payload']
for incoming, this in self.graph.in_edges(output_node)]
outputs.append(pyu.from_iterable(outputs_from_node))
# Clear payloads for next pass
self.clear_payloads()
# Done.
return pyu.from_iterable(outputs)
def order_build(graph, packages=None, level=0, filter_dirty=True):
'''
Assumes that packages are in graph.
Builds a temporary graph of relevant nodes and returns its topological sort.
Relevant nodes are selected in a breadth-first traversal sourced at each pkg
in packages.
Expected values for packages are None or a sequence:
None: build the whole graph
empty sequence: build nodes marked dirty
non-empty sequence: build nodes in sequence
'''
if not packages:
packages = graph.nodes()
if filter_dirty:
packages = dirty(graph)
tmp_global = graph.subgraph(packages)
# copy relevant node data to tmp_global
for n in tmp_global.nodes_iter():
tmp_global.node[n] = graph.node[n]
try:
order = nx.topological_sort(tmp_global, reverse=True)
except nx.exception.NetworkXUnfeasible:
raise ValueError("Cycles detected in graph: {0}".format(nx.find_cycle(tmp_global,
orientation='ignore')))
return tmp_global, order
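# Hedged sketch of the subgraph-then-sort step above, minus the dirty() filter
# and node-data copying that depend on the rest of the build system. Package
# names are invented and edges are assumed to point from a package to its
# dependency, so the reversed order builds dependencies first.
import networkx as nx

def _example_order_subset():
    graph = nx.DiGraph([("app", "libfoo"), ("libfoo", "libc"), ("app", "libbar")])
    packages = ["app", "libfoo", "libc"]   # e.g. a user-supplied subset
    tmp_global = graph.subgraph(packages)
    return list(reversed(list(nx.topological_sort(tmp_global))))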
def get_named_nodes(self, name):
"""Return a list of "op" nodes with the given name."""
nlist = []
if name not in self.basis:
raise DAGCircuitError("%s is not in the list of basis operations"
% name)
# Iterate through the nodes of self in topological order
ts = nx.topological_sort(self.multi_graph)
for n in ts:
nd = self.multi_graph.node[n]
if nd["type"] == "op" and nd["name"] == name:
nlist.append(n)
return nlist
def serial_layers(self):
"""Return a list of layers for all gates of this circuit.
A serial layer is a circuit with one gate. The layers have the
same structure as in layers().
"""
layers_list = []
ts = nx.topological_sort(self.multi_graph)
for n in ts:
nxt_nd = self.multi_graph.node[n]
if nxt_nd["type"] == "op":
new_layer = DAGCircuit()
for k, v in self.qregs.items():
new_layer.add_qreg(k, v)
for k, v in self.cregs.items():
new_layer.add_creg(k, v)
new_layer.basis = copy.deepcopy(self.basis)
new_layer.gates = copy.deepcopy(self.gates)
# Save the support of the operation we add to the layer
support_list = []
# Operation data
qa = copy.copy(nxt_nd["qargs"])
ca = copy.copy(nxt_nd["cargs"])
pa = copy.copy(nxt_nd["params"])
co = copy.copy(nxt_nd["condition"])
cob = self._bits_in_condition(co)
# Add node to new_layer
new_layer.apply_operation_back(nxt_nd["name"],
qa, ca, pa, co)
# Add operation to partition
if nxt_nd["name"] != "barrier":
# support_list.append(list(set(qa) | set(ca) | set(cob)))
support_list.append(list(set(qa)))
l_dict = {"graph": new_layer, "partition": support_list}
layers_list.append(l_dict)
return layers_list
def collect_runs(self, namelist):
"""Return a set of runs of "op" nodes with the given names.
For example, "... h q[0]; cx q[0],q[1]; cx q[0],q[1]; h q[1]; .."
would produce the tuple of cx nodes as an element of the set returned
from a call to collect_runs(["cx"]). If instead the cx nodes were
"cx q[0],q[1]; cx q[1],q[0];", the method would still return the
pair in a tuple. The namelist can contain names that are not
in the circuit's basis.
Nodes must have only one successor to continue the run.
"""
group_list = []
# Iterate through the nodes of self in topological order
# and form tuples containing sequences of gates
# on the same qubit(s).
ts = nx.topological_sort(self.multi_graph)
nodes_seen = dict(zip(ts, [False] * len(ts)))
for node in ts:
nd = self.multi_graph.node[node]
if nd["type"] == "op" and nd["name"] in namelist \
and not nodes_seen[node]:
group = [node]
nodes_seen[node] = True
s = self.multi_graph.successors(node)
while len(s) == 1 and \
self.multi_graph.node[s[0]]["type"] == "op" and \
self.multi_graph.node[s[0]]["name"] in namelist:
group.append(s[0])
nodes_seen[s[0]] = True
s = self.multi_graph.successors(s[0])
if len(group) > 1:
group_list.append(tuple(group))
return set(group_list)
def pre_transpile(self, workflow, graph, params=None):
params = params or {}
for visitor in self.VISITORS:
visitor().visit(workflow, nx.topological_sort(graph),
self.operations, params)
def topological_order(self, reverse=False):
"""
Topological sort of the graph.
:param reverse: whether to reverse the sort
:return: list which represents the topological sort
"""
task_ids = topological_sort(self._graph)
if reverse:
task_ids = reversed(tuple(task_ids))
for task_id in task_ids:
yield self.get_task(task_id)
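# Small standalone illustration of the reverse flag above: the reversed tuple
# simply yields downstream tasks before their prerequisites. Task names are
# invented.
import networkx as nx

def _example_topological_order(reverse=False):
    g = nx.DiGraph([("create_vm", "configure_vm"), ("configure_vm", "start_app")])
    task_ids = nx.topological_sort(g)
    if reverse:
        task_ids = reversed(tuple(task_ids))
    return list(task_ids)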
def calc_choice_weight(self, evolution_choice_list, model):
model_depth = 0
for node in nx.topological_sort(model.graph):
if node.type in ['Conv2D', 'Group', 'Conv2D_Pooling']:
model_depth = model_depth + 1
model_max_depth = model.config.model_max_depth
max_pooling_limit = model.config.max_pooling_limit
max_pooling_cnt = model.config.max_pooling_cnt
weight = {}
if 'deeper_with_pooling' in evolution_choice_list:
weight['deeper_with_pooling'] = int(
model_max_depth - model_max_depth / (2 * max_pooling_limit) * max_pooling_cnt)
if 'deeper' in evolution_choice_list:
weight['deeper'] = model_max_depth / 2
if 'wider' in evolution_choice_list:
weight['wider'] = model_max_depth / 2
if 'add_skip' in evolution_choice_list:
weight['add_skip'] = model_depth / 2 * 2
if 'add_group' in evolution_choice_list:
weight['add_group'] = model_depth / 2 * 2
# choice_len = len(evolution_choice_list)
# return [1] * choice_len # equal weight now
return weight
def add_skip(self, model, config):
assert nx.is_directed_acyclic_graph(model.graph)
topo_nodes = nx.topological_sort(model.graph)
names = [node.name for node in topo_nodes
if node.type == 'Conv2D' or node.type == 'Group' or node.type == 'Conv2D_Pooling']
if len(names) <= 2:
logger.info('can\'t find a suitable layer to apply add_skip operation, returning original model')
return model, False
max_iter = 100
for i in range(max_iter + 1):
if i == max_iter:
logger.info('can\'t find a suitable layer to apply add_skip operation, returning original model')
return model, False
from_idx = np.random.randint(0, len(names) - 2)
to_idx = from_idx + 1
next_nodes = model.graph.get_nodes(names[to_idx], next_layer=True, last_layer=False)
if 'Add' in [node.type for node in next_nodes]:
continue
else:
break
from_name = names[from_idx]
to_name = names[to_idx]
logger.info('choose {} and {} to add_skip'.format(from_name, to_name))
return self.skip(model, from_name, to_name, config), True
# add group operation
def main():
func_list = parse.parse(open(sys.argv[1]).read())
G = foo(func_list)
#G = callgraph(func_list)
nx.write_dot(G,"G.dot")
#H = nx.dfs_tree(G,'solver')
#nx.write_dot(H,"H.dot")
#print nx.topological_sort(H)
def get_schedule(self, simulation):
schedule = {host: [] for host in simulation.hosts}
hosts_count = len(simulation.hosts)
graph = simulation.get_task_graph()
for idx, task in enumerate(networkx.topological_sort(graph)):
schedule[simulation.hosts[idx % hosts_count]].append(task)
return schedule
def get_schedule(self, simulation):
schedule = {host: [] for host in simulation.hosts}
graph = simulation.get_task_graph()
for task in networkx.topological_sort(graph):
schedule[random.choice(simulation.hosts)].append(task)
return schedule
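# Sketch of the scheduling pattern above with plain data in place of the
# simulation object: because tasks are assigned in topological order, every
# host's list respects the dependencies among the tasks it receives. Host and
# task names are invented.
import random
import networkx as nx

def _example_random_schedule():
    hosts = ["host0", "host1"]
    graph = nx.DiGraph([("extract", "transform"), ("transform", "load")])
    schedule = {host: [] for host in hosts}
    for task in nx.topological_sort(graph):
        schedule[random.choice(hosts)].append(task)
    return schedule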
def oct_dict(cls, nxgraph, platform_model):
"""
Build optimistic cost table as an dict task->array.
Args:
nxgraph: networkx representation of task graph
platform_model: platform linear model
"""
result = dict()
for task in networkx.topological_sort(nxgraph, reverse=True):
oct_row = numpy.zeros(platform_model.host_count)
if not nxgraph[task]:
result[task] = oct_row
continue
for host, idx in platform_model.host_map.items():
child_results = []
for child, edge_dict in nxgraph[task].items():
row = result[child].copy()
row += child.amount / platform_model.speed
comm_cost = numpy.ones(platform_model.host_count) * edge_dict["weight"] / platform_model.mean_bandwidth
comm_cost += platform_model.mean_latency
comm_cost[idx] = 0
row += comm_cost
child_results.append(row.min())
oct_row[idx] = max(child_results)
result[task] = oct_row
return result
def findLongestPath(self, chr):
"""
find the longest path for a chr
:param chr: chromosome name (str)
:return: order (list), longest_weighted_path (list)
"""
# topological sorting and find the longest path
dag = self.DAGs[chr]
order = nx.topological_sort(dag)
longest_weighted_path = self.longestWeightedPath(dag)
# sys.stderr.write("[execute]\tPerforming topological sorting for " + str(chr) + "\n")
# sys.stderr.write("[execute]\tFinding longest paths in " + str(chr) + "\n")
print("[results]\tLongest weighted path in", chr, "\n", longest_weighted_path)
return order, longest_weighted_path
def ordered_steps(cls, cfg):
"""
Return ordered steps from config
"""
return nx.topological_sort(cls.create_dag(cfg))