import copy

import networkx as nx


def Euler_Tour(multigraph):
    """ Uses Fleury's algorithm to find the Euler Tour of the MultiGraph.
    """
    tour = []
    graph_nodes = list(nx.nodes(multigraph))
    current_node = graph_nodes[0]
    tour.append(current_node)
    while nx.number_of_edges(multigraph) > 0:
        for edge in multigraph.edges(current_node):
            # Prefer a non-bridge edge: only cross a bridge when no
            # alternative keeps the remaining graph connected.
            temp_graph = copy.deepcopy(multigraph)
            temp_graph.remove_edge(edge[0], edge[1], key=None)
            if nx.is_connected(temp_graph):
                tour.append(edge[1])
                current_node = edge[1]
                multigraph.remove_edge(edge[0], edge[1], key=None)
                break
        else:
            # Every edge from current_node is a bridge; take the last one
            # inspected and drop any nodes it strands.
            tour.append(edge[1])
            current_node = edge[1]
            multigraph.remove_edge(edge[0], edge[1], key=None)
            multigraph.remove_nodes_from(list(nx.isolates(multigraph)))
    return tour
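# A minimal usage sketch (hypothetical graph, not from the original source):
# a triangle multigraph is connected and every vertex has even degree, so an
# Euler tour exists. Euler_Tour consumes the graph it is given, so pass a
# deep copy if the original is still needed.
g = nx.MultiGraph([(0, 1), (1, 2), (2, 0)])
print(Euler_Tour(copy.deepcopy(g)))  # e.g. [0, 1, 2, 0]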
def ambiguous_nodes(self, node):
    """
    Takes a node as parameter and returns the number of candidate
    (ambiguous) nodes in the graph that share its word/POS label.
    """
    k = 0
    #if node == "," + "/" + "," :
    #    return k
    while self.graph.has_node((node, k)):
        k += 1
    return k
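# A hypothetical standalone illustration of the (word/POS, k) keying that
# ambiguous_nodes probes: copies of the same word/POS label are stored as
# (node, 0), (node, 1), ... so counting candidates means probing successive
# keys until one is missing. The graph below is toy data, not the class state.
import networkx as nx

g = nx.Graph()
g.add_node(("the/DET", 0))
g.add_node(("the/DET", 1))
k = 0
while g.has_node(("the/DET", k)):
    k += 1
print(k)  # 2 ambiguous candidates for "the/DET"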
#-B-----------------------------------------------------------------------B-
#-T-----------------------------------------------------------------------T-
def test_new_reviewer(self):
    """Test new reviewer has a given name.
    """
    name = "test-name"
    reviewer = self.graph.new_reviewer(name)
    self.assertEqual(reviewer.name, name)
    self.assertIn(reviewer, nx.nodes(self.graph.graph))

def test_new_product(self):
    """Test new product has a given name.
    """
    name = "test-product"
    product = self.graph.new_product(name)
    self.assertEqual(product.name, name)
    self.assertIn(product, nx.nodes(self.graph.graph))
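# Both assertions lean on nx.nodes returning a view that supports membership
# tests; a minimal standalone sketch with a hypothetical graph (not the
# review-graph class under test):
import networkx as nx

g = nx.Graph()
g.add_node("reviewer-1")
assert "reviewer-1" in nx.nodes(g)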
def dag(recipe_folder, config, packages="*", format='gml', hide_singletons=False):
    """
    Export the DAG of packages to a graph format file for visualization
    """
    dag, name2recipes = utils.get_dag(utils.get_recipes(recipe_folder, packages), config)
    if hide_singletons:
        # Materialize the node list first: removing nodes while iterating
        # over the live node view would fail.
        for node in list(nx.nodes(dag)):
            if dag.degree(node) == 0:
                dag.remove_node(node)
    if format == 'gml':
        nx.write_gml(dag, sys.stdout.buffer)
    elif format == 'dot':
        write_dot(dag, sys.stdout)
    elif format == 'txt':
        subdags = sorted(map(sorted, nx.connected_components(dag.to_undirected())))
        subdags = sorted(subdags, key=len, reverse=True)
        singletons = []
        for i, s in enumerate(subdags):
            if len(s) == 1:
                singletons += s
                continue
            print("# subdag {0}".format(i))
            subdag = dag.subgraph(s)
            recipes = [
                recipe for package in nx.topological_sort(subdag)
                for recipe in name2recipes[package]]
            print('\n'.join(recipes) + '\n')
        if not hide_singletons:
            print('# singletons')
            recipes = [recipe for package in singletons for recipe in
                       name2recipes[package]]
            print('\n'.join(recipes) + '\n')
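# The singleton-hiding step above in isolation, on a hypothetical toy DiGraph
# (the real function obtains its DAG from utils.get_dag):
import networkx as nx

toy = nx.DiGraph([("a", "b")])
toy.add_node("lonely")  # degree 0, so it is a singleton
for node in list(nx.nodes(toy)):
    if toy.degree(node) == 0:
        toy.remove_node(node)
print(sorted(toy.nodes()))  # ['a', 'b']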
import itertools

import networkx as nx


def get_production_rule(G, child, itx):
    # Build the right-hand side as the subgraph induced by `child`.
    rhs = nx.Graph()
    for n in G.subgraph(child).nodes():
        rhs.add_node(n)
    for e in G.subgraph(child).edges():
        rhs.add_edge(e[0], e[1])
    # Remove links between external nodes (edges in itx).
    for x in itertools.combinations(itx, 2):
        if rhs.has_edge(x[0], x[1]):
            rhs.remove_edge(x[0], x[1])
    return rhs
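# A minimal usage sketch on a hypothetical graph: nodes 1 and 3 act as the
# external (interface) nodes, so the induced (1, 3) edge is dropped from the
# extracted right-hand side.
G = nx.Graph([(1, 2), (2, 3), (1, 3), (3, 4)])
rhs = get_production_rule(G, child=[1, 2, 3], itx=[1, 3])
print(sorted(rhs.edges()))  # [(1, 2), (2, 3)]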
from collections import Counter
from random import sample

import networkx as nx


def get_graph_hops(graph, num_samples):
    # Estimate the hop-count distribution by running a BFS from num_samples
    # random start nodes; hops() is an external helper (not shown) that
    # yields (distance, count) pairs from the BFS successor structure.
    c = Counter()
    for i in range(0, num_samples):
        node = sample(list(graph.nodes()), 1)[0]
        b = nx.bfs_successors(graph, node)
        for l, h in hops(b, node):
            c[l] += h
    hopper = Counter()
    for l in c:
        hopper[l] = float(c[l]) / float(num_samples)
    return hopper
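# The hops() helper is not shown here, so the function above is not runnable
# on its own. As a self-contained approximation of the same estimate, hop
# counts from each sampled start node can be read off shortest-path lengths
# (this swaps nx.bfs_successors for nx.single_source_shortest_path_length):
def graph_hops_approx(graph, num_samples):
    c = Counter()
    for _ in range(num_samples):
        node = sample(list(graph.nodes()), 1)[0]
        for dist in nx.single_source_shortest_path_length(graph, node).values():
            if dist > 0:
                c[dist] += 1
    return Counter({l: float(c[l]) / num_samples for l in c})

print(graph_hops_approx(nx.path_graph(4), 2))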
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd


def draw_network_value(orig_g, mG):
    """
    Network values: the distribution of eigenvector components (indicators of
    "network value") associated with the largest eigenvalue of the graph
    adjacency matrix has also been found to be skewed (Chakrabarti et al., 2004).
    """
    eig_cents = [nx.eigenvector_centrality_numpy(g) for g in mG]  # nodes with eigencentrality
    net_vals = []
    for cntr in eig_cents:
        net_vals.append(sorted(cntr.values(), reverse=True))
    df = pd.DataFrame(net_vals)
    plt.xscale('log')
    plt.yscale('log')
    plt.fill_between(df.columns, df.mean() - df.sem(), df.mean() + df.sem(),
                     color='blue', alpha=0.2, label="se")
    h, = plt.plot(df.mean(), color='blue', aa=True, linewidth=4, ls='--', label="H*")
    orig, = plt.plot(sorted(nx.eigenvector_centrality(orig_g).values(), reverse=True),
                     color='black', linewidth=4, ls='-', label="H")
    plt.title('Principal Eigenvector Distribution')
    plt.ylabel('Principal Eigenvector')
    plt.tick_params(
        axis='x',           # changes apply to the x-axis
        which='both',       # both major and minor ticks are affected
        bottom=False,       # ticks along the bottom edge are off
        top=False,          # ticks along the top edge are off
        labelbottom=False)  # labels along the bottom edge are off
    plt.legend([orig, h], ['$H$', 'HRG $H^*$'], loc=3)
    # fig = plt.gcf()
    # fig.set_size_inches(5, 4, forward=True)
    plt.show()
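# A minimal sketch of the quantity being plotted: the sorted eigenvector
# centrality values ("network values") of a single graph, here a hypothetical
# toy graph rather than the HRG data the plot compares against.
net_value = sorted(nx.eigenvector_centrality(nx.karate_club_graph()).values(),
                   reverse=True)
print(net_value[:5])  # the heaviest eigenvector components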
def get_directed_context(self, node, k, dir='all', non_pos=False):
    """
    Returns the directed context of a given node, i.e. a list of word/POS
    strings for the left or right neighboring nodes in the graph. The
    function takes four parameters:

    - node is the word/POS tuple
    - k is the node identifier used when multiple nodes refer to the same
      word/POS (e.g. k=0 for (the/DET, 0), k=1 for (the/DET, 1), etc.)
    - dir controls the directed context calculation; it can be set to
      left, right or all (default)
    - non_pos is a boolean that, when true, removes stopwords from the
      context (default is false)
    """
    # Define the context containers
    l_context = []
    r_context = []
    # For all the sentence/position tuples
    for sid, off in self.graph.nodes[(node, k)]['info']:
        prev = self.sentence[sid][off-1][0] + self.sep +\
               self.sentence[sid][off-1][1]
        next = self.sentence[sid][off+1][0] + self.sep +\
               self.sentence[sid][off+1][1]
        if non_pos:
            if self.sentence[sid][off-1][0] not in self.stopwords:
                l_context.append(prev)
            if self.sentence[sid][off+1][0] not in self.stopwords:
                r_context.append(next)
        else:
            l_context.append(prev)
            r_context.append(next)
    # Returns the left (previous) context
    if dir == 'left':
        return l_context
    # Returns the right (next) context
    elif dir == 'right':
        return r_context
    # Returns the whole context
    else:
        l_context.extend(r_context)
        return l_context
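# A hypothetical illustration of the context extraction above, using a single
# padded sentence (start/end markers keep off-1 and off+1 in range) in place
# of the class's graph-backed state:
sep = "/"
sentence = [("-start-", "-"), ("the", "DET"), ("cat", "N"), ("-end-", "-")]
off = 2  # position of ("cat", "N")
left = sentence[off - 1][0] + sep + sentence[off - 1][1]
right = sentence[off + 1][0] + sep + sentence[off + 1][1]
print(left, right)  # the/DET -end-/-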
#-B-----------------------------------------------------------------------B-
#-B-----------------------------------------------------------------------B-
#-T-----------------------------------------------------------------------T-
def execute(self, G, epsilon=0.25, weighted=False, min_community_size=3):
    """
    Execute Demon algorithm
    :param G: the networkx graph on which to perform Demon
    :param epsilon: the tolerance required in order to merge communities (default 0.25)
    :param weighted: whether the graph is weighted or not
    :param min_community_size: min nodes needed to form a community
    """
    #######
    self.G = G
    self.epsilon = epsilon
    self.min_community_size = min_community_size
    for n in self.G.nodes():
        G.nodes[n]['communities'] = [n]
    self.weighted = weighted
    #######
    all_communities = {}
    total_nodes = len(nx.nodes(self.G))
    actual = 0
    for ego in nx.nodes(self.G):
        percentage = float(actual * 100) / total_nodes
        if (int(percentage) % 1) == 0:
            print('Ego-network analyzed: %d/100 (communities identified: %d)' % (
                percentage, len(all_communities)))
        actual += 1
        # ego_minus_ego: the ego's radius-1 neighbourhood without the ego itself
        ego_minus_ego = nx.ego_graph(self.G, ego, 1, False)
        community_to_nodes = self.__overlapping_label_propagation(ego_minus_ego, ego)
        # merging phase
        for c in community_to_nodes.keys():
            if len(community_to_nodes[c]) > self.min_community_size:
                actual_community = community_to_nodes[c]
                all_communities = self.__merge_communities(all_communities, actual_community)
    # print communities
    out_file_com = open("communities", "w")
    idc = 0
    for c in all_communities.keys():
        out_file_com.write("%d\t%s\n" % (idc, ','.join([str(x) for x in sorted(c)])))
        idc += 1
    out_file_com.flush()
    out_file_com.close()
    return
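# The ego_minus_ego step above in isolation: nx.ego_graph with center=False
# returns the radius-1 neighbourhood without the ego node itself (hypothetical
# toy graph):
import networkx as nx

g = nx.star_graph(3)  # node 0 linked to nodes 1, 2, 3
print(sorted(nx.ego_graph(g, 0, 1, center=False).nodes()))  # [1, 2, 3]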
import collections
import random

import networkx as nx


def bfs_eff_diam(G, NTestNodes, P):
    # Estimate the effective diameter: the number of hops within which a
    # fraction P of reachable node pairs lie, sampled by BFS from up to
    # NTestNodes random start nodes (hops() is an external helper, not shown).
    DistToCntH = {}
    NodeIdV = list(nx.nodes(G))
    random.shuffle(NodeIdV)
    for tries in range(0, min(NTestNodes, nx.number_of_nodes(G))):
        NId = NodeIdV[tries]
        b = nx.bfs_successors(G, NId)
        for l, h in hops(b, NId):
            if h == 0:
                continue
            if not l + 1 in DistToCntH:
                DistToCntH[l + 1] = h
            else:
                DistToCntH[l + 1] += h
    DistNbrsPdfV = {}
    SumPathL = 0.0
    PathCnt = 0.0
    for i in DistToCntH.keys():
        DistNbrsPdfV[i] = DistToCntH[i]
        SumPathL += i * DistToCntH[i]
        PathCnt += DistToCntH[i]
    # Turn the hop-count histogram into a cumulative distribution.
    oDistNbrsPdfV = collections.OrderedDict(sorted(DistNbrsPdfV.items()))
    CdfV = oDistNbrsPdfV
    for i in range(1, len(CdfV)):
        if not i + 1 in CdfV:
            CdfV[i + 1] = 0
        CdfV[i + 1] = CdfV[i] + CdfV[i + 1]
    EffPairs = P * CdfV[next(reversed(CdfV))]
    for ValN in CdfV.keys():
        if CdfV[ValN] > EffPairs:
            break
    if ValN >= len(CdfV):
        return next(reversed(CdfV))
    if ValN == 0:
        return 1
    # Interpolate between the two bracketing hop counts.
    DeltaNbrs = CdfV[ValN] - CdfV[ValN - 1]
    if DeltaNbrs == 0:
        return ValN
    return ValN - 1 + (EffPairs - CdfV[ValN - 1]) / DeltaNbrs
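# bfs_eff_diam also depends on the external hops() helper. A self-contained
# cross-check of the P-quantile idea, using exact pairwise shortest-path
# lengths on a toy graph (an approximation sketch, not the SNAP-style
# sampling estimator above):
def eff_diam_exact(G, P=0.9):
    lengths = sorted(
        d for _, dists in nx.all_pairs_shortest_path_length(G)
        for d in dists.values() if d > 0)
    return lengths[int(P * (len(lengths) - 1))]

print(eff_diam_exact(nx.path_graph(5)))  # 3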