This article collects typical code examples of the networkx.degree_centrality function in Python. If you have been struggling with questions such as what exactly degree_centrality does, how to use it, and what real calls look like, the hand-picked examples below should help.
Twenty code examples of degree_centrality are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
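Before diving in, a minimal sketch of the function itself may help: nx.degree_centrality(G) returns a dict mapping each node to its degree divided by n - 1, the maximum possible degree in a graph of n nodes. The toy graph below is our own illustration, not taken from any of the projects cited here.

import networkx as nx

# A small toy graph, purely for illustration.
G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4)])

# degree_centrality returns {node: degree / (n - 1)} for every node.
centrality = nx.degree_centrality(G)
print(centrality)  # {1: 1.0, 2: 0.3333..., 3: 0.6666..., 4: 0.6666...}

# The node with the highest degree centrality.
hub = max(centrality, key=centrality.get)
print(hub)  # 1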
Example 1: degree_removal
def degree_removal(g, recalculate=False):
    """
    Performs robustness analysis based on degree centrality,
    on the network g using sequential (recalculate = True)
    or simultaneous (recalculate = False) approach. Returns a list
    with the fraction of nodes removed and a list with the
    corresponding fractal dimensions of the network.
    """
m = nx.degree_centrality(g)
l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
x = []
y = []
dimension = fd.fractal_dimension(g, iterations=100, debug=False)
n = len(g.nodes())
x.append(0)
y.append(dimension)
for i in range(1, n-1):
g.remove_node(l.pop(0)[0])
if recalculate:
m = nx.degree_centrality(g)
l = sorted(m.items(), key=operator.itemgetter(1),
reverse=True)
dimension = fd.fractal_dimension(g, iterations=100, debug=False)
x.append(i * 1. / n)
y.append(dimension)
return x, y
Author: hernandcb, Project: complexNetworksMeasurements, Lines: 30, Source: dimensionPlotsOBCA.py
Example 2: degree_fracture
def degree_fracture(infile, outfile, fraction, recalculate = False):
"""
Removes given fraction of nodes from infile network in reverse order of
degree centrality (with or without recalculation of centrality values
after each node removal) and saves the network in outfile.
"""
g = networkx.read_gml(infile)
m = networkx.degree_centrality(g)
l = sorted(m.items(), key = operator.itemgetter(1), reverse = True)
largest_component = max(networkx.connected_components(g), key = len)
n = len(g.nodes())
for i in range(1, n - 1):
g.remove_node(l.pop(0)[0])
if recalculate:
m = networkx.degree_centrality(g)
l = sorted(m.items(), key = operator.itemgetter(1),
reverse = True)
largest_component = max(networkx.connected_components(g), key = len)
if i * 1. / n >= fraction:
break
components = networkx.connected_components(g)
component_id = 1
for component in components:
for node in component:
            g.nodes[node]["component"] = component_id
component_id += 1
networkx.write_gml(g, outfile)
Author: swamiiyer, Project: robustness, Lines: 28, Source: robustness.py
Example 3: degree
def degree(infile, recalculate = False):
"""
Performs robustness analysis based on degree centrality,
on the network specified by infile using sequential (recalculate = True)
or simultaneous (recalculate = False) approach. Returns a list
with fraction of nodes removed, a list with the corresponding sizes of
the largest component of the network, and the overall vulnerability
of the network.
"""
g = networkx.read_gml(infile)
m = networkx.degree_centrality(g)
l = sorted(m.items(), key = operator.itemgetter(1), reverse = True)
x = []
y = []
largest_component = max(networkx.connected_components(g), key = len)
n = len(g.nodes())
x.append(0)
y.append(len(largest_component) * 1. / n)
R = 0.0
for i in range(1, n - 1):
g.remove_node(l.pop(0)[0])
if recalculate:
m = networkx.degree_centrality(g)
l = sorted(m.items(), key = operator.itemgetter(1),
reverse = True)
largest_component = max(networkx.connected_components(g), key = len)
x.append(i * 1. / n)
R += len(largest_component) * 1. / n
y.append(len(largest_component) * 1. / n)
return x, y, 0.5 - R / n
Author: swamiiyer, Project: robustness, Lines: 31, Source: robustness.py
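A hedged usage sketch of the function above — "power.gml" is a hypothetical placeholder file, not part of the original project:

# Assumes the imports used by the example: networkx and operator.
x, y, vulnerability = degree("power.gml", recalculate=False)
# x[i] is the fraction of nodes removed, y[i] the relative size of the
# largest component after that removal, and the third value is the
# overall vulnerability score 0.5 - R / n.
print(vulnerability)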
Example 4: degree_component
def degree_component(seed_num, graph=None, graph_json_filename=None, graph_json_str=None):
if graph_json_filename is None and graph_json_str is None and graph is None:
return []
G = None
if graph is not None:
G = graph
elif graph_json_str is None:
G = util.load_graph(graph_json_filename=graph_json_filename)
else:
G = util.load_graph(graph_json_str=graph_json_str)
components = list(nx.connected_components(G))
    components = [c for c in components if len(c) > 0.1 * len(G)]
    total_size = sum(len(c) for c in components)
total_nodes = 0
rtn = []
for comp in components[1:]:
num_nodes = int(float(len(comp)) / total_size * seed_num)
component = G.subgraph(list(comp))
clse_cent = nx.degree_centrality(component)
collector = collections.Counter(clse_cent)
clse_cent = collector.most_common(num_nodes)
        rtn += [node for node, _ in clse_cent]
total_nodes += num_nodes
num_nodes = seed_num - total_nodes
component = G.subgraph(list(components[0]))
clse_cent = nx.degree_centrality(component)
collector = collections.Counter(clse_cent)
clse_cent = collector.most_common(num_nodes)
    rtn += [node for node, _ in clse_cent]
return rtn
Author: shimmy1996, Project: Pandemaniac, Lines: 33, Source: high_degree_component.py
Example 5: sna_calculations
def sna_calculations(g, play_file):
"""
:param g: a NetworkX graph object
:type g: object
:param play_file: the location of a play in .txt format
:type play_file: string
:return: returns a dictionary containing various network related figures
:rtype: dict
:note: also writes into results/file_name-snaCalculations.csv and results/allCharacters.csv
"""
file_name = os.path.splitext(os.path.basename(play_file))[0]
sna_calculations_list = dict()
sna_calculations_list['playType'] = file_name[0]
sna_calculations_list['avDegreeCentrality'] = numpy.mean(numpy.fromiter(iter(nx.degree_centrality(g).values()),
dtype=float))
sna_calculations_list['avDegreeCentralityStd'] = numpy.std(
numpy.fromiter(iter(nx.degree_centrality(g).values()), dtype=float))
sna_calculations_list['avInDegreeCentrality'] = numpy.mean(
numpy.fromiter(iter(nx.in_degree_centrality(g).values()), dtype=float))
sna_calculations_list['avOutDegreeCentrality'] = numpy.mean(
numpy.fromiter(iter(nx.out_degree_centrality(g).values()), dtype=float))
try:
sna_calculations_list['avShortestPathLength'] = nx.average_shortest_path_length(g)
    except nx.NetworkXError:
sna_calculations_list['avShortestPathLength'] = 'not connected'
sna_calculations_list['density'] = nx.density(g)
sna_calculations_list['avEigenvectorCentrality'] = numpy.mean(
numpy.fromiter(iter(nx.eigenvector_centrality(g).values()), dtype=float))
sna_calculations_list['avBetweennessCentrality'] = numpy.mean(
numpy.fromiter(iter(nx.betweenness_centrality(g).values()), dtype=float))
sna_calculations_list['DegreeCentrality'] = nx.degree_centrality(g)
sna_calculations_list['EigenvectorCentrality'] = nx.eigenvector_centrality(g)
sna_calculations_list['BetweennessCentrality'] = nx.betweenness_centrality(g)
# sna_calculations.txt file
    sna_calc_file = csv.writer(open('results/' + file_name + '-snaCalculations.csv', 'w', newline=''), quoting=csv.QUOTE_ALL,
                               delimiter=';')
for key, value in sna_calculations_list.items():
sna_calc_file.writerow([key, value])
# all_characters.csv file
if not os.path.isfile('results/allCharacters.csv'):
with open('results/allCharacters.csv', 'w') as f:
f.write(
'Name;PlayType;play_file;DegreeCentrality;EigenvectorCentrality;BetweennessCentrality;speech_amount;AverageUtteranceLength\n')
all_characters = open('results/allCharacters.csv', 'a')
character_speech_amount = speech_amount(play_file)
for character in sna_calculations_list['DegreeCentrality']:
all_characters.write(character + ';' + str(sna_calculations_list['playType']) + ';' + file_name + ';' + str(
sna_calculations_list['DegreeCentrality'][character]) + ';' + str(
sna_calculations_list['EigenvectorCentrality'][character]) + ';' + str(
sna_calculations_list['BetweennessCentrality'][character]) + ';' + str(
character_speech_amount[0][character]) + ';' + str(character_speech_amount[1][character]) + '\n')
all_characters.close()
    return sna_calculations_list
Author: IngoKl, Project: shakespearesna1406, Lines: 59, Source: ShakespeareSnaAnalysis.py
Example 6: __init__
def __init__(self, time, voteomat):
self.foldername = voteomat.network_func_name + voteomat.distribution_func_name
self.foldertime = time
self.path = "Statistics//"+self.foldername+"//"
self.path += g_candidates_affecting_nodes + "=" + str(voteomat.candidates_affecting) + "_"
self.path += g_candidates_affected_by_median + "=" + str(voteomat.candidates_affected) + "_"
self.path += g_neighbours_affecting_each_other + "=" + str(voteomat.affecting_neighbours) + "_"
self.path += g_counterforce_affecting_candidates + "=" + str(voteomat.counter_force_affecting) + "_"
self.path += "counterforce_left="+str(voteomat.counter_force_left)+"_"+"counterforce_right="+str(voteomat.counter_force_right)+ "_" + time
self.make_sure_path_exists(self.path)
self.file = open(self.path + "//statistic.csv", 'w')
self.statistic = {}
self.statistic["networkfunc"] = voteomat.network_func_name
self.statistic["distributionfunc"] = voteomat.distribution_func_name
self.statistic["acceptance"] = voteomat.acceptance
median, avg, std = voteomat.get_statistic()
self.statistic["median"] = []
self.statistic["median"].append(median)
self.statistic["avg"] = []
self.statistic["avg"].append(avg)
self.statistic["std"] = []
self.statistic["std"].append(std)
self.statistic["node_with_highest_degree_centrality"] = []
self.max_degree_node = max( nx.degree_centrality(voteomat.get_network()).items(),key = lambda x: x[1])[0]
self.statistic["node_with_highest_degree_centrality"].append(voteomat.get_network().nodes(data = True)[self.max_degree_node][1]["orientation"])
self.statistic["node_with_minimum_degree_centrality"] = []
self.min_degree_node = min(nx.degree_centrality(voteomat.get_network()).items(), key = lambda x: x[1])[0]
self.statistic["node_with_minimum_degree_centrality"].append(voteomat.get_network().nodes(data = True)[self.min_degree_node][1]["orientation"])
self.statistic["node_with_highest_closeness_centrality"] = []
self.max_closeness_node = max( nx.closeness_centrality(voteomat.get_network()).items(),key = lambda x: x[1])[0]
self.statistic["node_with_highest_closeness_centrality"].append(voteomat.get_network().nodes(data = True)[self.max_closeness_node][1]["orientation"])
self.statistic["node_with_highest_betweenness_centrality"] = []
self.max_betweenness_node = max(nx.betweenness_centrality(voteomat.get_network()).items() ,key = lambda x: x[1])[0]
self.statistic["node_with_highest_betweenness_centrality"].append(voteomat.get_network().nodes(data = True)[self.max_betweenness_node][1]["orientation"])
try:
self.statistic["node_with_highest_eigenvector_centrality"] = []
self.max_eigenvector_node = max( nx.eigenvector_centrality(voteomat.get_network(), max_iter = 1000).items(),key = lambda x: x[1])[0]
self.statistic["node_with_highest_eigenvector_centrality"].append(voteomat.get_network().nodes(data = True)[self.max_eigenvector_node][1]["orientation"])
except nx.NetworkXError:
print "Eigenvector centrality not possible."
freeman = self.freeman_centrality([x[1] for x in nx.degree_centrality(voteomat.get_network()).items()], max( nx.degree_centrality(voteomat.get_network()).items(),key = lambda x: x[1])[1])
self.statistic["freeman_centrality"] = round(freeman,2)
self.statistic["affecting_neighbours"] = voteomat.affecting_neighbours
self.statistic["affecting_candidates"] = voteomat.candidates_affecting
self.statistic["affected_canddiates"] = voteomat.candidates_affected
self.statistic["affecting_counter_force"] = voteomat.counter_force_affecting
self.statistic["affecting_counter_force_left"] = voteomat.counter_force_left
self.statistic["affecting_counter_force_right"] = voteomat.counter_force_right
self.statistic["candidates"] = []
for candidate in voteomat.candidates:
self.statistic["candidates"].append(candidate.to_save())
self.statistic["network"] = voteomat.get_network().nodes(data=True);
Author: aoberegg, Project: masterProjekt, Lines: 59, Source: Statistic.py
Example 7: degree_apl
def degree_apl(g, recalculate=False):
    """
    Performs robustness analysis based on degree centrality,
    on the network g using sequential (recalculate = True)
    or simultaneous (recalculate = False) approach. Returns a list
    with the fraction of nodes removed, a list with the corresponding
    average path lengths normalized by the initial average path length,
    and an overall measure derived from those normalized values.
    """
m = networkx.degree_centrality(g)
l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
x = []
y = []
average_path_length = 0.0
number_of_components = 0
n = len(g.nodes())
    for sg in (g.subgraph(c) for c in networkx.connected_components(g)):
average_path_length += networkx.average_shortest_path_length(sg)
number_of_components += 1
average_path_length = average_path_length / number_of_components
initial_apl = average_path_length
x.append(0)
y.append(average_path_length * 1. / initial_apl)
r = 0.0
for i in range(1, n - 2):
g.remove_node(l.pop(0)[0])
if recalculate:
m = networkx.degree_centrality(g)
l = sorted(m.items(), key=operator.itemgetter(1),
reverse=True)
average_path_length = 0.0
number_of_components = 0
        for sg in (g.subgraph(c) for c in networkx.connected_components(g)):
if len(sg.nodes()) > 1:
average_path_length += networkx.average_shortest_path_length(sg)
number_of_components += 1
average_path_length = average_path_length / number_of_components
        x.append(i * 1. / n)
r += average_path_length * 1. / initial_apl
y.append(average_path_length * 1. / initial_apl)
return x, y, r / initial_apl
Author: computational-center, Project: complexNetworksMeasurements, Lines: 52, Source: robustness2.py
Example 8: labels
def labels(G, threshold=95):
    '''return labels (dictionary) for nodes with high centrality for a given percentile'''
    labels = {}
    centrality = nx.degree_centrality(G)
    # create cutoff based on the given percentile
    cen_cutoff = np.percentile(list(centrality.values()), threshold)
    # put node labels in the dictionary if the centrality passes the threshold
    for key, value in centrality.items():
        if value >= cen_cutoff:
            labels[key] = key
    return labels
Author: tyty233, Project: Music-Classification-and-Ranking-Analysis, Lines: 13, Source: networkv2.py
Example 9: __init__
def __init__(self):
    self.g = nx.barabasi_albert_graph(random.randint(100, 1000), random.randint(2, 7))
    self.degree_centrality = nx.degree_centrality(self.g)
    self.deg = self.degree_centrality
self.sorted_deg = sorted(self.deg.items(), key=operator.itemgetter(1))
self.nodes = len(self.g.nodes())
self.edges = len(self.g.edges())
self.degree_rank()
    self.degree_dict = dict(self.g.degree())
    self.avg_deg = sum(self.degree_dict.values()) / float(len(self.g.nodes()))
#print self.rank
#print self.degree_dict
self.form_dataset()
Author: gauravcse, Project: Graph-Learning, Lines: 13, Source: Neural+BA.py
Example 10: degree_centrality
def degree_centrality(self, withme=True, node=None, average=False):
    if node is None:
        if withme:
            my_dict = nx.degree_centrality(self.mynet)
        else:
            my_dict = nx.degree_centrality(self.no_ego_net)
        new = {}
        new2 = {}
        for i in my_dict:
            new[self.id_to_name(i)] = my_dict[i]
            new2[i] = my_dict[i]
        if average:
            print("The average is " + str(round(sum(new.values()) / float(len(new.values())), 4)))
        else:
            for i, j in new.items():
                print(i, round(j, 4))
        return new2
    else:
        if withme:
            my_dict = nx.degree_centrality(self.mynet)
            try:
                print("The coefficient for node " + str(node) + " is " + str(round(my_dict[node], 4)))
            except KeyError:
                try:
                    return my_dict[self.name_to_id(node)]
                except KeyError:
                    print("Invalid node name")
        else:
            my_dict = nx.degree_centrality(self.no_ego_net)
            try:
                print("The coefficient for node " + str(node) + " is " + str(round(my_dict[node], 4)))
            except KeyError:
                try:
                    print("The coefficient for node " + str(node) + " is " + str(round(my_dict[self.name_to_id(node)], 4)))
                except KeyError:
                    print("Invalid node name")
Author: atwel, Project: BigData2015, Lines: 50, Source: networks_lab.py
Example 11: centralities
def centralities(self):
    '''
    Get info on centralities of data
    Params:
        None
    Returns:
        dictionary of centrality metrics with keys (centralities supported):
            degree - degree centrality
            betweenness - betweenness centrality
            eigenvector - eigenvector centrality
            hub - hub scores - not implemented
            authority - authority scores - not implemented
            katz - katz centrality with params X Y
            pagerank - pagerank centrality with params X Y
    '''
    output = {}
    output['degree'] = nx.degree_centrality(self.G)
    output['betweenness'] = nx.betweenness_centrality(self.G)
    try:
        output['eigenvector'] = nx.eigenvector_centrality(self.G)
        output['katz'] = nx.katz_centrality(self.G)
    except nx.NetworkXException:
        output['eigenvector'] = 'empty or exception'
        output['katz'] = 'empty or exception'
    # output['hub'] = 'Not implemented'
    # output['authority'] = 'Not implemented'
    # output['pagerank'] = 'Not implemented'
    return output
Author: harrisonhunter, Project: groupcest, Lines: 28, Source: data_object.py
Example 12: run_main
def run_main(file):
NumberOfStations=465
    print(file)
adjmatrix = np.loadtxt(file,delimiter=' ',dtype=np.dtype('int32'))
# for i in range (0,NumberOfStations):
# if(adjmatrix[i,i]==1):
# print "posicion: ["+str(i)+","+str(i)+"]"
g = nx.from_numpy_matrix(adjmatrix, create_using = nx.MultiGraph())
degree = g.degree()
density = nx.density(g)
degree_centrality = nx.degree_centrality(g)
    closeness_centrality = nx.closeness_centrality(g)
    betweenness_centrality = nx.betweenness_centrality(g)
    print(degree)
    print(density)
    print(degree_centrality)
    print(closeness_centrality)
    print(betweenness_centrality)
#nx.draw(g)
# np.savetxt(OutputFile, Matrix, delimiter=' ',newline='\n',fmt='%i')
Author: Joan93, Project: BigData, Lines: 25, Source: AdjMatrix_Analisys.py
Example 13: calculate_network_measures
def calculate_network_measures(net, analyser):
    deg = nx.degree_centrality(net)
    if net.is_multigraph():
        net = analyser.flatGraph(net)
    if nx.is_directed(net):
        tmp_net = net.to_undirected()
        clust = nx.clustering(tmp_net)
    else:
        clust = nx.clustering(net)
    if nx.is_directed(net):
        tmp_net = net.to_undirected()
        paths = nx.shortest_path(tmp_net, source=None, target=None, weight=None)
    else:
        paths = nx.shortest_path(net, source=None, target=None, weight=None)
    # for each source node, drop the first entry (its zero-length path to itself)
    lengths = [[len(path) for path in list(targets.values())[1:]] for targets in paths.values()]
    all_lengths = []
    for a in lengths:
        all_lengths.extend(a)
    max_value = max(all_lengths)
    #all_lengths = [x / float(max_value) for x in all_lengths]
    return list(deg.values()), list(clust.values()), all_lengths
Author: dfeng808, Project: multiplex, Lines: 29, Source: NetworkComparison.py
Example 14: mean_degree_centrality
def mean_degree_centrality(pg, normalize=0):
"""
mean_degree_centrality(pg) calculates mean in- and out-degree
centralities for directed graphs and simple degree-centralities
for undirected graphs. If the normalize flag is set, each node's
centralities are weighted by the number of edges in the (di)graph.
"""
centrality = {}
try:
        if networkx.is_directed(pg):
cent_sum_in, cent_sum_out = 0, 0
for n in pg.nodes():
n_cent_in = pg.in_degree(n)
n_cent_out = pg.out_degree(n)
if normalize:
n_cent_in = float(n_cent_in) / float(pg.size()-1)
n_cent_out = float(n_cent_out) / float(pg.size()-1)
cent_sum_in = cent_sum_in + n_cent_in
cent_sum_out = cent_sum_out + n_cent_out
centrality['in'] = cent_sum_in / float(pg.order())
centrality['out'] = cent_sum_out / float(pg.order())
else:
cent_sum = 0
for n in pg.nodes():
if not normalize:
n_cent = pg.degree(n)
else:
                    n_cent = networkx.degree_centrality(pg)[n]
cent_sum = cent_sum + n_cent
centrality['all'] = cent_sum / float(pg.order())
    except Exception:
logging.error('pyp_network.mean_degree_centrality() failed!')
return centrality
Author: sam-m888, Project: pypedal, Lines: 33, Source: pyp_network.py
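For reference, a hedged call sketch — the small digraph below is our own stand-in; pg can be any NetworkX (di)graph:

import networkx

# Hypothetical toy digraph, only to illustrate the call.
pg = networkx.DiGraph([(1, 2), (1, 3), (2, 4)])
print(mean_degree_centrality(pg))               # {'in': ..., 'out': ...}
print(mean_degree_centrality(pg, normalize=1))  # weighted by edge count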
Example 15: allocate
def allocate(G_phy, G_bgp):
log.info("Allocating route reflectors")
graph_phy = G_phy._graph
for asn, devices in G_phy.groupby("asn").items():
routers = [d for d in devices if d.is_router]
router_ids = ank_utils.unwrap_nodes(routers)
subgraph_phy = graph_phy.subgraph(router_ids)
if len(subgraph_phy) == 1:
continue # single node in graph, no ibgp
        deg_cen = nx.degree_centrality(subgraph_phy)
        ordered = sorted(subgraph_phy.nodes(), key = lambda x: deg_cen[x], reverse = True)
        rr_count = len(subgraph_phy) // 5 # Take top 20% to be route reflectors
        route_reflectors = ordered[:rr_count] # most connected 20%
rr_clients = ordered[rr_count:] # the other routers
route_reflectors = list(ank_utils.wrap_nodes(G_bgp, route_reflectors))
rr_clients = list(ank_utils.wrap_nodes(G_bgp, rr_clients))
G_bgp.update(route_reflectors, route_reflector = True) # mark as route reflector
# rr <-> rr
over_links = [(rr1, rr2) for rr1 in route_reflectors for rr2 in route_reflectors if rr1 != rr2]
G_bgp.add_edges_from(over_links, type = 'ibgp', direction = 'over')
# client -> rr
up_links = [(client, rr) for (client, rr) in itertools.product(rr_clients, route_reflectors)]
G_bgp.add_edges_from(up_links, type = 'ibgp', direction = 'up')
# rr -> client
down_links = [(rr, client) for (client, rr) in up_links] # opposite of up
G_bgp.add_edges_from(down_links, type = 'ibgp', direction = 'down')
log.debug("iBGP done")
Author: coana, Project: ank_v3_dev, Lines: 33, Source: route_reflectors.py
Example 16: modularity
def modularity(self):
"""
Compute the modularity.
Returns:
Numerical value of the modularity of the graph.
"""
    g = self.gr
    A = nx.adjacency_matrix(g)
    degDict = nx.degree_centrality(g)  # normalized degrees: degree / (n - 1)
    adjDict = {}
    n = A.shape[0]
    B = A.sum(axis=1)
    nodes = list(g.nodes())
    for i in range(n):
        adjDict[nodes[i]] = B[i, 0]
m = len(g.edges())
connComponents = nx.connected_components(g)
mod = 0
for c in connComponents:
edgesWithinCommunity = 0
randomEdges = 0
for u in c:
edgesWithinCommunity += adjDict[u]
randomEdges += degDict[u]
mod += (float(edgesWithinCommunity) - float(randomEdges * randomEdges)/float(2 * m))
mod = mod/float(2 * m)
return mod
Author: Jverma, Project: TextGraphics, Lines: 33, Source: Modularity.py
Example 17: degree_centrality
def degree_centrality(graph, records):
""" Reports on the most central individuals in the graph """
dc = nx.degree_centrality(graph)
nodes = sorted(dc.items(), key=operator.itemgetter(1), reverse=True)[:records]
print("Degree Centrality - top {} individuals".format(records))
for n in nodes:
print(" {:30}:\t{}".format(n[0], n[1]))
Author: csrhau, Project: sandpit, Lines: 7, Source: networker_old.py
Example 18: plotGraph
def plotGraph(graph, color="r", figsize=(12, 8)):
labels = {n:n for n in graph.nodes()}
d = nx.degree_centrality(graph)
layout=nx.spring_layout
pos=layout(graph)
plt.figure(figsize=figsize)
plt.subplots_adjust(left=0,right=1,bottom=0,top=0.95,wspace=0.01,hspace=0.01)
# nodes
nx.draw_networkx_nodes(graph,pos,
nodelist=graph.nodes(),
node_color=color,
node_size=[v * 250 for v in d.values()],
alpha=0.8)
    nx.draw_networkx_edges(graph,pos,
                           edge_color=color,
                           width=0.50
                           )
if graph.order() < 1000:
nx.draw_networkx_labels(graph,pos, labels)
return plt
Author: shaw2thefloor, Project: tgacmine16, Lines: 28, Source: factnet.py
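A quick hedged usage sketch — Zachary's karate-club graph, which ships with NetworkX, is just a stand-in dataset:

import networkx as nx

plt = plotGraph(nx.karate_club_graph(), color="b")
plt.show()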
Example 19: degree_centrality_report
def degree_centrality_report(graph, n):
""" Reports on the top n most central individuals on the graph """
    dc = nx.degree_centrality(graph)
    nodes = sorted(dc.items(), key=operator.itemgetter(1), reverse=True)[:n]
print("Degree Centrality - top {} individuals".format(n))
for n in nodes:
print(" {:30}:\t{}".format(n[0], n[1]))
Author: csrhau, Project: sandpit, Lines: 7, Source: networker.py
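Usage is analogous (again with the karate-club graph as a stand-in):

import networkx as nx

# Prints the five most central members of the club.
degree_centrality_report(nx.karate_club_graph(), 5)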
Example 20: compute_static_graph_statistics
def compute_static_graph_statistics(G,start_time,end_time):
verts = G.vertices
n = len(verts)
m = float(end_time - start_time)
    # one dict per metric: degree, closeness, betweenness
    agg_statistics = [dict.fromkeys(verts, 0), dict.fromkeys(verts, 0), dict.fromkeys(verts, 0)]
    avg_statistics = [dict.fromkeys(verts, 0), dict.fromkeys(verts, 0), dict.fromkeys(verts, 0)]
aggregated_graph = nx.Graph()
aggregated_graph.add_nodes_from(verts)
start_time = max(1,start_time)
    for t in range(start_time, end_time + 1):
        aggregated_graph.add_edges_from(G.snapshots[t].edges())
dc = G.snapshots[t].degree()
cc = nx.closeness_centrality(G.snapshots[t])
bc = nx.betweenness_centrality(G.snapshots[t])
for v in verts:
avg_statistics[0][v] += dc[v]/(n-1.0)
avg_statistics[1][v] += cc[v]
avg_statistics[2][v] += bc[v]
for v in verts:
avg_statistics[0][v] = avg_statistics[0][v]/m
avg_statistics[1][v] = avg_statistics[1][v]/m
avg_statistics[2][v] = avg_statistics[2][v]/m
dc = nx.degree_centrality(aggregated_graph)
cc = nx.closeness_centrality(aggregated_graph)
bc = nx.betweenness_centrality(aggregated_graph)
for v in verts:
agg_statistics[0][v] = dc[v]
agg_statistics[1][v] = cc[v]
agg_statistics[2][v] = bc[v]
return (agg_statistics, avg_statistics)
Author: juancamilog, Project: temporal_centrality, Lines: 33, Source: temporal_graph.py
Note: The networkx.degree_centrality examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original authors, who retain copyright; please consult each project's license before redistributing or reusing the code. Do not reproduce this page without permission.