This article collects typical usage examples of the networkx.betweenness_centrality function in Python. If you have been wondering exactly what betweenness_centrality does, how to call it, or where to find working examples, the curated snippets below should help.
The following 20 code examples of betweenness_centrality are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
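Before diving into the examples, here is a minimal sketch of the basic call, using an illustrative built-in graph (the graph choice and parameter values below are assumptions for demonstration, not taken from any of the examples):

import networkx as nx

# Any graph will do; Zachary's karate club is a convenient built-in.
G = nx.karate_club_graph()

# Exact betweenness centrality: a dict mapping each node to its score.
bc = nx.betweenness_centrality(G, normalized=True)

# Approximate variant: sample k pivot nodes instead of all of them;
# weight= names an edge attribute for weighted shortest paths.
bc_approx = nx.betweenness_centrality(G, k=10, weight=None, seed=42)

print(max(bc, key=bc.get))  # the node with the highest betweenness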
Example 1: compareGraphs
def compareGraphs(g1, g2):
    """Compare the quantitative properties of two graphs, as a sanity check on the coarse graining."""
    # Nodes and edges
    print('Graph1: #(Nodes, Edges) = (%d, %d)' % (len(g1.nodes()), len(g1.edges())))
    print('Graph2: #(Nodes, Edges) = (%d, %d)' % (len(g2.nodes()), len(g2.edges())))
    # Connected components
    # print('#CCs for graph 1: %d' % len(list(nx.connected_components(g1))))
    # print('#CCs for graph 2: %d' % len(list(nx.connected_components(g2))))
    plt.hist([len(c) for c in nx.connected_components(g1)])
    plt.hist([len(c) for c in nx.connected_components(g2)])
    plt.title('Cluster Size')
    plt.xlabel('Cluster Size')
    plt.ylabel('#Clusters')
    plt.show()
    # Degree distribution
    plt.hist(nx.degree_histogram(g1))
    plt.hist(nx.degree_histogram(g2))
    plt.title('Degree Distribution')
    plt.xlabel('Degree')
    plt.ylabel('#Nodes')
    plt.show()
    # Betweenness --- by far the most computationally demanding step.
    plt.hist(list(nx.betweenness_centrality(g1, normalized=False).values()))
    plt.hist(list(nx.betweenness_centrality(g2, normalized=False).values()))
    plt.title('Distribution of Betweenness')
    plt.xlabel('Betweenness')
    plt.ylabel('#Nodes')
    plt.show()
Author: Khev, Project: coarse_grain_networks, Lines: 35, Source: CoarseGrainLibrary.py
Example 2: compute_static_graph_statistics
def compute_static_graph_statistics(G, start_time, end_time):
    verts = G.vertices
    n = len(verts)
    m = float(end_time - start_time)
    # One dict per measure: degree, closeness, betweenness.
    # (The original multiplied these lists by 3, which only duplicated
    # references to the same dicts; indices 0-2 are all that is used.)
    agg_statistics = [dict.fromkeys(verts, 0), dict.fromkeys(verts, 0), dict.fromkeys(verts, 0)]
    avg_statistics = [dict.fromkeys(verts, 0), dict.fromkeys(verts, 0), dict.fromkeys(verts, 0)]
    aggregated_graph = nx.Graph()
    aggregated_graph.add_nodes_from(verts)
    start_time = max(1, start_time)
    for t in range(start_time, end_time + 1):
        aggregated_graph.add_edges_from(G.snapshots[t].edges())
        dc = G.snapshots[t].degree()
        cc = nx.closeness_centrality(G.snapshots[t])
        bc = nx.betweenness_centrality(G.snapshots[t])
        for v in verts:
            avg_statistics[0][v] += dc[v] / (n - 1.0)
            avg_statistics[1][v] += cc[v]
            avg_statistics[2][v] += bc[v]
    for v in verts:
        avg_statistics[0][v] = avg_statistics[0][v] / m
        avg_statistics[1][v] = avg_statistics[1][v] / m
        avg_statistics[2][v] = avg_statistics[2][v] / m
    dc = nx.degree_centrality(aggregated_graph)
    cc = nx.closeness_centrality(aggregated_graph)
    bc = nx.betweenness_centrality(aggregated_graph)
    for v in verts:
        agg_statistics[0][v] = dc[v]
        agg_statistics[1][v] = cc[v]
        agg_statistics[2][v] = bc[v]
    return (agg_statistics, avg_statistics)
Author: juancamilog, Project: temporal_centrality, Lines: 33, Source: temporal_graph.py
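This function assumes a temporal-graph object exposing `vertices` and per-timestep `snapshots`, neither of which is defined in the snippet. A minimal sketch of that assumed interface (the class name `TemporalGraph` and its constructor are hypothetical, inferred from the usage above):

import networkx as nx

class TemporalGraph:
    """Hypothetical container matching the interface used above."""
    def __init__(self, vertices, snapshot_edges):
        self.vertices = list(vertices)
        # snapshots[t] is the static graph observed at time t.
        self.snapshots = {}
        for t, edges in snapshot_edges.items():
            g = nx.Graph()
            g.add_nodes_from(self.vertices)
            g.add_edges_from(edges)
            self.snapshots[t] = g

G = TemporalGraph([1, 2, 3], {1: [(1, 2)], 2: [(2, 3), (1, 3)]})
agg, avg = compute_static_graph_statistics(G, 1, 2)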
Example 3: show_network_metrics
def show_network_metrics(G):
    '''
    Print the local and global metrics of the network.
    '''
    print(nx.info(G))
    # Density
    print("Density of the network")
    print(nx.density(G))
    # Average betweenness (compute the centrality dict once, not twice)
    print("Average betweenness of the network")
    bc = nx.betweenness_centrality(G)
    print(np.mean(list(bc.values())))
    # Average clustering coefficient
    print("Average clustering coefficient:")
    print(nx.average_clustering(G))
    # Create the per-node metrics dataframe
    by_node_metrics = pd.DataFrame({"Betweenness_Centrality": bc,
                                    "Degree_Centrality": nx.degree_centrality(G),
                                    "Clustering_Coefficient": nx.clustering(G),
                                    "Triangles": nx.algorithms.cluster.triangles(G)})
    print(by_node_metrics)
    by_node_metrics.to_excel("metrics.xlsx")
Author: tyty233, Project: Music-Classification-and-Ranking-Analysis, Lines: 25, Source: networkv2.py
Example 4: betweenness_fracture
def betweenness_fracture(infile, outfile, fraction, recalculate=False):
    """
    Removes a given fraction of nodes from the infile network in decreasing
    order of betweenness centrality (with or without recalculation of
    centrality values after each node removal), labels each remaining node
    with its connected component, and saves the network in outfile.
    """
    g = networkx.read_gml(infile)
    m = networkx.betweenness_centrality(g)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    n = len(g.nodes())
    for i in range(1, n):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = networkx.betweenness_centrality(g)
            l = sorted(m.items(), key=operator.itemgetter(1),
                       reverse=True)
        if i * 1. / n >= fraction:
            break
    components = networkx.connected_components(g)
    component_id = 1
    for component in components:
        for node in component:
            g.nodes[node]["component"] = component_id  # g.node[...] in networkx < 2.0
        component_id += 1
    networkx.write_gml(g, outfile)
Author: swamiiyer, Project: robustness, Lines: 28, Source: robustness.py
Example 5: betweenness
def betweenness(infile, recalculate=False):
    """
    Performs robustness analysis based on betweenness centrality,
    on the network specified by infile using sequential (recalculate = True)
    or simultaneous (recalculate = False) approach. Returns a list
    with fraction of nodes removed, a list with the corresponding sizes of
    the largest component of the network, and the overall vulnerability
    of the network.
    """
    g = networkx.read_gml(infile)
    m = networkx.betweenness_centrality(g)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    x = []
    y = []
    largest_component = max(networkx.connected_components(g), key=len)
    n = len(g.nodes())
    x.append(0)
    y.append(len(largest_component) * 1. / n)
    R = 0.0
    for i in range(1, n):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = networkx.betweenness_centrality(g)
            l = sorted(m.items(), key=operator.itemgetter(1),
                       reverse=True)
        largest_component = max(networkx.connected_components(g), key=len)
        x.append(i * 1. / n)
        R += len(largest_component) * 1. / n
        y.append(len(largest_component) * 1. / n)
    return x, y, 0.5 - R / n
Author: swamiiyer, Project: robustness, Lines: 31, Source: robustness.py
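A short usage sketch for this robustness routine (the file name network.gml and the plot labels are illustrative assumptions):

import matplotlib.pyplot as plt

# Simultaneous attack: rank nodes once by their initial betweenness.
x, y, vulnerability = betweenness("network.gml", recalculate=False)

plt.plot(x, y)
plt.xlabel("Fraction of nodes removed")
plt.ylabel("Relative size of largest component")
plt.title("Vulnerability = %.3f" % vulnerability)
plt.show()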
Example 6: betweenness_removal
def betweenness_removal(g, recalculate=False):
    """
    Performs robustness analysis based on betweenness centrality
    on the network g, using a sequential (recalculate = True)
    or simultaneous (recalculate = False) approach. Returns a list
    with the fraction of nodes removed and a list with the
    corresponding fractal dimensions of the network.
    """
    m = nx.betweenness_centrality(g)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    x = []
    y = []
    dimension = fd.fractal_dimension(g, iterations=100, debug=False)
    n = len(g.nodes())
    x.append(0)
    y.append(dimension)
    for i in range(1, n - 1):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = nx.betweenness_centrality(g)
            l = sorted(m.items(), key=operator.itemgetter(1),
                       reverse=True)
        dimension = fd.fractal_dimension(g, iterations=100, debug=False)
        x.append(i * 1. / n)
        y.append(dimension)
    return x, y
Author: hernandcb, Project: complexNetworksMeasurements, Lines: 33, Source: dimensionPlotsOBCA.py
Example 7: recalculated_betweenness
def recalculated_betweenness(ex):
    # Copy the graph so the caller's graph is not modified
    ex = ex.copy()
    # Calculate betweenness of the full graph
    between = nx.betweenness_centrality(ex, weight='distance', normalized=False)
    # Track the maximum betweenness each node attains as edges are removed
    # (the original aliased the dict; an explicit copy is clearer)
    rebetween = dict(between)
    while len(ex.edges()) > 0:
        # Recalculate betweenness
        between = nx.betweenness_centrality(ex, weight='distance', normalized=False)
        # Store recalculated values if they're higher
        for node, value in between.items():
            if value > rebetween[node]:
                rebetween[node] = value
        # Remove all edges from the most central node
        node, value = sorted(between.items(), key=lambda x: x[1], reverse=True)[0]
        if value == 0:
            # All remaining edges are trivial shortest paths
            break
        for tail, head in list(ex.edges(node)):
            ex.remove_edge(tail, head)
        sys.stdout.write('.')
        sys.stdout.flush()
    print()
    return rebetween
Author: c4fcm, Project: WhatWeWatch-Analysis, Lines: 25, Source: findexposure.py
Example 8: sna_calculations
def sna_calculations(g, play_file):
    """
    :param g: a NetworkX graph object
    :type g: object
    :param play_file: the location of a play in .txt format
    :type play_file: string
    :return: returns a dictionary containing various network related figures
    :rtype: dict
    :note: also writes into results/file_name-snaCalculations.csv and results/allCharacters.csv
    """
    file_name = os.path.splitext(os.path.basename(play_file))[0]
    sna_calculations_list = dict()
    sna_calculations_list['playType'] = file_name[0]
    sna_calculations_list['avDegreeCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.degree_centrality(g).values()), dtype=float))
    sna_calculations_list['avDegreeCentralityStd'] = numpy.std(
        numpy.fromiter(iter(nx.degree_centrality(g).values()), dtype=float))
    sna_calculations_list['avInDegreeCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.in_degree_centrality(g).values()), dtype=float))
    sna_calculations_list['avOutDegreeCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.out_degree_centrality(g).values()), dtype=float))
    try:
        sna_calculations_list['avShortestPathLength'] = nx.average_shortest_path_length(g)
    except nx.NetworkXError:
        # Raised when the graph is not (strongly) connected
        sna_calculations_list['avShortestPathLength'] = 'not connected'
    sna_calculations_list['density'] = nx.density(g)
    sna_calculations_list['avEigenvectorCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.eigenvector_centrality(g).values()), dtype=float))
    sna_calculations_list['avBetweennessCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.betweenness_centrality(g).values()), dtype=float))
    sna_calculations_list['DegreeCentrality'] = nx.degree_centrality(g)
    sna_calculations_list['EigenvectorCentrality'] = nx.eigenvector_centrality(g)
    sna_calculations_list['BetweennessCentrality'] = nx.betweenness_centrality(g)
    # sna_calculations.csv file ('wb' was the Python 2 mode; use newline='' in Python 3)
    sna_calc_file = csv.writer(open('results/' + file_name + '-snaCalculations.csv', 'w', newline=''),
                               quoting=csv.QUOTE_ALL, delimiter=';')
    for key, value in sna_calculations_list.items():
        sna_calc_file.writerow([key, value])
    # all_characters.csv file
    if not os.path.isfile('results/allCharacters.csv'):
        with open('results/allCharacters.csv', 'w') as f:
            f.write(
                'Name;PlayType;play_file;DegreeCentrality;EigenvectorCentrality;BetweennessCentrality;speech_amount;AverageUtteranceLength\n')
    all_characters = open('results/allCharacters.csv', 'a')
    character_speech_amount = speech_amount(play_file)
    for character in sna_calculations_list['DegreeCentrality']:
        all_characters.write(character + ';' + str(sna_calculations_list['playType']) + ';' + file_name + ';' + str(
            sna_calculations_list['DegreeCentrality'][character]) + ';' + str(
            sna_calculations_list['EigenvectorCentrality'][character]) + ';' + str(
            sna_calculations_list['BetweennessCentrality'][character]) + ';' + str(
            character_speech_amount[0][character]) + ';' + str(character_speech_amount[1][character]) + '\n')
    all_characters.close()
    # Return the computed figures (the original returned the function object itself)
    return sna_calculations_list
Author: IngoKl, Project: shakespearesna1406, Lines: 59, Source: ShakespeareSnaAnalysis.py
Example 9: __init__
def __init__(self, view, controller, use_ego_betw=False, **kwargs):
    super(CacheLessForMore, self).__init__(view, controller)
    topology = view.topology()
    if use_ego_betw:
        # Ego betweenness: each node's betweenness within its own ego network
        self.betw = dict((v, nx.betweenness_centrality(nx.ego_graph(topology, v))[v])
                         for v in topology.nodes())  # nodes_iter() in networkx < 2.0
    else:
        self.betw = nx.betweenness_centrality(topology)
Author: Estoque86, Project: Comparison_New_Simulators, Lines: 8, Source: strategy.py
Example 10: weighted_betweenness_centrality_distribution
def weighted_betweenness_centrality_distribution(G, return_dictionary=False):
    """Return a distribution of weighted betweenness centralities.
    If return_dictionary is specified, we return a dictionary indexed by
    vertex name, rather than just the values (as returned by default).
    """
    # The original used weighted_edges=True, a long-removed networkx
    # parameter; current releases take the edge attribute name via weight=.
    if return_dictionary:
        return nx.betweenness_centrality(G, weight="weight")
    else:
        return list(nx.betweenness_centrality(G, weight="weight").values())
Author: jovo, Project: shuffled-graph-theory, Lines: 9, Source: graph_invariants.py
Example 11: betweenness_centrality_distribution
def betweenness_centrality_distribution(G, return_dictionary=False):
    """Return a distribution of unweighted betweenness centralities,
    as used in Borges, Coppersmith, Meyer, and Priebe 2011.
    If return_dictionary is specified, we return a dictionary indexed by
    vertex name, rather than just the values (as returned by default).
    """
    if return_dictionary:
        return nx.betweenness_centrality(G)
    else:
        return list(nx.betweenness_centrality(G).values())
Author: jovo, Project: shuffled-graph-theory, Lines: 10, Source: graph_invariants.py
Example 12: centrality_measures
def centrality_measures(self):
    centrality_measures = []
    txt = ''
    # betweenness
    # unweighted
    self.unweighted_betweenness_distribution = nx.betweenness_centrality(self.G)
    statistics = self.Stats.get_distribution_info(self.unweighted_betweenness_distribution)
    centrality_measures.extend(statistics[:5])
    centrality_measures.extend(statistics[5])
    txt += ',average betweenness centrality (unweighted)' + self.standard_text_distribution
    # weighted
    self.weighted_betweenness_distribution = nx.betweenness_centrality(self.G, weight=self.weight_id)
    # statistics = self.Stats.get_distribution_info(self.weighted_betweenness_distribution)
    # centrality_measures.extend(statistics[:5])
    # centrality_measures.extend(statistics[5])
    # txt += ',average betweenness centrality (weighted)' + self.standard_text_distribution
    # closeness
    # unweighted
    self.unweighted_closeness_distribution = nx.closeness_centrality(self.G)
    statistics = self.Stats.get_distribution_info(self.unweighted_closeness_distribution)
    centrality_measures.extend(statistics[:5])
    centrality_measures.extend(statistics[5])
    txt += ',average closeness centrality (unweighted)' + self.standard_text_distribution
    # eigenvector
    # right
    try:
        self.right_eigenvector_distribution = nx.eigenvector_centrality(self.G)
        statistics = self.Stats.get_distribution_info(self.right_eigenvector_distribution)
        centrality_measures.extend(statistics[:5])
        centrality_measures.extend(statistics[5])
    except Exception:  # eigenvector centrality may fail to converge
        centrality_measures.extend([0, 0, 0, 0, 0])
        centrality_measures.extend([0] * len(statistics[5]))
    txt += ',average right eigenvector' + self.standard_text_distribution
    # left
    try:
        G_rev = self.G.reverse()
        self.lef_eigenvector_distribution = nx.eigenvector_centrality(G_rev)
        statistics = self.Stats.get_distribution_info(self.lef_eigenvector_distribution)
        centrality_measures.extend(statistics[:5])
        centrality_measures.extend(statistics[5])
    except Exception:
        centrality_measures.extend([0, 0, 0, 0, 0])
        centrality_measures.extend([0] * len(statistics[5]))
    txt += ',average left eigenvector' + self.standard_text_distribution
    return [centrality_measures, txt]
Author: andresportocarrero, Project: NetGen, Lines: 53, Source: network_handler.py
Example 13: genSeedsMaxDegree
def genSeedsMaxDegree(self, p, bwness):
    """Generate seeds based on maximum degree. Also handles betweenness.
    The input argument p sets the randomization, 0 < p <= 1."""
    numSeeds = self.numSeeds
    if bwness:
        numSeeds = numSeeds * 1.5
        # Approximate betweenness with k sampled pivots on large graphs
        k_val = int(2000 / math.sqrt(len(self.adj)))
        if k_val > len(self.adj):
            bw_node = nx.betweenness_centrality(self.nxgraph)
        else:
            bw_node = nx.betweenness_centrality(self.nxgraph, k=k_val)
    numMax = int(self.numSeeds / (1.0 * p))
    seeds = [None] * numMax
    deg = [0] * numMax
    for key, value in self.adj.items():
        # Keep a sorted list of the numMax highest-degree nodes
        curr_deg = len(value)
        for j in range(numMax):
            if curr_deg > deg[j]:
                deg.insert(j, curr_deg)
                seeds.insert(j, key)
                break
    seeds = seeds[:numMax]
    deg = deg[:numMax]
    if bwness:
        # Re-rank the top-degree candidates by betweenness
        dict_bw = bw_node
        seeds_degree = seeds
        seeds = dict()
        for node in seeds_degree:
            seeds[node] = dict_bw.get(node, 0)
        seeds_fin = dict(sorted(seeds.items(), key=operator.itemgetter(1), reverse=True)[:numMax])
        seeds = list(seeds_fin.keys())
    # Shuffle
    if p != 1:
        random.shuffle(seeds)
    return seeds[:self.numSeeds]
Author: miguelarocao, Project: Pandemaniac, Lines: 52, Source: Pandemaniac.py
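The k_val heuristic above relies on networkx's pivot sampling: when k is given, betweenness is estimated from shortest paths out of k randomly chosen source nodes rather than all n, trading accuracy for speed. A standalone sketch of that trade-off (the graph and constants are illustrative assumptions):

import math
import networkx as nx

G = nx.barabasi_albert_graph(5000, 3, seed=1)

# Exact betweenness runs a shortest-path pass from every node: O(nm) unweighted.
# bc_exact = nx.betweenness_centrality(G)

# Approximation with k pivots, mirroring the 2000/sqrt(n) heuristic above.
k = min(len(G), int(2000 / math.sqrt(len(G))))
bc = nx.betweenness_centrality(G, k=k, seed=1)
print(sorted(bc, key=bc.get, reverse=True)[:10])  # likely hubs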
Example 14: node_graph
def node_graph(tup):
    h = nx.Graph()
    h.add_edges_from(tup)
    print("edges:", h.edges())
    # %matplotlib inline
    BLUE = "#99CCFF"
    nx.draw(h, node_color=BLUE, with_labels=True)
    print("Degree Distribution:", h.degree())
    print("Degree Centrality:", nx.degree_centrality(h))
    print("Betweenness Centrality:", nx.betweenness_centrality(h))
    print("Betweenness Centrality Non-Normalized:", nx.betweenness_centrality(h, normalized=False))
    print("Closeness Centrality:", nx.closeness_centrality(h))
    pyplot.show()
Author: NikhilCherian, Project: Visibility-Graph, Lines: 13, Source: natural_visibility_graph_Inflation.py
Example 15: betweenness_apl
def betweenness_apl(g, recalculate=False):
    """
    Performs robustness analysis based on betweenness centrality
    on the network g, using a sequential (recalculate = True)
    or simultaneous (recalculate = False) approach. Returns a list
    with the fraction of nodes removed, a list with the corresponding
    average shortest path lengths, and an overall vulnerability score.
    """
    m = networkx.betweenness_centrality(g)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    x = []
    y = []
    average_path_length = 0.0
    number_of_components = 0
    n = len(g.nodes())
    # connected_component_subgraphs() was removed in networkx 2.4;
    # build each subgraph from connected_components() instead.
    for c in networkx.connected_components(g):
        average_path_length += networkx.average_shortest_path_length(g.subgraph(c))
        number_of_components += 1
    average_path_length = average_path_length / number_of_components
    initial_apl = average_path_length
    x.append(0)
    y.append(average_path_length * 1. / initial_apl)
    r = 0.0
    for i in range(1, n):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = networkx.betweenness_centrality(g)
            l = sorted(m.items(), key=operator.itemgetter(1),
                       reverse=True)
        average_path_length = 0.0
        number_of_components = 0
        for c in networkx.connected_components(g):
            if len(c) > 1:
                average_path_length += networkx.average_shortest_path_length(g.subgraph(c))
                number_of_components += 1
        if number_of_components > 0:  # guard: only singletons may remain
            average_path_length = average_path_length / number_of_components
        x.append(i * 1. / n)  # fraction removed (the original divided by initial_apl, a likely copy bug)
        r += average_path_length
        y.append(average_path_length)
    return x, y, r / initial_apl
Author: computational-center, Project: complexNetworksMeasurements, Lines: 50, Source: robustness2.py
Example 16: betweenness_centrality
def betweenness_centrality(self, withme=False, node=None, average=False):
    if node is None:
        # Use the full network or the network without the ego node
        net = self.mynet if withme else self.no_ego_net
        my_dict = nx.betweenness_centrality(net)
        new = {}
        new2 = {}
        for i in my_dict:
            new[self.id_to_name(i)] = my_dict[i]
            new2[i] = my_dict[i]
        if average:
            print("The average is " + str(round(sum(new.values()) / float(len(new.values())), 4)))
        else:
            for i, j in new.items():
                print(i, round(j, 4))
        return new2
    else:
        if withme:
            my_dict = nx.betweenness_centrality(self.mynet)
            try:
                print("The coefficient for node " + str(node) + " is " + str(round(my_dict[node], 4)))
            except KeyError:
                try:
                    return my_dict[self.name_to_id(node)]
                except KeyError:
                    print("Invalid node name")
        else:
            my_dict = nx.betweenness_centrality(self.no_ego_net)
            try:
                print("The coefficient for node " + str(node) + " is " + str(round(my_dict[node], 4)))
            except KeyError:
                try:
                    # The original indexed with a list ([[...]]), a bug
                    print("The coefficient for node " + str(node) + " is "
                          + str(round(my_dict[self.name_to_id(node)], 4)))
                except KeyError:
                    print("Invalid node name")
Author: atwel, Project: BigData2015, Lines: 49, Source: networks_lab.py
Example 17: betweenValue
def betweenValue(charList, graphFile, bookNetworksPath):
    # Compute betweenness for all characters in the current chapter graph.
    g = nx.read_gexf(graphFile)
    betCentrality = nx.betweenness_centrality(g, k=None, normalized=True, weight="Weight", endpoints=False, seed=None)
    betweenValues = betCentrality.values()
    # NORMALISE betweenness values
    d = decimal.Decimal
    maxBetween = max(betweenValues)
    minBetween = min(betweenValues)
    maxMinusMin = d(maxBetween) - d(minBetween)
    if not charList:
        # Get all characters from the overall.gexf graph, most central first
        overallGraphFile = bookNetworksPath + "overall.gexf"
        overall_g = nx.read_gexf(overallGraphFile)
        overallBetweenCent = nx.betweenness_centrality(
            overall_g, k=None, normalized=True, weight="Weight", endpoints=False, seed=None
        )
        # Quick fix for getting all characters instead of the top 10:
        # sortedCentrality = sorted(overallBetweenCent.items(), key=itemgetter(1), reverse=True)[:10]
        sortedCentrality = sorted(overallBetweenCent.items(), key=itemgetter(1), reverse=True)
        charList = [seq[0] for seq in sortedCentrality]
        return charList
    else:
        charList = [item for item in charList]
        for index, item in enumerate(charList):
            currentChar = None
            for key, value in betCentrality.items():
                if key == item:
                    numerator = d(value) - d(minBetween)
                    if numerator == 0:
                        charList[index] = (key, str(0))
                    else:
                        norm_value = (d(value) - d(minBetween)) / d(maxMinusMin)
                        charList[index] = (key, str(norm_value))
                    currentChar = key
            # If the character is absent from the current chapter, assign 0 influence.
            if not currentChar:
                charList[index] = (item, 0)
        return charList
Author: pivots, Project: networkx-sna-fiction, Lines: 48, Source: snaData.py
Example 18: test_fast_versions_properties_threshold_graphs
def test_fast_versions_properties_threshold_graphs(self):
    cs = 'ddiiddid'
    G = nxt.threshold_graph(cs)
    assert_equal(nxt.density('ddiiddid'), nx.density(G))
    assert_equal(sorted(nxt.degree_sequence(cs)),
                 sorted(dict(G.degree()).values()))  # G.degree() is a view in networkx >= 2.0
    ts = nxt.triangle_sequence(cs)
    assert_equal(ts, list(nx.triangles(G).values()))
    assert_equal(sum(ts) // 3, nxt.triangles(cs))
    c1 = nxt.cluster_sequence(cs)
    c2 = list(nx.clustering(G).values())
    assert_almost_equal(sum([abs(c - d) for c, d in zip(c1, c2)]), 0)
    b1 = nx.betweenness_centrality(G).values()
    b2 = nxt.betweenness_sequence(cs)
    assert_true(sum([abs(c - d) for c, d in zip(b1, b2)]) < 1e-14)
    assert_equal(nxt.eigenvalues(cs), [0, 1, 3, 3, 5, 7, 7, 8])
    # Degree correlation
    assert_true(abs(nxt.degree_correlation(cs) + 0.593038821954) < 1e-12)
    assert_equal(nxt.degree_correlation('diiiddi'), -0.8)
    assert_equal(nxt.degree_correlation('did'), -1.0)
    assert_equal(nxt.degree_correlation('ddd'), 1.0)
    assert_equal(nxt.eigenvalues('dddiii'), [0, 0, 0, 0, 3, 3])
    assert_equal(nxt.eigenvalues('dddiiid'), [0, 1, 1, 1, 4, 4, 7])
Author: NikitaVAP, Project: pycdb, Lines: 28, Source: test_threshold.py
Example 19: __init__
def __init__(self, graph, node_1=None, node_2=None):
    self.graph = graph
    self.node_1 = node_1
    self.node_2 = node_2
    self.clustering_dict = nx.clustering(graph)
    self.betweenness_dict = nx.betweenness_centrality(graph)
    self.average_neighbor_degree_dict = nx.average_neighbor_degree(graph)
    self.attributes_map = {
        "adamic_adar_similarity": self.adamic_adar_similarity,
        "average_clustering_coefficient": self.average_clustering_coefficient,
        "average_neighbor_degree_sum": self.average_neighbor_degree_sum,
        "betweenness_centrality": self.betweenness_centrality,
        "closeness_centrality_sum": self.closeness_centrality_sum,
        "clustering_coefficient_sum": self.clustering_coefficient_sum,
        "common_neighbors": self.common_neighbors,
        "cosine": self.cosine,
        "jaccard_coefficient": self.jaccard_coefficient,
        "katz_measure": self.katz_measure,
        "preferential_attachment": self.preferential_attachment,
        "square_clustering_coefficient_sum": self.square_clustering_coefficient_sum,
        "sum_of_neighbors": self.sum_of_neighbors,
        "sum_of_papers": self.sum_of_papers,
        "get_shortest_path_length": self.get_shortest_path_length,
        "get_second_shortest_path_length": self.get_second_shortest_path_length
    }
    if self.node_1 is not None and self.node_2 is not None:
        self.neighbors_1 = self.all_neighbors(self.node_1)
        self.neighbors_2 = self.all_neighbors(self.node_2)
Author: joaomarcosgris, Project: Predicao-de-Links, Lines: 30, Source: features.py
Example 20: __init__
def __init__(self, n=1000, k=10, p=0.02947368):
    self.n = n
    self.k = k
    self.p = p
    # networkx >= 2.0 requires an int seed here; the original passed the string 'nsll'
    self.ws = nx.watts_strogatz_graph(self.n, self.k, self.p, seed=20)
    # (graph, values, name) argument order, per networkx >= 2.0
    nx.set_node_attributes(self.ws, 'S', 'SIR')
    self.clustering = nx.clustering(self.ws)
    self.betweenness = nx.betweenness_centrality(self.ws)
    p_r_0 = 0.001
    r_0 = int(self.n * p_r_0)
    if r_0 < 1:
        r_0 = 1
    random.seed('nsll')
    self.r = random.sample(list(self.ws.nodes()), r_0)
    i_0 = 4
    if i_0 < r_0:
        i_0 += 1
    random.seed('nsll')
    self.infected = random.sample(list(self.ws.nodes()), i_0)
    for v in self.infected:
        self.ws.nodes[v]['SIR'] = 'I'
    for v in self.r:
        self.ws.nodes[v]['SIR'] = 'R'
    self.s = self.n - len(self.infected) - len(self.r)
    print(self.r)
    print(self.infected)
Author: nasyxx, Project: CUFE_Math_modeling_Final, Lines: 27, Source: p03.py
Note: the networkx.betweenness_centrality examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs, and similar code and documentation platforms. The snippets were selected from projects contributed by various open-source authors; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Please do not republish without permission.