This article collects typical usage examples of the Python function networkx.clustering. If you are wondering how the clustering function works, how to call it, or what real usage looks like, the hand-picked code examples below may help.
The following shows 20 code examples of the clustering function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
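As a quick orientation before the examples, here is a minimal sketch of our own (not taken from any of the projects below; the toy graph is purely illustrative) showing what nx.clustering returns in the common cases:

import networkx as nx

# A small undirected graph: one triangle (0-1-2) plus a pendant node 3.
G = nx.Graph([(0, 1), (1, 2), (0, 2), (2, 3)])

# With no node argument, clustering() returns a dict mapping each node to its
# local clustering coefficient (the fraction of possible triangles through it).
print(nx.clustering(G))          # {0: 1.0, 1: 1.0, 2: 0.333..., 3: 0.0}

# A single node gives a float; an iterable of nodes restricts the result.
print(nx.clustering(G, 2))       # 0.333...
print(nx.clustering(G, [0, 3]))  # {0: 1.0, 3: 0.0}

# The graph-wide average is a separate helper.
print(nx.average_clustering(G))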
Example 1: stdCCnx
def stdCCnx(G):
    '''returns the standard deviation of clustering coefficients in a subgraph'''
    nodeCCs = []
    # iterating the dict returned by nx.clustering(G) yields the node labels
    for CC in nx.clustering(G):
        nodeCCs.append(nx.clustering(G, CC))
    return np.std(nodeCCs)
Author: mtphamine, Project: phonaesthemes-networks, Lines of code: 7, Source file: main.py
Example 2: test_path
def test_path(self):
    G = nx.path_graph(10)
    assert_equal(list(nx.clustering(G).values()),
                 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    assert_equal(nx.clustering(G),
                 {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0,
                  5: 0.0, 6: 0.0, 7: 0.0, 8: 0.0, 9: 0.0})
Author: AmesianX, Project: networkx, Lines of code: 7, Source file: test_cluster.py
Example 3: calculate_network_measures
def calculate_network_measures(net, analyser):
    deg = nx.degree_centrality(net)
    clust = []
    if net.is_multigraph():
        net = analyser.flatGraph(net)
    if nx.is_directed(net):
        tmp_net = net.to_undirected()
        clust = nx.clustering(tmp_net)
    else:
        clust = nx.clustering(net)
    if nx.is_directed(net):
        tmp_net = net.to_undirected()
        paths = nx.shortest_path(tmp_net, source=None, target=None, weight=None)
    else:
        paths = nx.shortest_path(net, source=None, target=None, weight=None)
    # collect shortest-path lengths, skipping each node's trivial path to itself
    lengths = [[len(path) for target, path in targets.items() if target != source]
               for source, targets in paths.items()]
    all_lengths = []
    for a in lengths:
        all_lengths.extend(a)
    max_value = max(all_lengths)
    #all_lengths = [x / float(max_value) for x in all_lengths]
    return deg.values(), clust.values(), all_lengths
Author: dfeng808, Project: multiplex, Lines of code: 29, Source file: NetworkComparison.py
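A side note on the multigraph check above: nx.clustering is not defined for multigraphs, so parallel edges have to be collapsed first. The project-specific analyser.flatGraph helper is not shown on this page; a minimal stand-in (illustrative only) is to rebuild a simple Graph from the multigraph:

import networkx as nx

M = nx.MultiGraph()
M.add_edges_from([(0, 1), (0, 1), (1, 2), (2, 0)])  # one parallel edge between 0 and 1

# nx.clustering raises an error on multigraphs, so collapse parallel edges first.
G = nx.Graph(M)
print(nx.clustering(G))  # {0: 1.0, 1: 1.0, 2: 1.0}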
Example 4: set_clustering_distribution
def set_clustering_distribution(self):
    # undirected only
    G_undirected = self.G.to_undirected()
    clustering_distributions = []
    txt = ''
    # unweighted
    self.unweighted_clustering_distribution = nx.clustering(G_undirected)
    statistics = self.Stats.get_distribution_info(self.unweighted_clustering_distribution)
    # store the complete distribution for statistical analysis
    self.Stats.ks_store(self.unweighted_clustering_distribution, "unweighted clustering distribution")
    clustering_distributions.extend(statistics[:5])
    clustering_distributions.extend(statistics[5])
    txt += ',average clustering coefficient (unweighted)' + self.standard_text_distribution
    # weighted
    self.weighted_clustering_distribution = nx.clustering(G_undirected, G_undirected.nodes(), self.weight_id)
    # statistics = self.Stats.get_distribution_info(self.weighted_clustering_distribution)
    # # store the complete distribution for statistical analysis
    # self.Stats.ks_store(self.weighted_clustering_distribution, "weighted clustering distribution")
    # clustering_distributions.extend(statistics[:5])
    # clustering_distributions.extend(statistics[5])
    # txt += ',average clustering coefficient (weighted)' + self.standard_text_distribution
    return [clustering_distributions, txt]
Author: andresportocarrero, Project: NetGen, Lines of code: 28, Source file: network_handler.py
Example 5: meanCCnx
def meanCCnx(G):
    '''returns the mean clustering coefficient in a subgraph'''
    nodeCCs = []
    # iterating the dict returned by nx.clustering(G) yields the node labels
    for CC in nx.clustering(G):
        nodeCCs.append(nx.clustering(G, CC))
    return np.mean(nodeCCs)
Author: mtphamine, Project: phonaesthemes-networks, Lines of code: 7, Source file: main.py
Example 6: test_k5
def test_k5(self):
    G = nx.complete_graph(5)
    assert_equal(list(nx.clustering(G, weight='weight').values()), [1, 1, 1, 1, 1])
    assert_equal(nx.average_clustering(G, weight='weight'), 1)
    G.remove_edge(1, 2)
    assert_equal(list(nx.clustering(G, weight='weight').values()),
                 [5. / 6., 1.0, 1.0, 5. / 6., 5. / 6.])
    assert_equal(nx.clustering(G, [1, 4], weight='weight'), {1: 1.0, 4: 0.83333333333333337})
Author: AmesianX, Project: networkx, Lines of code: 8, Source file: test_cluster.py
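A note on the weight argument (not part of the test above): when the named edge attribute is missing, networkx counts each edge as weight 1, which is why the weighted coefficients of the complete graph match the unweighted ones. A small sketch of our own contrasting the two on a triangle with one weak edge; the graph and values are made up for illustration:

import networkx as nx

G = nx.Graph()
G.add_edge(0, 1, weight=1.0)
G.add_edge(0, 2, weight=1.0)
G.add_edge(1, 2, weight=0.1)

# Unweighted: each node closes its only possible triangle, so every coefficient is 1.
print(nx.clustering(G))                   # {0: 1.0, 1: 1.0, 2: 1.0}
# Weighted: the weak 1-2 edge pulls all three coefficients below 1.
print(nx.clustering(G, weight='weight'))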
Example 7: test_cubical
def test_cubical(self):
    G = nx.cubical_graph()
    assert_equal(list(nx.clustering(G).values()),
                 [0, 0, 0, 0, 0, 0, 0, 0])
    assert_equal(nx.clustering(G, 1), 0)
    assert_equal(list(nx.clustering(G, [1, 2]).values()), [0, 0])
    assert_equal(nx.clustering(G, 1), 0)
    assert_equal(nx.clustering(G, [1, 2]), {1: 0, 2: 0})
Author: AmesianX, Project: networkx, Lines of code: 8, Source file: test_cluster.py
Example 8: clustering_coefficient_distribution
def clustering_coefficient_distribution(G, return_dictionary=False):
    """Returns the distribution of clustering coefficients, amenable
    to applications similar to Borges, Coppersmith, Meyer, and Priebe 2011.

    If return_dictionary is specified, we return a dictionary indexed by
    vertex name, rather than just the values (as returned by default).
    """
    if return_dictionary:
        return nx.clustering(G)
    else:
        return nx.clustering(G).values()
Author: jovo, Project: shuffled-graph-theory, Lines of code: 10, Source file: graph_invariants.py
Example 9: node_data
def node_data(self, node):
    ''' Returns node data related to the network structure '''
    if not self.graph.is_multigraph():
        clustering = nx.clustering(self.graph, node)
        original_clustering = nx.clustering(self.original_graph, node)
    else:
        clustering = None
        original_clustering = None
    return {'degree': self.graph.degree(node),
            'clustering': clustering,
            'original-degree': self.original_graph.degree(node),
            'original-clustering': original_clustering}
Author: escalant3, Project: plexigraph, Lines of code: 12, Source file: interaction.py
Example 10: main
def main():
    #print 'main running!'
    #g=nx.read_adjlist("te.adj",nodetype=int)
    #ad=list()
    #mi=list()
    #su=list()
    ##print sel3(g,3,ad,mi,su)
    # read_pajek returns a MultiGraph, so collapse it into a simple Graph
    # before calling nx.clustering, which does not accept multigraphs
    g = nx.Graph(nx.read_pajek("a.net"))
    sh(g)
    nx.clustering(g)
Author: liupenggl, Project: hybrid, Lines of code: 13, Source file: rsel.py
Example 11: main
def main():
    graph = FBGraph(auth)
    data = graph.get_friends()
    for line in data:
        print(line)
    friends = graph.make_friends(data)
    graph.add_friend_nodes(friends)
    graph.add_likes(friends)
    friend_graph = graph.get_friend_graph()
    print('\n')
    print(nx.clustering(friend_graph))
    print('\n')
    for edge in sorted(friend_graph.edges(data=True), key=lambda x: -1 * x[2].get('weight', 1)):
        print(edge)
    nx.draw_random(friend_graph)
Author: calebwang, Project: fb-graph, Lines of code: 15, Source file: crawl.py
Example 12: get_node_features
def get_node_features(self, graph, node):
    """Node features based on the NetSimile paper:
    - degree of node
    - clustering coef of node
    - avg number of node's two-hop away neighbors
    - avg clustering coef of Neighbors(node)
    - number of edges in node i's egonet
    - number of outgoing edges from ego(node)
    - number of neighbors(ego(node))
    """
    neighbors = list(graph.neighbors(node))  # materialize so the membership test below also works on networkx 2.x
    degree = graph.degree(node)
    cl_coef = networkx.clustering(graph, node)
    nbrs_two_hops = 0.0
    nbrs_cl_coef = 0.0
    for neighbor in neighbors:
        nbrs_two_hops += graph.degree(neighbor)
        nbrs_cl_coef += networkx.clustering(graph, neighbor)
    try:
        avg_two_hops = nbrs_two_hops / degree
        avg_cl_coef = nbrs_cl_coef / degree
    except ZeroDivisionError:
        avg_two_hops = 0.0
        avg_cl_coef = 0.0
    egonet = networkx.ego_graph(graph, node)
    ego_size = egonet.size()  # number of edges in the egonet
    ego_out = 0
    ego_nbrs = set()
    for ego_node in egonet:
        for nbr in graph.neighbors(ego_node):
            if nbr not in neighbors:
                ego_out += 1
                ego_nbrs.add(nbr)
    return [degree, cl_coef, avg_two_hops, avg_cl_coef, ego_size, ego_out, len(ego_nbrs)]
Author: toejamhoney, Project: nabu, Lines of code: 48, Source file: pdf.py
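For readers who want the same idea outside the class above, here is a compact, self-contained sketch (the helper name and the toy graph are ours, purely for illustration) combining the degree, clustering, and egonet pieces per node:

import networkx as nx

def netsimile_style_features(G, node):
    """Degree, clustering coefficient, average neighbor degree, and egonet edge count for one node."""
    neighbors = list(G.neighbors(node))
    degree = G.degree(node)
    clustering = nx.clustering(G, node)
    avg_neighbor_degree = sum(G.degree(n) for n in neighbors) / degree if degree else 0.0
    egonet_edges = nx.ego_graph(G, node).size()
    return [degree, clustering, avg_neighbor_degree, egonet_edges]

G = nx.karate_club_graph()
print(netsimile_style_features(G, 0))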
Example 13: test_fast_versions_properties_threshold_graphs
def test_fast_versions_properties_threshold_graphs(self):
    cs = 'ddiiddid'
    G = nxt.threshold_graph(cs)
    assert_equal(nxt.density('ddiiddid'), nx.density(G))
    assert_equal(sorted(nxt.degree_sequence(cs)),
                 sorted(G.degree().values()))
    ts = nxt.triangle_sequence(cs)
    assert_equal(ts, list(nx.triangles(G).values()))
    assert_equal(sum(ts) // 3, nxt.triangles(cs))
    c1 = nxt.cluster_sequence(cs)
    c2 = list(nx.clustering(G).values())
    assert_almost_equal(sum([abs(c - d) for c, d in zip(c1, c2)]), 0)
    b1 = nx.betweenness_centrality(G).values()
    b2 = nxt.betweenness_sequence(cs)
    assert_true(sum([abs(c - d) for c, d in zip(b1, b2)]) < 1e-14)
    assert_equal(nxt.eigenvalues(cs), [0, 1, 3, 3, 5, 7, 7, 8])
    # Degree Correlation
    assert_true(abs(nxt.degree_correlation(cs) + 0.593038821954) < 1e-12)
    assert_equal(nxt.degree_correlation('diiiddi'), -0.8)
    assert_equal(nxt.degree_correlation('did'), -1.0)
    assert_equal(nxt.degree_correlation('ddd'), 1.0)
    assert_equal(nxt.eigenvalues('dddiii'), [0, 0, 0, 0, 3, 3])
    assert_equal(nxt.eigenvalues('dddiiid'), [0, 1, 1, 1, 4, 4, 7])
Author: NikitaVAP, Project: pycdb, Lines of code: 28, Source file: test_threshold.py
Example 14: __init__
def __init__(self, graph, node_1=None, node_2=None):
    self.graph = graph
    self.node_1 = node_1
    self.node_2 = node_2
    self.clustering_dict = nx.clustering(graph)
    self.betweenness_dict = nx.betweenness_centrality(graph)
    self.average_neighbor_degree_dict = nx.average_neighbor_degree(graph)
    self.attributes_map = {
        "adamic_adar_similarity": self.adamic_adar_similarity,
        "average_clustering_coefficient": self.average_clustering_coefficient,
        "average_neighbor_degree_sum": self.average_neighbor_degree_sum,
        "betweenness_centrality": self.betweenness_centrality,
        "closeness_centrality_sum": self.closeness_centrality_sum,
        "clustering_coefficient_sum": self.clustering_coefficient_sum,
        "common_neighbors": self.common_neighbors,
        "cosine": self.cosine,
        "jaccard_coefficient": self.jaccard_coefficient,
        "katz_measure": self.katz_measure,
        "preferential_attachment": self.preferential_attachment,
        "square_clustering_coefficient_sum": self.square_clustering_coefficient_sum,
        "sum_of_neighbors": self.sum_of_neighbors,
        "sum_of_papers": self.sum_of_papers,
        "get_shortest_path_length": self.get_shortest_path_length,
        "get_second_shortest_path_length": self.get_second_shortest_path_length
    }
    if self.node_1 is not None and self.node_2 is not None:
        self.neighbors_1 = self.all_neighbors(self.node_1)
        self.neighbors_2 = self.all_neighbors(self.node_2)
Author: joaomarcosgris, Project: Predicao-de-Links, Lines of code: 30, Source file: features.py
Example 15: __init__
def __init__(self, n=1000, k=10, p=0.02947368):
    self.n = n
    self.k = k
    self.p = p
    self.ws = nx.watts_strogatz_graph(self.n, self.k, self.p, seed='nsll')
    nx.set_node_attributes(self.ws, 'SIR', 'S')
    self.clustering = nx.clustering(self.ws)
    self.betweenness = nx.betweenness_centrality(self.ws)
    p_r_0 = 0.001
    r_0 = int(self.n * p_r_0)
    if r_0 < 1:
        r_0 = 1
    random.seed('nsll')
    self.r = random.sample(self.ws.nodes(), r_0)
    i_0 = 4
    if i_0 < r_0:
        i_0 += 1
    random.seed('nsll')
    self.infected = random.sample(self.ws.nodes(), i_0)
    for n in self.infected:
        self.ws.node[n]['SIR'] = 'I'
    for n in self.r:
        self.ws.node[n]['SIR'] = 'R'
    self.s = self.n - len(self.infected) - len(self.r)
    print(self.r)
    print(self.infected)
Author: nasyxx, Project: CUFE_Math_modeling_Final, Lines of code: 27, Source file: p03.py
Example 16: plot_clustering_spectrum
def plot_clustering_spectrum(graph, path):
    """Plot the clustering spectrum of the graph and save the figure
    at the given path. On the X-axis we have degrees and on the Y-axis we have
    average clustering coefficients of the nodes that have that degree."""
    node_to_degree = dict(graph.degree())  # dict() keeps this working on networkx 1.x and 2.x
    node_to_clustering = nx.clustering(graph)
    degree_to_clustering = {}
    # calculate average clustering coefficients for nodes with a certain degree
    for node in node_to_degree:
        deg = node_to_degree[node]
        tmp = degree_to_clustering.get(deg, [])
        tmp.append(node_to_clustering[node])
        degree_to_clustering[deg] = tmp
    for degree in degree_to_clustering:
        tmp = degree_to_clustering[degree]
        degree_to_clustering[degree] = float(sum(tmp)) / len(tmp)
    x = sorted(degree_to_clustering.keys(), reverse=True)
    y = [degree_to_clustering[i] for i in x]
    plt.loglog(x, y, 'b-', marker='.')
    plt.title("Clustering Spectrum")
    plt.ylabel("Average clustering coefficient")
    plt.xlabel("Degree")
    plt.axis('tight')
    plt.savefig(path)
Author: jillzz, Project: protein-interaction, Lines of code: 29, Source file: interaction_graph_info.py
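A possible way to call the function above (not part of the original project; the generator, seed, and output path are placeholders, and the function plus matplotlib/networkx are assumed to be in scope):

import networkx as nx
# assumes matplotlib.pyplot is already imported as plt, as in the snippet above

G = nx.barabasi_albert_graph(2000, 3, seed=42)
plot_clustering_spectrum(G, "clustering_spectrum.png")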
Example 17: calculate_local_clustering_coeff
def calculate_local_clustering_coeff(graph):
    hash_clus = nx.clustering(graph)
    clus = 0
    nodes = graph.nodes()
    for itr in graph.nodes():
        clus = clus + hash_clus[itr]
    print(clus / len(nodes))
Author: bhuvneshdev, Project: social_media_mining, Lines of code: 7, Source file: phase4.py
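The loop above just averages the per-node coefficients; networkx exposes the same quantity directly. A one-line equivalent (up to floating-point rounding) on an arbitrary test graph of our choosing:

import networkx as nx

G = nx.erdos_renyi_graph(100, 0.05, seed=1)
# average_clustering is the mean of the nx.clustering values over all nodes
print(nx.average_clustering(G))
print(sum(nx.clustering(G).values()) / G.number_of_nodes())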
Example 18: describe
def describe(G, ny_tri, chems):
    global describeNetwork
    '''
    Describe the network: degrees, clustering, and centrality measures
    '''
    # Degree
    # The number of connections a node has to other nodes.
    degrees = nx.degree(G)
    degrees_df = pd.DataFrame(degrees.items(), columns=['Facility', 'Degrees'])
    values = sorted(set(degrees.values()))
    hist = [degrees.values().count(x) for x in values]
    plt.figure()
    plt.plot(values, hist, 'ro-')  # degree
    plt.xlabel('Degree')
    plt.ylabel('Number of nodes')
    plt.title('Degree Distribution')
    plt.savefig('output/degree_distribution.png')

    # Clustering coefficients
    # The bipartite clustering coefficient is a measure of local density of connections.
    clust_coefficients = nx.clustering(G)
    clust_coefficients_df = pd.DataFrame(clust_coefficients.items(), columns=['Facility', 'Clustering Coefficient'])
    clust_coefficients_df = clust_coefficients_df.sort('Clustering Coefficient', ascending=False)
    #print clust_coefficients_df

    # Node centrality measures
    FCG = list(nx.connected_component_subgraphs(G, copy=True))[0]
    # Current flow betweenness centrality
    # Current-flow betweenness centrality uses an electrical-current model for information spreading,
    # in contrast to betweenness centrality, which uses shortest paths.
    betweeness = nx.current_flow_betweenness_centrality(FCG)
    betweeness_df = pd.DataFrame(betweeness.items(), columns=['Facility', 'Betweeness'])
    betweeness_df = betweeness_df.sort('Betweeness', ascending=False)
    # Closeness centrality
    # The closeness of a node is the distance to all other nodes in the graph,
    # or, if the graph is not connected, to all other nodes in the connected component containing that node.
    closeness = nx.closeness_centrality(FCG)
    closeness_df = pd.DataFrame(closeness.items(), columns=['Facility', 'Closeness'])
    closeness_df = closeness_df.sort('Closeness', ascending=False)
    # Eigenvector centrality
    # Eigenvector centrality computes the centrality of a node based on the centrality of its neighbors,
    # in other words, how connected a node is to other highly connected nodes.
    eigenvector = nx.eigenvector_centrality(FCG)
    eigenvector_df = pd.DataFrame(eigenvector.items(), columns=['Facility', 'Eigenvector'])
    eigenvector_df = eigenvector_df.sort('Eigenvector', ascending=False)

    # Create dataframe of facility info
    fac_info = ny_tri[['tri_facility_id', 'facility_name', 'primary_naics', 'parent_company_name']].drop_duplicates()
    fac_info.rename(columns={'facility_name': 'Facility'}, inplace=True)
    # Merge everything
    describeNetwork = degrees_df.merge(
        clust_coefficients_df, on='Facility').merge(
        betweeness_df, on='Facility').merge(
        closeness_df, on='Facility').merge(
        eigenvector_df, on='Facility').merge(
        fac_info, on='Facility', how='left').merge(
        chems, on='Facility', how='left')
    describeNetwork = describeNetwork.sort('Degrees', ascending=False)
    describeNetwork.to_csv('output/describeNetwork.csv')
Author: stevecarrea, Project: ny_tri_networkAnalysis, Lines of code: 60, Source file: buildNetwork.py
Example 19: nodal_summaryOut
def nodal_summaryOut(graph):
    """Compute statistics for individual nodes.

    Parameters
    ----------
    graph: networkx graph
        An undirected graph.

    Returns
    -------
    dictionary
        The keys of this dictionary are lp (which refers to path
        length), clust (clustering coefficient), b_cen (betweenness
        centrality), c_cen (closeness centrality), nod_eff (nodal
        efficiency), loc_eff (local efficiency), and deg (degree). The
        values are arrays (or lists, in some cases) of metrics, in
        ascending order of node labels.
    """
    lp = nodal_pathlengths(graph)
    clust_dict = nx.clustering(graph)
    clust = np.array([clust_dict[n] for n in sorted(clust_dict)])
    b_cen_dict = nx.betweenness_centrality(graph)
    b_cen = np.array([b_cen_dict[n] for n in sorted(b_cen_dict)])
    c_cen_dict = nx.closeness_centrality(graph)
    c_cen = np.array([c_cen_dict[n] for n in sorted(c_cen_dict)])
    nod_eff = nodal_efficiency(graph)
    loc_eff = local_efficiency(graph)
    deg_dict = graph.degree()
    deg = [deg_dict[n] for n in sorted(deg_dict)]
    return dict(lp=lp, clust=clust, b_cen=b_cen, c_cen=c_cen, nod_eff=nod_eff,
                loc_eff=loc_eff, deg=deg)
Author: cgallen, Project: brainx, Lines of code: 32, Source file: metrics.py
Example 20: show_network_metrics
def show_network_metrics(G):
    '''
    Print the local and global metrics of the network
    '''
    print(nx.info(G))
    # density
    print("Density of the network")
    print(nx.density(G))
    # average betweenness (computed once and reused below)
    print("Average betweenness of the network")
    betweenness = nx.betweenness_centrality(G)
    print(np.sum(list(betweenness.values())) / len(betweenness))
    # average clustering coefficient
    print("Average clustering coefficient:")
    print(nx.average_clustering(G))
    # create per-node metrics dataframe
    by_node_metrics = pd.DataFrame({"Betweeness_Centrality": betweenness,
                                    "Degree_Centrality": nx.degree_centrality(G),
                                    "Clustering_Coefficient": nx.clustering(G),
                                    "Triangels": nx.algorithms.cluster.triangles(G)})
    print(by_node_metrics)
    by_node_metrics.to_excel("metrics.xlsx")
Author: tyty233, Project: Music-Classification-and-Ranking-Analysis, Lines of code: 25, Source file: networkv2.py
Note: The networkx.clustering examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright in the source code remains with those authors. Please consult the corresponding project's License before distributing or using the code. Do not reproduce this article without permission.