本文整理汇总了Python中networkx.read_edgelist函数的典型用法代码示例。如果您正苦于以下问题:Python read_edgelist函数的具体用法?Python read_edgelist怎么用?Python read_edgelist使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了read_edgelist函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: gen_random_graphs
def gen_random_graphs(seed, db):
    """Generate and save randomized counterparts of the windowed real graphs.

    For every time window stored by *db*, read the real weighted edge list,
    derive a random graph seeded with *seed*, merge it into the aggregated
    temporal random graph, recompute its edge features and save it.
    Skips the whole run if the final random graph already exists on disk.
    """
    print("generating random graph with seed " + str(seed))
    directory = db.get_rnd_graphs_path()
    if not path.exists(directory):
        makedirs(directory)
    filename = db.get_rnd_graph_full_name(str(seed), str(db.get_final_time()))
    if path.exists(filename):
        print("random graph with seed " + str(seed) + " already exists! Skipping...")
        return
    pathD = db.get_graphs_path()
    # First window: note index 0 is passed as an int here while later
    # windows use str(i) -- presumably get_windowed_graph_name stringifies
    # its argument; TODO confirm.
    filename = pathD + db.get_windowed_graph_name(0)
    G = nx.read_edgelist(filename, nodetype=int, data=(('weight', float),))
    GR = get_random_graph_from(G, seed)
    save_random_graph(GR, 1, db)
    for i in range(2, db.get_final_time() + 1):
        filename = pathD + db.get_windowed_graph_name(str(i))
        if not path.exists(filename):
            # Missing window: create an empty edge list so read_edgelist
            # yields an empty graph instead of failing. 'with' closes the
            # handle even if the write fails (original used open/close).
            with open(filename, 'w'):
                pass
        G = nx.read_edgelist(filename, nodetype=int, data=(('weight', float),))
        GRnew = get_random_graph_from(G, seed)
        GR.graph['nmerges'] = i - 2
        GR = merge_temporal_graphs(GR, GRnew)
        GR = compute_edge_features(GR)
        save_random_graph(GR, i, db)
        print("G_RND[" + str(i) + "] has " + str(GR.number_of_edges()) + " edges")
开发者ID:lab-csx-ufmg,项目名称:RECAST,代码行数:33,代码来源:RandomGGen.py
示例2: gen_random_graphs
def gen_random_graphs(seed):
# create windowed random graphs for each real graph
# obtain aggreggated graph
# calculate features of random graph
print "GENERATING RANDOM GRAPHS"
day = 1
final_day = which_day(_maxtime)+1
filename = str(results_folder) + "Graphs_Data/windowed_graph_" + str(day) + str(".txt")
print filename
G = nx.read_edgelist(filename, nodetype = int, data = (('top',float),))
# print G
GR = get_random_graph_from(G, seed)
for i in range(2,final_day):
day = i
filename = str(results_folder) + "Graphs_Data/windowed_graph_" + str(day) + str(".txt")
G = nx.read_edgelist(filename, nodetype = int, data = (('top',float),))
GRnew = get_random_graph_from(G, seed)
GR.graph['nmerges'] = i - 2
GR = merge_temporal_graphs(GR, GRnew)
GR = compute_edge_features(GR)
save_random_graph(GR,i,seed)
开发者ID:jvrmed,项目名称:mocha,代码行数:30,代码来源:generate_distributions.py
示例3: incorrectness_uncertain_from_file
def incorrectness_uncertain_from_file(before_file, after_file, sample_file, n_samples, bins):
# compute sig_list_b, bucket_list_b ONCE !
start = time.clock()
bG = nx.read_edgelist(before_file, '#', '\t', None, nodetype=int)
# G = nx.read_edgelist(after_file, '#', '\t', None, nodetype=int, data=True)
print "read bG: DONE, elapsed :", time.clock() - start
h2_list = equivalence_class_H2_open(bG, None)
cand_size, bin_size, sig_list_b, bucket_list_b = bucket_H2(h2_list, bins)
# print "len B:", len(sig_list_b), len(bucket_list_b)
# H1 score, H2 score
start = time.clock()
score_H1 = 0.0
score_H2 = 0.0
count = 0
for i in range(n_samples):
file_name = sample_file + str(i)
aG = nx.read_edgelist(file_name, '#', '\t', create_using=nx.MultiGraph(), nodetype=int, data=False) # IMPORTANT: MultiGraph
# H1
sum_re_prob, re_prob_dict = incorrectness_H1(bG, aG, bins)
score_H1 += sum_re_prob
# H2
sum_re_prob, re_prob_dict = incorrectness_H2_open(aG, sig_list_b, bucket_list_b, bins)
score_H2 += sum_re_prob
print "count =", count
count += 1
#
score_H1 = score_H1/n_samples
score_H2 = score_H2/n_samples
print "compute score_H1, score_H2: DONE, elapsed :", time.clock() - start
#
return score_H1, score_H2
开发者ID:hiepbkhn,项目名称:itce2011,代码行数:35,代码来源:incorrectness_measure_multigraph.py
示例4: loadNwU
def loadNwU(dsName, path, cd, wccOnly, revEdges, undir):
    """Load the network in file *path* + *dsName* into a NetworkX graph.

    cd       -- file is comma-delimited (otherwise whitespace-delimited).
    wccOnly  -- keep only the largest weakly connected component and drop
                its self-loops.
    revEdges -- reverse all edge directions in place.
    undir    -- build an undirected Graph instead of a DiGraph.
    Returns the loaded graph.
    """
    print(" Opening " + dsName + " and loading graph... ")
    t1 = time.clock()
    # Collapse the four duplicated read branches into one call; 'with'
    # guarantees the handle is closed even if parsing raises (the original
    # leaked it in that case).
    kwargs = {}
    if cd:
        kwargs["delimiter"] = ","
    if not undir:
        kwargs["create_using"] = nx.DiGraph()
    with open(path + dsName, "rb") as fh:
        prodNet = nx.read_edgelist(fh, **kwargs)
    if wccOnly:
        # NOTE(review): weakly_connected_component_subgraphs requires a
        # directed graph -- confirm wccOnly is never combined with undir.
        prodNet = nx.algorithms.weakly_connected.weakly_connected_component_subgraphs(prodNet)[0]
        prodNet.remove_edges_from(prodNet.selfloop_edges())
    if revEdges:
        # copy=False reverses the DiGraph in place.
        prodNet.reverse(False)
    numNodes = str(len(prodNet))  # len(G), not G.__len__()
    numEdges = str(prodNet.size())
    t2 = time.clock()
    print(" -> graph loaded: " + numNodes + " nodes, " + numEdges + " edges (" + str(t2 - t1) + " sec).")
    return prodNet
开发者ID:joeyh321,项目名称:ORCA,代码行数:30,代码来源:ltDecomp3.py
示例5: k_obfuscation_measure
def k_obfuscation_measure(before_file, after_file, n_nodes, k_arr, data=True):
print "n_nodes =", n_nodes
# before_file
bG = nx.read_edgelist(before_file, '#', '\t', None, nodetype=int)
print "read bG - DONE"
# if bG.number_of_nodes() < n_nodes:
# bG.add_nodes_from(range(n_nodes)) # only for er_100k
# Case 1 - aG = bG
if after_file == before_file: # after_file is before_file
for e in bG.edges_iter():
bG[e[0]][e[1]]['p'] = 1.0
return compute_eps_multi(bG, bG, k_arr)
# Case 2 - aG is a sample
# after_file
if data == True:
aG = nx.read_edgelist(after_file, '#', '\t', None, nodetype=int, data=True)
else:
aG = nx.read_edgelist(after_file, '#', '\t', None, nodetype=int, data=False)
# if aG.number_of_nodes() < n_nodes:
# aG.add_nodes_from(range(n_nodes)) # only for the cases of KeyError !
for e in aG.edges_iter():
aG[e[0]][e[1]]['p'] = 1.0
print "read aG - DONE"
return compute_eps_multi(bG, aG, k_arr)
开发者ID:hiepbkhn,项目名称:itce2011,代码行数:29,代码来源:incorrectness_measure.py
示例6: main
def main():
    """
    Pre-processing:
    load the CRAN dependency networks, keep each one's main component,
    compute centrality measures, normalize weights, and write GraphML
    files that preserve the node data.
    """
    print(nx.__version__)
    # Load the three relation networks; the single loop replaces three
    # copy-pasted read_edgelist calls. Edge weight is package recency.
    nets_dict = {}
    for name in ("depends", "suggests", "imports"):
        g = nx.read_edgelist("data/" + name + ".csv", delimiter=",",
                             create_using=nx.DiGraph(), nodetype=str,
                             data=(("weight", time_from_today),))
        g.name = name
        nets_dict[name] = g
    # Restrict each network to its main (largest) connected component,
    # computed on the undirected view.
    for k in nets_dict:
        main_component = nx.connected_component_subgraphs(nets_dict[k].to_undirected())[0].nodes()
        nets_dict[k] = nx.subgraph(nets_dict[k], main_component)
    # Run multiple measures on graphs and normalize weights.
    measure_list = [nx.in_degree_centrality, nx.betweenness_centrality, nx.pagerank]
    for g in nets_dict.values():
        multiple_measures(g, measure_list)
        normalize_weights(g)
    # Output networks in GraphML format (GraphML stores node attributes).
    for name, g in nets_dict.items():
        nx.write_graphml(g, "data/" + name + "_data.graphml")
    print("")
    print("All files written with data")
"""Visualization:
开发者ID:johnmyleswhite,项目名称:cran_analysis,代码行数:32,代码来源:r_dependency_net.py
示例7: main
def main():
# Evaluate Parana 2.0 edge predictions against ground-truth graphs and the
# Pinney et al. method, printing precision/recall stats and curves.
parser = createParser()
options = parser.parse_args()
# Ground-truth graphs: "<gtruth>/<name>.sim.cut"; the dict key is the
# basename with the 8-character ".sim.cut" suffix sliced off.
gtGraphNames = glob.glob("{0}/*.sim.cut".format(options.gtruth))
gtGraphs = { fn.split("/")[-1][:-8] : nx.read_edgelist(fn) for fn in gtGraphNames }
print(gtGraphs)
print(gtGraphNames)
# Pinney et al. result graphs (weighted), one per ground-truth key;
# [:-8] strips the 8-character ".out.ppi" suffix.
oGraphNames = [ "{0}/{1}.out.ppi".format(options.other, k) for k in gtGraphs.keys() ]
oGraphs = { fn.split("/")[-1][:-8] : nx.read_weighted_edgelist(fn) for fn in oGraphNames }
# Extant input network: the first bZIP*.cut file in the 'other' directory.
inputGraphNames = glob.glob("{0}/bZIP*.cut".format(options.other))
print(inputGraphNames)
inputGraph = nx.read_edgelist(inputGraphNames[0])
print(oGraphNames)
cutoff = 0.99
# Parana graph with no edge filtering (cutoff 0.0); a suggested cutoff is
# then derived per reference graph via findSuggestedCutoff.
paranaGraph = graphWithCutoff(options.parana, 0.0)
c = findSuggestedCutoff( paranaGraph, inputGraph, cutoff )
evaluation.printStats( filteredGraph(paranaGraph, inputGraph.nodes(), cutoff=c ), inputGraph )
print >>sys.stderr, "Parana 2.0 : {0}".format(getCurve(paranaGraph, inputGraph))
# Compare both methods against every ground-truth graph.
for gtName, gtGraph in gtGraphs.iteritems():
print(gtName)
c = findSuggestedCutoff( paranaGraph, gtGraph, cutoff )
print("Parana cutoff = {0}".format(c))
print("==================")
evaluation.printStats( filteredGraph(oGraphs[gtName], gtGraph.nodes()), gtGraph )
print >>sys.stderr, "Pinney et. al : {0}".format(getCurve(oGraphs[gtName], gtGraph))
evaluation.printStats( filteredGraph(paranaGraph, gtGraph.nodes(), cutoff=c ), gtGraph )
print >>sys.stderr, "Parana 2.0 : {0}".format(getCurve(paranaGraph, gtGraph))
print("\n")
sys.exit(0)
开发者ID:rob-p,项目名称:Parana2-CPP,代码行数:35,代码来源:AnalyzePredictions.py
示例8: graph_properties
def graph_properties(filename, directed=False):
    """Read an integer-node edge list and return a dict of basic properties.

    Keys: num_edges, num_nodes, size_largestcc, proportion_in_largestcc,
    average_clustering.
    """
    # Read the edge list as a directed or undirected graph.
    if directed:
        G = nx.read_edgelist(filename, nodetype=int, create_using=nx.DiGraph())
    else:
        G = nx.read_edgelist(filename, nodetype=int, create_using=nx.Graph())
    props = {}
    props['num_edges'] = G.number_of_edges()
    props['num_nodes'] = len(G)
    # Largest connected component: max(key=len) works whether
    # connected_component_subgraphs returns a size-sorted list (old
    # networkx, where [0] happened to work) or a generator (newer
    # releases, where [0] raises TypeError).
    # NOTE(review): connected_component_subgraphs is undefined for
    # directed graphs -- confirm directed=True callers expect that.
    largest_component = max(nx.connected_component_subgraphs(G), key=len)
    props['size_largestcc'] = len(largest_component)
    props['proportion_in_largestcc'] = float(len(largest_component)) / len(G)
    props['average_clustering'] = nx.average_clustering(G)
    # Diameter of the largest component is expensive; left disabled.
    # props['diameter'] = nx.diameter(largest_component)
    return props
开发者ID:jcccf,项目名称:twitterdc,代码行数:27,代码来源:graph_functions.py
示例9: calGraph
def calGraph(infile, mode = 1):
#init Parameter
inputpath = 'edge_list/'
outputpath = 'network_output/'
n = mode
Data_G = inputpath+infile+'_'+str(n)+'.edgelist'
#init Graph
G = nx.read_edgelist(Data_G, create_using=nx.DiGraph())
GU = nx.read_edgelist(Data_G)
#basci info
print nx.info(G),'\n', nx.info(GU)
average_degree = float(sum(nx.degree(G).values()))/len(G.nodes())
print 'average degree :', average_degree
degree_histogram = nx.degree_histogram(G)
print 'degree histogram max :', degree_histogram[1]
desity = nx.density(G)
print 'desity :', desity
#Approximation
#Centrality
degree_centrality = nx.degree_centrality(G)
print 'degree centrality top 10 !', sorted_dict(degree_centrality)[:2]
out_degree_centrality = nx.out_degree_centrality(G)
print 'out degree centrality top 10 !', sorted_dict(out_degree_centrality)[:2]
开发者ID:carlzhangxuan,项目名称:For_Recruit,代码行数:25,代码来源:L3_NetworkX_basic.py
示例10: load
def load(self, fname):
    """Load a graph from the edge-list file *fname*.

    Stores the file name without its last extension in self.fname and the
    parsed graph (DiGraph when self.directed_graph is set) in self.G.
    """
    name = str(fname)
    # rsplit('.', 1) strips only the final extension; the original
    # split('.')[0] truncated names containing more than one dot
    # (e.g. 'graph.v2.edges' -> 'graph').
    self.fname = name.rsplit(".", 1)[0]
    if self.directed_graph:
        self.G = nx.read_edgelist(path=fname, create_using=nx.DiGraph())
    else:
        self.G = nx.read_edgelist(path=fname)
开发者ID:shuchu,项目名称:graph,代码行数:8,代码来源:er_generator.py
示例11: test_edgelist_integers
def test_edgelist_integers(self):
    """Write an integer-labelled graph to an edge list and read it back."""
    graph = nx.convert_node_labels_to_integers(self.G)
    (fd, fname) = tempfile.mkstemp()
    nx.write_edgelist(graph, fname)
    read_once = nx.read_edgelist(fname, nodetype=int)
    read_twice = nx.read_edgelist(fname, nodetype=int)
    # Node 5 is isolated and edge lists never contain isolated nodes.
    graph.remove_node(5)
    assert_equal(sorted(read_once.nodes()), sorted(graph.nodes()))
    assert_equal(sorted(read_once.edges()), sorted(graph.edges()))
    os.close(fd)
    os.unlink(fname)
开发者ID:123jefferson,项目名称:MiniBloq-Sparki,代码行数:11,代码来源:test_edgelist.py
示例12: test_edgelist_multidigraph
def test_edgelist_multidigraph(self):
    """Round-trip a MultiDiGraph through write_edgelist/read_edgelist."""
    source = self.XDG
    (fd, fname) = tempfile.mkstemp()
    nx.write_edgelist(source, fname)
    first = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiDiGraph())
    second = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiDiGraph())
    # Two separate reads must yield two distinct graph objects.
    assert_not_equal(first, second)
    assert_nodes_equal(list(first), list(source))
    assert_edges_equal(list(first.edges()), list(source.edges()))
    os.close(fd)
    os.unlink(fname)
开发者ID:yamaguchiyuto,项目名称:networkx,代码行数:11,代码来源:test_edgelist.py
示例13: test_edgelist_digraph
def test_edgelist_digraph(self):
    """Round-trip a DiGraph through write_edgelist/read_edgelist."""
    source = self.DG
    (fd, fname) = tempfile.mkstemp()
    nx.write_edgelist(source, fname)
    first = nx.read_edgelist(fname, create_using=nx.DiGraph())
    # 'g' is isolated, so it never reaches the edge-list file.
    source.remove_node('g')
    second = nx.read_edgelist(fname, create_using=nx.DiGraph())
    # Two separate reads must yield two distinct graph objects.
    assert_not_equal(first, second)
    assert_nodes_equal(list(first), list(source))
    assert_edges_equal(list(first.edges()), list(source.edges()))
    os.close(fd)
    os.unlink(fname)
开发者ID:yamaguchiyuto,项目名称:networkx,代码行数:12,代码来源:test_edgelist.py
示例14: test_edgelist_graph
def test_edgelist_graph(self):
    """Round-trip an undirected graph through write/read_edgelist."""
    source = self.G
    (fd, fname) = tempfile.mkstemp()
    nx.write_edgelist(source, fname)
    first = nx.read_edgelist(fname)
    second = nx.read_edgelist(fname)
    # Two separate reads must yield two distinct graph objects.
    assert_not_equal(first, second)
    # 'g' is isolated, so it never reaches the edge-list file.
    source.remove_node('g')
    assert_equal(sorted(first.nodes()), sorted(source.nodes()))
    assert_equal(sorted(first.edges()), sorted(source.edges()))
    os.close(fd)
    os.unlink(fname)
开发者ID:123jefferson,项目名称:MiniBloq-Sparki,代码行数:12,代码来源:test_edgelist.py
示例15: calGraph
def calGraph(infile, mode = 1):
    """Return [average_clustering, transitivity] for the edge list
    'edge_list/<infile>_<mode>.edgelist'."""
    edge_file = 'edge_list/' + infile + '_' + str(mode) + '.edgelist'
    # Clustering is taken on the undirected view, transitivity on the
    # directed one.
    directed = nx.read_edgelist(edge_file, create_using=nx.DiGraph())
    undirected = nx.read_edgelist(edge_file)
    return [nx.average_clustering(undirected), nx.transitivity(directed)]
开发者ID:carlzhangxuan,项目名称:For_Recruit,代码行数:12,代码来源:L3_NetworkX_cluster_daily.py
示例16: comorbid_count_compare
def comorbid_count_compare(net_dir, icd_gene_clinical, cancer_info, alterations, weighted=False):
# = 'humannet.9'
graph = networkx.read_edgelist(net_dir + '/network',nodetype=str)
ct = neighbor_count_comorbid(graph, alterations['peak_mut'], icd_gene_clinical, cancer_info, comorbid_only = True, weighted=weighted)
import os
randdir = net_dir + '/rand/'
randnets = os.listdir(randdir)
x = scipy.zeros([len(randnets)])
for i,f in enumerate(randnets):
net = networkx.read_edgelist(randdir + f, nodetype = str, data=weighted)
x[i] = neighbor_count_comorbid(net, alterations['peak_mut'], icd_gene_clinical, cancer_info, comorbid_only = True, weighted = weighted)
print 'comorbid_edges= ' + str(ct) + "\tngreater=" +str(sum(x >= ct)) + '\tp=' + str(sum(x >= ct)/float(len(randnets)))
return ct, x
开发者ID:RDMelamed,项目名称:melamed_comorbidity,代码行数:13,代码来源:network_ops.py
示例17: test_read_edgelist_3
def test_read_edgelist_3(self):
    """read_edgelist skips '#' comment lines; the data flag controls
    whether edge attribute dicts are parsed."""
    s = b"""\
# comment line
1 2 {'weight':2.0}
# comment line
2 3 {'weight':3.0}
"""
    # data=False: bare edges only.
    stream = io.BytesIO(s)
    G = nx.read_edgelist(stream, nodetype=int, data=False)
    assert_equal_edges(G.edges(), [(1, 2), (2, 3)])
    # data=True: attribute dicts come back too.
    stream = io.BytesIO(s)
    G = nx.read_edgelist(stream, nodetype=int, data=True)
    assert_equal_edges(G.edges(data=True), [(1, 2, {'weight': 2.0}), (2, 3, {'weight': 3.0})])
开发者ID:123jefferson,项目名称:MiniBloq-Sparki,代码行数:14,代码来源:test_edgelist.py
示例18: read_graph
def read_graph():
    '''
    Reads the input network (args.input) into networkx as a DiGraph.
    Unweighted inputs get weight 1 on every edge; the graph is converted
    to undirected unless args.directed is set.
    '''
    if args.weighted:
        G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float),), create_using=nx.DiGraph())
    else:
        G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.DiGraph())
        for u, v in G.edges():
            G[u][v]['weight'] = 1
    if not args.directed:
        G = G.to_undirected()
    return G
开发者ID:aditya-grover,项目名称:node2vec,代码行数:15,代码来源:main.py
示例19: read_graph
def read_graph(self, nx_g):
    """Parse the edge list *nx_g* into self.G as a DiGraph. Unweighted
    graphs get weight 1 on every edge; the result is made undirected
    unless self.is_directed."""
    if self.is_weighted:
        self.G = nx.read_edgelist(nx_g, data=(('weight', float),), create_using=nx.DiGraph(), edgetype=str)
    else:
        self.G = nx.read_edgelist(nx_g, create_using=nx.DiGraph(), edgetype=str)
        for u, v in self.G.edges():
            self.G[u][v]['weight'] = 1
    if not self.is_directed:
        self.G = self.G.to_undirected()
开发者ID:Loricanal,项目名称:entity2rec,代码行数:15,代码来源:node2vec.py
示例20: write_communities
def write_communities(graph, name_to_size):
    """Run Louvain community detection on the edge-list file *graph* and
    write every community larger than name_to_size[<graph name>] to its
    own edge-list file under the louvain-clusters/communities directory.
    """
    # The original str.strip('.ncol') removed any of the CHARACTERS
    # '.', 'n', 'c', 'o', 'l' from both ends of the name (mangling names
    # like 'clone.ncol'); slice off the literal suffix instead.
    shortname = str(graph.split('/')[-1])
    if shortname.endswith('.ncol'):
        shortname = shortname[:-len('.ncol')]
    nxgraph = networkx.read_edgelist(graph)
    # Reuse the already-parsed graph instead of reading the file twice.
    partition = community.best_partition(nxgraph)
    count = 0
    if shortname in name_to_size:
        for com in set(partition.values()):
            # Integer counter: the original's `count + 1.` produced float
            # suffixes like "_1.0" in the output file names.
            count += 1
            members = [node for node in partition.keys() if partition[node] == com]
            if len(members) > name_to_size[shortname]:
                community_subgraph = nxgraph.subgraph(members)
                with open("/net/data/graph-models/louvain-clusters/communities/" + shortname + "_" + str(count), 'a') as fout1:
                    networkx.write_edgelist(community_subgraph, fout1)
开发者ID:yangxiaoxiaoo,项目名称:cs281sec09,代码行数:15,代码来源:Graph_louvain_outputcomms.py
注:本文中的networkx.read_edgelist函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论