This article collects and organizes typical usage examples of the Python function networkx.info. If you have been wondering exactly what info does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.
The following section presents 20 code examples of the info function, drawn from open-source projects and sorted by popularity by default.
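As a quick orientation before the collected examples, here is a minimal self-contained sketch, not drawn from any of the projects cited below, of what nx.info reports. It assumes Python 3 and a NetworkX version that still provides nx.info (the function was removed in NetworkX 3.0, where the same figures are read directly from the graph object).

import networkx as nx

# A tiny illustrative graph.
G = nx.Graph()
G.add_edges_from([(1, 2), (2, 3), (3, 1), (3, 4)])

# nx.info(G) returns a short text summary of the graph: its type/name,
# node and edge counts, and (in older NetworkX versions) the average degree.
print(nx.info(G))

# The same information without nx.info; this also works on NetworkX 3.x.
print(G.number_of_nodes(), G.number_of_edges())
print(2.0 * G.number_of_edges() / G.number_of_nodes())  # average degree of an undirected graph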
Example 1: calGraph
def calGraph(infile, mode = 1):
#init Parameter
inputpath = 'edge_list/'
outputpath = 'network_output/'
n = mode
Data_G = inputpath+infile+'_'+str(n)+'.edgelist'
#init Graph
G = nx.read_edgelist(Data_G, create_using=nx.DiGraph())
GU = nx.read_edgelist(Data_G)
    # basic info
print nx.info(G),'\n', nx.info(GU)
average_degree = float(sum(nx.degree(G).values()))/len(G.nodes())
print 'average degree :', average_degree
degree_histogram = nx.degree_histogram(G)
print 'degree histogram max :', degree_histogram[1]
    density = nx.density(G)
    print 'density :', density
#Approximation
#Centrality
degree_centrality = nx.degree_centrality(G)
print 'degree centrality top 10 !', sorted_dict(degree_centrality)[:2]
out_degree_centrality = nx.out_degree_centrality(G)
print 'out degree centrality top 10 !', sorted_dict(out_degree_centrality)[:2]
Developer: carlzhangxuan | Project: For_Recruit | Lines of code: 25 | Source file: L3_NetworkX_basic.py
Example 2: simpleDisplay
def simpleDisplay(ipaddress = "localhost",port = "9999"):
'''
    Restore and display results from the graph saved after each processing run.
:return:
'''
# client,repo,stargazers,user = getRespond()
# g = addTOGraph(repo,stargazers,user)
# addEdge(stargazers,client,g)
# getPopular(g)
# savaGraph1(g)
# top10(g)
g = nx.read_gpickle("data/github.1")
print nx.info(g)
print
mtsw_users = [n for n in g if g.node[n]['type'] == 'user']
h = g.subgraph(mtsw_users)
print nx.info(h)
print
d = json_graph.node_link_data(h)
json.dump(d, open('data/githubRec.json', 'w'))
cmdstr = "python3 -m http.server %s" % port
webbrowser.open_new_tab("http://%s:%s/%s.html"%(ipaddress,port, "display_githubRec"))
os.system(cmdstr)
Developer: ch710798472 | Project: GithubRecommended | Lines of code: 25 | Source file: githubRec.py
Example 3: add_partitions_to_digraph
def add_partitions_to_digraph(graph, partitiondict):
''' Add the partition numbers to a graph - in this case, using this to update the digraph, with partitions calc'd off the undirected graph. Yes, it's a bad hack.
'''
g = graph
nx.set_node_attributes(g, 'partition', partitiondict)
nx.info(g)
return
Developer: ageek | Project: kaggle-machine-learning | Lines of code: 7 | Source file: networkx_functs.py
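A note on the snippet above: nx.set_node_attributes(g, 'partition', partitiondict) uses the NetworkX 1.x argument order (attribute name before values). In NetworkX 2.x the values come first, so the equivalent call, assuming the same partitiondict keyed by node, would be:

nx.set_node_attributes(g, partitiondict, 'partition')  # NetworkX 2.x signature: (G, values, name)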
Example 4: kinetic
def kinetic(fileName='P2_1_9_p07', M=None, N=None, axis=None):
#FILE = "/home/xingzhong/MicrosoftGestureDataset-RC/data/%s"%fileName
FILE = "/Users/xingzhong/Downloads/MicrosoftGestureDataset-RC/data/%s"%fileName
truth = np.genfromtxt(FILE+'.tagstream', delimiter=';', skiprows=1, dtype=None, converters={0: lambda x: (int(x) *1000 + 49875/2)/49875})
nd = np.loadtxt(FILE+'.csv')
nd = nd[np.where(nd[:,80]!=0)]# remove empty rows
idx, ndd = map(int, nd[:,0]), nd[:, 1:] # unpack index and data
m, n = ndd.shape
panel = pd.Panel( ndd.reshape((m, 20, 4)), items=idx, major_axis=AGENTS, minor_axis=['x','y','z','v'] ).transpose(2, 0, 1)
panel['dx'] = 1000* panel['x'].diff().fillna(0)
panel['dy'] = 1000* panel['y'].diff().fillna(0)
panel['dz'] = 1000* panel['z'].diff().fillna(0)
panel = panel.transpose(2, 1, 0)
samples = [s for s in instance_generator(panel, truth)]
g = EventGraph()
X = [np.array([0])]
for aid, seq in enumerate (samples[0]):
if M is not None and aid > M :
break
for t, atom in enumerate (seq):
if N is not None and t > N:
break
elif not atom is None and t!=0:
if axis:
g.addEvent( Event(t, aid, atom ))
X.append(atom)
else:
g.addEvent( Event(t, aid, np.array([atom[axis]]) ))
X.append( np.array([atom[axis]]) )
g.buildEdges(delta = 1)
print nx.info(g)
return g, X
Developer: xingzhong | Project: grammar_learning | Lines of code: 33 | Source file: kinetic.py
Example 5: check
def check(graph,path_lenght,removable,protlist,path):
rem=[]
ess=[]
for i in removable:
count=0
flag=0
rem.append(i)
H=graph.copy()
H.remove_nodes_from(rem)
for j in path:
try:
lenght=nx.shortest_path_length(H, j[0], j[1])
except:
lenght=-1
if lenght==-1 or (lenght+1)!=path_lenght[j]:
ess.append(i)
flag=1
break
else:
count=count+1
if count==len(path):
rem.append(i)
elif flag==1:
rem.remove(i)
graph.add_nodes_from(protlist)
graph.remove_nodes_from(rem)
print nx.info(graph)
#drawgraph(graph,protlist)
return graph
Developer: girolamogiudice | Project: nbea | Lines of code: 32 | Source file: filterlib6.py
Example 6: draw_citing_users_follower_count
def draw_citing_users_follower_count():
df = pd.read_csv('Results/twtrs_follower_network.tsv', sep='\t', header=None)
df.columns = ['src', 'followers']
count_followers = lambda row: len(row[1].split(','))
df['fCnt'] = df.apply(count_followers, axis=1)
edglstdf = pd.read_csv('Results/clustered_relevant_users.tsv', sep='\t', header=None)
eldf = edglstdf.apply(lambda row: [x.lstrip('[').rstrip(']') for x in row])
eldf.columns = ['src','trg']
eldf[['src']] = eldf[['src']].apply(pd.to_numeric)
df = pd.merge(eldf,df, on='src')
df[['src','trg','fCnt']].to_csv('Results/procjson_edglst.tsv', sep='\t', header=False, index=False)
g=nx.Graph()
g.add_edges_from(df[['src','trg']].values)
print nx.info(g)
f, axs = plt.subplots(1, 1, figsize=(1.6*6., 1*6.))
# nx.draw_networkx(g, pos=nx.spring_layout(g), ax=axs, with_labels=False, node_size=df[['fCnt']]/float(len(df)), alpha=.5)
pos=nx.spring_layout(g)
# nx.draw_networkx(g, pos=pos, ax=axs, with_labels=False, alpha=.5, node_size=30)
nx.draw_networkx_edges(g, pos=pos, ax=axs, alpha=0.5, width=0.8)
nx.draw_networkx_nodes(g, pos=pos, ax=axs, nodelist=list(df['src'].values), node_color='#7A83AC', node_size=30, alpha=0.5)
nx.draw_networkx_nodes(g, pos=pos, ax=axs, nodelist=list(df['trg'].values), node_color='k', node_size=20, alpha=0.8)
axs.patch.set_facecolor('None')
axs.set_xticks([]) #[None]# grid(True, which='both')
axs.set_yticks([]) #[None]# grid(True, which='both')
plt.savefig('figures/outfig', bbox_inches='tight', pad_inches=0)
return
Developer: abitofalchemy | Project: ScientificImpactPrediction | Lines of code: 34 | Source file: procjson_tograph.py
Example 7: main
def main():
### Undirected graph ###
# Initialize model using the Petersen graph
model=gmm.gmm(nx.petersen_graph())
old_graph=model.get_base()
model.set_termination(node_ceiling)
model.set_rule(rand_add)
    # Run simulation with tau=4 and Poisson density for motifs
gmm.algorithms.simulate(model,4)
# View results
new_graph=model.get_base()
print(nx.info(new_graph))
# Draw graphs
old_pos=nx.spring_layout(old_graph)
new_pos=nx.spring_layout(new_graph,iterations=2000)
fig1=plt.figure(figsize=(15,7))
fig1.add_subplot(121)
#fig1.text(0.1,0.9,"Base Graph")
nx.draw(old_graph,pos=old_pos,node_size=25,with_labels=False)
fig1.add_subplot(122)
#fig1.text(0.1,0.45,"Simulation Results")
nx.draw(new_graph,pos=new_pos,node_size=20,with_labels=False)
fig1.savefig("undirected_model.png")
### Directed graph ###
# Initialize model using random directed Barabasi-Albert model
directed_base=nx.barabasi_albert_graph(25,2).to_directed()
directed_model=gmm.gmm(directed_base)
directed_model.set_termination(node_ceiling)
directed_model.set_rule(rand_add)
    # Run simulation with tau=4 and Poisson density for motifs
gmm.algorithms.simulate(directed_model,4)
# View results
new_directed=directed_model.get_base()
print(nx.info(new_directed))
# Draw directed graphs
old_dir_pos=new_pos=nx.spring_layout(directed_base)
new_dir_pos=new_pos=nx.spring_layout(new_directed,iterations=2000)
fig2=plt.figure(figsize=(7,10))
fig2.add_subplot(211)
fig2.text(0.1,0.9,"Base Directed Graph")
nx.draw(directed_base,pos=old_dir_pos,node_size=25,with_labels=False)
fig2.add_subplot(212)
    fig2.text(0.1,0.45, "Simulation Results")
nx.draw(new_directed,pos=new_dir_pos,node_size=20,with_labels=False)
fig2.savefig("directed_model.png")
# Export files
nx.write_graphml(model.get_base(), "base_model.graphml")
nx.write_graphml(directed_model.get_base(), "directed_model.graphml")
nx.write_graphml(nx.petersen_graph(), "petersen_graph.graphml")
Developer: drewconway | Project: GMM | Lines of code: 60 | Source file: basic_model.py
Example 8: build_graph
def build_graph(self):
'''
Build a networkx graph from WordNet
'''
for synset in list(self.wordnet.all_synsets()):
#for synset in list(self.wordnet.all_synsets('n'))[:10]:
self.G.add_node(synset.name)
self.add_edges(synset, synset.hypernyms())
self.add_edges(synset, synset.hyponyms())
self.add_edges(synset, synset.instance_hypernyms())
self.add_edges(synset, synset.instance_hyponyms())
self.add_edges(synset, synset.member_holonyms())
self.add_edges(synset, synset.substance_holonyms())
self.add_edges(synset, synset.part_holonyms())
self.add_edges(synset, synset.member_meronyms())
self.add_edges(synset, synset.substance_meronyms())
self.add_edges(synset, synset.part_meronyms())
self.add_edges(synset, synset.attributes())
self.add_edges(synset, synset.entailments())
self.add_edges(synset, synset.causes())
self.add_edges(synset, synset.also_sees())
self.add_edges(synset, synset.verb_groups())
self.add_edges(synset, synset.similar_tos())
print nx.info(self.G)
Developer: Sandy4321 | Project: text-annotation | Lines of code: 27 | Source file: kbgraph.py
Example 9: data_prep
def data_prep(infofile, graphfile):
# read in the total biz file
# Preparing the data files
df = pd.read_csv(infofile)
#removing duplicate records
df = df.groupby('pageid').first()
print df.columns
print df.index
print df.shape
print df.isnull().sum()
df = df[df['latitude'] != 'N']
print "Dropping loc, lat = N: ", df.shape
df = df.dropna() #df[df['latitude'] != 'N']
print "Dropping NA", df.shape #df.isnull().sum()
# read in th original edgelist as a directed graph
globalgraph= nx.read_edgelist(graphfile, create_using=nx.DiGraph(), nodetype=int)
print "Original Graph:", nx.info(globalgraph)
print "Keeping it consistent, removing all nodes not in database:"
pageids = list(df.index)
prunedglobalgraph = globalgraph.subgraph(pageids)
print nx.info(prunedglobalgraph)
return df, globalgraph
Developer: tsaxena | Project: Tripti_SNA | Lines of code: 26 | Source file: recommend.py
Example 10: correlation_betweenness_degree_on_ER
def correlation_betweenness_degree_on_ER():
N = 1000
p = 0.004
G = nx.erdos_renyi_graph(N, p)
print nx.info(G)
ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
ND, driverNodes = ECT.get_driver_nodes(G)
degrees = []
betweenness = []
tot_degree = nx.degree_centrality(G)
tot_betweenness = nx.betweenness_centrality(G,weight=None)
for node in driverNodes:
degrees.append(tot_degree[node])
betweenness.append(tot_betweenness[node])
with open("results/driver_degree_ER.txt", "w") as f:
for x in degrees:
print >> f, x
with open("results/driver_betweenness_ER.txt", "w") as f:
for x in betweenness:
print >> f, x
with open("results/tot_degree_ER.txt", "w") as f:
for key, value in tot_degree.iteritems():
print >> f, value
with open("results/tot_betweenness_ER.txt", "w") as f:
for key, value in tot_betweenness.iteritems():
print >> f, value
Developer: python27 | Project: NetworkControllability | Lines of code: 31 | Source file: Degree_Betweenness_correlation.py
Example 11: get_community_biconnections
def get_community_biconnections(commid, df, graph):
print "Find biconnections in the community :", commid
print nx.info(graph)
biconnected_nodes = []
for e in graph.edges():
a, b = e
if graph.has_edge(b,a) and a != b:
# check if already there in the list
if (a,b) in biconnected_nodes or (b,a) in biconnected_nodes:
pass
else:
biconnected_nodes.append((a,b))
print "number of biconnected edges:", len(biconnected_nodes)
source_nodes, target_nodes = zip(*biconnected_nodes)
all_subgraph_nodes = set(source_nodes).union(set(target_nodes))
print "Unique nodes in the biconnections", len(all_subgraph_nodes)
# get the subgraph of all biconnected edges
# plot
dfname = biconnbase+ str(commid) + '_biz_info.csv'
bicon_df = df.loc[all_subgraph_nodes]
print bicon_df.shape
bicon_df.to_csv(dfname)
# subgraph generated from the coordinates
sgname = biconnbase+ str(commid) + '_sg_edgelist.ntx'
sg = graph.subgraph(list(all_subgraph_nodes))
print nx.info(sg)
nx.write_edgelist(sg, sgname, data=False)
Developer: tsaxena | Project: Tripti_SNA | Lines of code: 34 | Source file: community_analysis.py
Example 12: correlation_betweenness_degree_on_ErdosNetwork
def correlation_betweenness_degree_on_ErdosNetwork():
G = nx.read_pajek("dataset/Erdos971.net")
isolated_nodes = nx.isolates(G)
G.remove_nodes_from(isolated_nodes)
print nx.info(G)
ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
print "ND = ", ND
print "ND lambda:", ND_lambda
ND, driverNodes = ECT.get_driver_nodes(G)
print "ND =", ND
degrees = []
betweenness = []
tot_degree = nx.degree_centrality(G)
tot_betweenness = nx.betweenness_centrality(G,weight=None)
for node in driverNodes:
degrees.append(tot_degree[node])
betweenness.append(tot_betweenness[node])
with open("results/driver_degree_Erdos.txt", "w") as f:
for x in degrees:
print >> f, x
with open("results/driver_betweenness_Erdos.txt", "w") as f:
for x in betweenness:
print >> f, x
with open("results/tot_degree_Erdos.txt", "w") as f:
for key, value in tot_degree.iteritems():
print >> f, value
with open("results/tot_betweenness_Erdos.txt", "w") as f:
for key, value in tot_betweenness.iteritems():
print >> f, value
Developer: python27 | Project: NetworkControllability | Lines of code: 34 | Source file: Degree_Betweenness_correlation.py
Example 13: correlation_betweenness_degree_on_BA
def correlation_betweenness_degree_on_BA():
n = 1000
m = 2
G = nx.barabasi_albert_graph(n, m)
print nx.info(G)
ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
print "ND = ", ND
print "ND lambda:", ND_lambda
ND, driverNodes = ECT.get_driver_nodes(G)
print "ND =", ND
degrees = []
betweenness = []
tot_degree = nx.degree_centrality(G)
tot_betweenness = nx.betweenness_centrality(G,weight=None)
for node in driverNodes:
degrees.append(tot_degree[node])
betweenness.append(tot_betweenness[node])
with open("results/driver_degree_BA.txt", "w") as f:
for x in degrees:
print >> f, x
with open("results/driver_betweenness_BA.txt", "w") as f:
for x in betweenness:
print >> f, x
with open("results/tot_degree_BA.txt", "w") as f:
for key, value in tot_degree.iteritems():
print >> f, value
with open("results/tot_betweenness_BA.txt", "w") as f:
for key, value in tot_betweenness.iteritems():
print >> f, value
Developer: python27 | Project: NetworkControllability | Lines of code: 34 | Source file: Degree_Betweenness_correlation.py
Example 14: correlation_betweenness_degree_on_WS
def correlation_betweenness_degree_on_WS():
n = 1000
k = 4
p = 0.01
G = nx.watts_strogatz_graph(n, k, p)
print nx.info(G)
ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
ND, driverNodes = ECT.get_driver_nodes(G)
degrees = []
betweenness = []
tot_degree = nx.degree_centrality(G)
tot_betweenness = nx.betweenness_centrality(G,weight=None)
for node in driverNodes:
degrees.append(tot_degree[node])
betweenness.append(tot_betweenness[node])
with open("results/driver_degree_WS.txt", "w") as f:
for x in degrees:
print >> f, x
with open("results/driver_betweenness_WS.txt", "w") as f:
for x in betweenness:
print >> f, x
with open("results/tot_degree_WS.txt", "w") as f:
for key, value in tot_degree.iteritems():
print >> f, value
with open("results/tot_betweenness_WS.txt", "w") as f:
for key, value in tot_betweenness.iteritems():
print >> f, value
Developer: python27 | Project: NetworkControllability | Lines of code: 34 | Source file: Degree_Betweenness_correlation.py
Example 15: simplify_edges
def simplify_edges(G):
nodes = []
print "Compacting nodes of degree 2"
for n in G.nodes():
if G.degree(n) == 2:
nodes.append(n)
G.node[n]['pos'] = n
nodes = list(set(nodes))
print "Simplifying an estimated %i nodes...."%len(nodes)
while nodes:
while nodes:
nodes = list(set(nodes))
n = nodes.pop()
neighbors = G.neighbors(n)
G.remove_node(n)
G.add_path(neighbors)
for nn in neighbors:
                if G.degree(nn) == 2:
nodes.append(nn)
for n in G.nodes():
if G.degree(n) == 2:
nodes.append(n)
nodes = list(set(nodes))
G = max(nx.connected_component_subgraphs(G), key=len)
print nx.info(G)
#return G
for n in G.nodes():
G.node[n]['pos'] = n
Developer: argenos | Project: ros_multirobot | Lines of code: 33 | Source file: map.py
Example 16: get_distance_dict
def get_distance_dict(filename):
g = nx.read_edgelist(filename)
print "Read in edgelist file ", filename
print nx.info(g)
path_length = nx.all_pairs_shortest_path_length(g)
print len(path_length.keys())
print path_length
Developer: tsaxena | Project: Tripti_SNA | Lines of code: 7 | Source file: recommend.py
Example 17: main111
def main111():
if 1:
G = nx.read_edgelist(infname)
print nx.info(G)
# Graph adj matix
A = nx.to_scipy_sparse_matrix(G)
print type(A)
from scipy import sparse, io
io.mmwrite("Results/test.mtx", A)
exit()
# write to disk clustering coeffs for this graph
snm.get_clust_coeff([G], 'orig', 'mmonth')
# write to disk egienvalue
snm.network_value_distribution([G], [], 'origMmonth')
if 0:
edgelist = np.loadtxt(infname, dtype=str, delimiter='\t')
print edgelist[:4]
idx = np.arange(len(edgelist))
np.random.shuffle(idx)
subsamp_edgelist = edgelist[idx[:100]]
G = nx.Graph()
G.add_edges_from([(long(x), long(y)) for x, y in subsamp_edgelist])
# visualize this graph
# visualize_graph(G)
exit()
G = nx.Graph()
G.add_edges_from([(long(x), long(y)) for x, y in edgelist])
print nx.info(G)
print 'Done'
Developer: abitofalchemy | Project: ScientificImpactPrediction | Lines of code: 32 | Source file: procjson_tograph.py
Example 18: main
def main():
# Load Zachary data, randomly delete nodes, and report
zachary=nx.Graph(nx.read_pajek("karate.net")) # Do not want graph in default MultiGraph format
zachary.name="Original Zachary Data"
print(nx.info(zachary))
zachary_subset=rand_delete(zachary, 15) # Remove half of the structure
zachary_subset.name="Randomly Deleted Zachary Data"
print(nx.info(zachary_subset))
# Create model, and simulate
zachary_model=gmm.gmm(zachary_subset,R=karate_rule,T=node_ceiling_34)
gmm.algorithms.simulate(zachary_model,4,poisson=False,new_name="Simulation from sample") # Use tau=4 because data is so small (it's fun!)
# Report and visualize
print(nx.info(zachary_model.get_base()))
fig=plt.figure(figsize=(30,10))
fig.add_subplot(131)
nx.draw_spring(zachary,with_labels=False,node_size=45,iterations=5000)
plt.text(0.01,-0.1,"Original Karate Club",color="darkblue",size=20)
fig.add_subplot(132)
nx.draw_spring(zachary_subset,with_labels=False,node_size=45,iterations=5000)
plt.text(0.01,-0.1,"Random sample of Karate Club",color="darkblue",size=20)
fig.add_subplot(133)
nx.draw_spring(zachary_model.get_base(),with_labels=False,node_size=45,iterations=5000)
plt.text(0.01,-0.1,"Simulation from random sample",color="darkblue",size=20)
plt.savefig("zachary_simulation.png")
Developer: drewconway | Project: GMM | Lines of code: 26 | Source file: zachary_regen.py
Example 19: info
def info(self , verbose = False):
print "--------------Cloud_Reg_graph info:-----------------"
print nx.info(self)
ncloud = 0
nreg = 0
for node in self.nodes_iter():
if isinstance(node, nx.DiGraph):
ncloud += 1
if node.number_of_nodes() == 0:
if verbose: print "cloud ::\n empty cloud\n"
continue
if verbose: print "cloud ::"
for prim in node.nodes_iter():
assert isinstance(prim, cc.circut_module), "cloud type %s " % str(prim.__class__)
if verbose: prim.__print__()
else:
assert isinstance(node ,cc.circut_module) ,"reg type %s " % str(node.__class__)
if verbose:
print "fd ::"
node.__print__()
nreg += 1
assert len(self.big_clouds) == ncloud ,"%d %d"%(len(self.big_clouds),ncloud)
print "Number of cloud:%d " % ncloud
print "Number of register:%d" % nreg
print "--------------------------------------"
Developer: weco2015 | Project: netlist_util | Lines of code: 25 | Source file: crgraph.py
Example 20: get_k_core
def get_k_core(reviews_path,k_val):
# Report start of process
print "=================================="
print "EXTRACTING K-CORE OF PID GRAPH "
print "=================================="
print "AT STEP #1: Determine which reviewer reviewed which products"
# with ufora.remotely.downloadAll():
(PID_to_lines,PID_to_reviewerID) = get_PID_facts(reviews_path)
print "At STEP #2: Created weighted edges"
# with ufora.remotely.downloadAll():
weighted_edges = get_weighted_edges(PID_to_reviewerID)
print "AT STEP #3: Create PID graph structure"
# with ufora.remotely.downloadAll():
PID_graph = create_graph(PID_to_reviewerID,weighted_edges)
print nx.info(PID_graph)
print "AT STEP #4: Extracting K-core"
# with ufora.remotely.downloadAll():
k_core_graph = nx.k_core(PID_graph,k_val)
print nx.info(k_core_graph)
    pickle.dump(k_core_graph,open("graph",'w'))
print "DONE!"
Developer: words-sdsc | Project: recsys | Lines of code: 26 | Source file: extractKCore.py
Note: The networkx.info examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The code snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors; please consult each project's license before redistributing or reusing the code. Do not republish this article without permission.