This article collects typical usage examples of the networkx.to_numpy_matrix function in Python. If you are wondering what exactly to_numpy_matrix does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.
The following shows 20 code examples of the to_numpy_matrix function, sorted by popularity by default.
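Before the collected examples, here is a minimal, self-contained sketch of the basic call pattern. Note that to_numpy_matrix is deprecated in recent NetworkX releases and was removed in NetworkX 3.0, where nx.to_numpy_array is the replacement; the toy graph below is purely illustrative.

import networkx as nx

# Build a small weighted graph.
G = nx.Graph()
G.add_edge("a", "b", weight=2.0)
G.add_edge("b", "c", weight=0.5)

# Dense adjacency matrix; row/column order follows the given nodelist.
A = nx.to_numpy_matrix(G, nodelist=["a", "b", "c"])
print(A)

# Ignore the weights entirely (every edge becomes 1).
A_unweighted = nx.to_numpy_matrix(G, weight=None)

# On NetworkX >= 3.0, use nx.to_numpy_array(G) instead.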
Example 1: map_flows
def map_flows(catalog):
    import analysis as trans
    fm = trans.FlowMapper()
    read_exceptions = {}
    for i, fn in enumerate(os.listdir('.\\repository_data\\')):
        print(i, fn)
        try:
            sys = catalog.read(''.join(['.\\repository_data\\', fn]))
        except Exception as e:
            read_exceptions[fn] = e
            print('\t', e)
        fm.add_system(sys)
        if i > 5:
            break
    graph = fm.transformation_graph()
    fm.stats()
    nx.draw_graphviz(graph, prog='dot', root='energy')
    print(nx.to_numpy_matrix(graph) > 0)
    # pdg = nx.to_pydot(graph)
    # pdg.write_png('transform.png')
    # nx.graphviz_layout(graph, prog='neato')
    # nx.draw_graphviz(graph)
    plt.show()
Author: btciavol | Project: TechEngine | Lines: 25 | Source: main.py
Example 2: addforwardScale
def addforwardScale(self):
    """This method adds a unit gain node to all nodes with an out-degree
    of 1, so that all of these nodes end up with an out-degree of 2.
    Therefore all nodes with pointers should have 2 or more edges pointing
    away from them.
    It uses the non-dummy gain, connection and variable name matrices to
    construct their scaled counterparts."""
    M = nx.DiGraph()
    # construct the graph with connections
    for u in range(self.nodummyN):
        for v in range(self.nodummyN):
            if self.nodummyconnection[u, v] != 0:
                M.add_edge(self.nodummyvariablelist[v], self.nodummyvariablelist[u], weight=self.nodummygain[u, v])
    # now add connections where out degree == 1
    counter = 1
    for node in list(M.nodes()):
        if M.out_degree(node) == 1:
            nameofscale = 'DV' + str(counter)
            M.add_edge(node, nameofscale, weight=1.0)
            counter = counter + 1
    self.scaledforwardconnection = transpose(nx.to_numpy_matrix(M, weight=None))
    self.scaledforwardgain = transpose(nx.to_numpy_matrix(M, weight='weight'))
    self.scaledforwardvariablelist = M.nodes()
Author: stelmo | Project: LoopRanking | Lines: 31 | Source: formatmatrices.py
Example 3: addforwardscale
def addforwardscale(self):
    """This method adds a unit gain node to all nodes with an out-degree
    of 1, so that all of these nodes end up with an out-degree of 2.
    Therefore all nodes with pointers should have 2 or more edges pointing
    away from them.
    It uses the non-dummy gain, connection and variable name matrices to
    construct their scaled counterparts.
    """
    m_graph = nx.DiGraph()
    # Construct the graph with connections
    for u in range(self.nodummy_nodes):
        for v in range(self.nodummy_nodes):
            if self.nodummyconnection[u, v] != 0:
                m_graph.add_edge(self.nodummyvariablelist[v],
                                 self.nodummyvariablelist[u],
                                 weight=self.nodummygain[u, v])
    # Add connections where out degree == 1
    counter = 1
    for node in list(m_graph.nodes()):
        if m_graph.out_degree(node) == 1:
            nameofscale = 'DV' + str(counter)
            m_graph.add_edge(node, nameofscale, weight=1.0)
            counter = counter + 1
    self.scaledforwardconnection = transpose(
        nx.to_numpy_matrix(m_graph, weight=None))
    self.scaledforwardgain = transpose(
        nx.to_numpy_matrix(m_graph, weight='weight'))
    self.scaledforwardvariablelist = m_graph.nodes()
Author: alchemyst | Project: LoopRank | Lines: 31 | Source: formatmatrices.py
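The pattern shared by Examples 2 and 3 — append a unit-gain "DV" dummy node to every node with out-degree 1, then read off both the connection structure (weight=None) and the gain matrix (weight='weight') — can be reproduced in isolation. This is only a sketch of that idea with made-up node names, not the LoopRanking code itself:

import networkx as nx
from numpy import transpose

M = nx.DiGraph()
M.add_edge("T1", "T2", weight=0.8)
M.add_edge("T2", "T3", weight=1.5)

# Give every node with out-degree 1 an extra unit-gain "DV" sink.
counter = 1
for node in list(M.nodes()):
    if M.out_degree(node) == 1:
        M.add_edge(node, "DV" + str(counter), weight=1.0)
        counter += 1

connection = transpose(nx.to_numpy_matrix(M, weight=None))    # 0/1 structure
gain = transpose(nx.to_numpy_matrix(M, weight="weight"))      # weighted gains
print(connection)
print(gain)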
Example 4: energy
def energy(self):
    e = 0.0
    data = self.data
    for i, p in self.graph.nodes(data=True):
        thisval = data[:, i]
        if np.isnan(p['eta']):
            marg = p['marginal']
            e -= np.log((thisval*marg + (1-thisval)*(1-marg))).sum()
        else:
            delta = p['delta']
            eta = p['eta']
            parval = data[:, list(self.graph.predecessors(i))[0]]
            prob = thisval*(parval*(1-delta) + (1-parval)*eta) + \
                   (1-thisval)*(parval*delta + (1-parval)*(1-eta))
            prob = np.clip(prob, 1e-300, 1.0)  # guard against log(0)
            e -= np.log(prob).sum()
    mat = np.array(nx.to_numpy_matrix(self.graph), dtype=np.int32)
    if self.template:
        tempmat = np.array(nx.to_numpy_matrix(self.template), dtype=np.int32)
    else:
        tempmat = np.zeros_like(mat)
    e += self.priorweight * float(np.abs(mat - tempmat).sum())
    return e
Author: binarybana | Project: samcnet | Lines: 25 | Source: treenet.py
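The structural-prior term at the end of energy() is just an element-wise comparison of two adjacency matrices. In isolation the idea looks like this (a standalone sketch with toy graphs, not the samcnet code):

import numpy as np
import networkx as nx

g = nx.DiGraph([(0, 1), (1, 2)])
template = nx.DiGraph([(0, 1), (0, 2)])
nodes = [0, 1, 2]

mat = np.array(nx.to_numpy_matrix(g, nodelist=nodes), dtype=np.int32)
tempmat = np.array(nx.to_numpy_matrix(template, nodelist=nodes), dtype=np.int32)

# Number of mismatching directed edges; energy() scales this by priorweight.
print(np.abs(mat - tempmat).sum())   # -> 2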
Example 5: test_weight_keyword
def test_weight_keyword(self):
    WP4 = nx.Graph()
    WP4.add_edges_from((n, n+1, dict(weight=0.5, other=0.3)) for n in range(3))
    P4 = path_graph(4)
    A = nx.to_numpy_matrix(P4)
    np_assert_equal(A, nx.to_numpy_matrix(WP4, weight=None))
    np_assert_equal(0.5*A, nx.to_numpy_matrix(WP4))
    np_assert_equal(0.3*A, nx.to_numpy_matrix(WP4, weight='other'))
Author: 123jefferson | Project: MiniBloq-Sparki | Lines: 8 | Source: test_convert_numpy.py
Example 6: test_round_trip
def test_round_trip(self):
    W_ = W.from_networkx(self.known_nx)
    np.testing.assert_allclose(W_.sparse.toarray(), self.known_amat)
    nx2 = W_.to_networkx()
    np.testing.assert_allclose(nx.to_numpy_matrix(nx2), self.known_amat)
    nxsquare = self.known_W.to_networkx()
    np.testing.assert_allclose(self.known_W.sparse.toarray(), nx.to_numpy_matrix(nxsquare))
    W_square = W.from_networkx(nxsquare)
    np.testing.assert_allclose(self.known_W.sparse.toarray(), W_square.sparse.toarray())
Author: jGaboardi | Project: pysal | Lines: 9 | Source: test_nx.py
Example 7: test_numpy_multigraph
def test_numpy_multigraph(self):
    G = nx.MultiGraph()
    G.add_edge(1, 2, weight=7)
    G.add_edge(1, 2, weight=70)
    A = nx.to_numpy_matrix(G)
    assert_equal(A[1, 0], 77)
    A = nx.to_numpy_matrix(G, multigraph_weight=min)
    assert_equal(A[1, 0], 7)
    A = nx.to_numpy_matrix(G, multigraph_weight=max)
    assert_equal(A[1, 0], 70)
Author: 123jefferson | Project: MiniBloq-Sparki | Lines: 10 | Source: test_convert_numpy.py
Example 8: original_generate_token_graph
def original_generate_token_graph():
    corp = []
    sentences = []  # Initialize an empty list of sentences
    input_folders = [sub_dir for sub_dir in listdir(dataset_folder) if isdir(join(dataset_folder, sub_dir))]
    for folder in input_folders:
        dir_path = dataset_folder + os.sep + folder + os.sep
        files = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
        for file in files:
            file_path = dir_path + file
            file_name, file_extension = splitext(file_path)
            doc = ""
            if file_extension == ".pdf":
                doc = convert_pdf_to_txt(file_path)
            elif file_extension == ".docx":
                doc = convert_docx_to_txt(file_path)
            else:
                continue
            if doc != "":
                if isinstance(doc, bytes):
                    doc = doc.decode("utf8")
                # doc = words_to_phrases(doc)
                doc = doc.lower()
                doc = doc_to_wordlist(doc, True)
                corp = it.chain(corp, doc)
                # sentences += doc_to_sentences(doc, tokenizer, remove_stopwords=False)
    corp = list(corp)
    graph = nx.Graph()
    weights = Counter()
    edges = set()
    window = corp[0:5]
    for tup in it.permutations(window, 2):
        weights[tup] += 1
    for i in range(3, len(corp)-2):
        for j in range(i-2, i+2):
            weights[(corp[j], corp[i+2])] += 1
            weights[(corp[i+2], corp[j])] += 1
            edges.add((corp[i+2], corp[j]))
    for e in edges:
        graph.add_edge(e[0], e[1], weight=weights[e])
    print(graph)
    nx.write_weighted_edgelist(graph, "graph.g")
    print(nx.to_numpy_matrix(graph))
    np.savetxt("graph.adj", nx.to_numpy_matrix(graph))
    print("finished")
Author: mehtakash93 | Project: Research_Work | Lines: 52 | Source: corpus2graph.py
Example 9: plot_weight_distribution
def plot_weight_distribution(brain, output_file=None, **kwargs):
    """
    Uses matplotlib to plot a histogram of the weights of the edges.
    Requires that the brain was thresholded beforehand; NaNs are ignored for plotting.

    Parameters
    ----------
    brain: maybrain.brain.Brain
        An instance of the `Brain` class
    output_file: str
        If you want to create a file. It then calls fig.savefig(output_file) from matplotlib
    kwargs
        keyword arguments if you need to pass them to matplotlib's hist()

    Returns
    -------
    fig, ax : tuple
        if output_file is None, this returns (fig, ax) from the figure created
    """
    fig, ax = plt.subplots()

    if isinstance(brain, nx.Graph):
        arr = np.copy(nx.to_numpy_matrix(brain, nonedge=np.nan))
    else:
        arr = np.copy(nx.to_numpy_matrix(brain.G, nonedge=np.nan))

    upper_values = np.triu_indices(np.shape(arr)[0], k=1)
    weights = np.array(arr[upper_values])

    # If directed, also add the lower part of the adjacency matrix
    if not isinstance(brain, nx.Graph) and brain.directed:
        below_values = np.tril_indices(np.shape(arr)[0], k=-1)
        weights = np.concatenate([weights, np.array(arr[below_values])])

    # Removing NaNs for correct plotting
    weights = weights[~np.isnan(weights)]

    # the histogram of the data
    ax.hist(weights, **kwargs)
    ax.set_title('Weights')

    # Tweak spacing to prevent clipping of ylabel
    fig.tight_layout()

    # If output_file is defined, fig is saved and closed,
    # otherwise it is returned so callers can add more information to it
    if output_file is not None:
        fig.savefig(output_file)
        plt.close(fig)
    else:
        return fig, ax
Author: rittman | Project: maybrain | Lines: 51 | Source: histograms.py
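A possible call of the function above on a plain weighted nx.Graph (a hypothetical usage sketch; this code path does not need a maybrain Brain instance, only the matplotlib/numpy/networkx imports already used above):

import random
import networkx as nx

G = nx.erdos_renyi_graph(30, 0.2, seed=1)
for u, v in G.edges():
    G[u][v]["weight"] = random.random()

# Extra keyword arguments such as bins are forwarded to matplotlib's hist().
fig, ax = plot_weight_distribution(G, bins=20)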
Example 10: stakeholder_analysis
def stakeholder_analysis(args):
    G, nodes = create_graph_from_csv((args.input_file)[0])

    print("\n\nAdjacency Matrix:\n")
    print(nx.to_numpy_matrix(G))
    print("\n")

    status, cycles = get_cycles(G, (args.you_stakeholder)[0])
    if status == 1:
        print("No cycles found. Did you misspell the name of your system (" + (args.you_stakeholder)[0] + ")?\n")
    else:
        loopWeight = calculate_cycles_weights(G, cycles)
        endAveragesSorted = get_stakeholders_importances(loopWeight, cycles, nodes)
    if args.print_graph:
        draw_graph(G, "graph.png")
Author: seakers | Project: rapidarch | Lines: 15 | Source: stakeHolderAnalysis.py
Example 11: brute_iso
def brute_iso(self):
    print("--- BRUTE ---")
    # disregard graphs that are equal, that differ in number of nodes
    # or number of edges, or that are not balanced
    if self.isEqual():
        return True
    if self.l1 != self.l2:
        return False
    if len(self.g1.edges()) != len(self.g2.edges()):
        return False
    if not self.is_balanced():
        return False

    start = time.perf_counter()

    # compute all permutations on color classes
    dictionary = list(chain.from_iterable(self.cc2))
    permut = self.partial_permutations(self.cc1)
    g1_permutations = self.translate_permutations(permut, dictionary)
    # print("dictionary: ", dictionary)
    # print("permut: ", permut)
    # print("g1_permutations: ", g1_permutations)
    # print("g1_Nodes: ", self.g1.nodes())
    # print("g2_Nodes: ", self.g2.nodes())

    # elapsed = time.perf_counter() - start
    # num_perm = len(g1_permutations)
    # print("Time spent in (generating permutations) is: ", elapsed)
    # print("Number of permutations generated: ", num_perm)
    # progress = math.floor(num_perm/10)

    ad_mat_g2 = nx.to_numpy_matrix(self.g2)
    # i = 0
    # compare each permutation of G with H
    for perms in g1_permutations:
        # i = i + 1
        # if i % progress == 0:
        #     print("10% of brute_iso is done.")
        ad_mat_g1 = nx.to_numpy_matrix(self.g1, perms)
        # print("ad_mat_g1: ")
        # print(ad_mat_g1)
        # print("ad_mat_g2: ")
        # print(ad_mat_g2)
        if np.array_equal(ad_mat_g1, ad_mat_g2):
            return True
    return False
Author: sanklamm | Project: Graph_Isomorphism | Lines: 48 | Source: graph_iso.py
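The key trick in Example 11 is that to_numpy_matrix accepts a nodelist, so every candidate node permutation yields a correspondingly re-ordered adjacency matrix that can be compared entry by entry. A minimal illustration:

import numpy as np
import networkx as nx

G = nx.Graph([(0, 1), (1, 2)])          # a path 0-1-2

A_default = nx.to_numpy_matrix(G, nodelist=[0, 1, 2])
A_permuted = nx.to_numpy_matrix(G, nodelist=[1, 0, 2])

# Re-ordering the nodelist permutes both rows and columns of the matrix.
print(A_default)
print(A_permuted)
print(np.array_equal(A_default, A_permuted))   # False for this permutation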
Example 12: directed_weighted_clustering
def directed_weighted_clustering(g, weightString):
    n = g.number_of_nodes()
    from numpy import linalg as LA

    # adjacency matrix
    A = nx.to_numpy_matrix(g, nodelist=g.nodes(), weight=None)
    A2 = LA.matrix_power(A, 2)
    AT = A.T
    Asum = AT + A

    cVector = [i for i in range(n)]
    cVector = np.asmatrix(cVector)
    kin = {i: np.dot(AT[i], cVector.T) for i in range(n)}
    kout = {i: np.dot(A[i], cVector.T) for i in range(n)}
    kparallel = {i: np.dot(Asum[i], cVector.T) for i in range(n)}

    # weight matrix
    W = nx.to_numpy_matrix(g, nodelist=g.nodes(), weight=weightString)
    WT = W.T
    W2 = LA.matrix_power(W, 2)
    W3 = LA.matrix_power(W, 3)
    WWTW = W * WT * W
    WTW2 = WT * W2
    W2WT = W2 * WT

    ccycle = {i: 0 for i in range(n)}
    cmiddle = {i: 0 for i in range(n)}
    cin = {i: 0 for i in range(n)}
    cout = {i: 0 for i in range(n)}
    for i in range(n):
        if kin[i]*kout[i] - kparallel[i] > 0:
            ccycle[i] = W3[i, i] / float(kin[i]*kout[i] - kparallel[i])
            cmiddle[i] = WWTW[i, i] / float(kin[i]*kout[i] - kparallel[i])
        if kin[i] > 1:
            cin[i] = WTW2[i, i] / float(kin[i]*(kin[i]-1))
        if kout[i] > 1:
            cout[i] = W2WT[i, i] / float(kout[i]*(kout[i]-1))

    # Alternatively, return the network-wide means of the four coefficients:
    # return (np.mean(list(ccycle.values())), np.mean(list(cmiddle.values())),
    #         np.mean(list(cin.values())), np.mean(list(cout.values())))
    return (ccycle, cmiddle, cin, cout)
Author: vhatzopoulos | Project: Eurovision_project | Lines: 48 | Source: graph_algorithm_collection.py
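A hypothetical call of the function above on a small weighted digraph whose edge weights are stored under the attribute name "w" (integer node labels 0..n-1 are assumed, since the function indexes the matrices by position):

import networkx as nx

g = nx.DiGraph()
g.add_weighted_edges_from([(0, 1, 0.5), (1, 2, 1.0), (2, 0, 2.0)], weight="w")

ccycle, cmiddle, cin, cout = directed_weighted_clustering(g, "w")
print(ccycle)   # per-node cycle clustering coefficients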
Example 13: simulate
def simulate():
    data = get_data()
    adjacency = data["adjacency"]

    t = 10
    t_f = 100
    t = np.linspace(0, t, num=t_f).astype(np.float32)

    # a = 0.
    # b = 100.
    # r = np.array([
    #     [a, 0.],
    #     [a+2., 0.],
    # ])
    # v = np.array([
    #     [0., 10.],
    #     [0., -10.],
    # ])
    #
    # w = np.array([
    #     [0, 1],
    #     [1, 0]
    # ]).astype(np.float32)

    n = 5
    G = nx.grid_2d_graph(n, n)
    N = 25
    w = nx.to_numpy_matrix(G)*10
    r = np.random.rand(N, 3)
    d = r.shape[-1]
    v = r*0.
    k = 1.
    return sim_particles(t, r, v, w)
Author: openworm | Project: neuronal-analysis | Lines: 33 | Source: Space+Embedding+of+Nematode+Network.py
Example 14: normalized_laplacian
def normalized_laplacian(G, nodelist=None):
    """Return the normalized Laplacian of G as a numpy matrix.

    See Spectral Graph Theory by Fan Chung-Graham.
    CBMS Regional Conference Series in Mathematics, Number 92, 1997.
    """
    # FIXME: this isn't the most efficient way to do this...
    try:
        import numpy as np
    except ImportError:
        raise ImportError("normalized_laplacian() requires numpy: http://scipy.org/")
    n = G.order()
    I = np.identity(n)
    A = np.asarray(networkx.to_numpy_matrix(G, nodelist=nodelist))
    d = np.sum(A, axis=1)
    L = I*d - A
    osd = np.zeros(len(d))
    for i in range(len(d)):
        if d[i] > 0:
            osd[i] = np.sqrt(1.0/d[i])
    T = I*osd
    L = np.dot(T, np.dot(L, T))
    return L
Author: JaneliaSciComp | Project: Neuroptikon | Lines: 25 | Source: spectrum.py
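For a quick sanity check, newer NetworkX versions expose the same quantity as nx.normalized_laplacian_matrix; a sketch comparing the two (assuming the function above is in scope as normalized_laplacian, together with its module-level import of networkx):

import numpy as np
import networkx              # the helper above refers to the full module name
import networkx as nx

G = nx.path_graph(4)
L_custom = normalized_laplacian(G)
L_builtin = nx.normalized_laplacian_matrix(G).toarray()
print(np.allclose(L_custom, L_builtin))   # expected: True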
Example 15: normalized_min_cut
def normalized_min_cut(graph):
    """Clusters graph nodes according to the normalized minimum cut algorithm.

    All nodes must have at least 1 edge. Uses zero as the decision boundary.

    Parameters
    -----------
    graph: a networkx graph to cluster

    Returns
    -----------
    vector containing -1 or 1 for every node

    References
    ----------
    J. Shi and J. Malik, *Normalized Cuts and Image Segmentation*,
    IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 22, pp. 888-905
    """
    m_adjacency = np.array(nx.to_numpy_matrix(graph))

    D = np.diag(np.sum(m_adjacency, 0))
    D_half_inv = np.diag(1.0 / np.sqrt(np.sum(m_adjacency, 0)))
    M = np.dot(D_half_inv, np.dot((D - m_adjacency), D_half_inv))

    (w, v) = np.linalg.eig(M)
    # find index of the second smallest eigenvalue
    index = np.argsort(w)[1]

    v_partition = v[:, index]
    v_partition = np.sign(v_partition)
    return v_partition
Author: IAS-ZHAW | Project: machine_learning_scripts | Lines: 29 | Source: normalized_min_cut.py
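A hypothetical usage on a graph with an obvious two-community structure; on a barbell graph the sign vector should separate the two cliques:

import networkx as nx

G = nx.barbell_graph(5, 0)        # two 5-cliques joined by a single edge
labels = normalized_min_cut(G)
print(labels)                      # the two cliques receive opposite signs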
Example 16: draw_adjacency_matrix
def draw_adjacency_matrix(G, node_order=None, partitions=[], colors=[]):
    """
    - G is a networkx graph
    - node_order (optional) is a list of nodes, where each node in G
      appears exactly once
    - partitions is a list of node lists, where each node in G appears
      in exactly one node list
    - colors is a list of strings indicating what color each
      partition should be
    If partitions is specified, the same number of colors needs to be
    specified.
    """
    adjacency_matrix = nx.to_numpy_matrix(G, dtype=bool, nodelist=node_order)

    # Plot adjacency matrix in toned-down black and white
    fig = pyplot.figure(figsize=(5, 5))  # in inches
    pyplot.imshow(adjacency_matrix,
                  cmap="Greys",
                  interpolation="none")

    # The rest is only needed if you have sorted nodes by a partition and want to
    # highlight the module boundaries
    assert len(partitions) == len(colors)
    ax = pyplot.gca()
    for partition, color in zip(partitions, colors):
        current_idx = 0
        for module in partition:
            ax.add_patch(patches.Rectangle((current_idx, current_idx),
                                           len(module),  # Width
                                           len(module),  # Height
                                           facecolor="none",
                                           edgecolor=color,
                                           linewidth=1))
            current_idx += len(module)
Author: aculich | Project: S222_S13 | Lines: 34 | Source: matrix.py
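A possible call, ordering the nodes block by block so the community structure shows up as dark squares on the diagonal (this sketch assumes the pyplot/patches/numpy imports used above are in scope):

import networkx as nx

# Two dense blocks of 8 nodes joined by a single edge.
G = nx.disjoint_union(nx.complete_graph(8), nx.complete_graph(8))
G.add_edge(0, 8)

blocks = [list(range(0, 8)), list(range(8, 16))]
draw_adjacency_matrix(G, node_order=blocks[0] + blocks[1],
                      partitions=[blocks], colors=["red"])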
Example 17: graphToCSV
def graphToCSV(G, graphtype, section, test):
    directory = "Datarows/" + graphtype + "/"
    if not os.path.exists(directory):
        os.makedirs(directory)
    writer_true = csv.writer(open(directory + section + "_true.csv", "a"))
    writer_false = csv.writer(open(directory + section + "_false.csv", "a"))

    A = nx.to_numpy_matrix(G)
    A = np.reshape(A, -1)
    arrGraph = np.squeeze(np.asarray(A))

    nb_nodes = 0
    for node in G.nodes():
        if len(list(G.neighbors(node))) > 0:
            nb_nodes += 1

    meta_info = [test, nb_nodes, G.number_of_edges(), nx.number_connected_components(G)]

    # Keep the two truth-value classes the same size
    if test:
        if os.path.getsize(directory + section + "_true.csv") <= os.path.getsize(directory + section + "_false.csv"):
            writer_true.writerow(np.append(arrGraph, meta_info))
            return True
        else:
            return False
    else:
        if os.path.getsize(directory + section + "_false.csv") <= os.path.getsize(directory + section + "_true.csv"):
            writer_false.writerow(np.append(arrGraph, meta_info))
            return True
        else:
            return False
Author: thecoons | Project: minerQuest | Lines: 29 | Source: utils.py
Example 18: Net2AdjMatrix
def Net2AdjMatrix(red, sep, form):
    Red = GetNetX(red, sep, form)
    genesDIC = Red[1]
    G = Red[0]
    sep = separador(sep)

    name = red.split('.')
    output = name[0] + '.txt'
    SALIDA = open(output, "w")

    nodes = []
    for i in G.nodes():
        nodes.append(genesDIC[i-1])
        # print(i)

    Q = nx.to_numpy_matrix(G, weight='w')

    SALIDA.write(sep)
    for i in nodes:
        SALIDA.write(("%s" + sep) % i)
    SALIDA.write("\n")

    for i in range(len(Q)):
        SALIDA.write(("%s" + sep) % nodes[i])
        for j in range(len(Q)):
            SALIDA.write(("%s" + sep) % Q.item((i, j)))
        SALIDA.write("\n")

    SALIDA.close()
Author: saac | Project: ComplexNetworks-ToolBox | Lines: 32 | Source: NetXAnalyzer.py
Example 19: __init__
def __init__(self, G_list, max_num_nodes, features='id'):
    self.max_num_nodes = max_num_nodes
    self.adj_all = []
    self.len_all = []
    self.feature_all = []

    for G in G_list:
        adj = nx.to_numpy_matrix(G)
        # the diagonal entries are 1 since they denote node probability
        self.adj_all.append(
            np.asarray(adj) + np.identity(G.number_of_nodes()))
        self.len_all.append(G.number_of_nodes())
        if features == 'id':
            self.feature_all.append(np.identity(max_num_nodes))
        elif features == 'deg':
            degs = np.sum(np.array(adj), 1)
            degs = np.expand_dims(np.pad(degs, [0, max_num_nodes - G.number_of_nodes()],
                                         'constant'),
                                  axis=1)
            self.feature_all.append(degs)
        elif features == 'struct':
            degs = np.sum(np.array(adj), 1)
            degs = np.expand_dims(np.pad(degs, [0, max_num_nodes - G.number_of_nodes()],
                                         'constant'),
                                  axis=1)
            clusterings = np.array(list(nx.clustering(G).values()))
            clusterings = np.expand_dims(np.pad(clusterings,
                                                [0, max_num_nodes - G.number_of_nodes()],
                                                'constant'),
                                         axis=1)
            self.feature_all.append(np.hstack([degs, clusterings]))
Author: taeyen | Project: graph-generation | Lines: 30 | Source: data.py
Example 20: process_graph_component
def process_graph_component(graph):
    # print("Processing component of size {}".format(graph.size()))
    X = nx.to_numpy_matrix(graph)
    n_clusters = min(10, int(graph.size()/20))
    sp = SpectralClustering(
        n_clusters=n_clusters, n_components=min(1000, graph.size()), affinity='precomputed').fit(X)
    return sp, X
Author: rrricharrrd | Project: xwords | Lines: 7 | Source: anagrind_finder.py
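The same pattern — feed the dense adjacency matrix to scikit-learn's spectral clustering as a precomputed affinity — in a self-contained sketch (the graph and parameters here are illustrative only):

import numpy as np
import networkx as nx
from sklearn.cluster import SpectralClustering

G = nx.connected_caveman_graph(4, 6)        # 4 tight communities of 6 nodes each
X = np.asarray(nx.to_numpy_matrix(G))       # adjacency used as the affinity matrix

sp = SpectralClustering(n_clusters=4, affinity="precomputed").fit(X)
print(sp.labels_)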
Note: The networkx.to_numpy_matrix examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub/MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.