This article collects typical usage examples of the Python function networkx.to_scipy_sparse_matrix. If you have been wondering what exactly to_scipy_sparse_matrix does, how to use it, or how others use it in practice, the curated code examples below should help.
Below are 20 code examples of to_scipy_sparse_matrix, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
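Before diving into the examples, here is a minimal sketch of the function itself (assuming NetworkX 2.x, where to_scipy_sparse_matrix still exists; by default it returns a SciPy sparse matrix in CSR format):

import networkx as nx

G = nx.path_graph(4)                # 0-1-2-3
A = nx.to_scipy_sparse_matrix(G)    # csr_matrix by default
print(A.shape)                      # (4, 4)
print(A.todense())                  # symmetric 0/1 adjacency matrix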
Example 1: eig_calc_OLD
def eig_calc_OLD(G, normalize=None):
    import scipy as sp
    #print '\n\t~~~~~~ eig_calc ~~~~~~'; startTime = datetime.now(); sys.stdout.flush()
    startTime = datetime.now()
    n = G.number_of_nodes()
    if n == 1:
        eigvec = np.array([1])
    elif n == 2:  # for handling ValueError: k must be less than ndim(A)-1, k=1
        return power_iteration(G, normalize=normalize)
    else:  # the graph contains more than 2 nodes
        A = nx.to_scipy_sparse_matrix(G)
        '''print '--- eig_calc: is sub graph stochastic? ' + str(gm.check_if_stochastic_matrix(nx.to_numpy_matrix(G)))#; sys.stdout.flush()
        print '--- eig_calc: is sub graph strongly connected? ' + str(nx.is_strongly_connected(G))#; sys.stdout.flush()
        print '--- eig_calc: is sub graph aperiodic? ' + str(nx.is_aperiodic(G));# sys.stdout.flush()
        print '--- eig_calc: debug step took: '+str(datetime.now()-tmpTime); tmpTime = datetime.now(); sys.stdout.flush()
        '''
        try:
            eigval, eigvec = sp.sparse.linalg.eigen.arpack.eigs(A.T, k=1, sigma=1, which='LM')
        except RuntimeError:
            B = nx.to_scipy_sparse_matrix(add_noise(G))
            eigval, eigvec = sp.sparse.linalg.eigen.arpack.eigs(B.T, k=1, sigma=1, which='LM')
        #eigval,eigvec = sp.sparse.linalg.eigen.arpack.eigs(A.T, k=1, which='LM')
        #print '--- eig_calc: eigs took: '+str(datetime.now()-tmpTime); sys.stdout.flush()
        #print '--- eig_calc: sub graph eigval- '+str(eigval)
    eigvec = eigvec/eigvec.sum()
    if normalize:
        norm_factor = float(n)/normalize
        eigvec = eigvec*norm_factor
    #if (eigvec.imag.sum() != 0. ):
    #    print '##### COMPLEX VECTOR!!!! returning the real part only!!! #####'; #sys.stdout.flush()
    results_dict = dict(zip(G.nodes(), map(float, eigvec.real)))
    if n > 100: print '--- eig_calc: calc of class contains '+str(n)+' nodes, ('+str(float(n)/normalize)+'% of the non-isolates nodes from the graph) took-'+str(datetime.now()-startTime); sys.stdout.flush()
    return results_dict
Author: michaly, Project: Risk_Ranking_System, Lines: 33, File: salsa.py
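The sp.sparse.linalg.eigen.arpack.eigs path above comes from a very old SciPy layout; current SciPy exposes the same ARPACK routine as scipy.sparse.linalg.eigs. A minimal sketch of the equivalent shift-invert call (a sketch, assuming G is a stochastic transition graph with at least 3 nodes, as the example itself requires):

import networkx as nx
import scipy.sparse.linalg as spla

def dominant_left_eigvec(G):
    A = nx.to_scipy_sparse_matrix(G, dtype=float)
    # shift-invert around sigma=1 targets the stationary eigenvalue of a stochastic matrix
    eigval, eigvec = spla.eigs(A.T, k=1, sigma=1, which='LM')
    vec = eigvec[:, 0].real
    return vec / vec.sum()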
Example 2: gRa
def gRa(g, w):
    '''w is an edge count: the number of edges that remain in the data after the edge-deletion perturbation with probability p.'''
    tg = g.copy()
    Rq = nx.to_scipy_sparse_matrix(g)
    Rq = Rq.toarray()
    bw = nx.edge_betweenness_centrality(g, normalized=False)
    norm = sum(bw.values())
    e_num = len(g.edges())
    n = len(g)
    N = (n * (n - 1)) / 2
    for k, v in bw.items():
        g.add_edge(*k, weight=v)
    # print g.edges(data=True)
    R = nx.to_scipy_sparse_matrix(g, weight='weight')
    Rp = R.toarray()
    Rp = w * Rp * 2.0 / Rp.sum()
    q = float(e_num - w) / (N - e_num)
    for i, each in enumerate(Rq):
        for j, e in enumerate(each):
            if e == 0:
                Rp[i, j] = q  # rather convoluted: fold Rq into Rp by writing q wherever no edge exists
    for i in range(n):
        Rp[i, i] = 0  # remove the q values on the diagonal
    return Rp
Author: liupenggl, Project: dpr, Lines: 29, File: grandom.py
Example 3: test_weight_keyword
def test_weight_keyword(self):
    WP4 = nx.Graph()
    WP4.add_edges_from((n, n + 1, dict(weight=0.5, other=0.3)) for n in range(3))
    P4 = path_graph(4)
    A = nx.to_scipy_sparse_matrix(P4)
    np_assert_equal(A.todense(), nx.to_scipy_sparse_matrix(WP4, weight=None).todense())
    np_assert_equal(0.5 * A.todense(), nx.to_scipy_sparse_matrix(WP4).todense())
    np_assert_equal(0.3 * A.todense(), nx.to_scipy_sparse_matrix(WP4, weight="other").todense())
Author: GccX11, Project: networkx, Lines: 8, File: test_convert_scipy.py
Example 4: dists
def dists(G, nbunch=None):
    G = G.copy()
    if nbunch is None:
        nbunch = G.nodes()
    try:
        out_degree = G.out_degree(nbunch=nbunch)
        in_degree = G.in_degree(nbunch=nbunch)
        gross_out_weight = G.out_degree(weighted=True, nbunch=nbunch)
        gross_in_weight = G.in_degree(weighted=True, nbunch=nbunch)
    except TypeError:
        out_degree = G.out_degree(nbunch=nbunch)
        in_degree = G.in_degree(nbunch=nbunch)
        gross_out_weight = G.out_degree(weight='weight', nbunch=nbunch)
        gross_in_weight = G.in_degree(weight='weight', nbunch=nbunch)
    A = nx.to_scipy_sparse_matrix(G, nodelist=nbunch)
    i, j, grosscells = extract.find(A)
    selfloops = G.selfloop_edges(data=True)
    G.remove_edges_from(selfloops)
    try:
        net_out_weight = G.out_degree(weighted=True, nbunch=nbunch)
        net_in_weight = G.in_degree(weighted=True, nbunch=nbunch)
    except TypeError:
        net_out_weight = G.out_degree(weight='weight', nbunch=nbunch)
        net_in_weight = G.in_degree(weight='weight', nbunch=nbunch)
    A = nx.to_scipy_sparse_matrix(G, nodelist=nbunch)
    i, j, netcells = extract.find(A)
    dists = {
        'out-degree':
            np.array([out_degree[i] for i in nbunch], dtype=np.float32),
        'in-degree':
            np.array([in_degree[i] for i in nbunch], dtype=np.float32),
        'gross_out-weight':
            np.array([gross_out_weight[i] for i in nbunch], dtype=np.float32),
        'gross_in-weight':
            np.array([gross_in_weight[i] for i in nbunch], dtype=np.float32),
        'net_out-weight':
            np.array([net_out_weight[i] for i in nbunch], dtype=np.float32),
        'net_in-weight':
            np.array([net_in_weight[i] for i in nbunch], dtype=np.float32),
        'gross_cells': grosscells,
        'net_cells': netcells
    }
    return dists
Author: recalcc, Project: interbank, Lines: 57, File: basic_stats_funcs.py
Example 5: to_sparse
def to_sparse(G):
    """
    DiGraph to scipy sparse matrix.
    """
    try:
        return nx.to_scipy_sparse_matrix(G.graph, dtype=int, format='csr')
    # in case one sends in G.graph instead.
    except AttributeError:
        return nx.to_scipy_sparse_matrix(G, dtype=int, format='csr')
Author: caosuomo, Project: rads, Lines: 9, File: utils.py
Example 6: eig_calc
def eig_calc(G, normalize=None, nstart_norm=None):
    '''
    Calculates the dominant eigenvector of a graph (the one associated with eigenvalue 1).
    Parameters
    ----------
    G - networkx directed graph, the strongly connected component (subgraph) in our case
    normalize - int, the number of nodes in the original (entire) graph, for normalizing the resulting eigenvector by the proportion of the component within the entire (original) graph
    nstart_norm - float, a weight in [0,1] for normalizing the resulting eigenvector (reflecting the component's share of the risk in the entire (original) graph).
    NOTE: normalize and nstart_norm cannot be passed together!! Only one of them may differ from None!
    Returns
    -------
    results_dict - a dict holding the (normalized) dominant eigenvector (the keys are G's node names, basically integers)
    '''
    import scipy as sp
    #print '\n\t~~~~~~ eig_calc ~~~~~~'; startTime = datetime.now(); sys.stdout.flush()
    startTime = datetime.now()
    n = G.number_of_nodes()
    if n == 1:
        eigvec = np.array([1])
    elif n == 2:  # for handling ValueError: k must be less than ndim(A)-1, k=1
        return power_iteration(G, normalize=normalize, nstart_norm=nstart_norm)
    else:  # the graph contains more than 2 nodes
        A = nx.to_scipy_sparse_matrix(G)
        '''print '--- eig_calc: is sub graph stochastic? ' + str(gm.check_if_stochastic_matrix(nx.to_numpy_matrix(G)))#; sys.stdout.flush()
        print '--- eig_calc: is sub graph strongly connected? ' + str(nx.is_strongly_connected(G))#; sys.stdout.flush()
        print '--- eig_calc: is sub graph aperiodic? ' + str(nx.is_aperiodic(G));# sys.stdout.flush()
        print '--- eig_calc: debug step took: '+str(datetime.now()-tmpTime); tmpTime = datetime.now(); sys.stdout.flush()
        '''
        try:
            eigval, eigvec = sp.sparse.linalg.eigen.arpack.eigs(A.T, k=1, sigma=1, which='LM')
        except RuntimeError:
            B = nx.to_scipy_sparse_matrix(add_noise(G))
            eigval, eigvec = sp.sparse.linalg.eigen.arpack.eigs(B.T, k=1, sigma=1, which='LM')
        #eigval,eigvec = sp.sparse.linalg.eigen.arpack.eigs(A.T, k=1, which='LM')
        #print '--- eig_calc: eigs took: '+str(datetime.now()-tmpTime); sys.stdout.flush()
        #print '--- eig_calc: sub graph eigval- '+str(eigval)
    eigvec = eigvec/eigvec.sum()
    if normalize:
        norm_factor = float(n)/normalize
        eigvec = eigvec*norm_factor
        if n > 100: print '--- eig_calc: calc of class contains ', n, ' nodes, (', float(n)/normalize, '% of the non-isolates nodes from the graph) took-', datetime.now()-startTime; sys.stdout.flush()
    elif nstart_norm != None:
        eigvec = eigvec*nstart_norm
        if n > 100: print '--- eig_calc: calc of class contains ', n, ' nodes, took-', datetime.now()-startTime; sys.stdout.flush()
    #if (eigvec.imag.sum() != 0. ):
    #    print '##### COMPLEX VECTOR!!!! returning the real part only!!! #####'; #sys.stdout.flush()
    results_dict = dict(zip(G.nodes(), map(float, eigvec.real)))
    return results_dict
Author: michaly, Project: Risk_Ranking_System, Lines: 52, File: salsa.py
Example 7: mypr
def mypr(G, alpha=0.85, personalization=None,
         max_iter=100, tol=1.0e-6, weight='weight'):
    nodelist = G.nodes()
    M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight)
    (n, m) = M.shape  # should be square
    S = scipy.array(M.sum(axis=1))
    S[S > 0] = 1.0 / S[S > 0]
    Sm = scipy.sparse.lil_matrix((n, n))
    Sm.setdiag(S.flat)
    Sm = Sm.tocsr()
    M = Sm * M
    ## Q = scipy.sparse.spdiags(S.T, 0, *M.shape, format='csr')
    ## M = Q * M
    x = scipy.ones((n))/n  # initial guess
    dangle = scipy.array(scipy.where(M.sum(axis=1) == 0, 1.0/n, 0)).flatten()
    # add "teleportation"/personalization
    v = x
    i = 0
    while i <= max_iter:
        # power iteration: make up to max_iter iterations
        xlast = x
        x = alpha*(x*M + scipy.dot(dangle, xlast)) + (1 - alpha)*v
        x = x/x.sum()
        # check convergence, l1 norm
        err = scipy.absolute(x - xlast).sum()
        if err < n*tol:
            r = dict(zip(nodelist, x))
            return r
        i += 1
    print 'Failed to converge'
Author: aweinstein, Project: scrapcode, Lines: 35, File: page_rank.py
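A quick way to exercise mypr is to compare it against NetworkX's built-in PageRank on a small strongly connected digraph (a sketch; the two should agree to within the convergence tolerance):

G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 2)])
print(mypr(G))                      # the function defined above
print(nx.pagerank(G, alpha=0.85))   # built-in reference implementation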
Example 8: modularity_matrix
def modularity_matrix(G, nodelist=None, weight=None):
    """Return the modularity matrix of G.

    The modularity matrix is the matrix B = A - <A>, where A is the adjacency
    matrix and <A> is the average adjacency matrix, assuming that the graph
    is described by the configuration model.

    More specifically, the element B_ij of B is defined as
        A_ij - k_i k_j / (2 * m)
    where k_i is the degree of node i, and where m is the number of edges
    in the graph. When weight is set to the name of an edge attribute,
    A_ij, k_i, k_j and m are computed using its value.

    Parameters
    ----------
    G : Graph
        A NetworkX graph
    nodelist : list, optional
        The rows and columns are ordered according to the nodes in nodelist.
        If nodelist is None, then the ordering is produced by G.nodes().
    weight : string or None, optional (default=None)
        The edge attribute that holds the numerical value used for
        the edge weight. If None then all edge weights are 1.

    Returns
    -------
    B : Numpy matrix
        The modularity matrix of G.

    Examples
    --------
    >>> import networkx as nx
    >>> k = [3, 2, 2, 1, 0]
    >>> G = nx.havel_hakimi_graph(k)
    >>> B = nx.modularity_matrix(G)

    See Also
    --------
    to_numpy_matrix
    adjacency_matrix
    laplacian_matrix
    directed_modularity_matrix

    References
    ----------
    .. [1] M. E. J. Newman, "Modularity and community structure in networks",
       Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006.
    """
    if nodelist is None:
        nodelist = list(G)
    A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
                                  format='csr')
    k = A.sum(axis=1)
    m = k.sum() * 0.5
    # Expected adjacency matrix
    X = k * k.transpose() / (2 * m)
    return A - X
Author: AllenDowney, Project: networkx, Lines: 60, File: modularitymatrix.py
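A useful sanity check on the result: each row of B sums to (numerically) zero, because sum_j A_ij = k_i and sum_j k_i k_j / (2m) = k_i. A small sketch using the graph from the docstring:

import networkx as nx
import numpy as np

G = nx.havel_hakimi_graph([3, 2, 2, 1, 0])
B = nx.modularity_matrix(G)
print(np.allclose(B.sum(axis=1), 0))    # True: rows of the modularity matrix sum to zero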
Example 9: compute_slice_matrices
def compute_slice_matrices(self, G):
    # Create node and edge layers
    node_layer = defaultdict(list)
    for n in G.nodes():
        node_layer[n[0]].append(n)
    edge_layer = defaultdict(list)
    for e in G.edges(data=True):
        edge_layer[e[2]['etype']].append(e)
    ALLNTYPES = [ntype for ntype in node_layer]
    ALLETYPES = [etype for etype in edge_layer]
    #### Transform everything into linear algebra...
    self.OrderedNodes = []
    for ntype in ALLNTYPES:
        self.OrderedNodes = self.OrderedNodes + node_layer[ntype]
    self.NodeIndex = {}
    for idx, n in enumerate(self.OrderedNodes):
        self.NodeIndex[n] = idx
    # Construct adjacency matrices for the various slices (single edge type)
    self.AdjMat = {}
    self.Degs = {}  # Degrees
    # Invdegs = {}
    for etype in ALLETYPES:
        print '--computing slice for edge type "'+etype+'"'
        H = graph_slice(G, etypes=etype)
        self.AdjMat[etype] = nx.to_scipy_sparse_matrix(H, self.OrderedNodes, format='csr')
        self.Degs[etype] = np.array([[max(1, float(H.degree(n)))] for n in self.OrderedNodes])
Author: Mango-information-systems, Project: SNA, Lines: 31, File: sliced_graph.py
Example 10: compute_pagerank
def compute_pagerank(network: nx.DiGraph, damping: float = 0.85):
    Adj = nx.to_scipy_sparse_matrix(network, dtype='float', format='csr')
    deg = np.ravel(Adj.sum(axis=1))
    Dinv = sparse.diags(1 / deg)
    Trans = (Dinv @ Adj).T
    pr = pagerank_power(Trans, damping=damping)
    return pr
Author: jni, Project: prin, Lines: 7, File: spectral.py
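pagerank_power here is a helper from the example's own project and is not shown. A minimal stand-in (hypothetical name pagerank_power_sketch), valid only when the graph has no dangling nodes, might look like this:

import networkx as nx
import numpy as np
from scipy import sparse

def pagerank_power_sketch(Trans, damping=0.85, tol=1e-10):
    # plain power iteration on a column-stochastic transition matrix
    n = Trans.shape[0]
    x = np.full(n, 1.0 / n)
    while True:
        x_new = damping * (Trans @ x) + (1 - damping) / n
        if np.abs(x_new - x).sum() < tol:
            return x_new
        x = x_new

G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 1)])    # every node has out-edges
Adj = nx.to_scipy_sparse_matrix(G, dtype='float', format='csr')
deg = np.ravel(Adj.sum(axis=1))                     # all nonzero here, so 1/deg is safe
Trans = (sparse.diags(1 / deg) @ Adj).T
print(pagerank_power_sketch(Trans))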
Example 11: main111
def main111():
    if 1:
        G = nx.read_edgelist(infname)
        print nx.info(G)
        # Graph adjacency matrix
        A = nx.to_scipy_sparse_matrix(G)
        print type(A)
        from scipy import sparse, io
        io.mmwrite("Results/test.mtx", A)
        exit()
        # write to disk clustering coeffs for this graph
        snm.get_clust_coeff([G], 'orig', 'mmonth')
        # write to disk eigenvalue
        snm.network_value_distribution([G], [], 'origMmonth')
    if 0:
        edgelist = np.loadtxt(infname, dtype=str, delimiter='\t')
        print edgelist[:4]
        idx = np.arange(len(edgelist))
        np.random.shuffle(idx)
        subsamp_edgelist = edgelist[idx[:100]]
        G = nx.Graph()
        G.add_edges_from([(long(x), long(y)) for x, y in subsamp_edgelist])
        # visualize this graph
        # visualize_graph(G)
        exit()
        G = nx.Graph()
        G.add_edges_from([(long(x), long(y)) for x, y in edgelist])
        print nx.info(G)
    print 'Done'
Author: abitofalchemy, Project: ScientificImpactPrediction, Lines: 32, File: procjson_tograph.py
Example 12: unroll_adjacency_matrix
def unroll_adjacency_matrix(G):
    '''"Unrolls" the adjacency matrix of the input graph into a vector. This
    is done by extracting all off-diagonal elements of the nxn adjacency matrix
    and concatenating them into an n(n - 1)/2 dimensional array.
    Example:
    [[0, 1, 0],
     [1, 0, 1],
     [0, 1, 0]]
    gives [1, 0, 1].'''
    # Number of nodes in the graph
    n = len(G)
    # Length of the unrolled matrix
    dim = n*(n - 1)//2
    # Sparse matrix to hold the results
    result = sp.sparse.lil_matrix((1, dim))
    # Adjacency matrix for the graph
    M = nx.to_scipy_sparse_matrix(G, format="coo")
    for i, j, v in zip(M.row, M.col, M.data):
        # Only care about the upper (northeastern) triangle of the matrix
        if not j > i:
            continue
        ind = i*n - (i*(i+1))//2 + j - i - 1  # linear index of (i, j) in the flattened upper triangle
        # Add the encountered element at the appropriate index of result
        result[0, ind] = v
    return result
Author: bjarkemoensted, Project: multiplex, Lines: 28, File: mptools.py
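The ind formula is the standard linear index into the flattened strict upper triangle, the same layout scipy.spatial.distance.squareform uses for condensed matrices; a quick check:

import numpy as np
from scipy.spatial.distance import squareform

n = 5
square = np.triu(np.arange(1.0, n * n + 1).reshape(n, n), 1)
square = square + square.T                      # symmetric, zero diagonal
condensed = squareform(square)
for i in range(n):
    for j in range(i + 1, n):
        ind = i*n - (i*(i+1))//2 + j - i - 1
        assert condensed[ind] == square[i, j]   # same layout
print('index formula matches the squareform layout')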
Example 13: page_rank_scipy
def page_rank_scipy(G, alpha=0.85, max_iter=100, tol=1.0e-4, nodelist=None):
    """Return a numpy array of the PageRank of G.
    PageRank computes the largest eigenvector of the stochastic
    adjacency matrix of G.
    The eigenvector calculation is done by the power iteration method
    and has no guarantee of convergence.
    A starting vector for the power iteration can be given in the
    dictionary nstart.
    This implementation requires scipy.
    """
    import scipy.sparse
    M = NX.to_scipy_sparse_matrix(G, nodelist=nodelist)
    (n, m) = M.shape  # should be square
    S = scipy.array(M.sum(axis=1)).flatten()
    index = scipy.where(S != 0)[0]
    for i in index:
        M[i, :] *= 1.0/S[i]
    x = scipy.ones((n))/n  # initial guess
    dangle = scipy.array(scipy.where(M.sum(axis=1) == 0, 1.0/n, 0)).flatten()
    for i in range(max_iter):
        xlast = x
        x = alpha*(M.rmatvec(x) + scipy.dot(dangle, xlast)) + (1 - alpha)*xlast.sum()/n
        # check convergence, l1 norm
        err = scipy.absolute(x - xlast).sum()
        if err < n*tol:
            return x
    raise NetworkXError("page_rank: power iteration failed to converge in %d iterations." % (i+1))
Author: SuperbBob, Project: trust-metrics, Lines: 33, File: page_rank.py
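M.rmatvec(x) dates this snippet to a very old SciPy; the method is gone from current sparse matrices, where the same product is spelled x @ M or M.T @ x. A tiny equivalence check:

import numpy as np
import scipy.sparse as sp

M = sp.random(5, 5, density=0.5, format='csr')
x = np.ones(5) / 5
print(np.allclose(x @ M, M.T @ x))    # both compute what M.rmatvec(x) used to return (for real M)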
Example 14: r_perturbSa
def r_perturbSa(g, p=None):
    '''Random perturbation with a fixed parameter; p is the success probability of the Bernoulli trial.'''
    A = nx.to_scipy_sparse_matrix(g)
    B = sparse.triu(A).toarray()
    # print B
    n = len(g)
    e_num = len(g.edges())  # number of edges present in the graph
    q = e_num * (1 - p) / ((n * (n - 1)) / 2 - e_num)
    # print q
    i = 0
    ts = 0
    listp = stats.bernoulli.rvs(p, size=e_num)
    listp = listp.tolist()
    listq = stats.bernoulli.rvs(q, size=(n * (n - 1)) / 2 - e_num)
    listq = listq.tolist()
    while i < n:
        j = i + 1  # skip the zeros on the diagonal
        while j < n:
            if B[i, j] == 1:
                B[i, j] = listp.pop()  # Bernoulli trial with success probability p
                # ts = ts + 1
                # print "+", ts, ":", i, ",", j, ",", B[i, j]
            else:
                B[i, j] = listq.pop()  # Bernoulli trial with success probability q
                # ts = ts + 1
                # print "-", ts, ":", i, ",", j, ",", B[i, j]
            j = j + 1
        i = i + 1
    return nx.from_numpy_matrix(B, create_using=nx.Graph())  # rebuild a Graph-typed return object
Author: liupenggl, Project: dpr, Lines: 32, File: grandom.py
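The nested while loops above touch all O(n^2) node pairs in pure Python; the same fixed-parameter perturbation can be vectorized with NumPy. A sketch (hypothetical name r_perturb_vec, same p/q semantics as r_perturbSa):

import networkx as nx
import numpy as np
from scipy import sparse, stats

def r_perturb_vec(g, p):
    A = nx.to_scipy_sparse_matrix(g)
    B = sparse.triu(A).toarray()
    n = len(g)
    e_num = g.number_of_edges()
    q = e_num * (1.0 - p) / (n * (n - 1) / 2.0 - e_num)
    iu = np.triu_indices(n, k=1)  # strict upper triangle
    edges = B[iu] == 1
    out = np.empty(len(iu[0]), dtype=int)
    out[edges] = stats.bernoulli.rvs(p, size=edges.sum())       # keep each edge with probability p
    out[~edges] = stats.bernoulli.rvs(q, size=(~edges).sum())   # add each non-edge with probability q
    B[iu] = out
    return nx.from_numpy_matrix(B, create_using=nx.Graph())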
Example 15: r_perturbR
def r_perturbR(g, R):
    '''Random perturbation with variable (per-pair) parameters.'''
    A = nx.to_scipy_sparse_matrix(g)
    B = sparse.triu(A).toarray()
    # print B
    n = len(g)
    i = 0
    ts = 0
    while i < n:
        j = i + 1
        while j < n:
            if B[i, j] == 1:
                if R[i, j] < 1:
                    B[i, j] = stats.bernoulli.rvs(R[i, j])  # Bernoulli trial with success probability p
                else:
                    B[i, j] = stats.bernoulli.rvs(1)  # could actually be removed
                ts = ts + 1
                # print "+", ts, ":", i, ",", j, ",", B[i, j]
            else:
                if R[i, j] < 1:
                    B[i, j] = stats.bernoulli.rvs(R[i, j])  # Bernoulli trial with success probability q
                else:
                    B[i, j] = stats.bernoulli.rvs(0)  # could actually be removed
                ts = ts + 1
                # print "-", ts, ":", i, ",", j, ",", B[i, j]
            j = j + 1
        i = i + 1
    return nx.from_numpy_matrix(B, create_using=nx.Graph())  # rebuild a Graph-typed return object
Author: liupenggl, Project: dpr, Lines: 30, File: grandom.py
Example 16: _backward
def _backward(T, edge_to_P, root, root_prior_distn1d, node_to_data_lmap):
    """
    This is the first pass of a forward-backward algorithm.
    Parameters
    ----------
    {params}
    """
    # Define a toposort node ordering and a corresponding csr matrix.
    nodes = nx.topological_sort(T, [root])
    node_to_idx = dict((na, i) for i, na in enumerate(nodes))
    m = nx.to_scipy_sparse_matrix(T, nodes)
    # Stack the transition matrices into a single array.
    nnodes = len(nodes)
    nstates = root_prior_distn1d.shape[0]
    trans = np.empty((nnodes-1, nstates, nstates), dtype=float)
    for (na, nb), P in edge_to_P.items():
        edge_idx = node_to_idx[nb] - 1
        trans[edge_idx, :, :] = P
    # Stack the data into a single array.
    data = np.empty((nnodes, nstates), dtype=float)
    for i, na in enumerate(nodes):
        data[i, :] = node_to_data_lmap[na]
    # Compute the partial likelihoods.
    lhood = np.empty((nnodes, nstates), dtype=float)
    validation = 0
    _wrapped_first_pass(m.indices, m.indptr, trans, data, lhood, validation)
    lhood[0, :] *= root_prior_distn1d
    # Convert the output into a dictionary.
    return dict((na, lhood[i, :]) for i, na in enumerate(nodes))
Author: argriffing, Project: npmctree, Lines: 35, File: cy_dynamic_lmap_lhood.py
Example 17: classify_samples
def classify_samples(data, labels, unmarked_idxs,
                     sample_size, n_runs, n_clusters):
    unmarked_point_probs = {}
    all_idxs = range(len(unmarked_idxs))
    random.shuffle(all_idxs)
    keep_raw_idxs = sorted(all_idxs[:sample_size])
    delete_raw_idxs = sorted(all_idxs[sample_size:])
    keep_idxs, delete_idxs = (unmarked_idxs[keep_raw_idxs],
                              unmarked_idxs[delete_raw_idxs])
    bagging_graph = nx.from_scipy_sparse_matrix(data)
    bagging_graph.remove_nodes_from(delete_idxs)
    bagging_adj_matrix = nx.to_scipy_sparse_matrix(bagging_graph)
    bagging_labels = np.delete(labels, delete_idxs, 0)
    bagging_unmarked_idxs = np.where(
        bagging_labels[:, 0] == -1)[0]
    clf = TransductiveClassifier(n_runs, n_clusters)
    clf.fit(bagging_adj_matrix, bagging_labels)
    assert len(keep_idxs) == len(bagging_unmarked_idxs)
    for i, idx in enumerate(keep_idxs):
        unmarked_point_probs[idx] = clf.transduction_[
            bagging_unmarked_idxs[i]]
    return unmarked_point_probs
Author: rsbowman, Project: yeast-protein, Lines: 25, File: transduction.py
Example 18: configuration_model
def configuration_model(self, return_copy=False):
    """ Reads AdjMatrixSequence Object and returns an edge randomized version.
        Result is written to txt file.
    """
    if self.is_directed:
        nx_creator = nx.DiGraph()
    else:
        nx_creator = nx.Graph()
    if return_copy:
        x = self[:]
    else:
        x = self
    # t_edges = []
    for i in range(len(self)):
        print "configuration model: ", i
        graphlet = nx.from_scipy_sparse_matrix(x[i], create_using=nx_creator)
        graphlet = gwh.randomize_network(graphlet)
        x[i] = nx.to_scipy_sparse_matrix(graphlet, dtype="int")
        # for u, v in graphlet.edges():
        #     t_edges.append((u, v, i))
    # gwh.write_array(t_edges, "Configuration_model.txt")
    if return_copy:
        return x
    else:
        return
Author: hartmutlentz, Project: lonetop, Lines: 29, File: MatrixList_obsolete.py
Example 19: test_ordering
def test_ordering(self):
    G = nx.DiGraph()
    G.add_edge(1, 2)
    G.add_edge(2, 3)
    G.add_edge(3, 1)
    M = nx.to_scipy_sparse_matrix(G, nodelist=[3, 2, 1])
    np_assert_equal(M.todense(), np.matrix([[0, 0, 1], [1, 0, 0], [0, 1, 0]]))
Author: argriffing, Project: networkx, Lines: 7, File: test_convert_scipy.py
Example 20: list_directed_cc
def list_directed_cc(H):
    adj_matrix = nx.to_scipy_sparse_matrix(H)  # the graph adjacency matrix as a SciPy sparse matrix
    list_cc = sp.sparse.csgraph.connected_components(adj_matrix, directed=True, connection='weak', return_labels=True)
    print(" All cc: ", list_cc)
    return list_cc
Author: charly-blanche-t, Project: Bugula, Lines: 8, File: directed_connected_components.py
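One last portability note: to_scipy_sparse_matrix was deprecated in NetworkX 2.6 and removed in NetworkX 3.0 in favor of to_scipy_sparse_array, which returns a scipy.sparse array rather than the legacy matrix type. Under NetworkX 3.x the equivalent call is:

import networkx as nx

G = nx.path_graph(4)
A = nx.to_scipy_sparse_array(G, format='csr')   # replacement in NetworkX >= 3.0
print(type(A))                                  # a SciPy csr_array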
Note: the networkx.to_scipy_sparse_matrix examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.