This article collects typical usage examples of the Python function networkx.from_scipy_sparse_matrix, gathered from open-source projects. If you are wondering what from_scipy_sparse_matrix does, how to call it, or what real-world uses look like, the hand-picked examples below should help.
Twenty code examples of from_scipy_sparse_matrix are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
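Before diving into the collected examples, here is a minimal sketch of typical usage. It assumes a NetworkX 1.x/2.x environment, where from_scipy_sparse_matrix still exists (newer releases, NetworkX 3.0+, replace it with from_scipy_sparse_array):

import networkx as nx
from scipy import sparse

A = sparse.csr_matrix([[0, 1, 0],
                       [1, 0, 2],
                       [0, 2, 0]])
G = nx.from_scipy_sparse_matrix(A)  # an undirected Graph by default
print(G.edges(data=True))           # nonzero entries become 'weight' edge attributes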
Example 1: test_from_scipy_sparse_matrix_parallel_edges
def test_from_scipy_sparse_matrix_parallel_edges(self):
    """Tests that the :func:`networkx.from_scipy_sparse_matrix` function
    interprets integer weights as the number of parallel edges when
    creating a multigraph.

    """
    A = sparse.csr_matrix([[1, 1], [1, 2]])
    # First, with a simple graph, each integer entry in the adjacency
    # matrix is interpreted as the weight of a single edge in the graph.
    expected = nx.DiGraph()
    edges = [(0, 0), (0, 1), (1, 0)]
    expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
    expected.add_edge(1, 1, weight=2)
    actual = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
                                         create_using=nx.DiGraph())
    assert_graphs_equal(actual, expected)
    actual = nx.from_scipy_sparse_matrix(A, parallel_edges=False,
                                         create_using=nx.DiGraph())
    assert_graphs_equal(actual, expected)
    # Now each integer entry in the adjacency matrix is interpreted as the
    # number of parallel edges in the graph if the appropriate keyword
    # argument is specified.
    edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)]
    expected = nx.MultiDiGraph()
    expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
    actual = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
                                         create_using=nx.MultiDiGraph())
    assert_graphs_equal(actual, expected)
    expected = nx.MultiDiGraph()
    expected.add_edges_from(set(edges), weight=1)
    # The sole self-loop (edge 0) on vertex 1 should have weight 2.
    expected[1][1][0]['weight'] = 2
    actual = nx.from_scipy_sparse_matrix(A, parallel_edges=False,
                                         create_using=nx.MultiDiGraph())
    assert_graphs_equal(actual, expected)
Author: argriffing, Project: networkx, Lines: 35, Source: test_convert_scipy.py
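As a quick illustration of the behavior this test verifies, the following sketch (same matrix A, NetworkX 1.x/2.x API assumed) shows the entry A[1, 1] == 2 becoming two parallel self-loops of weight 1 when parallel_edges=True and a multigraph is requested:

import networkx as nx
from scipy import sparse

A = sparse.csr_matrix([[1, 1], [1, 2]])
G = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
                                create_using=nx.MultiDiGraph())
print(list(G.edges(data=True)))
# Expected: (1, 1) appears twice with weight 1, instead of once with weight 2.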
Example 2: load_train_test_graphs
def load_train_test_graphs(dataset, recache_input):
    raw_mat_path = 'data/{}.npz'.format(dataset)
    train_graph_path = 'data/{}/train_graph.pkl'.format(dataset)
    test_graph_path = 'data/{}/test_graph.pkl'.format(dataset)

    if recache_input:
        print('loading sparse matrix from {}'.format(raw_mat_path))
        m = load_sparse_csr(raw_mat_path)

        print('splitting train and test...')
        train_m, test_m = split_train_test(
            m,
            weights=[0.9, 0.1])

        print('converting to nx.DiGraph')
        train_g = nx.from_scipy_sparse_matrix(train_m, create_using=nx.DiGraph(),
                                              edge_attribute='sign')
        test_g = nx.from_scipy_sparse_matrix(test_m, create_using=nx.DiGraph(),
                                             edge_attribute='sign')

        print('saving train and test graphs...')
        nx.write_gpickle(train_g, train_graph_path)
        nx.write_gpickle(test_g, test_graph_path)
    else:
        print('loading train and test graphs...')
        train_g = nx.read_gpickle(train_graph_path)
        test_g = nx.read_gpickle(test_graph_path)
    return train_g, test_g
Author: xiaohan2012, Project: snpp, Lines: 26, Source: data.py
Example 3: submatrix_pull_via_networkx
def submatrix_pull_via_networkx(matrix, node_array, directed=True):
    if directed:
        graph = nx.from_scipy_sparse_matrix(matrix, create_using=nx.DiGraph())
    else:
        graph = nx.from_scipy_sparse_matrix(matrix, create_using=nx.Graph())

    sub_graph = graph.subgraph(list(node_array))
    sub_matrix = nx.to_scipy_sparse_matrix(sub_graph, dtype=np.float64, format="csr")
    return sub_matrix
Author: Auguraculums, Project: reveal-user-classification, Lines: 12, Source: snow_2014_graph_dataset_util.py
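Design note: the round-trip through NetworkX is convenient but not required; the same submatrix can be pulled with plain SciPy fancy indexing. A sketch, assuming node_array holds integer row/column indices (the function name below is made up for illustration):

import numpy as np

def submatrix_pull_direct(matrix, node_array):
    # Slice rows, then columns, of the CSR representation.
    csr = matrix.tocsr()
    return csr[node_array, :][:, node_array]

Note that the NetworkX version orders rows by sub_graph.nodes(), so the two results may differ by a permutation of rows and columns if node order matters.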
Example 4: community
def community(document):
    sentences = sent_tokenize(document)
    bow_matrix = CountVectorizer(stop_words='english').fit_transform(sentences)
    normalized = TfidfTransformer().fit_transform(bow_matrix)
    similarity_graph = normalized * normalized.T
    nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
    sub_graphs = []
    edge_wts = nx_graph.edges(data=True)
    edge_wts.sort(key=lambda (a, b, dct): dct['weight'], reverse=True)
    k = 10  # number of sentences in the summary
    G = nx.Graph()
    for i in nx_graph.nodes():
        G.add_node(i)
    # Add edges back in order of decreasing weight until the number of
    # connected components (n gives the number of sub graphs) drops to k.
    for u, v, d in edge_wts:
        G.add_edge(u, v, d)
        sub_graphs = nx.connected_component_subgraphs(G)
        # print sub_graphs
        n = len(sub_graphs)
        if n == k:
            break

    inSummary = [0 for i in range(len(sentences))]

    n = len(sub_graphs)
    for i in range(n):
        sen = [sentences[j] for j in sub_graphs[i].nodes()]
        arr = [j for j in sub_graphs[i].nodes()]
        scores = textrank(sen)
        for j in range(len(arr)):
            inSummary[arr[j]] = scores[j]
    # print inSummary
    summ = [(sentences[i], inSummary[i]) for i in range(len(inSummary))]
    return summ
Author: Shushman, Project: news-article-summarizer, Lines: 35, Source: community.py
Example 5: draw_adjacency_graph
def draw_adjacency_graph(A,
                         node_color=[],
                         size=10,
                         layout='graphviz',
                         prog='neato',
                         node_size=80):
    graph = nx.from_scipy_sparse_matrix(A)

    plt.figure(figsize=(size, size))
    plt.grid(False)
    plt.axis('off')

    if layout == 'graphviz':
        pos = nx.graphviz_layout(graph, prog=prog)
    else:
        pos = nx.spring_layout(graph)

    if not node_color:
        node_color = 'gray'
    nx.draw_networkx_nodes(graph, pos,
                           node_color=node_color,
                           alpha=0.6,
                           node_size=node_size,
                           cmap=plt.get_cmap('autumn'))
    nx.draw_networkx_edges(graph, pos, alpha=0.5)
    plt.show()
Author: nickgentoo, Project: pyEDeN, Lines: 27, Source: display.py
Example 6: classify_samples
def classify_samples(data, labels, unmarked_idxs,
                     sample_size, n_runs, n_clusters):
    unmarked_point_probs = {}
    all_idxs = range(len(unmarked_idxs))
    random.shuffle(all_idxs)
    keep_raw_idxs = sorted(all_idxs[:sample_size])
    delete_raw_idxs = sorted(all_idxs[sample_size:])
    keep_idxs, delete_idxs = (unmarked_idxs[keep_raw_idxs],
                              unmarked_idxs[delete_raw_idxs])

    bagging_graph = nx.from_scipy_sparse_matrix(data)
    bagging_graph.remove_nodes_from(delete_idxs)
    bagging_adj_matrix = nx.to_scipy_sparse_matrix(bagging_graph)
    bagging_labels = np.delete(labels, delete_idxs, 0)
    bagging_unmarked_idxs = np.where(
        bagging_labels[:, 0] == -1)[0]

    clf = TransductiveClassifier(n_runs, n_clusters)
    clf.fit(bagging_adj_matrix, bagging_labels)
    assert len(keep_idxs) == len(bagging_unmarked_idxs)
    for i, idx in enumerate(keep_idxs):
        unmarked_point_probs[idx] = clf.transduction_[
            bagging_unmarked_idxs[i]]
    return unmarked_point_probs
Author: rsbowman, Project: yeast-protein, Lines: 25, Source: transduction.py
Example 7: identity_conversion
def identity_conversion(self, G, A, create_using):
    GG = nx.from_scipy_sparse_matrix(A, create_using=create_using)
    self.assert_equal(G, GG)

    GW = nx.to_networkx_graph(A, create_using=create_using)
    self.assert_equal(G, GW)

    GI = create_using.__class__(A)
    self.assert_equal(G, GI)

    ACSR = A.tocsr()
    GI = create_using.__class__(ACSR)
    self.assert_equal(G, GI)

    ACOO = A.tocoo()
    GI = create_using.__class__(ACOO)
    self.assert_equal(G, GI)

    ACSC = A.tocsc()
    GI = create_using.__class__(ACSC)
    self.assert_equal(G, GI)

    AD = A.todense()
    GI = create_using.__class__(AD)
    self.assert_equal(G, GI)

    AA = A.toarray()
    GI = create_using.__class__(AA)
    self.assert_equal(G, GI)
Author: argriffing, Project: networkx, Lines: 29, Source: test_convert_scipy.py
Example 8: configuration_model
def configuration_model(self, return_copy=False):
    """ Reads AdjMatrixSequence Object and returns an edge randomized version.
        Result is written to txt file.
    """
    if self.is_directed:
        nx_creator = nx.DiGraph()
    else:
        nx_creator = nx.Graph()

    if return_copy:
        x = self[:]
    else:
        x = self

    # t_edges=[]
    for i in range(len(self)):
        print "configuration model: ", i
        graphlet = nx.from_scipy_sparse_matrix(x[i], create_using=nx_creator)
        graphlet = gwh.randomize_network(graphlet)
        x[i] = nx.to_scipy_sparse_matrix(graphlet, dtype="int")
        # for u, v in graphlet.edges():
        #     t_edges.append((u, v, i))

    # gwh.write_array(t_edges, "Configuration_model.txt")

    if return_copy:
        return x
    else:
        return
Author: hartmutlentz, Project: lonetop, Lines: 29, Source: MatrixList_obsolete.py
Example 9: format_out_relations
def format_out_relations(relations, out_):
    """Format relations in the format determined by the parameter out_.

    Parameters
    ----------
    relations: scipy.sparse matrix
        the relations expressed in a sparse way.
    out_: optional, ['sparse', 'network', 'sp_relations', 'list']
        the output format we desire.

    Returns
    -------
    relations_o: decided format
        the relations expressed in the decided format.

    """
    if out_ == 'sparse':
        relations_o = relations
    elif out_ == 'network':
        relations_o = nx.from_scipy_sparse_matrix(relations)
    elif out_ == 'sp_relations':
        relations_o = RegionDistances(relations)
    elif out_ == 'list':
        relations_o = []
        for i in range(relations.shape[0]):
            relations_o.append(list(relations.getrow(i).nonzero()[0]))
    return relations_o
Author: tgquintela, Project: pySpatialTools, Lines: 28, Source: formatters.py
Example 10: plot_subgraph_links
def plot_subgraph_links(sparse_m, query, degree=0, layout="std", graph=None):
    cond = np.where(query)[0]
    if graph is None:
        graph = nx.from_scipy_sparse_matrix(sparse_m)

    if degree == 0:
        sub1 = cond
        node_color = "r"
    elif degree == 1:
        sub1 = list(set(cond) | set(
            compute_sub_adj(sparse_m, cond)))
        node_color = [("r" if (n in cond) else "b") for n in sub1]
    elif degree == 2:
        sub0 = set(cond) | set(compute_sub_adj(sparse_m, cond))
        sub1 = list(sub0 | set(compute_sub_adj(sparse_m, list(sub0))))
        node_color = [("r" if (n in cond) else "b" if (
            n in sub0) else "y") for n in sub1]

    renderer[layout](
        graph.subgraph(sub1),
        nodelist=list(sub1),
        node_color=node_color,
        alpha=0.5,
        labels={n: str(n) for n in sub1})
Author: yama1968, Project: graph_clustering, Lines: 28, Source: graph_helpers.py
Example 11: learnStructure
def learnStructure(dataP, dataS, Pp, Ps, TAN=True):
    tempMatrix = [[0 for i in range(len(dataP))] for j in range(len(dataP))]
    for i in range(len(dataP)):
        for j in range(i + 1, len(dataP)):
            temp = 0.0
            if np.corrcoef(dataP[i], dataP[j])[0][1] != 1.0:
                temp += Pp * math.log(1 - ((np.corrcoef(dataP[i], dataP[j])[0][1]) ** 2))
            if np.corrcoef(dataS[i], dataS[j])[0][1] != 1.0:
                temp += Ps * math.log(1 - ((np.corrcoef(dataS[i], dataS[j])[0][1]) ** 2))
            temp *= 0.5
            tempMatrix[i][j] = temp
            # tempMatrix[j][i] = temp
    MaxG = nx.DiGraph()
    if TAN:
        G = nx.from_scipy_sparse_matrix(minimum_spanning_tree(csr_matrix(tempMatrix)))
        adjList = G.adj
        i = 0
        notReturnable = {}
        MaxG = getDirectedTree(adjList, notReturnable, MaxG, i)
    else:
        G = nx.Graph(np.asmatrix(tempMatrix))
        adjList = sorted([(u, v, d['weight']) for (u, v, d) in G.edges(data=True)],
                         key=lambda x: x[2])
        i = 2
        MaxG = getDirectedGraph(adjList, MaxG, i)
    return MaxG
Author: SriganeshNk, Project: fMRI, Lines: 25, Source: ETL.py
Example 12: textrank
def textrank(sentences):
    bow_matrix = CountVectorizer().fit_transform(sentences)
    normalized = TfidfTransformer().fit_transform(bow_matrix)
    similarity_graph = normalized * normalized.T
    nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
    scores = nx.pagerank(nx_graph)
    return sorted(((scores[i], i, s) for i, s in enumerate(sentences)), reverse=True)
Author: ankit141189, Project: bing, Lines: 7, Source: generate_document.py
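A hypothetical usage sketch for the textrank() helper above (the sentences are made-up sample data; the sklearn and networkx imports used in the surrounding examples are assumed):

sentences = [
    "NetworkX can build a graph from a SciPy sparse matrix.",
    "PageRank scores the nodes of a graph by importance.",
    "Sparse matrices store only the nonzero entries.",
]
for score, i, sentence in textrank(sentences):
    print("%.4f (#%d) %s" % (score, i, sentence))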
Example 13: find_min_spanning_tree
def find_min_spanning_tree(A):
    """
    Input:
        A : Adjacency matrix in scipy.sparse format.
    Output:
        T : Minimum spanning tree.
        run_time : Total runtime to find the minimum spanning tree.
    """
    # Record start time.
    start = time.time()

    # Check if the graph is pre-processed; if yes, don't process it again.
    if os.path.exists('../Data/dcg_graph.json'):
        with open('../Data/dcg_graph.json') as data:
            d = json.load(data)
        G = json_graph.node_link_graph(d)
    # If the graph is not preprocessed, convert it to a Graph and save it to a JSON file.
    else:
        G = from_scipy_sparse_matrix(A)
        data = json_graph.node_link_data(G)
        with open('../Data/dcg_graph.json', 'w') as outfile:
            json.dump(data, outfile)

    # Find the MST.
    T = minimum_spanning_tree(G)

    # Record total runtime.
    run_time = time.time() - start
    return T, run_time
Author: harshaneelhg, Project: Thesis, Lines: 31, Source: spanning_tree.py
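If the only goal is the MST of a weighted sparse adjacency matrix, SciPy can compute it without any NetworkX conversion at all. A minimal alternative sketch (assuming scipy.sparse.csgraph is available; note it returns a sparse matrix rather than a graph object):

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree

A = csr_matrix(np.array([[0, 2, 0],
                         [2, 0, 3],
                         [0, 3, 0]]))
T = minimum_spanning_tree(A)  # MST as a sparse matrix of the kept edge weights
print(T.toarray())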
Example 14: plot2d
def plot2d(self, title=None, domain=[-1, 1], codomain=[-1, 1], predict=True):
    f, ax = plt.subplots()

    x1 = np.linspace(*domain, 100)
    x2 = np.linspace(*codomain, 100)

    n_samples, n_features = self.X_.shape
    G = nx.from_scipy_sparse_matrix(self.A_)
    pos = {i: self.X_[i] for i in range(n_samples)}
    cm_sc = ListedColormap(["#AAAAAA", "#FF0000", "#0000FF"])

    if title is not None:
        ax.set_title(title)

    ax.set_xlabel("$x_1$")
    ax.set_ylabel("$x_2$")
    ax.set_xlim(domain)
    ax.set_ylim(codomain)

    nx.draw_networkx_nodes(G, pos, ax=ax, node_size=25,
                           node_color=self.y_, cmap=cm_sc)

    if predict:
        xx1, xx2 = np.meshgrid(x1, x2)
        xfull = np.c_[xx1.ravel(), xx2.ravel()]
        z = self.predict(xfull).reshape(100, 100)

        levels = np.array([-1, 0, 1])
        cm_cs = plt.cm.RdYlBu

        if self.params["gamma_i"] != 0.0:
            nx.draw_networkx_edges(G, pos, ax=ax, edge_color="#AAAAAA")

        ax.contourf(xx1, xx2, z, levels, cmap=cm_cs, alpha=0.25)

    return (f, ax)
Author: Y-oHr-N, Project: TextCategorization, Lines: 35, Source: multiclass.py
Example 15: textrank
def textrank(document):
    pst = PunktSentenceTokenizer()
    sentences = pst.tokenize(document)

    # Bag of words
    from sklearn.feature_extraction.text import CountVectorizer
    cv = CountVectorizer()
    bow_matrix = cv.fit_transform(sentences)

    from sklearn.feature_extraction.text import TfidfTransformer
    normalized_matrix = TfidfTransformer().fit_transform(bow_matrix)

    ## Mirrored matrix where the rows and columns correspond to
    ## sentences, and the elements describe how similar the
    ## sentences are. A score of 1 means the sentences are exactly the same.
    similarity_graph = normalized_matrix * normalized_matrix.T
    similarity_graph.toarray()

    # PageRank
    import networkx as nx
    nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)

    ## Mapping of sentence indices to scores. Use them to associate
    ## back to the original sentences and sort them.
    scores = nx.pagerank(nx_graph)
    ranked = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
    print ranked[0][1]
Author: ko, Project: random, Lines: 27, Source: textrank.py
Example 16: get_key_sentences
def get_key_sentences(self, n=5):
    '''
    Uses a simple implementation of TextRank to extract the top N sentences
    from a document.

    Sources:
    - Original paper: http://acl.ldc.upenn.edu/acl2004/emnlp/pdf/Mihalcea.pdf
    - Super useful blog post: http://joshbohde.com/blog/document-summarization
    - Wikipedia: http://en.wikipedia.org/wiki/Automatic_summarization#Unsupervised_keyphrase_extraction:_TextRank
    '''
    # Tokenize the document into sentences. More NLP preprocessing should also happen here.
    sentence_tokenizer = PunktSentenceTokenizer()
    sentences = sentence_tokenizer.tokenize(self.doc)

    # Calculate word counts and TFIDF vectors
    word_counts = CountVectorizer(min_df=0).fit_transform(sentences)
    normalized = TfidfTransformer().fit_transform(word_counts)

    # Normalized graph * its transpose yields a sentence-level similarity matrix
    similarity_graph = normalized * normalized.T

    nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
    scores = nx.pagerank(nx_graph)
    return sorted(((scores[i], s) for i, s in enumerate(sentences)),
                  reverse=True)[:n]  # slice (not index) to return the top n sentences
Author: joannaskao, Project: judgmental, Lines: 25, Source: metadata.py
Example 17: cover
def cover(socp_data, N):
    """Stacks the SOCP data and partitions it into N
    local dicts describing constraints R <= s."""
    if not settings.paths['graclus']:
        raise Exception(
            "Please provide a path to graclus: settings.paths['graclus'] = PATH.")

    n = socp_data['c'].shape[0]

    # Form the Laplacian and use graclus to partition it.
    L = form_laplacian(socp_data)
    graph = nx.from_scipy_sparse_matrix(L)
    d = nx.convert.to_dict_of_lists(graph)

    edgepath = "graclus.edgelist"
    with open(edgepath, "w") as f:
        f.write("%d %d\n" % (graph.number_of_nodes(), graph.number_of_edges()))
        for k, v in d.iteritems():
            f.write("%d %s\n" %
                    (k + 1, ' '.join(map(lambda x: str(x + 1), v))))

    import subprocess
    outpath = "graclus.edgelist.part.%d" % N
    proc = subprocess.Popen([settings.paths['graclus'], edgepath, str(N)])
    proc.wait()

    lines = open(outpath, "r").readlines()

    part_vert = []
    for l in lines:
        part_vert.append(int(l.strip()))

    return part_vert[n:]
Author: echu, Project: dist_ecos, Lines: 34, Source: graclus.py
Example 18: compute_clusters_statistic
def compute_clusters_statistic(test_statistic, proximity_matrix, verbose=False):
    """Given a test statistic for each unit and a boolean proximity
    matrix among units, compute the cluster statistic using the
    connected components graph algorithm. It works for sparse
    proximity matrices as well.

    Returns the clusters and their associated cluster statistic.
    """
    # Build a graph from the proximity matrix:
    if issparse(proximity_matrix):
        graph = from_scipy_sparse_matrix(proximity_matrix)
    else:
        graph = from_numpy_matrix(proximity_matrix)

    # Compute connected components:
    clusters = connected_components(graph)
    if verbose:
        print("Nr. of clusters: %s. Clusters sizes: %s" %
              (len(clusters), np.array([len(cl) for cl in clusters])))

    # Compute the cluster statistic:
    cluster_statistic = np.zeros(len(clusters))
    for i, cluster in enumerate(clusters):
        cluster_statistic[i] = test_statistic[cluster].sum()

    # Final cleanup to prepare easy-to-use results:
    idx = np.argsort(cluster_statistic)[::-1]
    clusters = np.array([np.array(cl, dtype=np.int) for cl in clusters],
                        dtype=np.object)[idx]
    if clusters[0].dtype == np.object:  # THIS FIXES A NUMPY BUG (OR FEATURE?)
        # The bug: it seems not possible to create an ndarray of type
        # np.object from arrays all of the *same* length and desired
        # dtype, i.e. dtype != np.object. In this case the desired dtype
        # is automatically changed into np.object. Example:
        # array([array([1], dtype=int)], dtype=object)
        clusters = clusters.astype(np.int)
    cluster_statistic = cluster_statistic[idx]
    return clusters, cluster_statistic
Author: smkia, Project: cbpktst, Lines: 35, Source: cbpktst.py
Example 19: test_graph_degree
def test_graph_degree():
    "Graph: Graph Degree"
    A = rand_dm(25, 0.5)
    deg = graph_degree(A.data)
    G = nx.from_scipy_sparse_matrix(A.data)
    nx_deg = G.degree()
    nx_deg = array([nx_deg[k] for k in range(25)])
    assert_equal((deg - nx_deg).all(), 0)
Author: justzx2011, Project: qutip, Lines: 8, Source: test_graph.py
Example 20: __test_save_and_load_graph_npz
def __test_save_and_load_graph_npz(self, x):
    '''Test saving and loading a Networkx DiGraph in npz format with np-array wrapping.'''
    out_file = tempfile.TemporaryFile()
    np.savez(out_file, x=np.array([nx.to_scipy_sparse_matrix(x)]))
    out_file.seek(0)  # Only needed here to simulate closing & reopening the file
    x2 = np.load(out_file)
    y = nx.from_scipy_sparse_matrix(x2['x'][0], nx.DiGraph())
    assert_equal(x.nodes(), y.nodes(), 'Saving and loading did not restore the original object')
    assert_equal(x.edges(), y.edges(), 'Saving and loading did not restore the original object')
Author: orenlivne, Project: ober, Lines: 9, Source: TestIo.py
Note: the networkx.from_scipy_sparse_matrix examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The code snippets are selected from open-source projects contributed by many programmers, and their copyright belongs to the original authors; for distribution and use, please refer to the corresponding project's License. Do not reproduce without permission.