This article collects typical usage examples of the Python function networkx.read_gexf. If you are wondering what exactly read_gexf does, how to call it, or how it is used in practice, the hand-picked code samples below should help.
The following 20 code examples of read_gexf are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
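Before diving into the project examples, here is a minimal, self-contained sketch of the basic round trip (the file name example.gexf and the toy graph are placeholders for illustration):

import networkx as nx

# Build a small graph and write it out in GEXF format.
G = nx.Graph()
G.add_edge('a', 'b', weight=0.5)
nx.write_gexf(G, 'example.gexf')

# Read it back. GEXF stores node ids as strings, so node_type can be
# used to convert them on load (str keeps them unchanged).
H = nx.read_gexf('example.gexf', node_type=str)
print(H.nodes(data=True))
print(H.edges(data=True))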
Example 1: prepare_data_for_pr
def prepare_data_for_pr(topic_id, date, window_size, topicname, real_topic_id):
    tmp_file = tempfile.NamedTemporaryFile(delete=False)
    ds_tmp_file = tempfile.NamedTemporaryFile(delete=False)
    topic = topicname
    if not topic:
        return None
    #g, gg, new_attribute_dict, ds_dg, ds_udg, ds_new_attribute_dict = make_network(topic, date, window_size, attribute_add=False)
    key = str(real_topic_id) + '_' + str(date) + '_' + str(window_size)
    g = nx.read_gexf(str(GRAPH_PATH) + str(key) + '_g_graph.gexf')
    ds_dg = nx.read_gexf(str(GRAPH_PATH) + str(key) + '_ds_dg_graph.gexf')
    if not g or not ds_dg:
        return None
    N = len(g)
    print 'topic source network size %s' % N
    ds_N = len(ds_dg)
    print 'topic direct superior network size %s' % ds_N
    if not N or not ds_N:
        return None
    '''
    Store the network structure in temporary files; the actual writing
    is done by the helper write_tmp_file.
    '''
    print 'start PageRank tmp_file, ds_tmp_file'
    tmp_file = write_tmp_file(tmp_file, g, N)
    ds_tmp_file = write_tmp_file(ds_tmp_file, ds_dg, ds_N)
    print 'end PageRank tmp_file, ds_tmp_file'
    return tmp_file, N, ds_tmp_file, ds_N
Developer: huxiaoqian | Project: case | Lines: 31 | Source: area.py
Example 2: occurenceCounter
def occurenceCounter(charList, graphFile, bookNetworksPath):
    g = nx.read_gexf(graphFile)

    if not charList:
        # Get characters from overall.gexf graph
        overallGraphFile = bookNetworksPath + "overall.gexf"
        overall_g = nx.read_gexf(overallGraphFile)
        overallChars = nx.nodes(overall_g)
        # Sort characters by name
        sortedChars = sorted(overallChars)
        return sortedChars
    else:
        charList = [item for item in charList]
        for index, item in enumerate(charList):
            currentChar = None
            for node in nx.nodes(g):
                if node == item:
                    occurrence = 1
                    charList[index] = (item, occurrence)
                    currentChar = node
            # If current character is not present in the current chapter assign 0 influence.
            if not currentChar:
                occurrence = 0
                charList[index] = (item, occurrence)
        return charList
Developer: pivots | Project: networkx-sna-fiction | Lines: 30 | Source: snaData.py
Example 3: analyze_Reff_chains
def analyze_Reff_chains():
    fl_m20 = nx.read_gexf("Flute_vs_EpiFast/Flute_match20.gexf")
    reffs_fl_m20 = tv.build_Reff_txm_chain(fl_m20)

    ef_m20 = nx.read_gexf("Flute_vs_EpiFast/Epifast_match20.gexf")
    reffs_ef_m20 = tv.build_Reff_txm_chain(ef_m20)

    max_gens = 31
Developer: bryanleroylewis | Project: TreeViz | Lines: 9 | Source: Flute_EF_comparison.py
Example 4: add_LPU
def add_LPU(self, data_file, gexf_file=None, LPU=None, win=None,
            is_input=False):
    '''
    Add data associated with a specific LPU to a visualization.

    To add a plot containing neurons from a particular LPU,
    the LPU needs to be added to the visualization using this
    function. Note that outputs from multiple neurons can
    be visualized using the same visualizer object.

    Parameters
    ----------
    data_file: str
        Location of the h5 file generated by neurokernel
        containing the output of the LPU.
    gexf_file: str
        Location of the gexf file describing the LPU.
        If not specified, it will be assumed that the h5 file
        contains input.
    LPU: str
        Name of the LPU. Will be used as an identifier to add plots.
        For input signals, the name of the LPU will be prepended
        with 'input_'. For example::

            V.add_LPU('vision_in.h5', LPU='vision')

        will create the LPU identifier 'input_vision'.
        Therefore, adding a plot depicting this input can be done by::

            V.add_plot({'type': 'image', 'imlim': [-0.5, 0.5]}, LPU='input_vision')
    win: slice/list
        Can be used to limit the visualization to a specific time window.
    '''
    if gexf_file and not is_input:
        self._graph[LPU] = nx.read_gexf(gexf_file)

        # Map neuron ids to index into output data array:
        self._id_to_data_idx[LPU] = {m: i for i, m in
            enumerate(sorted([int(n) for n, k in
                self._graph[LPU].nodes_iter(True) if k['spiking']]))}
    else:
        if LPU:
            LPU = 'input_' + str(LPU)
        else:
            LPU = 'input_' + str(len(self._data))
        if gexf_file:
            self._graph[LPU] = nx.read_gexf(gexf_file)

    if not LPU:
        LPU = len(self._data)

    self._data[LPU] = np.transpose(sio.read_array(data_file))
    if win is not None:
        self._data[LPU] = self._data[LPU][:, win]
    if self._maxt:
        self._maxt = min(self._maxt, self._data[LPU].shape[1])
    else:
        self._maxt = self._data[LPU].shape[1]
Developer: AdamRTomkins | Project: libSpineML2NK | Lines: 56 | Source: visualizer.py
Example 5: eigValue
def eigValue(charList, graphFile, bookNetworksPath):
    # Compute eigenvectors for all characters in the current chapter graph.
    g = nx.read_gexf(graphFile)
    eigCentrality = nx.eigenvector_centrality(g, max_iter=100, tol=1.0e-6, nstart=None, weight="Weight")
    eigValues = eigCentrality.values()

    # NORMALISE eigenvector values
    d = decimal.Decimal
    maxEig = max(eigValues)
    minEig = min(eigValues)
    maxMinusMin = d(maxEig) - d(minEig)

    if not charList:
        # Get top 10 overall characters from overall.gexf graph
        overallGraphFile = bookNetworksPath + "overall.gexf"
        overall_g = nx.read_gexf(overallGraphFile)
        overallEigCent = nx.eigenvector_centrality(overall_g, max_iter=100, tol=1.0e-6, nstart=None, weight="Weight")

        # sortedCentrality = dict(sorted(overallEigCent.iteritems(), key=itemgetter(1), reverse=True)[:10])
        sortedCentrality = dict(sorted(overallEigCent.iteritems(), key=itemgetter(1), reverse=True))
        sortedCentrality = sorted(sortedCentrality.iteritems(), key=itemgetter(1), reverse=True)
        charList = [seq[0] for seq in sortedCentrality]
        return charList
    else:
        charList = [item for item in charList]
        for index, item in enumerate(charList):
            currentChar = None
            for key, value in eigCentrality.iteritems():
                if key == item:
                    # Unnormalised version...
                    charList[index] = (key, str(value))
                    currentChar = key
                # if key == item:
                #     numerator = d(value) - d(minEig)
                #     if numerator == 0:
                #         charList[index] = (key, str(0))
                #     else:
                #         norm_value = (d(value) - d(minEig)) / d(maxMinusMin)
                #         charList[index] = (key, str(norm_value))
                #     currentChar = key
            # If current character is not present in the current chapter assign 0 influence.
            if not currentChar:
                charList[index] = (item, 0)
        return charList
Developer: pivots | Project: networkx-sna-fiction | Lines: 48 | Source: snaData.py
Example 6: betweenValue
def betweenValue(charList, graphFile, bookNetworksPath):
    # Compute betweenness for all characters in the current chapter graph.
    g = nx.read_gexf(graphFile)
    betCentrality = nx.betweenness_centrality(g, k=None, normalized=True, weight="Weight", endpoints=False, seed=None)
    betweenValues = betCentrality.values()

    # NORMALISE betweenness values
    d = decimal.Decimal
    maxBetween = max(betweenValues)
    minBetween = min(betweenValues)
    maxMinusMin = d(maxBetween) - d(minBetween)

    if not charList:
        # Get top 10 overall characters from overall.gexf graph
        overallGraphFile = bookNetworksPath + "overall.gexf"
        overall_g = nx.read_gexf(overallGraphFile)
        overallBetweenCent = nx.betweenness_centrality(
            overall_g, k=None, normalized=True, weight="Weight", endpoints=False, seed=None
        )

        # Quick fix for getting all characters.
        # sortedCentrality = dict(sorted(overallBetweenCent.iteritems(), key=itemgetter(1), reverse=True)[:10])
        sortedCentrality = dict(sorted(overallBetweenCent.iteritems(), key=itemgetter(1), reverse=True))
        sortedCentrality = sorted(sortedCentrality.iteritems(), key=itemgetter(1), reverse=True)
        charList = [seq[0] for seq in sortedCentrality]
        return charList
    else:
        charList = [item for item in charList]
        for index, item in enumerate(charList):
            currentChar = None
            for key, value in betCentrality.iteritems():
                if key == item:
                    numerator = d(value) - d(minBetween)
                    if numerator == 0:
                        charList[index] = (key, str(0))
                    else:
                        norm_value = (d(value) - d(minBetween)) / d(maxMinusMin)
                        charList[index] = (key, str(norm_value))
                    currentChar = key
            # If current character is not present in the current chapter assign 0 influence.
            if not currentChar:
                charList[index] = (item, 0)
        return charList
Developer: pivots | Project: networkx-sna-fiction | Lines: 48 | Source: snaData.py
Example 7: read_from_json_gexf
def read_from_json_gexf(fname=None, label_field_name='APIs', conv_undir=False):
    '''
    Load a graph file (.gexf or .json only supported)
    :param fname: graph file name
    :param label_field_name: field denoting the node label
    :param conv_undir: convert to undirected graph or not
    :return: graph in networkx format
    '''
    if not fname:
        logging.error('no valid path or file name')
        return None
    else:
        try:
            try:
                with open(fname, 'rb') as File:
                    org_dep_g = json_graph.node_link_graph(json.load(File))
            except:
                org_dep_g = nx.read_gexf(path=fname)
            g = nx.DiGraph()
            for n, d in org_dep_g.nodes_iter(data=True):
                g.add_node(n, attr_dict={'label': '-'.join(d[label_field_name].split('\n'))})
            g.add_edges_from(org_dep_g.edges_iter())
        except:
            logging.error("unable to load graph from file: {}".format(fname))
            # return 0
    logging.debug('loaded {} a graph with {} nodes and {} edges'.format(fname, g.number_of_nodes(), g.number_of_edges()))
    if conv_undir:
        g = nx.Graph(g)
        logging.debug('converted {} as undirected graph'.format(g))
    return g
Developer: SongFGH | Project: subgraph2vec_tf | Lines: 31 | Source: make_subgraph2vec_corpus.py
Example 8: read_params_file
def read_params_file(paramsDir, fName):
    """Read an xml parameters file into a list of tuples

    Each tuple consists of the following:
        entityType    simulator, economy or bank
        entityName
        dataType      parameter or attribute
        dataName
        dataValue     a string
    @param fName:
    """
    tree = ET.parse(paramsDir + fName)
    pList = read_params_from_xml(tree.getroot(), fName)
    graphFile = None
    for eType, eName, dataType, dataName, dataValue in pList:
        if dataName == "graphFile":
            graphFile = dataValue
            break
    if graphFile is None:
        msg = "No graph file specified in %s" % fName
        logger.error(msg)
        raise ParameterError(msg)
    basicGraph = nx.read_gexf(paramsDir + graphFile)  # This gives us a graph of possible lending
    return pList, basicGraph
Developer: B-Leslie | Project: systemshock | Lines: 26 | Source: utils.py
Example 9: __update_structure
def __update_structure(self):
    imported_graph = nx.read_gexf(self.file_path)
    if not isinstance(imported_graph, nx.Graph):
        raise Exception("Imported graph is not undirected")
    self.structure = nx.convert_node_labels_to_integers(imported_graph)
Developer: jim-pansn | Project: sybil_detection | Lines: 7 | Source: graphs.py
Example 10: test_read_simple_directed_graphml
def test_read_simple_directed_graphml(self):
    G = self.simple_directed_graph
    H = nx.read_gexf(self.simple_directed_fh)
    assert_equal(sorted(G.nodes()), sorted(H.nodes()))
    assert_equal(sorted(G.edges()), sorted(H.edges()))
    assert_equal(sorted(G.edges(data=True)),
                 sorted(H.edges(data=True)))
    self.simple_directed_fh.seek(0)
Developer: rainest | Project: dance-partner-matching | Lines: 7 | Source: test_gexf.py
Example 11: GET
def GET(self):
    web.header('Access-Control-Allow-Origin', '*')
    output = dict()
    getInput = web.input(start='2012-3-03 16:00:00', end='2012-3-03 21:00:00')
    start_time = pd.to_datetime(getInput.start).tz_localize('US/Eastern') - pd.DateOffset(hours=10)
    end_time = pd.to_datetime(getInput.end).tz_localize('US/Eastern') - pd.DateOffset(hours=10)

    output_nodes = set()

    all_schedules = pd.read_json('all_schedules.json')
    allnodes = pd.read_json('allnodes.json')
    nodes = set(allnodes.nodes)

    all_schedules['end'] = all_schedules['end'].map(lambda x: datetime.datetime.fromtimestamp(x/1000000000))
    all_schedules['start'] = all_schedules['start'].map(lambda x: datetime.datetime.fromtimestamp(x/1000000000))

    night_sched = all_schedules[(all_schedules.start >= start_time) & (all_schedules.end <= end_time)]

    on_nodes = set()
    for idx, show in night_sched.iterrows():
        on_nodes.add(show[2])
    off_nodes = nodes.difference(on_nodes)

    imported_graph = nx.read_gexf('./finished_network3.gexf')

    for i in off_nodes:
        try:
            imported_graph.remove_node(i)
        except:
            continue

    pr = nx.pagerank(imported_graph, alpha=0.9, weight='newweight', tol=.01, max_iter=200)

    output['nodes'] = [(i, v*1000000) for i, v in pr.items()]
    output['input_params'] = getInput
    return json.dumps(output)
Developer: nosarcasm | Project: philoexplorer | Lines: 33 | Source: nodes_active.py
Example 12: dump_melodic_phrases_in_network
def dump_melodic_phrases_in_network(network_file, output_dir, myDatabase, base_name):
    """
    This function dumps all the mp3 files for the patterns in the 'network' (gexf file)
    """
    cmd1 = "select file.filename, pattern.start_time, pattern.end_time from pattern join file on (pattern.file_id = file.id) where pattern.id = %d"

    # reading the network
    full_net = nx.read_gexf(network_file)
    labels = nx.get_node_attributes(full_net, 'label')
    patterns = full_net.nodes()

    try:
        con = psy.connect(database=myDatabase, user='sankalp')
        cur = con.cursor()
        for ii, pattern in enumerate(patterns):
            pattern = labels[pattern]
            cur.execute(cmd1 % int(pattern))
            filename, start, end = cur.fetchone()
            clipAudio(output_dir, os.path.join(base_name, filename), start, end, int(pattern))
    except psy.DatabaseError, e:
        print 'Error %s' % e
        if con:
            con.rollback()
        con.close()
        sys.exit(1)
Developer: sankalpg | Project: WebInterfaces_MelodicPatterns | Lines: 29 | Source: graph_generate.py
Example 13: create_genealogy
def create_genealogy(graph_id='deusto.aitoralmeida'):
    print 'Loading graph'
    merged = nx.read_gexf('merged_genealogy.gexf', node_type=None)
    print 'Loading edge index'
    dict_edges = load_merged_edge_index()

    print 'Building genealogy'
    to_process = [graph_id]
    tree = set()
    # get all the ancestors in tree
    while len(to_process) > 0:
        current = to_process[0]
        to_process.remove(current)
        tree.add(current)
        try:
            to_process += dict_edges[current]
        except:
            pass

    print 'Creating graph'
    G = nx.DiGraph()
    for person in tree:
        print person
        G.add_node(person, name=merged.node[person]['name'])
        for target in merged.edge[person].keys():
            # add edges with the ancestors only
            if target in tree:
                G.add_edge(person, target)

    print 'Writing file'
    nx.write_gexf(G, 'created_genealogy.gexf')
Developer: aitoralmeida | Project: academic-genealogy | Lines: 31 | Source: genealogy_merger.py
Example 14: write_estrada
def write_estrada(path_to_file):
    graphe = nx.read_gexf(path_to_file + ".gexf")
    if type(graphe) == nx.MultiDiGraph:
        print 'has_multiple_edges'
        graphe = nx.DiGraph(graphe)
    if type(graphe) == nx.MultiGraph:
        print 'has_multiple_edges'
        graphe = nx.Graph(graphe)

    adj_mat = nx.to_numpy_matrix(graphe, weight=None)
    adj_mat = adj_mat.tolist()
    w, v = eig(adj_mat)
    argmax = np.argmax(w)
    sh = np.sinh(w)
    square = np.square(v)
    gamma = square[:, argmax]
    gammaideal = np.dot(square, sh) / sh[argmax]
    delta = 0.5 * np.log10(gamma / gammaideal)
    deltaplus = delta[(delta > 0)]
    deltaminus = delta[(delta < 0)]
    eplus = math.sqrt(mean(deltaplus ** 2))
    eminus = math.sqrt(mean(deltaminus ** 2))
    if math.isnan(eplus) or math.isnan(eminus) or eplus == float('inf') or eminus == float('inf'):
        print gamma
        print gammaideal
        print deltaplus
        print deltaminus

    classe = 1
    if eminus > 0.01: classe += 1
    if eplus > 0.01: classe += 2
    add_results_estrada(eplus, eminus, classe, path_to_file)
Developer: FourquetDavid | Project: evo | Lines: 33 | Source: estrada.py
Example 15: __prepare__
def __prepare__(data_dir):
    """
    Takes each file in .gexf format and converts it into the
    igraph-readable GraphML format.
    """
    for f in glob.glob(os.path.join(data_dir, "*.gexf")):
        print(f)
        newFileName = f[:f.rfind('.')] + ".graphml"
        if os.path.exists(newFileName):
            continue
        G = nx.read_gexf(f)
        for node in G.node:
            for attrib in G.node[node]:
                if type(G.node[node][attrib]) == dict:
                    # graphML doesn't play nice with dictionaries as attributes.
                    # this line just deletes positional information.
                    G.node[node][attrib] = 0
        newFileName = f[:f.rfind('.')] + ".graphml"
        nx.write_graphml(G, newFileName)
Developer: Lab41 | Project: circulo-abridged | Lines: 25 | Source: run.py
Example 16: restore_path
def restore_path(meta_id, start_index, count, k):  # k: weight parameter for the estimate
    test_path = TestPath(meta_id, start_index, count)
    visual_map4000 = nx.read_gexf('/home/elvis/map/analize/analizeTime/countXEntTime/visualMapTop4000.gexf')
    paths = []
    # while len(paths) == 0:
    #     paths = find_paths(test_path.path_time[0], test_path.path_time[-1], visual_map4000, k)
    #     k *= 1.1
    paths = find_paths(test_path.path_time[0], test_path.path_time[-1], visual_map4000, k)
    if len(paths) == 0:
        print_cache.append(['{}'.format(meta_id), 0, ])
        # print('{},not find'.format(meta_id))
        return
    delta_time = (test_path.path_time[-1][1] - test_path.path_time[0][1]).total_seconds()
    map_paths = visual_to_map(paths, delta_time)
    best_path = best_alternative(map_paths, delta_time)
    file_path = '/home/elvis/map/analize/path_restore/restore_path/{k}/{meta_id}/{meta_id}-{s}-{c}'.format(
        meta_id=meta_id,
        s=start_index,
        c=count, k=k)
    to_path_txt(map_paths, file_path, mode='all')
    best_file_path = '/home/elvis/map/analize/path_restore/restore_path/{k}/{meta_id}/best'.format(meta_id=meta_id, k=k)
    to_path_txt(best_path, best_file_path, mode='best')
    print_cache.append(['{}'.format(meta_id), ])
    # print('{}'.format(meta_id), end=' ')
    similarity(best_path, test_path.path_time)
Developer: elvis2els | Project: map | Lines: 25 | Source: A_star_path.py
Example 17: abrirred
def abrirred():
    global G
    global fpname
    global file_path
    file_path = tkFileDialog.askopenfilename(title="Open file", filetypes=[("GEXF files", ".gexf")])
    if file_path != "":
        G = nx.read_gexf(file_path, relabel=False)
        fp = file_path.split("/")
        fpname = fp[-1]
        print "Loaded network:", fpname
        editmenu.entryconfig(1, state=NORMAL)   ## Statistics
        editmenu.entryconfig(9, state=NORMAL)   ## Sigma-JS
        editmenu.entryconfig(10, state=NORMAL)  ## D3 HTML Graph
    else:
        print "You have to choose a file"
        return False
    tmpdir = "tmp/"
    shutil.copyfile(file_path, tmpdir + "tmpfile.gexf")
    fp = file_path.split("/")
    fpname = fp[-1]
    fname = file("tmp/enamgexf.nme", "w")
    print >> fname, fpname[0:-4]
    fname.close()
    return False
Developer: paredespablo | Project: NodosApp | Lines: 27 | Source: NodosApp+0.9.py
Example 18: main
def main():
    # the description link graph
    g = nx.read_gexf('data/subreddits_edged_by_description_links.gexf')
    # an empty graph for showing communities
    g1 = nx.Graph()

    communities = get_coalesced_communities(g)
    for c in communities:
        g1.add_node(c.name)
        g1.node[c.name]['size'] = len(c.members)

    count = 0
    ratio_weight = 0.0
    for c1, c2 in product(communities, communities):
        if c1.id == c2.id or g1.has_edge(c1.name, c2.name) or len(c1.members) > len(c2.members):
            continue
        overlap = len(c1.members & c2.members)
        if overlap > 0:
            g1.add_edge(c1.name, c2.name, weight=overlap / len(c1.members))
            ratio_weight += overlap / len(c1.members)
            count += 1

    average_weight_ratio = ratio_weight / count
    print "average weight ratio: %s" % str(average_weight_ratio)

    g1.remove_edges_from(filter(lambda x: x[2]['weight'] < average_weight_ratio, g1.edges(data=True)))
    print "%d subreddits included" % len(reduce(lambda x, y: x.union(y.members), communities, set()))

    nx.write_gexf(g1, 'test_coalesce.gexf')
Developer: TomDunn | Project: RedditGraph | Lines: 33 | Source: coalesce_communities.py
Example 19: classify
def classify(request, pk):
    # gets object based on id given
    graph_file = get_object_or_404(Document, pk=pk)
    # reads file into networkx graph based on extension
    if graph_file.extension() == ".gml":
        G = nx.read_gml(graph_file.uploadfile)
    else:
        G = nx.read_gexf(graph_file.uploadfile)
    # closes file so we can delete it
    graph_file.uploadfile.close()

    # loads the algorithm and tests the algorithm against the graph
    g_json = json_graph.node_link_data(G)
    # save graph into json file
    with open(os.path.join(settings.MEDIA_ROOT, 'graph.json'), 'w') as graph:
        json.dump(g_json, graph)
    with open(os.path.join(settings.MEDIA_ROOT, 'rf_classifier.pkl'), 'rb') as malgo:
        algo_loaded = pickle.load(malgo, encoding="latin1")
        dataset = np.array([G.number_of_nodes(), G.number_of_edges(), nx.density(G),
                            nx.degree_assortativity_coefficient(G), nx.average_clustering(G),
                            nx.graph_clique_number(G)])
        print(dataset)
        # creates X to test against
        X = dataset
        prediction = algo_loaded.predict(X)
        graph_type = check_prediction(prediction)
        graph = GraphPasser(G.number_of_nodes(), G.number_of_edges(), nx.density(G),
                            nx.degree_assortativity_coefficient(G), nx.average_clustering(G),
                            nx.graph_clique_number(G))

    # gives certain variables to the view
    return render(
        request,
        'classification/classify.html',
        {'graph': graph, 'prediction': graph_type}
    )
Developer: Kaahan | Project: networkclassification | Lines: 34 | Source: views.py
Example 20: importGexf
def importGexf(self, url):
    # TODO once files are stored in a standard upload directory this will need to be changed
    import platform
    if platform.system() == 'Windows':
        PATH = 'c:\\inetpub\\wwwroot\\pydev\\systemshock\\modellingengine\\fincat\\parameters\\'
    else:
        PATH = '/var/lib/geonode/src/GeoNodePy/geonode/modellingengine/fincat/parameters/'
    G = nx.read_gexf(PATH + url)

    # ensure the nodes are labelled with integers starting from 0
    # TODO might need to start from current number of nodes in G
    G = nx.convert_node_labels_to_integers(G, first_label=0)
    for node in G.nodes(data=True):
        nodeid = node[0]  # node array index 0 is the node id, index 1 is the attribute list
        attributes = node[1]
        attributes['guid'] = nodeid
        if 'wkt' in attributes:
            attributes['geometry'] = self.WKTtoGeoJSON(attributes['wkt'])
    for edge in G.edges(data=True):
        edgeid = unicode(edge[0]) + '-' + unicode(edge[1])
        attributes = edge[2]
        attributes['guid'] = edgeid

    self.layergraphs.append(G)  # add the new layer graph to the overall network
    return True
Developer: CentreForRiskStudies | Project: systemshock | Lines: 29 | Source: network.py
Note: The networkx.read_gexf examples on this page were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the code remains with the original authors. Please consult each project's license before redistributing or reusing the code; do not republish without permission.