
Python networkx.read_gpickle Function Code Examples


This article collects typical usage examples of the Python function networkx.read_gpickle. If you are wondering what read_gpickle does, how to call it, or what it looks like in real code, the hand-picked examples below should help.



Shown below are 20 code examples of the read_gpickle function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
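Before the collected examples, here is a minimal, self-contained round-trip sketch (the toy graph and the file name example.gpickle are placeholders, not taken from any project below). Note that write_gpickle/read_gpickle were deprecated in NetworkX 2.6 and removed in NetworkX 3.0; the fallback branch shows the documented replacement, the standard-library pickle module.

import pickle

import networkx as nx

# Build a small graph to persist.
G = nx.Graph()
G.add_edge("a", "b", weight=0.5)

try:
    # NetworkX < 3.0 ships dedicated gpickle helpers.
    nx.write_gpickle(G, "example.gpickle")
    H = nx.read_gpickle("example.gpickle")
except AttributeError:
    # NetworkX >= 3.0 removed them; plain pickle is the documented replacement.
    with open("example.gpickle", "wb") as f:
        pickle.dump(G, f, pickle.HIGHEST_PROTOCOL)
    with open("example.gpickle", "rb") as f:
        H = pickle.load(f)

print(H.edges(data=True))  # the weighted edge survives the round trip

Each example below is a snippet from a larger source file and assumes "import networkx as nx" (and, where used, the project's own helper modules) from its surrounding file.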

Example 1: load_train_test_graphs

def load_train_test_graphs(dataset, recache_input):
    raw_mat_path = 'data/{}.npz'.format(dataset)
    train_graph_path = 'data/{}/train_graph.pkl'.format(dataset)
    test_graph_path = 'data/{}/test_graph.pkl'.format(dataset)

    if recache_input:
        print('loading sparse matrix from {}'.format(raw_mat_path))
        m = load_sparse_csr(raw_mat_path)

        print('splitting train and test...')
        train_m, test_m = split_train_test(
            m,
            weights=[0.9, 0.1])

        print('converting to nx.DiGraph')
        train_g = nx.from_scipy_sparse_matrix(train_m, create_using=nx.DiGraph(), edge_attribute='sign')
        test_g = nx.from_scipy_sparse_matrix(test_m, create_using=nx.DiGraph(), edge_attribute='sign')
                
        print('saving train and test graphs...')
        nx.write_gpickle(train_g, train_graph_path)
        nx.write_gpickle(test_g, test_graph_path)
    else:
        print('loading train and test graphs...')
        train_g = nx.read_gpickle(train_graph_path)
        test_g = nx.read_gpickle(test_graph_path)
    return train_g, test_g
Author: xiaohan2012 | Project: snpp | Lines: 26 | Source: data.py


Example 2: main

def main():
    rev_forward_g = nx.read_gpickle("rev_forward.gpickle")
    rev_backward_g = nx.read_gpickle("rev_backward.gpickle")
    with open("ids.pickle", "rb") as f:
        ids = pickle.load(f)

    cutoff = 12
    dists = []
    for start, forward_rounds, backward_rounds, end in ids:
        s = count(start)
        if not 4 <= s <= 6:
            continue

        # backward extension
        backward_extension_rounds = 3
        rounds = forward_rounds + backward_rounds + backward_extension_rounds

        for p, w in propagate(
            rev_backward_g,
            end,
            backward_extension_rounds - 1,
            cutoff
        ):
            p = add_last_round(p)
            dists.append((start, p, w, rounds))

            print("{} ... X ... {} with probability {}, {} rounds".format(
                start,
                " <- ".join(str(v) for v in p),
                math.exp(-w),
                rounds
            ))

    with open("dists.pickle", "wb") as f:
        pickle.dump(dists, f)
Author: jamella | Project: idc | Lines: 35 | Source: create_dist.py


Example 3: ntwks_to_matrices

def ntwks_to_matrices(in_files, edge_key):
    first = nx.read_gpickle(in_files[0])
    files = len(in_files)
    nodes = len(first.nodes())
    matrix = np.zeros((nodes, nodes, files))
    for idx, name in enumerate(in_files):
        graph = nx.read_gpickle(name)
        for u, v, d in graph.edges(data=True):
            graph[u][v]['weight'] = d[edge_key]  # set the requested edge attribute as the weight
        matrix[:, :, idx] = nx.to_numpy_matrix(graph)  # retrieve the adjacency matrix
    return matrix
Author: Alunisiira | Project: nipype | Lines: 11 | Source: nbs.py


Example 4: _build_and_store_new_graph

def _build_and_store_new_graph(data_file, name=""):
    """
    Reads the nodes and edges files stored in the 1.1 version and build a new Graph compatible with 2.0
    :param data_file: path to temporary directory
    :param name: name of the network
    :return: new Graph compatible with version 2.0
    """
    data_file += name
    edges = networkx.read_gpickle(data_file + "_edges" + ".gpickle")
    nodes = networkx.read_gpickle(data_file + "_nodes" + ".gpickle")
    net = networkx.Graph()
    net.add_nodes_from(nodes)
    net.add_edges_from(edges)
    return net
Author: the-virtual-brain | Project: tvb-data | Lines: 14 | Source: networkx_migration.py


Example 5: diff_history

def diff_history(directory, length=1):
    glob_dir = os.path.join(directory, "*.pickle.tar.gz")
    pickle_files = glob.glob(glob_dir)
    pickle_files = sorted(pickle_files)
    pairs = list(zip(pickle_files, pickle_files[1:]))
    pairs = pairs[-1 * length:]
    diffs = []
    for fileA, fileB in pairs:
        graphA = nx.read_gpickle(fileA)
        graphB = nx.read_gpickle(fileB)
        diff = compare(graphA, graphB)
        # remove render folder which is timestamps
        diffs.append(diff)

    return diffs
Author: coana | Project: ank_v3_dev | Lines: 15 | Source: diff.py


Example 6: __init__

	def __init__(self, handle):
		super(SourcePairCombiner, self).__init__()
		self.handle = handle
		self.dirhandle = 'reassortant_edges'
		self.G = nx.read_gpickle('{0} Full Complement Graph.pkl'.format(self.handle))
		self.current_sourcepair = None # current sourcepair graph
		self.current_noi = None # current node of interest
Author: ericmjl | Project: influenza-reassortment-detector | Lines: 7 | Source: source_pair_combiner.py


Example 7: __init__

 def __init__(self, filename):
     # self.graph = nx.barabasi_albert_graph(100, 5)
     self.graph = nx.read_gpickle(filename)  # the pickled graph makes a fresh nx.Graph() unnecessary
     self.nodes = self.graph.nodes()
     self.graph = self.mapper()
     self.edges = self.graph.edges()
Author: gauravcse | Project: Graph-Learning | Lines: 7 | Source: real_world.py


Example 8: main

def main():
  seed(0)  # set seed
  # get graph info
  G = nx.read_gpickle("input/graphMTC_CentroidsLength6.gpickle")  # there is also the choice of a proper multidigraph: nx.read_gpickle("input/graphMTC_CentroidsLength5.gpickle")
  G = nx.freeze(G)  # prevents edges or nodes from being added or deleted
  # get OD info. This is in the format of a dict keyed by od, like demand[sd1][sd2] = 200000.
  demand = bd.build_demand('input/BATS2000_34SuperD_TripTableData.csv', 'input/superdistricts_centroids.csv')
  # get earthquake info
  q = QuakeMaps('input/20130210_mtc_total_lnsas3.pkl', 'input/20130210_mtc_magnitudes3.pkl', 'input/20130210_mtc_faults3.pkl', 'input/20130210_mtc_weights3.pkl', 'input/20130210_mtc_scenarios3.pkl')
  print('weights: ', q.weights)
  q.num_sites = len(q.lnsas[0])
  # determine which scenarios you want to run
  good_indices = pick_scenarios(q.lnsas, q.weights)

  travel_index_times = []
  index = 0
  # loop over scenarios
  print('size of lnsas: ', len(q.lnsas))
  for scenario in q.lnsas:  # each 'scenario' has 1557 values of lnsa, i.e. one per site
    if index in good_indices:
      print('index: ', index)
      (bridges, flow, path, path2) = run_simple_iteration(G, scenario, demand, False)
      travel_index_times.append((index, bridges, flow, path, path2))
      if index % 1000 == 0:
        util.write_2dlist(time.strftime("%Y%m%d") + '_bridges_flow_paths4.txt', travel_index_times)
    index += 1  # IMPORTANT
  util.write_2dlist(time.strftime("%Y%m%d") + '_bridges_flow_paths4.txt', travel_index_times)
  print('the number of scenarios I considered doing: ', index)
  print('the number of scenarios I actually did: ', len(travel_index_times))
Author: muenchner | Project: ita | Lines: 30 | Source: travel_main_simple.py


Example 9: _read_celltype_graph

    def _read_celltype_graph(self, celltypes_file, format="gml"):
        """
        Read celltype-celltype connectivity graph from file.

        celltypes_file -- the path of the file containing
        the graph.
        
        format -- format of the file. allowed values: gml, graphml, edgelist, pickle, yaml.

        """
        start = datetime.now()
        celltype_graph = None
        try:
            if format == "gml":
                celltype_graph = nx.read_gml(celltypes_file)
            elif format == "edgelist":
                celltype_graph = nx.read_edgelist(celltypes_file)
            elif format == "graphml":
                celltype_graph = nx.read_graphml(celltypes_file)
            elif format == "pickle":
                celltype_graph = nx.read_gpickle(celltypes_file)
            elif format == "yaml":
                celltype_graph = nx.read_yaml(celltypes_file)
            else:
                print "Unrecognized format %s" % (format)
        except Exception, e:
            print e
Author: BhallaLab | Project: thalamocortical | Lines: 27 | Source: traubnet.py


Example 10: load_data

def load_data():
    start = time.time()
    try:
        print("Loading data from /data pickles and hfd5 adj matrices")
        f = h5py.File('data/cosponsorship_data.hdf5', 'r')
        for chamber in ['house', 'senate']:
            for congress in SUPPORTED_CONGRESSES:
                adj_matrix_lookup[(chamber, congress)] = np.asarray(f[chamber + str(congress)])

                igraph_graph = igraph.load("data/" + chamber + str(congress) + "_igraph.pickle", format="pickle")
                igraph_graph_lookup[(chamber, congress, False)] = igraph_graph

                nx_graph = nx.read_gpickle("data/" + chamber + str(congress) + "_nx.pickle")
                nx_graph_lookup[(chamber, congress, False)] = nx_graph
    except IOError as e:
        print("Loading data from cosponsorship files")
        f = h5py.File("data/cosponsorship_data.hdf5", "w")
        for chamber in ['house', 'senate']:
            for congress in SUPPORTED_CONGRESSES:
                print("Starting %s %s" % (str(congress), chamber))
                adj_matrix = load_adjacency_matrices(congress, chamber)
                data = f.create_dataset(chamber + str(congress), adj_matrix.shape, dtype='f')
                data[0: len(data)] = adj_matrix

                # igraph
                get_cosponsorship_graph(congress, chamber, False).save("data/" + chamber + str(congress) + "_igraph.pickle", "pickle")
                # networkx
                nx.write_gpickle(get_cosponsorship_graph_nx(congress, chamber, False), "data/" + chamber + str(congress) + "_nx.pickle")

                print("Done with %s %s" % (str(congress), chamber))
    print("Data loaded in %d seconds" % (time.time() - start))
Author: TomWerner | Project: congress_cosponsorship | Lines: 31 | Source: project.py


Example 11: main

def main():
  seed(0) #set seed
  #get graph info
  G = nx.read_gpickle("input/graphMTC_CentroidsLength5.gpickle") #noCentroidsLength15.gpickle") #does not have centroidal links
  print '|V| = ', len(G.nodes())
  print '|E| = ', len(G.edges())
  G = nx.freeze(G) #prevents edges or nodes to be added or deleted
  #get od info. This is in format of a dict keyed by od, like demand[sd1][sd2] = 200000.
  demand = bd.build_demand('input/BATS2000_34SuperD_TripTableData.csv', 'input/superdistricts_centroids.csv') #bd.build_demand('input/BATS2000_34SuperD_TripTableData.csv', 'input/superdistricts_centroids.csv')
  #get earthquake info
  q = QuakeMaps('input/20130210_mtc_total_lnsas3.pkl', 'input/20130210_mtc_magnitudes3.pkl', 'input/20130210_mtc_faults3.pkl', 'input/20130210_mtc_weights3.pkl', 'input/20130210_mtc_scenarios3.pkl') #(input/20130107_mtc_total_lnsas1.pkl', 'input/20130107_mtc_magnitudes1.pkl', 'input/20130107_mtc_faults1.pkl', 'input/20130107_mtc_weights1.pkl', 'input/20130107_mtc_scenarios1.pkl') #totalfilename=None, magfilename=None, faultfilename=None, weightsfilename=None, scenariofilename=None): 'input/20130210_mtc_total_lnsas3.pkl', 'input/20130210_mtc_magnitudes3.pkl', 'input/20130210_mtc_faults3.pkl', 'input/20130210_mtc_weights3.pkl', 'input/20130210_mtc_scenarios3.pkl') #(


  q.num_sites = len(q.lnsas[0])
  #determine which scenarios you want to run
  good_indices = pick_scenarios(q.lnsas, q.weights)
  
  travel_index_times = []
  index = 0
  #loop over scenarios
  for scenario in q.lnsas:  # each 'scenario' has 1557 values of lnsa, i.e. one per site
    if index in good_indices:
      print('index: ', index)
      (travel_time, vmt) = run_iteration(G, scenario, demand)
      travel_index_times.append((index, travel_time, vmt))
      if index % 100 == 0:
        util.write_2dlist(time.strftime("%Y%m%d") + '_travel_time.txt', travel_index_times)
    index += 1  # IMPORTANT
  util.write_2dlist(time.strftime("%Y%m%d") + '_travel_time.txt', travel_index_times)
Author: muenchner | Project: ita | Lines: 30 | Source: travel_main.py


Example 12: experiment_4

def experiment_4():
    G = nx.Graph()
    G.add_edge(0, 11, weight=91)
    G.add_edge(1, 11, weight=72)
    G.add_edge(1, 13, weight=96)
    G.add_edge(2, 13, weight=49)
    G.add_edge(2, 6, weight=63)
    G.add_edge(2, 3, weight=31)
    G.add_edge(3, 9, weight=98)
    G.add_edge(3, 7, weight=1)
    G.add_edge(3, 12, weight=59)
    G.add_edge(4, 7, weight=6)
    G.add_edge(4, 9, weight=6)
    G.add_edge(4, 8, weight=95)
    G.add_edge(5, 11, weight=44)
    G.add_edge(6, 11, weight=53)
    G.add_edge(8, 10, weight=2)
    G.add_edge(8, 12, weight=48)
    G.add_edge(9, 12, weight=32)
    G.add_edge(10, 14, weight=16)
    G.add_edge(11, 13, weight=86)

    G = nx.read_gpickle('G.gpickle')  # note: this replaces the hand-built graph above
    
    path_nx = nx.dijkstra_path(G, 0, 14)
    path = dijkstra(G, 0, 14, True)
    if path_cost(G, path) > path_cost(G, path_nx):
        print('Error')
    else:
        print('Correct')
        
    return locals()
Author: aweinstein | Project: path_planning | Lines: 32 | Source: dijkstra.py


Example 13: remove_unconnected_graphs_and_threshold

def remove_unconnected_graphs_and_threshold(in_file):
    import nipype.interfaces.cmtk as cmtk
    import nipype.pipeline.engine as pe
    import os
    import os.path as op
    import networkx as nx
    from nipype.utils.filemanip import split_filename
    connected = []
    if in_file is None or in_file == [None]:
        return None
    elif len(in_file) == 0:
        return None
    graph = nx.read_gpickle(in_file)
    if not graph.number_of_edges() == 0:
        connected.append(in_file)
        _, name, ext = split_filename(in_file)
        filtered_network_file = op.abspath(name + '_filt' + ext)
    if connected == []:
        return None

    #threshold_graphs = pe.Node(interface=cmtk.ThresholdGraph(), name="threshold_graphs")
    threshold_graphs = cmtk.ThresholdGraph()
    from nipype.interfaces.cmtk.functional import tinv
    weight_threshold = 1  # tinv(0.95, 198-30-1)
    threshold_graphs.inputs.network_file = in_file
    threshold_graphs.inputs.weight_threshold = weight_threshold
    threshold_graphs.inputs.above_threshold = True
    threshold_graphs.inputs.edge_key = "weight"
    threshold_graphs.inputs.out_filtered_network_file = op.abspath(
        filtered_network_file)
    threshold_graphs.run()
    return op.abspath(filtered_network_file)
Author: GIGA-Consciousness | Project: structurefunction | Lines: 32 | Source: helpers.py


Example 14: topology

def topology(data, ell):
    """
    Computation of topological characteristics.
    
    Parameters
    ------------
    data : array of paths to the graphs
    ell : list of length scales
    """    

    for i in data:
        G = nx.read_gpickle(i)
        B = nx.number_of_edges(G)
        V = nx.number_of_nodes(G)
        Euler = V - B
        C = (B-V)/float(V)
        eu.append(Euler)
        c_t.append(C)
        vert.append(V)
        bran.append(B)

    plt.plot(ell, c_t, '.', label='v23')
    #
    #np.save('/backup/yuliya/v23/graphs_largedom/Euler.npy', eu)
    #np.save('/backup/yuliya/v23/graphs_largedom/C_t.npy', c_t)
    #np.save('/backup/yuliya/v23/graphs_largedom/V.npy', vert)
    #np.save('/backup/yuliya/v23/graphs_largedom/B.npy', bran)
    #np.save('/backup/yuliya/vsi01/graphs_largdom/time.npv23/graphs_largedom/y', t)
    plt.yscale('log')
Author: YuliyaKar | Project: Skeleton_to_graph-labeling- | Lines: 29 | Source: graph_analisys.py


Example 15: short_branches

def short_branches():
    """
    Visualization of short branches of the skeleton.
    
    """
    data1_sk = glob.glob('/backup/yuliya/vsi05/skeletons_largdom/*.h5')
    data1_sk.sort()

    for i,j, k in zip(d[1][37:47], data1_sk[46:56], ell[1][37:47]):
        g = nx.read_gpickle(i)
        dat = tb.openFile(j)
        skel = np.copy(dat.root.skel)
        bra = np.copy(dat.root.branches)
        mask = np.zeros_like(skel)    
        dat.close()
    
        length = nx.get_edge_attributes(g, 'length')
        number = nx.get_edge_attributes(g, 'number')
        num_dict = {}
        for m in number:
            for v in number[m]:
                num_dict.setdefault(v, []).append(m)
        find_br = ndimage.find_objects(bra)
        for l in list(length.keys()):
            if length[l]<0.5*k: #Criteria
                for b in number[l]:
                    mask[find_br[b-1]] = bra[find_br[b-1]]==b
        mlab.figure(bgcolor=(1,1,1), size=(1200,1200))
        mlab.contour3d(skel, colormap='hot')
        mlab.contour3d(mask)
        mlab.savefig('/backup/yuliya/vsi05/skeletons/short_bran/'+ i[42:-10] + '.png')
        mlab.close()
Author: YuliyaKar | Project: Skeleton_to_graph-labeling- | Lines: 32 | Source: graph_analisys.py


Example 16: graph_preprocessing_with_counts

def graph_preprocessing_with_counts(G_input=None, save_file=None):

    if not G_input:
        graph_file = os.path.join(work_dir, "adj_graph.p")
        G = nx.read_gpickle(graph_file)
    else:
        G = G_input.copy()

    print "Raw graph size:", G.size()
    print "Raw graph nodes", G.number_of_nodes()

    profile2prob = {l.split()[0]: float(l.split()[1]) for l in open(os.path.join(work_dir, 'profile_weight.txt'))}

    for edge in list(G.edges(data=True)):  # materialize the view so edges can be removed while iterating
        nodes = edge[:2]
        _weight = edge[2]['weight']
        _count = edge[2]['count']

        if _count < 3:
            G.remove_edge(*nodes)

    print "Pre-processed graph size", G.size()
    print "Pre-processed graph nodes", G.number_of_nodes()

    G.remove_nodes_from(nx.isolates(G))

    print "Pre-processed graph size", G.size()
    print "Pre-processed graph nodes", G.number_of_nodes()
    
    if save_file:
        print "Saving to", save_file
        nx.write_gpickle(G,save_file)

    return G
Author: kyrgyzbala | Project: NewSystems | Lines: 34 | Source: graph_analysis.py


Example 17: __init__

    def __init__(self, fname, interactive=True):
        self.fname = fname
        self.graph = nx.read_gpickle(fname)
        
        #apply_workaround(self.graph, thr=1e-3)
        #remove_intersecting_edges(self.graph)

        print "Number of connected components:", \
                nx.number_connected_components(self.graph)

        self.selected_path_verts = []
        
        if interactive:
            self.fig = plt.figure()
            self.path_patch = None
            
            G_p = nx.connected_component_subgraphs(self.graph)[0]
            #G_p = nx.connected_component_subgraphs(prune_graph(self.graph))[0]

            plot.draw_leaf(G_p, fixed_width=True)

            plt.ion()
            plt.show()

            self.edit_loop()    
Author: debsankha | Project: generalized-cycle | Lines: 25 | Source: graphedit.py


Example 18: pullnodeIDs

def pullnodeIDs(in_network, name_key='dn_name'):
    """ This function will return the values contained, for each node in
    a network, given an input key. By default it will return the node names
    """
    import networkx as nx
    import numpy as np
    from nipype.interfaces.base import isdefined
    if not isdefined(in_network):
        raise ValueError
    try:
        ntwk = nx.read_graphml(in_network)
    except Exception:
        ntwk = nx.read_gpickle(in_network)
    nodedata = ntwk.node
    ids = []
    integer_nodelist = []
    for node in nodedata.keys():
        integer_nodelist.append(int(node))
    for node in np.sort(integer_nodelist):
        try:
            nodeid = nodedata[node][name_key]
        except KeyError:
            nodeid = nodedata[str(node)][name_key]
        ids.append(nodeid)
    return ids
Author: ChadCumba | Project: nipype | Lines: 26 | Source: group_connectivity.py


Example 19: extract_all

def extract_all(params):
    """
    Open a pickled network and extract data from it.

    Parameters
    ----------
    networks: iterable
        Iterable of filenames
    net_type: str
        Specific string labelling this data
    setup: str
        String describing the parameters
    args: tuple
        Attributes to be collected (e.g., "robustness")
    """
    networks, net_type, setup, args = params  # tuple parameters were removed in Python 3, so unpack explicitly
    getter = attrgetter(*args)
    z_getter = attrgetter("zscores")
    res = list()
    for filename in networks:
        try:
            net = nx.read_gpickle(filename)
        except (IOError, EOFError):
            print "failed to load network file '%s'" % filename
            os.rename(filename, filename + ".failed")
            continue
#        update(net, filename)
        results = list(z_getter(net))
        results.extend(list(getter(net)))
        results.append(numpy.mean(net.shortest_paths))
        # stripping .pkl file extension
        results.append(os.path.basename(filename)[:-4])
        results.append(net_type)
        results.append(setup)
        res.append(results)
    return res
Author: Midnighter | Project: rfn-analysis | Lines: 35 | Source: extraction.py


Example 20: readNetworks

def readNetworks(fileNames):
	networks = []

	for filename in fileNames:
		networks.append(nx.read_gpickle(filename))

	return networks
Author: cvanoort | Project: ComplexNetworks | Lines: 7 | Source: CSYS303assignment04-Q1.py



Note: The networkx.read_gpickle examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by many developers; copyright belongs to the original authors, and redistribution or use should follow each project's license. Please do not repost without permission.

