• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    公众号

Python nltk.Tree类代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中nltk.Tree的典型用法代码示例。如果您正苦于以下问题:Python Tree类的具体用法?Python Tree怎么用?Python Tree使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。



在下文中一共展示了Tree类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: read_treefile

def read_treefile(hyptreefile, reftreefile):
    """Read hypothesis and reference parse-tree files and score each
    hypothesis tree against every reference tree.

    File formats (as handled by the original code):
      - reference file: one complete tree per non-empty line;
      - hypothesis file: each tree spread over several lines, with a
        blank line terminating each tree.

    Returns a dict mapping each hypothesis sentence (its words joined
    with spaces) to the similarity score from ``score_similarity``.
    """
    scoredic = {}
    # Reference trees: one per non-empty line.  The original code
    # re-appended the previous tree on blank lines (and raised
    # NameError if the very first line was blank); blanks are now
    # simply skipped.  `with` also guarantees the handle is closed.
    rtreel = []
    with codecs.open(reftreefile, "r", encoding='utf-8') as rfile:
        for line in rfile:
            line = line.strip()
            if line:
                rtreel.append(Tree.fromstring(line))
    # Hypothesis trees: accumulate lines until a blank line ends the
    # current tree.
    htreel = []
    senl = []
    with codecs.open(hyptreefile, "r", encoding='utf-8') as hfile:
        for line in hfile:
            if line.strip():
                senl.append(line.strip())
            else:
                if senl:  # guard against consecutive blank lines
                    htreel.append(Tree.fromstring(" ".join(senl)))
                senl = []
    # The original silently dropped the last tree when the file did not
    # end with a blank line; flush any pending lines here.
    if senl:
        htreel.append(Tree.fromstring(" ".join(senl)))
    # Score every hypothesis against every reference.
    for r in rtreel:
        for h in htreel:
            score, hword, rword = score_similarity(h, r)
            scoredic[" ".join(hword)] = score

    return scoredic
开发者ID:rillaha,项目名称:similarit,代码行数:30,代码来源:score_similarity.py


示例2: test_pcfg

    def test_pcfg(self):
        """Counts and scores should accumulate correctly across updates."""
        model = pcfg.PCFG()
        parse = Tree('S', (Tree('NP', ('foo',)), Tree('VP', ('bar',))))

        model.update_counts(parse)
        self.assertSetEqual(
                {(p, 1) for p in parse.productions()},
                set(model.production_counts.items()))
        self.assertSetEqual({(p.lhs(), 1) for p in parse.productions()},
                set(model.lhs_counts.items()))
        model.update_counts(parse)

        # A second tree with NP/VP swapped doubles the LHS totals while
        # introducing distinct productions.
        parse = Tree('S', (Tree('VP', ('foo',)), Tree('NP', ('bar',))))
        model.update_counts(parse)
        model.update_counts(parse)
        self.assertEqual(6, len(model.production_counts))
        for value in model.production_counts.values():
            self.assertEqual(2, value)
        self.assertEqual(3, len(model.lhs_counts))
        for value in model.lhs_counts.values():
            self.assertEqual(4, value)

        # Every production occurs 2 of 4 times for its LHS: log(0.5).
        model.compute_scores()
        for production, score in model.scored_productions.items():
            self.assertAlmostEqual(-0.69314718055, score, msg='%s' % production)
开发者ID:divyag9,项目名称:2018-summer-main,代码行数:25,代码来源:pcfg_test.py


示例3: _muc_read_text

def _muc_read_text(s, top_node):
    """Parse raw MUC document text into an nltk ``Tree`` rooted at ``top_node``.

    Paragraphs become 'P' subtrees and each sentence is parsed by
    ``_muc_read_words`` into an 'S' subtree.  Returns ``None`` when ``s``
    is empty (the ``if s:`` guard falls through).
    """
    # The tokenizer sometimes splits within coref tags.
    def __fix_tokenization(sents):
        # Merge any sentence whose <COREF ...> tags are unbalanced with the
        # following sentence(s) until the tags balance again; the merged-in
        # entries are blanked and filtered out below.
        for index in range(len(sents)):
            offset = 1  # renamed from `next`, which shadowed the builtin
            while sents[index].count('<COREF') != sents[index].count('</COREF>'):
                sents[index] += ' '
                sents[index] += sents[index + offset]
                sents[index + offset] = ''
                offset += 1
        sents = filter(None, sents)  # drop the blanked-out entries
        return sents
    if s:
        tree = Tree(top_node, [])
        if _MUC6_PARA_RE.match(s):
            for para in _MUC6_PARA_RE.findall(s):
                if para and para[0] and para[0].strip():
                    tree.append(Tree('P', []))
                    for sent in _MUC6_SENT_RE.findall(para[0]):
                        words = _MUC6_SENT_RE.match(sent[0]).group('sent').strip()
                        # There are empty sentences <s></s> in the MUC6 corpus.
                        if words:
                            tree[-1].append(_muc_read_words(words, 'S'))
        elif _MUC7_PARA_RE.match(s):
            for para in _MUC7_PARA_SPLIT_RE.split(s):
                if para and para.strip():
                    tree.append(Tree('P', []))
                    for sent in __fix_tokenization(_SENT_TOKENIZER.tokenize(para)):
                        tree[-1].append(_muc_read_words(sent, 'S'))
        return tree
开发者ID:knowlp,项目名称:nltk_contrib,代码行数:30,代码来源:muc.py


示例4: match

    def match(self, tree):
        """Match a parse tree of the shape "when was <NP> <action>".

        Returns a one-element list containing the extracted Pattern.Part
        (object = the NP subtree; property = a VP assembled from the WRB,
        VBD and VP subtrees), or an empty list if the tree does not fit.
        """
        try:
            # One combined short-circuiting test: the `or` chain evaluates
            # left to right, mirroring the original sequence of checks, so
            # deeper indices are only touched after shallower labels pass.
            mismatch = (
                tree.label() != 'ROOT'
                or tree[0].label() != 'SBARQ'
                or tree[0][0][0].label() != 'WRB'
                or tree[0][0][0][0].lower() != 'when'
                or tree[0][1].label() != 'SQ'
                or tree[0][1][0].label() != 'VBD'
                or tree[0][1][1].label() != 'NP'
                or tree[0][1][2].label() != 'VP'
            )
            if mismatch:
                raise IndexError

            part = Pattern.Part()
            part.object = ParentedTree.fromstring(str(tree[0][1][1]))
            part.property = ParentedTree.fromstring(str(Tree('VP', [
                Tree.fromstring(str(tree[0][0][0])),
                Tree.fromstring(str(tree[0][1][0])),
                Tree.fromstring(str(tree[0][1][2]))
            ])))
            return [part]
        except IndexError:
            # Missing children raise IndexError too; both cases mean "no match".
            return []
开发者ID:EIFSDB,项目名称:search-engine,代码行数:30,代码来源:PatternWhenWasObjectAction.py


示例5: parser_output_to_parse_deriv_trees

def parser_output_to_parse_deriv_trees(output):
    """Split raw parser output into (parse_trees, deriv_trees).

    The parser alternates lines: derivation trees on even lines, parse
    trees on odd lines.  The '\x06' control character marks epsilon in
    parse trees and is rewritten to the literal prefix 'epsilon_'.
    """
    all_lines = output.strip().split("\n")
    deriv_lines = all_lines[0::2]
    parse_lines = all_lines[1::2]

    parse_trees = []
    for raw in parse_lines:
        if raw != '':
            parse_trees.append(Tree.fromstring(raw.replace('\x06', 'epsilon_')))
    deriv_trees = [Tree.fromstring(raw) for raw in deriv_lines if raw != '']
    return parse_trees, deriv_trees
开发者ID:jonpiffle,项目名称:ltag_parser,代码行数:8,代码来源:investigate_parser_output.py


示例6: test_evalb_correctly_scores_identical_trees

 def test_evalb_correctly_scores_identical_trees(self):
     """Identical predicted and gold trees must score perfect P/R/F1."""
     sexp = "(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))"
     predicted = Tree.fromstring(sexp)
     gold = Tree.fromstring(sexp)
     scorer = EvalbBracketingScorer()
     scorer([predicted], [gold])
     computed = scorer.get_metric()
     assert computed["evalb_recall"] == 1.0
     assert computed["evalb_precision"] == 1.0
     assert computed["evalb_f1_measure"] == 1.0
开发者ID:apmoore1,项目名称:allennlp,代码行数:9,代码来源:evalb_bracketing_scorer_test.py


示例7: add_top_to_tree

def add_top_to_tree(treebank_file):
    f = open(treebank_file, "r")
    root_set = set([])
    for sentence in f:
        t = Tree.fromstring(sentence, remove_empty_top_bracketing=False)
        top_node = Tree("TOP", [])
        top_node.append(t)
        print NewTree.flat_print(top_node)
    f.close()
开发者ID:srush,项目名称:PhraseDep,代码行数:9,代码来源:PTBRoot.py


示例8: test_evalb_correctly_calculates_bracketing_metrics_over_multiple_trees

 def test_evalb_correctly_calculates_bracketing_metrics_over_multiple_trees(self):
     """Metrics should aggregate across a batch: one imperfect and one
     perfect prediction yield 0.875 for precision, recall and F1."""
     predicted = Tree.fromstring("(S (VP (D the) (NP dog)) (VP (V chased) (NP (D the) (N cat))))")
     gold = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
     scorer = EvalbBracketingScorer()
     scorer([predicted, gold], [gold, gold])
     computed = scorer.get_metric()
     assert computed["evalb_recall"] == 0.875
     assert computed["evalb_precision"] == 0.875
     assert computed["evalb_f1_measure"] == 0.875
开发者ID:apmoore1,项目名称:allennlp,代码行数:9,代码来源:evalb_bracketing_scorer_test.py


示例9: _construct_node_from_actions

    def _construct_node_from_actions(self,
                                     current_node: Tree,
                                     remaining_actions: List[List[str]],
                                     add_var_function: bool) -> List[List[str]]:
        """
        Given a current node in the logical form tree, and a list of actions in an action sequence,
        this method fills in the children of the current node from the action sequence, then
        returns whatever actions are left.

        For example, we could get a node with type ``c``, and an action sequence that begins with
        ``c -> [<r,c>, r]``.  This method will add two children to the input node, consuming
        actions from the action sequence for nodes of type ``<r,c>`` (and all of its children,
        recursively) and ``r`` (and all of its children, recursively).  This method assumes that
        action sequences are produced `depth-first`, so all actions for the subtree under ``<r,c>``
        appear before actions for the subtree under ``r``.  If there are any actions in the action
        sequence after the ``<r,c>`` and ``r`` subtrees have terminated in leaf nodes, they will be
        returned.

        Raises
        ------
        ParsingError
            If the action sequence is exhausted, if the next action's left side does not match
            ``current_node``'s label, if ``add_var_function`` is true but the sequence already
            contains a ``var`` action, or if a unary non-terminal production is encountered.
        """
        if not remaining_actions:
            logger.error("No actions left to construct current node: %s", current_node)
            raise ParsingError("Incomplete action sequence")
        # NOTE: pop(0) mutates the caller's list; the depth-first recursion below
        # relies on each recursive call consuming its subtree's actions in order.
        left_side, right_side = remaining_actions.pop(0)
        if left_side != current_node.label():
            logger.error("Current node: %s", current_node)
            logger.error("Next action: %s -> %s", left_side, right_side)
            logger.error("Remaining actions were: %s", remaining_actions)
            raise ParsingError("Current node does not match next action")
        if right_side[0] == '[':
            # This is a non-terminal expansion, with more than one child node.
            for child_type in right_side[1:-1].split(', '):
                if child_type.startswith("'lambda"):
                    # We need to special-case the handling of lambda here, because it's handled a
                    # bit weirdly in the action sequence.  This is stripping off the single quotes
                    # around something like `'lambda x'`.
                    child_type = child_type[1:-1]
                child_node = Tree(child_type, [])
                current_node.append(child_node)  # you add a child to an nltk.Tree with `append`
                if not self.is_terminal(child_type):
                    remaining_actions = self._construct_node_from_actions(child_node,
                                                                          remaining_actions,
                                                                          add_var_function)
        elif self.is_terminal(right_side):
            # The current node is a pre-terminal; we'll add a single terminal child.  We need to
            # check first for whether we need to add a (var _) around the terminal node, though.
            if add_var_function and right_side in self._lambda_variables:
                right_side = f"(var {right_side})"
            if add_var_function and right_side == 'var':
                raise ParsingError('add_var_function was true, but action sequence already had var')
            current_node.append(Tree(right_side, []))  # you add a child to an nltk.Tree with `append`
        else:
            # The only way this can happen is if you have a unary non-terminal production rule.
            # That is almost certainly not what you want with this kind of grammar, so we'll crash.
            # If you really do want this, open a PR with a valid use case.
            raise ParsingError(f"Found a unary production rule: {left_side} -> {right_side}. "
                               "Are you sure you want a unary production rule in your grammar?")
        return remaining_actions
开发者ID:pyknife,项目名称:allennlp,代码行数:56,代码来源:world.py


示例10: drawTrees

def drawTrees(chart):
	for state in chart[-1]:
		if state.isParse(grammar):
			treeString = buildTreeString(state,'')
			tree = Tree(treeString)
			print 'Showing parse tree. Close window to continue.'
			tree.draw()
			ans = raw_input('Do you want to see another parse tree?(y/n): ')
			if ans == 'n': return
	print 'No more valid parses'
开发者ID:hillelweintraub,项目名称:NLP,代码行数:10,代码来源:parse.py


示例11: extract_itg

def extract_itg(alignments_file_name, parses_file_name, inv_extension):
    """Extract a inversion transduction grammar (ITG)
    from the given files.

    Keyword arguments:
    alignments_file_name -- name of file containing alignments
        between sentences in l1_file_name and l2_file_name
    parses_file_name -- name of file containing parse trees
        of the sentences in l1_file_name
    inv_extension -- extension denoting whether a node is inverted

    Returns a Counter of binary ITG rules and unary rules. Each ITG rule is
    represented as the tuple (lhs, rhs), where rhs is a tuple of nodes."""
    binary_itg = Counter()
    unary_itg = Counter()
    num_lines = number_of_lines(parses_file_name)
    # Report progress roughly every 1% of lines.  max(..., 1) prevents a
    # modulo-by-zero for files shorter than 100 lines (num_lines/100 == 0).
    report_every = max(num_lines / 100, 1)
    alignments_file = open(alignments_file_name)
    parses_file = open(parses_file_name)

    for i, l1_parse in enumerate(parses_file):
        # Bug fix: the original tested `... is 0`; `is` compares object
        # identity, which only happens to work for small ints in CPython.
        if i % report_every == 0:
            sys.stdout.write('\r%d%%' % (i*100/num_lines,))
            sys.stdout.flush()

        try: # TODO remove try/catch
            reordered_indexes = str_to_reordered_indexes(alignments_file.next())
            # remove outer brackets from Berkeley parse
            l1_parse = l1_parse.strip()
            l1_parse = l1_parse[1:len(l1_parse)-1]
            l1_parse = l1_parse.strip()
            parse_tree = Tree(l1_parse)
            parse_forest = generate_forest(parse_tree,
                reordered_indexes, inv_extension)
        except:
            # Best-effort error log; the bare except is acceptable only
            # because the exception is always re-raised below.
            error_log = open('error.log', 'a')
            error_log.write('%s -- in extract_itg/3\n' % time.asctime())
            error_log.write('line: %s\n' % i)
            error_log.write('%s\n' % l1_parse.strip())
            error_log.write('%s\n' % reordered_indexes)
            error_log.write('\n')
            error_log.close()
            print 'Error in extract_itg/3. See error.log'
            raise

        binary_rules, unary_rules = extract_rules(parse_forest,
                                                  parse_tree.leaves())
        for rule in binary_rules:
            binary_itg[rule] += 1

        for rule in unary_rules:
            unary_itg[rule] += 1

    alignments_file.close()
    parses_file.close()
    return binary_itg, unary_itg
开发者ID:macabot,项目名称:SRITG,代码行数:55,代码来源:sritg.py


示例12: test_evalb_correctly_scores_imperfect_trees

 def test_evalb_correctly_scores_imperfect_trees(self):
     """A changed constituency label (VP ...) should affect the scores,
     but a changed POS tag (NP dog) should have no effect."""
     predicted = Tree.fromstring("(S (VP (D the) (NP dog)) (VP (V chased) (NP (D the) (N cat))))")
     gold = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
     scorer = EvalbBracketingScorer()
     scorer([predicted], [gold])
     computed = scorer.get_metric()
     assert computed["evalb_recall"] == 0.75
     assert computed["evalb_precision"] == 0.75
     assert computed["evalb_f1_measure"] == 0.75
开发者ID:apmoore1,项目名称:allennlp,代码行数:11,代码来源:evalb_bracketing_scorer_test.py


示例13: get_sentence_and_indexes

def get_sentence_and_indexes(parsed_sentence):
  """Turn a bracketed parse string into its reordered sentence and the
  corresponding index list, via the recursive helper."""
  tree = Tree(parsed_sentence)
  # Drop the designated TOP symbol that bitpar adds as an artificial root.
  if tree.node == bitpar_top:
    tree = tree[0]

  leaf_count = len(tree.leaves())
  rlist = [0] * leaf_count
  slist = [""] * leaf_count
  get_sentence_and_indexes_rec_helper(tree, rlist, slist)

  return " ".join(slist), rlist
开发者ID:agnesvanbelle,项目名称:SSLP2,代码行数:12,代码来源:testing.py


示例14: test_evalb_with_terrible_trees_handles_nan_f1

 def test_evalb_with_terrible_trees_handles_nan_f1(self):
     """When precision and recall are both zero, evalb reports NaN F1;
     the scorer must map that to 0.0 rather than propagate NaN."""
     predicted = Tree.fromstring("(PP (VROOT (PP That) (VROOT (PP could) "
                                 "(VROOT (PP cost) (VROOT (PP him))))) (PP .))")
     gold = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
     scorer = EvalbBracketingScorer()
     scorer([predicted], [gold])
     computed = scorer.get_metric()
     assert computed["evalb_recall"] == 0.0
     assert computed["evalb_precision"] == 0.0
     assert computed["evalb_f1_measure"] == 0.0
开发者ID:apmoore1,项目名称:allennlp,代码行数:12,代码来源:evalb_bracketing_scorer_test.py


示例15: _build_hierplane_tree

    def _build_hierplane_tree(self, tree: Tree, index: int, is_root: bool) -> JsonDict:
        """
        Recursively convert an NLTK ``Tree`` into the JSON dictionary format
        expected by the `Hierplane library<https://allenai.github.io/hierplane/>`.

        Parameters
        ----------
        tree : ``Tree``, required.
            The (sub)tree to convert.
        index : int, required.
            Character index into the tree, used for creating spans.
        is_root : bool
            When True, the node is wrapped in the outer Hierplane JSON
            required for rendering.

        Returns
        -------
        A JSON dictionary render-able by Hierplane for the given tree.
        """
        child_dicts = []
        for subtree in tree:
            if isinstance(subtree, Tree):
                # Interior node: recurse.  NLTK leaves are plain strings,
                # so anything that is a Tree has children of its own.
                child_dicts.append(self._build_hierplane_tree(subtree, index, is_root=False))
            else:
                # Leaf: advance the character index past this word.
                index += len(subtree)

        label = tree.label()
        span = " ".join(tree.leaves())
        node = {
                "word": span,
                "nodeType": label,
                "attributes": [label],
                "link": label
        }
        if child_dicts:
            node["children"] = child_dicts
        # TODO(Mark): Figure out how to span highlighting to the leaves.
        if is_root:
            node = {
                    "linkNameToLabel": LINK_TO_LABEL,
                    "nodeTypeToStyle": NODE_TYPE_TO_STYLE,
                    "text": span,
                    "root": node
            }
        return node
开发者ID:apmoore1,项目名称:allennlp,代码行数:49,代码来源:constituency_parser.py


示例16: evaluate

def evaluate(fragments, sumfunc, condition, normalization, verbose=True, perbook=False, topfragments=False, breakdown=True, conftable=False):
	"""Classify each text in ``fragments`` by author and print a LaTeX-style report.

	``fragments`` maps text -> author -> {fragment: count}.  For each text,
	every candidate author gets the score
	sum(sumfunc(item) for items passing ``condition``) / normalization(text, author);
	the argmax author is the guess.  Output goes to stdout.
	NOTE(review): ``results`` (actual author -> guesses) is built but never
	returned, and ``breakdown``/``conftable`` are unused in this body --
	presumably consumed elsewhere; confirm against the full module.
	"""
	green = "\033[32m"; red = "\033[31m"; gray = "\033[0m" # ANSI codes
	names = set(map(getauthor, fragments.values()[0]))
	results = {}
	# heading
	if verbose and not perbook:
		print "\n &", 21 * " ",
		print "&".join(a.rjust(16) for a in sorted(names)),
		print "&\tguess &\t\t\tconfidence\\\\"
	prev = "foo.bar"
	# loop over texts to be classified
	for text in sorted(fragments):
		if perbook and getauthor(text) != getauthor(prev):
			print "\n &", 21 * " ",
			print " &".join("\\rotatebox{45}{%s}" % a.split(" - ")[-1].split(".")[0].replace("&","\\&") for a in sorted(fragments[text])), "\\\\"
		if verbose: print text.split(" - ")[-1].split(".")[0][:25].replace("&","\\&").ljust(25),
		inter = {}
		# loop over possible authors
		for author in sorted(fragments[text]):
			inter[author] = sum(map(sumfunc, filter(condition, fragments[text][author].items()))) / normalization(text, author)
		if verbose:
			# print each author's score; the best one is bolded in LaTeX
			for author in sorted(inter):
				if inter[author] == max(inter.values()): l,r = "\\textbf{","}"
				else: l, r = "".ljust(8), " "
				if isinstance(inter[author], float): print ("& %s%.2f%s" % (l,inter[author],r)).rjust(16),
				elif isinstance(inter[author], int): print ("& %s%d%s" % (l,inter[author],r)).rjust(16),
				else: print "& %s%s" % (l,repr(inter[author]).rjust(8),r),
		actualauthor = getauthor(text)
		guess = max(inter, key=inter.get)
		results.setdefault(actualauthor, []).append(guess)
		if verbose and not perbook:
			print "&",
			print green+"correct:" if getauthor(guess) == actualauthor else red+"wrong:  ",
			print getauthor(guess).ljust(10), gray,
			# confidence: margin between best and runner-up score, as a
			# percentage of the best score (0.0 when undefined)
			try: confidence = (100 * (max(inter.values()) - sorted(inter.values())[-2]) / float(max(inter.values())))
			except ZeroDivisionError: confidence = 0.0
			except IndexError: confidence = 0.0
			print "& %s%5.2f%s " % ((red if confidence < 50 else green), confidence, gray)
		elif verbose: print "\\\\"
		prev = text
	if verbose: print

	# optionally print, per author and per label prefix, the single
	# highest-scoring fragment of the guessed author's model
	if topfragments: print "top fragments"
	for name in sorted(names) if topfragments else ():
		for text in sorted(fragments):
			if not getauthor(text) == name: continue
			print text
			for label in ("(ROOT", "(S ", "(NP ", "(VP ", "(PP "):
				# NOTE(review): uses a global ``norm`` here rather than the
				# ``normalization`` parameter -- confirm this is intentional
				guess = max(fragments[text], key=lambda x: sum(sumfunc(a) for a in fragments[text][x].items() if condition(a)) / norm(x))
				try:
					frag = max((a[0] for a in fragments[text][guess].iteritems() if condition(a) and a[0].startswith(label)), key=lambda x: (sumfunc((x,fragments[text][guess][x])), fragments[text][guess][x]))
				except ValueError: pass
				else:
					f1 = Tree(frag)
					f2 = Tree(frag)
					print "%2d" % fragments[text][guess][frag], " ".join(a.replace(" ", "_")[:-1] for a in re.findall(r" \)|[^ )]+\)", frag)),
					try: f2.un_chomsky_normal_form()
					except: print f1.pprint(margin=9999, parens=("[", " ]"))
					else: print f2.pprint(margin=9999, parens=("[", " ]"))
开发者ID:andreasvc,项目名称:authident,代码行数:59,代码来源:evaluate.py


示例17: reduce_nps

def reduce_nps(sentence):
    """Flatten unary subtrees of ``sentence``: every child that is itself a
    Tree with exactly one element is replaced by that single element in the
    returned 'S' tree; all other children are copied through unchanged."""
    reduced = Tree('S', [])
    for node in sentence:
        is_unary_subtree = isinstance(node, Tree) and len(node) == 1
        reduced.append(node[0] if is_unary_subtree else node)
    return reduced
开发者ID:phdowling,项目名称:CompLingApplications,代码行数:15,代码来源:parseNP.py


示例18: tag

    def tag(self, input_tree):
        """
        Tag an input tree using the rules in parsed grammars.

        Each rule in ``self.rules`` is a one-entry dict mapping a rule name
        to a regex with a named group of the same name.  Every match in the
        flattened-tree text is replaced by a <NAME> placeholder and the
        matched text is pushed onto ``self.stack``; the tagged text is then
        re-parsed into an nltk Tree and returned.
        """
        #clean input tree:
        input_tree = self.clean(input_tree)

        text = self.from_tree_to_text(input_tree)
        #print "INPUT TEXT: "+text
        for rule in self.rules:
            # each rule is a single-entry dict: {rule_name: regex}
            rule_name = rule.keys()[0]
            rule = rule.values()[0]

            matches = re.finditer(rule, text, re.I)
            for match in matches:
                match_text = match.group(rule_name)
                # strip whitespace from both ends of the matched text, so
                # that each <NAME> subtree is cleanly delimited in the
                # resulting text (optional surrounding spaces are kept):
                match_text = match_text.strip()
                text = string.replace(text, match_text, "<"+rule_name+">")
                #print "TEXT = "+text
                self.stack.append(match_text)

        #print "OUTPUT TEXT : "+text
        output_tree_str = "(S "+self.from_text_to_tree_str(text)+" )"
        #print "OUTPUT TREE STR: "+output_tree_str
        output_tree = Tree.parse(output_tree_str, parse_leaf=self.from_string_token_to_tuple)
        return output_tree
开发者ID:azizur77,项目名称:ruleparser,代码行数:29,代码来源:ruleparser.py


示例19: test_simple_tags

    def test_simple_tags(self):
        """A one-rule grammar should wrap each ANIMAL-tagged token in an
        ANIMAL subtree while leaving other tokens untouched."""
        grammar = "ANIMAL : {<ANIMAL>}"
        parser = ruleparser.RuleParser(grammar)

        wanted = Tree.parse("(S el/DT (ANIMAL perro/NN/ANIMAL) ladra/VB al/DT (ANIMAL gato/NN/ANIMAL))", parse_leaf=parser.from_string_token_to_tuple)
        tagged = parser.tag(self.text)
        self.assertEqual(tagged, wanted)
开发者ID:azizur77,项目名称:ruleparser,代码行数:7,代码来源:test.py


示例20: parse_ccgbank_tree

def parse_ccgbank_tree(s):
    """Parse a CCGbank tree string with the CCGbank-specific node/leaf
    handlers, then strip any empty nodes from the result."""
    parsed = Tree.parse(
        s,
        parse_node=parse_ccgbank_node,
        parse_leaf=parse_ccgbank_leaf,
        node_pattern=ccgbank_node_pattern,
        leaf_pattern=ccgbank_leaf_pattern,
    )
    return excise_empty_nodes(parsed)
开发者ID:AlexWang90,项目名称:openccg,代码行数:7,代码来源:ccg_draw_tree.py



注:本文中的nltk.Tree类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python nltk.WordNetLemmatizer类代码示例发布时间:2022-05-27
下一篇:
Python nltk.RegexpTokenizer类代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap