
Python nltk.parse_cfg Function Code Examples


This article collects typical usage examples of the nltk.parse_cfg function in Python. If you have been wondering how parse_cfg is used in practice, what it does, or what real code that calls it looks like, the curated examples below should help.



The following sections show 20 code examples of the parse_cfg function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the recommendation system surface better Python code examples.
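
Before the collected examples, here is a minimal sketch (not taken from any of the projects below) of the typical parse_cfg workflow: define a grammar as a string, build a parser from it, and parse a tokenized sentence. The toy grammar and sentence are illustrative assumptions only. Note that nltk.parse_cfg belongs to the pre-3.0 NLTK API used throughout this article; in NLTK 3.x the equivalent entry point is nltk.CFG.fromstring, and nbest_parse is superseded by the parse method.

import nltk

# A deliberately tiny grammar covering a single sentence pattern.
grammar = nltk.parse_cfg("""
    S -> NP VP
    VP -> V NP
    NP -> 'Mary' | 'Bob'
    V -> 'saw'
    """)

sent = "Mary saw Bob".split()                  # parsers expect a list of tokens
parser = nltk.RecursiveDescentParser(grammar)
for tree in parser.nbest_parse(sent):          # iterate over candidate parse trees
    print tree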

Example 1: contextFreeGrammar

def contextFreeGrammar():
    print "page 298 Context-Free Grammar"
    print "=============== A Simple Grammar ==============="
    grammar1 = nltk.parse_cfg("""
        S -> NP VP
        VP -> V NP | V NP PP  
        PP -> P NP  
        V -> "saw" | "ate" | "walked"  
        NP -> "John" | "Mary" | "Bob" | Det N | Det N PP  
        Det -> "a" | "an" | "the" | "my"  
        N -> "man" | "dog" | "cat" | "telescope" | "park"  
        P -> "in" | "on" | "by" | "with"  
        """)
    sent = "Mary saw Bob".split()
    rd_parser = nltk.RecursiveDescentParser(grammar1)
    for tree in rd_parser.nbest_parse(sent):
        print tree

    print "=============== Writing Your Own Grammars ==============="
    grammar1 = nltk.data.load('file:mygrammar.cfg')
    sent = "Mary saw Bob".split()
    rd_parser = nltk.RecursiveDescentParser(grammar1)
    for tree in rd_parser.nbest_parse(sent):
        print tree

    print "=============== Recursion in Syntactic Structure ==============="
    grammar2 = nltk.parse_cfg("""
        S -> NP VP
        NP -> Det Nom | PropN
        Nom -> Adj Nom | N
        VP -> V Adj | V NP | V S | V NP PP
        PP -> P NP
        PropN -> 'Buster' | 'Chatterer' | 'Joe'
        Det -> 'the' | 'a'
        N -> 'bear' | 'squirrel' | 'tree' | 'fish' | 'log'
        Adj -> 'angry' | 'frightened' | 'little' | 'tall'
        V -> 'chased' | 'saw' | 'said' | 'thought' | 'was' | 'put'
        P -> 'on'
        """)
Developer: hbdhj, Project: python, Lines: 27, Source: chapter8.py


Example 2: cfgMatch

def cfgMatch ( nlQuery ):
    terminalList = [ 'find','search','display','tell','faculty','student','staff','other' ]
    grammar = nltk.parse_cfg("""
                    S -> A B
                    A -> 'find'|'search'|'display'|'tell'
                    B -> 'faculty'|'student'|'staff'|'other'
                    """)
# The parser crashes if the sentence contains a token that is not a terminal
# in the grammar, so we check for such words and report an error first.
##################################################################
# Check for and report errors here
    tokenizedList = list( word_tokenize( nlQuery  ) )
    for word in tokenizedList:
        if word not in terminalList:
            print "ERROR"
            return -1
##################################################################
    parser = nltk.RecursiveDescentParser ( grammar )
    parseTree = parser.nbest_parse ( tokenizedList, 1 )

    for tree in parseTree:
        print tree
        for elem in tree:
            for i in tree.node:
                print i
Developer: amoghtolay, Project: iitgminiUID, Lines: 25, Source: queryParser.py


Example 3: generate_grammar

def generate_grammar(sentence):
    grammar = "\n".join([r for r, freq in frequent_rules])
    for (word, pos_tag) in sentence:
        grammar += "%s -> '%s' \n" %(pos_tag, word)

    #print grammar
    return nltk.parse_cfg(grammar)
Developer: phdowling, Project: CompLingApplications, Lines: 7, Source: parseNP.py


Example 4: demo

def demo():
    from nltk import Nonterminal, parse_cfg
    nonterminals = 'S VP NP PP P N Name V Det'
    (S, VP, NP, PP, P, N, Name, V, Det) = [Nonterminal(s)
                                           for s in nonterminals.split()]

    grammar = parse_cfg("""
    S -> NP VP
    PP -> P NP
    NP -> Det N
    NP -> NP PP
    VP -> V NP
    VP -> VP PP
    Det -> 'a'
    Det -> 'the'
    Det -> 'my'
    NP -> 'I'
    N -> 'dog'
    N -> 'man'
    N -> 'park'
    N -> 'statue'
    V -> 'saw'
    P -> 'in'
    P -> 'up'
    P -> 'over'
    P -> 'with'
    """)

    def cb(grammar): print(grammar)
    top = Tk()
    editor = CFGEditor(top, grammar, cb)
    Label(top, text='\nTesting CFG Editor\n').pack()
    Button(top, text='Quit', command=top.destroy).pack()
    top.mainloop()
Developer: Arttii, Project: TextBlob, Lines: 34, Source: cfg.py


Example 5: parseSimile

def parseSimile(tokensWithIndices):
    #The grammar used to check a simile
    grammar = nltk.parse_cfg("""
    S -> NP "like" NP | "ADJ" "like" "NP" | NP V "like" NP | "EX" "like" "NP" | NP "as" "ADJ" "as" NP | V "as" "ADJ" "as" NP |OTH
    NP -> N | "ADJ" N | "DET" NP 
    N -> "NP" | "PRO" | "N"
    V -> "VD" | "V" | "VG"
    OTH -> "OTH" "PUNC" "FW" "WH" "TO" "NUM" "ADV" "VD" "VG" "L" "VN" "N" "P" "S" "EX" "V" "CNJ" "UH" "PRO" "MOD"  
    """)  
    tokens = map(lambda i: i[0], tokensWithIndices)
    indices = map(lambda i: i[1], tokensWithIndices)
    parser = nltk.ChartParser(grammar)
    simile_indices = []
    start_token = 0
    while (start_token < len(tokens) - 2):
        end_token = start_token + 2 #can't have simile smaller than 4 words
        simile = False
        while ( (not simile) and (end_token <= len(tokens))):
            if (len(parser.nbest_parse(tokens[start_token:end_token])) > 0): #If a parse tree was formed
                simile_indices.extend(indices[start_token:end_token])
                start_token = end_token
                simile = True            
            else:    
                end_token += 1
        start_token += 1
    return simile_indices
Developer: HalleyYoung, Project: Poetry_Analysis, Lines: 26, Source: similes.py


Example 6: parse

def parse(wordlist, grammar, generator):
    """
    Parse this thang. Call off to nltk's chart parser (which is
    the only one fast enough to parse the massive grammar). Only
    use the top best tree. If no parse tree is found, the program
    dies. The pass along the tree for actual symantic analysis,
    and then print out the parse and we're done!
    """

    import nltk

    try:
        gr = nltk.parse_cfg(grammar)
        parts = [w.reduced() for w in wordlist]

        parser = nltk.BottomUpChartParser(gr)
        trees = parser.nbest_parse(parts)

        classifiers = ClassifierCollection(generator)
        ct = 0
        for tree in trees:
            rparse(tree, classifiers, False)
            ct += 1
            break

        if ct == 0:
            raise ParserException('No parse trees found')

        classifiers.finish()
        classifiers.pprint()

    except ValueError, e:
        raise ParserException(str(e))
Developer: eberle1080, Project: newman, Lines: 33, Source: parser.py


Example 7: test_returnRelevantTuples_2

	def test_returnRelevantTuples_2(self):
		# arrange
		testGrammar = """
S -> NP VP

VP -> VP PP
VP -> V NP
VP -> 'eats'

PP -> P NP

NP -> Det N
NP -> 'she'

V -> 'eats'

P -> 'with'

N -> 'fish'
N -> 'fork'

Det -> 'a'
"""
		grammar = nltk.parse_cfg(testGrammar)

		sent = ['she', 'eats', 'a', 'fish', 'with', 'a', 'fork']

		inst = cyk.Cyk(sent, grammar.productions())

		# act		
		inst.executeAlgorithm()

		# assert
		self.assertTrue(inst.isInGrammar())
Developer: roylanceMichael, Project: compling_571_deepprocessing_washington, Lines: 34, Source: test.py


Example 8: __init__

	def __init__(self, cfgGrammar):
		self.pb = productionBuilder.ProductionBuilder()

		self.grammar = nltk.parse_cfg(cfgGrammar)
		self.terminalTransformProductions = []
		self.nonTerminalTransformProductions = []
		self.singleNonTerminalTransformProductions = []
Developer: roylanceMichael, Project: compling_571_deepprocessing_washington, Lines: 7, Source: cfgToCnfBuilder.py


Example 9: __init__

    def __init__(self, blackboard, tense = "present", person = 1):
        super(SentenceExpert, self).__init__(blackboard, "Sentence Expert", tense, person,5)
        self.eva = ["be", "look", "feel"]
        self.atv = ["like", "hate", "love", "know", "need", "see"]

        """ eva - emotional verb active
            evp - emotional verb passive
            ej - emotion adjective
            en - emotional noun
            atv - attitude verb
        """
        self.grammar = nltk.parse_cfg("""
            S -> P | EP | Person ATV NP
            P -> NP VP 
            EP -> Person EVA EJ | NP EVP Pron EJ | ENP VP
            ENP ->  EN OF NP 
            NP -> Det N | Det JJ N | Det EJ JJ N | Det EJ N | Det EN
            VP -> V | V ERB | ERB V
            Det -> 'the'
            N -> 'n'
            V -> 'v' 
            EVA -> 'eva'
            EVP -> 'makes' 
            EN -> 'en'
            EJ -> 'ej'
            JJ -> 'adj'
            ERB -> 'erb'
            ATV -> 'atv'
            Person -> 'person'
            Pron -> 'pron'
            OF -> 'of'
            CC -> 'and' | 'but' | 'because' | 'so'
            """)
Developer: JoannaMisztalRadecka, Project: Blackboard-Poetry-Generator, Lines: 33, Source: SentenceExpert.py


Example 10: build_grammar

    def build_grammar(self):
        '''Use the corpus data and return a NLTK grammar.'''

        grammer_def = self.build_grammar_text().getvalue()
        grammar = nltk.parse_cfg(grammer_def.encode('utf8'))

        return grammar
Developer: chfoo, Project: CompFacts, Lines: 7, Source: grammar.py


Example 11: Solution_parse

def Solution_parse(args):
  try:
    print "Parser option: %s " % args.parseOption
    gstring = open('solutiongrammar.cfg', 'r').read()
    grammar1 = nltk.parse_cfg(gstring)
    #print grammar1 , '\n'
    
    if (args.parseOption == 'rd'):
      parser = nltk.RecursiveDescentParser(grammar1)
    elif(args.parseOption == 'sr'):
      parser = nltk.ShiftReduceParser(grammar1)
    elif(args.parseOption == 'ec'):
      parser = nltk.parse.EarleyChartParser(grammar1)
    elif(args.parseOption == 'td'):
      parser = nltk.parse.TopDownChartParser(grammar1)
    elif(args.parseOption == 'bu'):
      parser = nltk.parse.BottomUpChartParser(grammar1)
    else:
      raise Exception("Unknown parseOption: %s" % args.parseOption)

    i = 0
    for line in open('inputfile.txt','r'):
      i += 1
      pass
      if i == 1:
        print line
        sent = wordpunct_tokenize(line)
        print sent , '\n'
        pp = parser.parse(sent)
        print pp, '\n'
        pass

  except Exception, err:
    sys.stderr.write('ERROR: %s\n' % str(err))
    raise
Developer: jtmhom88, Project: LING571_HW1, Lines: 35, Source: multiparser.py


Example 12: test_ctor

	def test_ctor(self):
		# arrange
		testGrammar = """
S -> NP VP

VP -> VP PP
VP -> V NP
VP -> 'eats'

PP -> P NP

NP -> Det N
NP -> 'she'

V -> 'eats'

P -> 'with'

N -> 'fish'
N -> 'fork'

Det -> 'a'
"""
		grammar = nltk.parse_cfg(testGrammar)

		sent = ['she', 'eats', 'a', 'fish', 'with', 'a', 'fork']

		# act
		inst = cyk.Cyk(sent, grammar.productions())

		# assert
		self.assertTrue(inst != None)
		self.assertTrue(inst.sentence == sent)
Developer: roylanceMichael, Project: compling_571_deepprocessing_washington, Lines: 33, Source: test.py


Example 13: demo

def demo():
    """
    A demonstration of the shift-reduce parser.
    """

    from nltk import parse, parse_cfg

    grammar = parse_cfg(
        """
    S -> NP VP
    NP -> Det N | Det N PP
    VP -> V NP | V NP PP
    PP -> P NP
    NP -> 'I'
    N -> 'man' | 'park' | 'telescope' | 'dog'
    Det -> 'the' | 'a'
    P -> 'in' | 'with'
    V -> 'saw'
    """
    )

    sent = "I saw a man in the park".split()

    parser = parse.ShiftReduceParser(grammar, trace=2)
    for p in parser.nbest_parse(sent):
        print p
Developer: Razin-Tailor, Project: ChatterBot, Lines: 26, Source: sr.py


Example 14: test

def test():
    import nltk
    # The grammar body was left empty in the original source.
    grammar1 = nltk.parse_cfg("""

    """)
    sr_parse = nltk.ShiftReduceParser(grammar1)

    sent = "Lee ran away home".split()
    return sr_parse.parse(sent)
Developer: sprotsai, Project: SolomiyaProtsay, Lines: 9, Source: 8.9.18.py


Example 15: __init__

    def __init__(self, blackboard, tense = "present"):
        super(RhetoricalExpert, self).__init__(blackboard, "Rhetorical Expert", tense, 3)
        self.grammar = nltk.parse_cfg("""
            S -> WHAT BE Det NP | WHY BE Det N SO JJ
            NP -> JJ N | N
            JJ -> 'adj'
            N -> 'n'
            Det -> 'the'
            BE -> 'be'
            SO -> 'so'
            WHAT -> 'what'
            WHY -> 'why'
            """)
Developer: JoannaMisztalRadecka, Project: Blackboard-Poetry-Generator, Lines: 13, Source: RhetoricalExpert.py


Example 16: __init__

    def __init__(self, blackboard, tense="present", person=3):
        super(MetaphoreExpert, self).__init__(blackboard, "Metaphore Expert", tense=tense, person=person, importance=2)
        self.grammar = nltk.parse_cfg(
            """
            S -> Person BE LIKE NP
            NP -> Det JJ N | Det N
            Person -> 'person'
            JJ -> 'adj'
            N -> 'n'
            Det -> 'the'
            BE -> 'be'
            LIKE -> 'like'
            """
        )
Developer: JoannaMisztalRadecka, Project: Blackboard-Poetry-Generator, Lines: 14, Source: MetaphoreExpert.py


Example 17: chart_parse

def chart_parse(in_file, grammar_file, out_file):
    text = unicode(open(in_file, 'r').read(), errors='ignore')
    output = open(out_file, 'w')
    grammar_string = unicode(open(grammar_file, 'r').read(), errors='ignore')
    try:
        grammar = nltk.parse_cfg(grammar_string)
        parser = nltk.ChartParser(grammar)
        sentences = nltk.sent_tokenize(text)
        for sentence in sentences:
            words = nltk.word_tokenize(sentence)
            tree = parser.parse(words)
            output.write(tree.pprint())
            output.write('\n')
    except Exception, e:
        message = "Error with parsing. Check the input files are correct and the grammar contains every word in the input sequence. \n----\n" + str(e)
        sys.stderr.write(message)
        sys.exit()
Developer: stevecassidy, Project: hcsvlab-galaxy, Lines: 17, Source: g_chart_parser.py


Example 18: sentence_parse_example

def sentence_parse_example():
    groucho_grammar = nltk.parse_cfg(
        """
    S -> NP VP
    PP -> P NP
    NP -> Det N | Det N PP | 'I'
    VP -> V NP | VP PP
    Det -> 'an' | 'my'
    N -> 'elephant' | 'pajamas'
    V -> 'shot'
    P -> 'in'
  """
    )
    sent = ["I", "shot", "an", "elephant", "in", "my", "pajamas"]
    parser = nltk.ChartParser(groucho_grammar)
    trees = parser.nbest_parse(sent)
    for tree in trees:
        print tree
Developer: prashiyn, Project: nltk-examples, Lines: 18, Source: ch08.py


Example 19: chart_parsing

def chart_parsing():
    groucho_grammar = nltk.parse_cfg(
        """
    S -> NP VP
    PP -> P NP
    NP -> Det N | Det N PP | 'I'
    VP -> V NP | VP PP
    Det -> 'an' | 'my'
    N -> 'elephant' | 'pajamas'
    V -> 'shot'
    P -> 'in'
  """
    )
    tokens = "I shot an elephant in my pajamas".split()
    wfst0 = _chart_init_wfst(tokens, groucho_grammar)
    _chart_display(wfst0, tokens)
    wfst1 = _chart_complete_wfst(wfst0, tokens, groucho_grammar, trace=True)
    _chart_display(wfst1, tokens)
Developer: prashiyn, Project: nltk-examples, Lines: 18, Source: ch08.py


Example 20: simpleGrammar

def simpleGrammar():

    grammar1 = nltk.parse_cfg(
        """
       S -> NP VP
       VP -> V NP | V NP PP
       PP -> P NP
       V -> "saw" | "ate" | "walked"
       NP -> "John" | "Mary" | "Bob" | Det N | Det N PP
       Det -> "a" | "an" | "the" | "my"
       N -> "man" | "dog" | "cat" | "telescope" | "park"
       P -> "in" | "on" | "by" | "with"
       """
    )

    sent = "Mary saw Bob".split()
    rd_parser = nltk.RecursiveDescentParser(grammar1)
    for tree in rd_parser.nbest_parse(sent):
        print tree
Developer: AkiraKane, Project: Python, Lines: 19, Source: c08_analyzing_sentence_structure.py



Note: The nltk.parse_cfg examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and any redistribution or use should comply with the corresponding project's license. Please do not reproduce this article without permission.

