• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    公众号

Python generate.generate函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中nltk.parse.generate.generate函数的典型用法代码示例。如果您正苦于以下问题:Python generate函数的具体用法?Python generate怎么用?Python generate使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了generate函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: generate_sentence

    def generate_sentence(self, depth=9, num=30000):
        """Write up to `num` grammar-generated sentences to self.origin_file.

        Sentences are produced from self.grammar at increasing depths
        (1 .. depth-1), shuffled, then written one per line.

        NOTE(review): nltk's generate(depth=d) yields all sentences up to
        depth d, so successive depths re-yield earlier sentences — confirm
        the duplication is intended.
        """
        num = min(num, 30000)  # hard cap, as in the original contract
        sentences_list = []
        done = False
        for dep in range(1, depth):
            for tokens in generate(self.grammar, depth=dep):
                sentences_list.append(' '.join(tokens) + '\n')
                # BUG FIX: the original checked `gen_num > num` *after* the
                # append, so it produced num + 1 sentences instead of num.
                if len(sentences_list) >= num:
                    done = True
                    break
            if done:
                break

        random.shuffle(sentences_list)
        with open(self.origin_file, 'w') as file:
            file.writelines(sentences_list)
开发者ID:IshJ,项目名称:Texygen,代码行数:27,代码来源:OracleCfg.py


示例2: gen_grammar3_past_plural

def gen_grammar3_past_plural(verb, direct_object, count):
    """Append weighted past-tense plural questions to sentences.csv.

    Builds a small CFG around *verb* and *direct_object*, generates every
    question it licenses (capped at 999), and writes each row as
    (sentence, multiplier * count), where the multiplier depends on the
    leading question word.
    """
    g1 = """
    S -> W TR SUB V '?' | WA TR SUB V DO '?' 
    W -> 'who' | 'what' | 'when' | 'where' | 'why' | 'how'
    WA -> 'when' | 'where' | 'why' | 'how'
    TR -> 'have'
    SUB -> PRO
    PRO -> 'they' |'you'
    V -> '%s'
    DO -> 'the %s'
    """ % (verb, direct_object)
    grammar1 = CFG.fromstring(g1)
    # Weight assigned per leading question word (who/what=1, when/where=2, why/how=4).
    weights = {'who': 1, 'what': 1, 'when': 2, 'where': 2, 'why': 4, 'how': 4}
    multiplier = 0
    # NOTE(review): binary 'ab' mode with csv.writer is Python-2 style;
    # on Python 3 use open(..., 'a', newline='').
    with open('sentences.csv', 'ab') as csvwriter:
        writer = csv.writer(csvwriter)
        for tokens in generate(grammar1, n=999):
            # BUG FIX: generate() yields token *lists*, which have no
            # .find() method — join into a string before inspecting it.
            sentence = ' '.join(tokens)
            multiplier = weights.get(sentence.split(' ', 1)[0], multiplier)
            writer.writerow((sentence, multiplier*count))
开发者ID:danielzuot,项目名称:honeyencryption,代码行数:29,代码来源:message_generator.py


示例3: Tweet_content1

def Tweet_content1():
    """Generate sentences from demo_grammar; print and return the first one.

    FIX: the original placed a docstring literal on the `for` header line
    before an indented suite, which is a syntax error.
    """
    grammar = CFG.fromstring(demo_grammar)
    # NOTE(review): the return inside the loop means only the first of the
    # (up to 4) generated sentences is ever printed/returned — preserved.
    for sentence in generate(grammar, n=4):
        print(' '.join(sentence))
        return sentence
    return None  # grammar produced no sentences
开发者ID:BelhalK,项目名称:twitterbot,代码行数:7,代码来源:Tweet_content1.py


示例4: gen_grammar_plural

def gen_grammar_plural(verb, direct_object, count):
    """Append weighted present-participle plural questions to sentences.csv.

    Converts *verb* to its present participle (returning early on unknown
    verbs), builds a CFG around it and *direct_object*, and writes every
    generated question as (sentence, multiplier * count).
    """
    try:
        verb = en.verb.present_participle(verb)
    except KeyError:
        # Unknown verb: nothing to generate.
        return
    if verb != "":
        g1 = """
        S -> WA TR SUB V DO '?' | W TR SUB V '?' 
        W -> 'who' | 'what' | 'when' | 'where' | 'why' | 'how'
        WA -> 'when' | 'where' | 'why' | 'how'
        TR -> 'are' | 'were'
        SUB -> 'they' | 'you'
        V -> '%s'
        DO -> 'the %s'
        """ % (verb, direct_object)
        grammar1 = CFG.fromstring(g1)
        # Weight per leading question word, mirroring gen_grammar3_past_plural.
        weights = {'who': 1, 'what': 1, 'when': 2, 'where': 2, 'why': 4, 'how': 4}
        multiplier = 1
        with open('sentences.csv', 'ab') as csvwriter:
            writer = csv.writer(csvwriter)
            for tokens in generate(grammar1, n=999):
                sentence = ' '.join(tokens)
                multiplier = weights.get(sentence.split(' ', 1)[0], multiplier)
                # BUG FIX: the original wrote ' '.join(sentence) on the
                # already-joined string, spacing out every character
                # ("w h o  a r e ..."); write the sentence directly.
                writer.writerow((sentence, multiplier*count))
开发者ID:danielzuot,项目名称:honeyencryption,代码行数:34,代码来源:message_generator.py


示例5: generate_text

def generate_text(grammar, N):
    """Compile *grammar* (CFG source text) and print its first N sentences."""
    from nltk.grammar import CFG
    import nltk.parse.generate as gen

    print('Generating the first %d sentences for demo grammar:' % (N,))
    print(grammar)
    compiled = CFG.fromstring(grammar)

    # Number each generated sentence from 0, exactly as enumerate() does.
    for idx, words in enumerate(gen.generate(compiled, n=N)):
        print('%3d. %s' % (idx, ' '.join(words)))
开发者ID:BelkacemB,项目名称:nltk,代码行数:11,代码来源:util.py


示例6: respondQuestion

def respondQuestion(sentence, keyWord, POS):
    """Build a random follow-up question about *keyWord*.

    Selects a plural or singular question grammar from the POS tag,
    generates every sentence the grammar licenses, then draws random
    candidates until one can host the keyword (or needs no slot).
    In the grammar, '<>' marks a common-noun slot and '[]' a
    proper-noun slot.

    For "Tell me ..." inputs, delegates to knowledgeRep() instead and
    returns None.
    """
    if "Tell me" not in sentence:
        if POS == "NNPS" or POS == "NNS":
            # Plural grammar: 'they/those ... are'.
            grammar = CFG.fromstring("""
            S -> H-NP1 Adj VP'?' | Wh-NP VP'?'
            H-NP1 -> 'How'
            Wh-NP -> 'Who' | 'What' | 'Where' | 'What'
            Adj -> 'big' | 'small' | 'happy' | 'sad' | 'large' | 'difficult' | 'emotional' | 'old' | 'healthy' | 'strong' | 'cute' | 'hungry'
            NP -> Pronoun | Proper-Noun | Noun
            Pronoun -> 'they' | 'those'
            Proper-Noun -> '[]'
            Noun -> 'the <>'
            VP -> Verb NP
            Verb -> 'are'
            """)
        else:
            # BUG FIX: the original branch was `elif POS == "NN" or "NNP":`,
            # which is always truthy ("NNP" is a non-empty string), so every
            # non-plural tag fell through to this singular grammar. That
            # effective behavior is kept, now stated explicitly as `else`.
            grammar = CFG.fromstring("""
            S -> H-NP1 Adj VP'?' | Wh-NP VP'?'
            H-NP1 -> 'How'
            Wh-NP -> 'Who' | 'What' | 'Where' | 'What'
            Adj -> 'big' | 'small' | 'happy' | 'sad' | 'large' | 'difficult' | 'emotional' | 'old' | 'healthy' | 'strong' | 'cute' | 'hungry'
            NP -> Pronoun | Proper-Noun | Noun
            Pronoun -> 'it' | 'that'
            Proper-Noun -> '[]'
            Noun -> 'the <>'
            VP -> Verb NP
            Verb -> 'is'
            """)

        # All sentences the chosen grammar can produce.
        rand_sent_list = [' '.join(tokens) for tokens in generate(grammar)]
        response = ""
        while True:
            response = rand_sent_list[randint(0, len(rand_sent_list) - 1)]
            if "<>" in response and (POS == "NNS" or POS == "NN"):
                # Splice the keyword into the common-noun slot.
                index = response.index("<>")
                response = response[:index] + keyWord + response[index+2:]
                break
            if "[]" in response and (POS == "NNPS" or POS == "NNP"):
                # Splice the keyword into the proper-noun slot.
                index = response.index("[]")
                response = response[:index] + keyWord + response[index+2:]
                break
            if "<>" not in response and "[]" not in response:
                # No slot to fill; use the sentence as-is.
                break
        return response
    else:
        knowledgeRep(sentence)
开发者ID:Roceso1337,项目名称:FriendBot,代码行数:51,代码来源:parser.py


示例7: generateRawTemplates

def generateRawTemplates(depth):
    """Expand the module grammar into template files.

    Generates every sentence from `grammarstring` up to *depth*, writes
    each to ./templates/template<N>, and returns the list of file paths.
    """
    gram = CFG.fromstring(grammarstring)
    templatefiles = []

    for index, state in enumerate(generate(gram, depth=depth)):
        filename = os.path.join("./templates", "template" + str(index))
        with open(filename, 'w') as templatefile:
            templatefile.write(' '.join(state))
        templatefiles.append(filename)

    # FIX: converted the Python-2 `print` statement to a function call,
    # matching the Python 3 print() style used elsewhere in this project.
    print(str(len(templatefiles)) + " template files generated")

    return templatefiles
开发者ID:slecourse,项目名称:slecourse,代码行数:14,代码来源:SyntaxGenerator.py


示例8: generate_tweet

def generate_tweet(grammar):
    """Print one randomly chosen sentence generated from *grammar*.

    Generates up to SIZE sentences and prints the one whose index matches
    a random draw.
    """
    from nltk.grammar import CFG
    import nltk.parse.generate as gen
    from random import randint

    print(grammar)
    compiled = CFG.fromstring(grammar)
    grm_list = gen.generate(compiled, n=SIZE)  # TODO voir la taille max ? moyen de la recuperer ?
    # BUG FIX: randint's upper bound is inclusive, so randint(0, SIZE)
    # could return SIZE — an index the loop below never reaches, silently
    # printing no tweet. NOTE(review): if the grammar yields fewer than
    # SIZE sentences, a draw can still miss; confirm SIZE matches.
    rd = randint(0, SIZE - 1)
    for n, sent in enumerate(grm_list):
        if n == rd:
            print ("Your tweet : ")
            print('%3d. %s' % (n, ' '.join(sent)))
开发者ID:BelkacemB,项目名称:nltk,代码行数:15,代码来源:util.py


示例9: main

def main():
    """Compose a 10-line 'poem' from the Zen of Python.

    POS-tags the Zen text, groups words by tag, builds a one-production
    CFG (S -> NNP VBZ JJR IN RB) whose terminals are the tagged words,
    generates sentences from it, and prints 10 chosen at random.
    """
    zen = """ Beautiful is better than ugly.
    Explicit is better than implicit.
    Simple is better than complex.
    Complex is better than complicated.
    Flat is better than nested.
    Sparse is better than dense.
    Readability counts.
    Special cases aren't special enough to break the rules.
    Although practicality beats purity.
    Errors should never pass silently.
    Unless explicitly silenced.
    In the face of ambiguity, refuse the temptation to guess.
    There should be one-- and preferably only one --obvious way to do it.
    Although that way may not be obvious at first unless you're Dutch.
    Now is better than never.
    Although never is often better than *right* now.
    If the implementation is hard to explain, it's a bad idea.
    If the implementation is easy to explain, it may be a good idea.
    Namespaces are one honking great idea -- let's do more of those!"""

    # Tag every token, then flip pairs to (tag, word) for grouping below.
    tagged = nltk.pos_tag(nltk.word_tokenize(zen))
    tagged = [(tag, word) for word, tag in tagged]
    #
    #tag_word_map = defaultdict(list)
    #[(tag, word) for word, tag in tagged]
    # Map each POS tag to the set of words that carried it.
    tags = set([tag for tag, _  in tagged])
    tag_word_map = {tag: {word for key, word in tagged if key == tag} for tag in tags}

    # Single sentence shape: proper noun, verb, comparative, preposition, adverb.
    gram_head = """
      S -> NNP VBZ JJR IN RB
    """
    cats = ['NNP', 'VBZ', 'JJR', 'IN', 'RB']
    # One terminal production per category, e.g. NNP -> 'Beautiful'|'Now'|...
    # repr() supplies the quoting the CFG parser expects.
    gram = [cat + ' -> ' + '|'.join([repr(x) for x in tag_word_map[cat]]) for cat in cats]

    grammar = gram_head + '\n'.join(gram)
    grammar = nltk.CFG.fromstring(grammar)

    # Collect every sentence the grammar licenses at depth <= 5.
    poem = []
    for sentence2 in generate(grammar, depth=5):
        poem.append(' '.join(sentence2))

    # `choice(..., size=10)` — presumably numpy.random.choice; verify import.
    out =  "\n".join(choice(poem, size=10))
    print(out)
开发者ID:lbillingham,项目名称:py-edinburgh-dojos,代码行数:45,代码来源:ryhmes.py


示例10: print

import nltk
from nltk.parse import generate
from nltk.grammar import Nonterminal


# Toy cricket-question grammar: expands "who/which [team] player has the
# most runs" phrasings from the 'root' nonterminal.
cfg = nltk.CFG.fromstring("""
root -> who_player has the most runs
who_player -> who
who_player -> which player
who_player -> which team player
who -> 'who'
which -> 'which'
player -> 'player'
team -> 'indian' | 'australian' | 'england' | 'sri' 'lankan'
has -> 'has'
the -> 'the'
this -> 'this'
most -> 'most'
runs -> 'runs'
""")

# Enumerate (1-based) up to 100 sentences generated from 'root'.
print(list((n,sent) for n, sent in enumerate(generate.generate(cfg, n=100, start=Nonterminal('root')), 1)))

# Sanity-check that the same grammar also *parses* two sample questions.
result1 = nltk.ChartParser(cfg).parse('which england player has the most runs'.split())
result2 = nltk.ChartParser(cfg).parse(['which', 'sri', 'lankan', 'player', 'has', 'the', 'most',  'runs'])
print(list(result1))
print(list(result2))

开发者ID:aadiuppal,项目名称:open-cricket,代码行数:27,代码来源:learning.py


示例11: load

from nltk.parse.generate import generate
from nltk import CFG
from nltk.data import load

# For each grammar file, load it and print every sentence it can generate
# (derivations limited to depth 6, capped at 1,000,000 sentences).
grammar_files = ['grammar_2.cfg']
for grammar_file in grammar_files:
    loaded_grammar = load('file:' + grammar_file)
    for tokens in generate(loaded_grammar, depth=6, n=1000000):
        print(' '.join(tokens))
开发者ID:johnjosephmorgan,项目名称:yaounde,代码行数:8,代码来源:generate.py


示例12: output


#.........这里部分代码省略.........
            # Create the grammar
            #P:prepositions, DET:articles, adverbs
            DET = ["'the'","'a'","'some'"]
            # P = ["'in'","'at'","'since'","'for'","'to'","'past'","'to'""'by'","'in'","'at'","'on'","'under'","'below'","'over'","'above'","'into'","'from'","'of'","'on'","'at'"]
            VB = ["'talks'","'does'","'has'","'cries'", "'fights'", "'traps'", "'bakes'", "'fondles'", "'cooks'", "'sees'", "'calls'", "'smells'", "'tastes'", "'hears'"]
            
            
            assignments = pos_tag(tokens) # tagset='universal' for ADJ, NOUN, etc.
            
            # pos_tags = []
            pos_words = {}
            pos_words['DET'] = DET
            #pos_words['P'] = P
            pos_words['VB'] = VB
            
            for tuple in assignments:
                word = tuple[0]
                pos = tuple[1]
                if pos in pos_words:
                    pos_words[pos].append("\'" + word + "\'")
                else:
                    pos_words[pos] = []
                    pos_words[pos].append("\'" + word + "\'")
                # pos_tags.append(pos)

            #grammar = """
            #S -> NP VP
            #PP -> P NP
            #NP -> Det N
            #VP -> V Det N | V Det N PP
            
            #"""
            
            grammar = """
            S -> NP VP
            NP -> Det N
            VP -> V Det N
            """
            #Det -> 'DT'
            # N -> 'NN'
            # V -> 'VBZ'
            # P -> 'PP'
            
            
            # adverb is RB
            
            if 'DET' in pos_words:
                grammar += 'Det ->' + ' | '.join(pos_words['DET']) + '\n'
                
            if 'P' in pos_words:
                grammar += 'P ->' + ' | '.join(pos_words['P']) + '\n'
                
            if 'NN' in pos_words:
                grammar += 'N ->' + ' | '.join(pos_words['NN']) + '\n'
            #change to VB for nltk
            if 'VB' in pos_words:
                grammar += 'V ->' + ' | '.join(pos_words['VB']) + '\n'
            
            
            #if 'JJ' in pos_words:
            #    grammar += 'A ->' + ' | '.join(pos_words['JJ']) + '\n'
                
            simple_grammar = CFG.fromstring(grammar)
            #  simple_grammar.start()
            # simple_grammar.productions()
            
            sentences = []
            sentence_validity = []
         
            for sentence in generate(simple_grammar, depth=4):
                sentences.append(' '.join(sentence))
            
            
            sentence_validity = get_validity(sentences)
            
            #get_validity(sentences)
            
            # parser = nltk.ChartParser(simple_grammar)
            # tree = parser.parse(pos_tags)
            
            story = ""
            for i in range(0, 10):
                tuple = sentence_validity[i]
                string = tuple[1]
                start_letter = string[0].upper()
                story += start_letter
                story += string[1:]
                story += ". "
            
            return render(request, 'makestory/output.html',
                {
                'imageURL_output': imageURL,
                'story_output': story,
                'grammar_test_output': simple_grammar,
                'sentences_test_output': sentences,
                }
            )
        else:
            return fail(request)
    return fail(request)
开发者ID:cts5ws,项目名称:hackuva2016,代码行数:101,代码来源:views.py


示例13: choose_line

def choose_line(some_lines):#5
    """Pick one random line and lowercase it."""
    # `a_random` is presumably the `random` module under an alias — confirm.
    # The trailing #5/#7 comments count syllables (haiku layout); keep them.
    return a_random.choice(#7
                    some_lines).lower() #5

############################################

############################################
# NOTE(review): this script is itself laid out as a haiku — the trailing
# #5/#7 comments count syllables per line. Do not reflow or reformat.
choose = choose_line #5

g = G.fromstring(#7
                    this_is_the_grammar) #5
############################################

############################################
# Fill `pentas` with every five-syllable line the grammar generates.
while not len(pentas):#5
    for poem in generate(g, #7
                           start=N('five')): #5
############################################

############################################
      pentas.append(#5
                    with_blank_spaces.join(poem))#7

fives = pentas #5
############################################

############################################
# Draw two (possibly identical) lines for the poem's frame.
third = choose(fives) #5
first = choose(fives) #7

def display_the(poem):#5
############################################
开发者ID:NatalieBlack,项目名称:haiku_haiku,代码行数:32,代码来源:generate.py


示例14: xrange

    [1. if i == b else 0. for i in xrange(len(code_for))])

# list of codes of symbols to predict
to_predict_codes = [onehot(code_for[s]) for s in to_predict]


# function to test if a symbol code is in list to predict
# function to test if a symbol code is in list to predict
def in_predict_codes(code):
    """Return True iff *code* equals (elementwise) any code in to_predict_codes."""
    # FIX: iterate the list directly instead of indexing with xrange —
    # xrange is Python-2-only and the index itself was never needed.
    for predict_code in to_predict_codes:
        if (code == predict_code).all():
            return True
    return False


# sample_strings = all strings from grammar of depth at most sample_depth
sample_strings = list(generate(grammar, depth=sample_depth))

# report #, min length and max length for strings in sample_strings
print("number of sample strings = {}".format(len(sample_strings)))
sample_lengths = [len(s) for s in sample_strings]
print("min length = {}, max length = {}".format(min(sample_lengths),
                                                max(sample_lengths)))

# sanity check: report one random string from sample_strings
# NOTE(review): Python-2 `print` statement below, while the prints above
# use the Python-3 call form — this file mixes the two dialects.
print "random sample string = {}".format(random.choice(sample_strings))

#################################

model = VanillaModel(len(code_for), READ_SIZE, len(code_for))
try:
    model.cuda()
开发者ID:simonjmendelsohn,项目名称:StackNN,代码行数:31,代码来源:cfg.py


示例15: print

from nltk.parse.generate import generate #, demo_grammar
from nltk import CFG


# Tiny English fragment: the/a man|park|dog, slept / saw NP / walked PP.
demo_grammar = """
  S -> NP VP
  NP -> Det N
  PP -> P NP
  VP -> 'slept' | 'saw' NP | 'walked' PP
  Det -> 'the' | 'a'
  N -> 'man' | 'park' | 'dog'
  P -> 'in' | 'with'
"""
grammar = CFG.fromstring(demo_grammar)
print(grammar)


# Print the first 12 sentences the grammar can generate.
for sentence in generate(grammar, n=12):
    print(' '.join(sentence))

'''
Notes: 
Need to symbolize the grammar
Have the machine process the language
Need to integrate with Markov chain - file 'agiliq-markov.py'
'''
# Print every sentence up to derivation depth 4.
for sentence in generate(grammar, depth=4):
    print(' '.join(sentence))
    
开发者ID:johnsonbui,项目名称:Hermes-Python-Proto,代码行数:29,代码来源:grammar_test.py


示例16: eliminate

# Filter each sentence and return them all.
# Filter each sentence and return them all.
def eliminate(sentence):
    """Return the project-filtered form of the first sentence in *sentence*.

    Returns None when tokenization yields no sentences.

    NOTE(review): despite the comment above, only the *first* sentence is
    processed — the original loop returned on its first iteration. That
    behavior is preserved; confirm it is intended.
    """
    sents = nltk.sent_tokenize(sentence)
    if not sents:
        return None
    # FIX: the original bound the result to a local named `str`, shadowing
    # the builtin. `filter` here is the project's own cleaner function
    # (defined elsewhere), not the builtin.
    return filter(sents[0])

#Here input is the chosen option on UI.
#Given IDs to each question as per NCERT Book,input will be given that chosen value.
# NOTE(review): `input` shadows the builtin of the same name.
input=26
# Generate variations of a particular question based on the input and its corresponding grammar.
if input==2:
    g=CFG.fromstring(g1)
    g2=CFG.fromstring(g2)  # NOTE(review): rebinds the grammar *string* g2 to its compiled CFG
    rd_parser=nltk.RecursiveDescentParser(g)
    # Walk both grammars' generated sentences in lockstep (up to 100 each).
    for sent,sent2 in zip(generate(g2,n=100),generate(g,n=100)):
        newsent1=' '.join(sent)
        newsent2=' '.join(sent2)
        ans1=eliminate(newsent1)
        ans2=eliminate(newsent2)
        # Only emit pairs where both sentences survived filtering.
        if(ans1 == None or ans2 == None):
            pass
        else:
            print(ans1)
            print(ans2)
            print("Determine the length and breadth")
            print("\n")
elif input==4:
    g=CFG.fromstring(g3)
    g2=CFG.fromstring(g4)
    rd_parser=nltk.RecursiveDescentParser(g)
开发者ID:Vanrao,项目名称:VariationGenWordProbs,代码行数:31,代码来源:MainCode.py


示例17: generate

            
            if 'NN' in pos_words:
                grammar += 'N ->' + ' | '.join(pos_words['NN']) + '\n'
            
            if 'VB' in pos_words:
                grammar += 'V ->' + ' | '.join(pos_words['VB']) + '\n'
                
            if 'JJ' in pos_words:
                grammar += 'A ->' + ' | '.join(pos_words['JJ']) + '\n'
                
            simple_grammar = CFG.fromstring(grammar)
            #simple_grammar.start()
            simple_grammar.productions()
            
            sentences = []
            for sentence in generate(simple_grammar, n=10):
                sentences.append(' '.join(sentence))
            
            # parser = nltk.ChartParser(simple_grammar)
            # tree = parser.parse(pos_tags)
            


            caption = 'this is a caption'
            story = 'this is the story'
            
            return render(request, 'makestory/output.html',
                {
                'nouns_output': nouns,
                'verbs_output': verbs,
                'adjectives_output': adjectives,
开发者ID:cts5ws,项目名称:hackuva2016,代码行数:30,代码来源:.~c9_invoke_ifZOQs.py


示例18: generate_syllables

 def generate_syllables(self):
     """Return every syllable the grammar licenses for these phonemes.

     NLTK's generator needs space-separated tokens, so the spaces it
     introduces are stripped back out of each '/'-joined result.
     """
     syllables = []
     for tokens in generate(self.grammar, depth=4):
         joined = '/'.join(tokens)
         syllables.append(joined.replace(' ', ''))
     return syllables
开发者ID:mouse-reeve,项目名称:langmaker,代码行数:5,代码来源:syllable.py


示例19: PunktSentenceTokenizer

from contractions import contractions


# Sentence splitter for the raw corpus paragraphs.
sent_tokenizer = PunktSentenceTokenizer()

# "<source of text>" is a placeholder path — point it at a real corpus file.
with open("<source of text>", "r") as f:
    text = f.read()

# Expand contractions (e.g. "don't" -> "do not") before parsing.
for k, v in contractions.items():
    text = text.replace(k, v)

sents = []
for paragraph in text.split('\n'):
    sents += sent_tokenizer.tokenize(paragraph)

parser = Parser()

# Harvest grammar productions from parses of the first 25 sentences.
productions = []
for sent in sents[:25]:
    try:
        tree = parser.parse(sent)
        productions += tree.productions()
    except:
        # NOTE(review): bare except silently drops unparsable sentences —
        # and every other error; consider narrowing the exception type.
        pass

# Induce a probabilistic CFG from the harvested productions...
S = Nonterminal('S')
grammar = induce_pcfg(S, productions)

# ...and print generated sentences (Python-2 print statement below).
for sentence in generate(grammar, depth=5):
    print " ".join(sentence) + "\n"
开发者ID:rdcolema,项目名称:nltk_sentence_generator,代码行数:30,代码来源:grammar.py


示例20: generate

# Feature grammar mapping a problem category (feature G) to its
# follow-up question text.
g3 = """
S -> S1[G=?n] 
S1[G='money'] -> 'How many notes of each denomination person has?'
S1[G='shape'] -> 'What are its length and breadth?'
S1[G='int'] -> 'What are the two numbers?'
S1[G='age'] -> 'What are their present ages?'
S1[G='class'] -> 'What is the total strength?'

"""
# Accumulators for the generated question parts.
first=[]
sec=[]
third=[]

# Keep only generated sentences the corresponding feature parser accepts.
grammar1 = nltk.grammar.FeatureGrammar.fromstring("""% start S"""+"\n"+gramstring)
parser1 = nltk.FeatureChartParser(grammar1)
for sentence1 in generate(grammar1):
    if(parser1.parse_one(sentence1)): 
        string1=' '.join(sentence1)
        first.append(string1)
    #print(l)


grammar2 = nltk.grammar.FeatureGrammar.fromstring("""% start S"""+"\n"+g2)
parser2 = nltk.FeatureChartParser(grammar2)
for sentence2 in generate(grammar2):
    if(parser2.parse_one(sentence2)): 
        string2=' '.join(sentence2)
        # De-duplicate while preserving first-seen order.
        if string2 not in sec:
            sec.append(string2)
        else:
            pass
开发者ID:Vanrao,项目名称:VariationGenWordProbs,代码行数:31,代码来源:FinalCode.py



注:本文中的nltk.parse.generate.generate函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python probability.ConditionalFreqDist类代码示例发布时间:2022-05-27
下一篇:
Python dependencygraph.DependencyGraph类代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2213 极客世界.|Sitemap