
Python gutenberg.sents Function Code Examples


This article collects typical usage examples of the Python function nltk.corpus.gutenberg.sents. If you have been wondering what exactly sents does, how to call it, or what real-world usage looks like, the hand-picked code examples below may help.



A total of 20 code examples of the sents function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
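Before the examples, here is a minimal, self-contained sketch of the basic call. It assumes the corpus data has been fetched with nltk.download; the printed values are illustrative:

import nltk
from nltk.corpus import gutenberg

# One-time setup (a no-op if already installed): the Gutenberg corpus data
# and the punkt sentence tokenizer that sents() relies on.
nltk.download('gutenberg', quiet=True)
nltk.download('punkt', quiet=True)

# sents() returns the text as a sequence of sentences,
# each sentence being a list of word tokens.
sentences = gutenberg.sents('austen-emma.txt')
print(len(sentences))   # number of sentences in the file
print(sentences[0])     # first tokenized sentence of the book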

Example 1: exercise_gutenberg

import nltk
from nltk.corpus import gutenberg


def exercise_gutenberg():
    # Print the file list of the Project Gutenberg corpus
    print(gutenberg.fileids())

    # Pick one text: Jane Austen's "Emma"
    emma = gutenberg.words("austen-emma.txt")

    # Check the length of the book
    print(len(emma))

    # Load the text
    emma_text = nltk.Text(emma)
    emma_text.concordance("surprize")

    for file_id in gutenberg.fileids():
        chars_list = gutenberg.raw(file_id)
        words_list = gutenberg.words(file_id)
        sents_list = gutenberg.sents(file_id)

        # Total number of characters in the file
        num_chars = len(chars_list)
        # Total number of words in the file
        num_words = len(words_list)
        # Total number of sentences in the file
        num_sents = len(sents_list)
        # Number of distinct words in the file
        num_vocab = len(set(w.lower() for w in words_list))
        # Print average word length, average sentence length,
        # average occurrences per word, and the file name
        print(num_chars // num_words, num_words // num_sents,
              num_words // num_vocab, file_id)
Developer: BurnellLiu, Project: LiuProject, Lines: 29, Source: chapter_02.py


Example 2: main

from nltk.corpus import gutenberg

# TEXTS, addSentence, getCouplet and pretty are defined elsewhere in the project.

def main(num_couplets, num_syllables, rhyme_depth):
  for text in TEXTS:
    for sentence in gutenberg.sents(text):
      addSentence(sentence, rhyme_depth)

  for couplet_number in range(0, num_couplets):
    # Get a randomly selected couplet
    attempts = 0
    while True:
      couplet = getCouplet(num_syllables)
      if couplet is not None: break
      # Prevent an infinite loop if parameters are off
      attempts += 1
      if attempts == 1000: return
    couplet = [pretty(line) for line in couplet]

    # A little hack for adjusting punctuation and capitalization
    couplet[0] = couplet[0][0].upper() + couplet[0][1:]
    if couplet[0][-1] == '.' or couplet[0][-1] == ',':
      couplet[0] = couplet[0][:-1] + ','
      char = couplet[1][0].lower() if couplet[1][:2] != 'I ' else 'I'
      couplet[1] = char + couplet[1][1:]
    else:
      couplet[1] = couplet[1][0].upper() + couplet[1][1:]

    # Dump to stdout
    print(couplet[0])
    print(couplet[1])
Developer: amshenoy, Project: permutation-poetry, Lines: 28, Source: poet.py


Example 3: get_book_sents

from nltk.corpus import gutenberg


def get_book_sents(word_list):
    """Searches Jane Austen's 'Emma' for the words in the word list.
    The sentences are modified to highlight the found words by changing them to uppercase.
    Then the sentence number (in order from the book) is appended to the front
    of the sentence string.
    Returns a list of strings (sentence # + \s + sentence string).
    """
    book = 'austen-emma.txt'
    book_sents = gutenberg.sents(book)
    sent_nums = set()
    sents_to_return = []
    s_count = 0
    for s in book_sents:
        s_count += 1
        s_str = " ".join(s)
        for w in word_list:
            if ' '+w+' ' in s_str.lower():
                if s_count not in sent_nums:
                    sent_nums.add(s_count)
                    s_str = s_str.replace(' '+w+' ', ' '+w.upper()+' ')
                    s_str = s_str.replace(' '+w.title()+' ', ' '+w.upper()+' ')
                    sents_to_return.append(str(s_count)+' '+s_str)
                else:
                    s_str = s_str.replace(' '+w+' ', ' '+w.upper()+' ')
                    s_str = s_str.replace(' '+w.title()+' ', ' '+w.upper()+' ')
                    sents_to_return[-1] = str(s_count)+' '+s_str
    return sents_to_return
Developer: imladenoff, Project: lexploration, Lines: 27, Source: search.py


Example 4: tagged_sentences

import nltk
from nltk.corpus import gutenberg


def tagged_sentences(book):
	'''
	Generator yielding one POS-tagged sentence at a time, filtering out
	tokens tagged ':' or '-NONE-', which are anomalies in the words.
	'''

	for sentence in gutenberg.sents(book):
		yield [x for x in nltk.pos_tag(sentence) if x[1] not in (':', '-NONE-')]
Developer: snakecharmer1024, Project: poetry, Lines: 8, Source: luau.py


Example 5: gutenberg

def gutenberg():
    from nltk.corpus import gutenberg
    for t in gutenberg.fileids():
        num_chars = len(gutenberg.raw(t))
        num_words = len(gutenberg.words(t))
        num_sents = len(gutenberg.sents(t))
        num_vocab = len(set(w.lower() for w in gutenberg.words(t)))
        print(int(num_chars/num_words), int(num_words/num_sents), int(num_words/num_vocab), t)
Developer: kwdhd, Project: nlp, Lines: 8, Source: main.py


Example 6: plot_sentiment_flow

import matplotlib.pyplot as plt
from nltk.corpus import gutenberg

# partial_sentiment is defined elsewhere in the project.

def plot_sentiment_flow(title):
    sents = gutenberg.sents(title)
    positive_flow = [partial_sentiment(x) for x in sents]
    negative_flow = [partial_sentiment(x, positive=False) for x in sents]
    plt.plot(range(len(sents)), positive_flow, label='Positive')
    plt.plot(range(len(sents)), negative_flow, label='Negative')
    plt.ylabel('Sentiment Score')
    plt.xlabel(title)
    plt.show()
Developer: chandlerzuo, Project: chandlerzuo.github.io, Lines: 9, Source: SentimentAnalysis.py


Example 7: gutenberg

import nltk


def gutenberg():
    # Import inside the function so the module name is not shadowed
    # by this function's own name.
    from nltk.corpus import gutenberg

    emma = nltk.corpus.gutenberg.words('austen-emma.txt')
    print(len(emma))

    print(gutenberg.fileids())
    emma = gutenberg.words('austen-emma.txt')

    macbeth_sentences = gutenberg.sents('shakespeare-macbeth.txt')
    print(macbeth_sentences[1037])
    longest_len = max(len(s) for s in macbeth_sentences)
    print([s for s in macbeth_sentences if len(s) == longest_len])

    for fileid in gutenberg.fileids():
        num_chars = len(gutenberg.raw(fileid))
        num_words = len(gutenberg.words(fileid))
        num_sents = len(gutenberg.sents(fileid))
        num_vocab = len(set(w.lower() for w in gutenberg.words(fileid)))
        print(int(num_chars/num_words), int(num_words/num_sents), int(num_words/num_vocab), fileid)
Developer: AkiraKane, Project: Python, Lines: 19, Source: c02_text_corpora.py


Example 8: structure

from nltk.corpus import gutenberg


def structure():
    raw = gutenberg.raw("burgess-busterbrown.txt")
    print(raw[1:20])

    words = gutenberg.words("burgess-busterbrown.txt")
    print(words[1:20])

    sents = gutenberg.sents("burgess-busterbrown.txt")
    print(sents[1:20])
Developer: AkiraKane, Project: Python, Lines: 10, Source: c02_text_corpora.py


Example 9: page59

def page59():
    """Prints the longest sentence from Macbeth"""
    from nltk.corpus import gutenberg

    macbeth_sentences = gutenberg.sents("shakespeare-macbeth.txt")
    print("macbeth_sentences=", macbeth_sentences)
    print("macbeth_sentences[1037]=", macbeth_sentences[1037])
    longest_len = max(len(s) for s in macbeth_sentences)
    print("longest sentence=", [s for s in macbeth_sentences if len(s) == longest_len])
Developer: andreoliwa, Project: nlp-book, Lines: 10, Source: book_examples.py


Example 10: create_model_from_NLTK

from os.path import isfile

# create_model is defined elsewhere in the project.

def create_model_from_NLTK():
    filepath = "nltkcorpus.txt"
    if isfile(filepath):
        return create_model(filepath=filepath, save=False)
    else:
        from nltk.corpus import reuters, brown, gutenberg
        sents = reuters.sents() + brown.sents()
        for gsents in [gutenberg.sents(fid) for fid in gutenberg.fileids()]:
            sents += gsents

        return create_model(sentences=sents, savename=filepath)
Developer: ieaalto, Project: CCProject, Lines: 11, Source: semantics.py


Example 11: fun02

from nltk.corpus import gutenberg


def fun02():
    """fun02"""
    for fileid in gutenberg.fileids():
        num_chars = len(gutenberg.raw(fileid))
        num_words = len(gutenberg.words(fileid))
        num_sents = len(gutenberg.sents(fileid))
        num_vocab = len(set(w.lower() for w in gutenberg.words(fileid)))
        # average word length, average sentence length
        print(int(num_chars/num_words), int(num_words/num_sents), end=' ')
        # number of times each vocabulary item appears in the text
        print(int(num_words/num_vocab), fileid)
Developer: gree2, Project: hobby, Lines: 11, Source: ch02.py


Example 12: page57

def page57():
    """Statistics from the Gutenberg corpus"""
    from nltk.corpus import gutenberg

    for fileid in gutenberg.fileids():
        num_chars = len(gutenberg.raw(fileid))
        num_words = len(gutenberg.words(fileid))
        num_sents = len(gutenberg.sents(fileid))
        num_vocab = len(set(w.lower() for w in gutenberg.words(fileid)))
        print(int(num_chars / num_words), int(num_words / num_sents), end=' ')
        print(int(num_words / num_vocab), fileid)
Developer: andreoliwa, Project: nlp-book, Lines: 11, Source: book_examples.py


Example 13: for_print

from nltk.corpus import gutenberg


def for_print():
    '''
    Display three statistics for each text.
    :return:
    '''
    for fileid in gutenberg.fileids():
        num_chars = len(gutenberg.raw(fileid))
        num_words = len(gutenberg.words(fileid))
        num_sents = len(gutenberg.sents(fileid))
        num_vocab = len(set(w.lower() for w in gutenberg.words(fileid)))
        print(int(num_chars/num_words), int(num_words/num_sents), int(num_words/num_vocab), fileid)
Developer: Paul-Lin, Project: misc, Lines: 11, Source: toturial.py


Example 14: train

    # Method of a language-model class. FreqDist, ConditionalFreqDist and
    # bigrams come from nltk, gutenberg from nltk.corpus; wordRE is a
    # word-matching regex defined elsewhere in the project.
    def train(self):
        self.vocabulary = set()

        this_bigrams = []
        self.unigrams = FreqDist([])

        for fileid in gutenberg.fileids():
            for sentence in gutenberg.sents(fileid):
                words = ["<s>"] + [x.lower() for x in sentence if wordRE.search(x)] + ["</s>"]
                this_bigrams += bigrams(words)
                self.vocabulary.update(words)
                self.unigrams.update(words)
        self.bigrams = ConditionalFreqDist(this_bigrams)
        self.V = len(self.vocabulary)
Developer: slee17, Project: NLP, Lines: 14, Source: LanguageModel.py


Example 15: benchmark_sbd

import re

import numpy as np
from nltk.corpus import gutenberg

# split_text and get_prf are defined elsewhere in the project.

def benchmark_sbd():
    ps = []
    rs = []
    f1s = []
    c = 0
    for fileid in gutenberg.fileids():
        c += 1
        copy_sents_gold = gutenberg.sents(fileid)
        sents_gold = [s for s in copy_sents_gold]
        for sent_i in range(len(sents_gold)):
            new_sent = [w for w in sents_gold[sent_i] if w.isalpha()]
            sents_gold[sent_i] = new_sent
        text = gutenberg.raw(fileid)
        sents_obtained = split_text(text)
        copy_sents_obtained = sents_obtained.copy()
        for sent_i in range(len(sents_obtained)):
            new_sent = [w.group()
                        for w in re.finditer(r'\w+', sents_obtained[sent_i])
                        if w.group().isalpha()]
            sents_obtained[sent_i] = new_sent
        c_common = 0
        for sent in sents_obtained:
            if sent in sents_gold:
                c_common += 1
        p, r, f1 = get_prf(c_common, len(sents_obtained), len(sents_gold))
        print('\n\n', fileid)
        print('Precision: {:0.2f}, Recall: {:0.2f}, F1: {:0.2f}'.format(p, r, f1))
        ps.append(p)
        rs.append(r)
        f1s.append(f1)

    print('\n\nPrecision stats: {:0.3f} +- {:0.4f}'.format(np.mean(ps), np.std(ps)))
    print('Recall stats: {:0.3f} +- {:0.4f}'.format(np.mean(rs), np.std(rs)))
    print('F1 stats: {:0.3f} +- {:0.4f}'.format(np.mean(f1s), np.std(f1s)))
    print(len(f1s))

    good_ps = [p for p in ps if p >= 0.8]
    good_rs = [r for r in rs if r >= 0.8]
    good_f1s = [f1 for f1 in f1s if f1 >= 0.8]
    print('\nGood precision stats: {:0.3f} +- {:0.4f}'.format(np.mean(good_ps), np.std(good_ps)))
    print('Good recall stats: {:0.3f} +- {:0.4f}'.format(np.mean(good_rs), np.std(good_rs)))
    print('Good F1 stats: {:0.3f} +- {:0.4f}'.format(np.mean(good_f1s), np.std(good_f1s)))
    print(len(good_f1s))
Developer: artreven, Project: assessment_tools, Lines: 49, Source: readability.py


Example 16: tokenize_data

    # Method of an RNN language-model class; requires itertools, nltk and
    # numpy (np), plus instance attributes sentence_start_token,
    # sentence_end_token and unknown_token defined in __init__.
    def tokenize_data(self, n=-1):
        # Download dependent nltk resources if you haven't:
        # nltk.download('punkt')

        # Read the data and append SENTENCE_START and SENTENCE_END tokens
        print("Reading sentences from gutenberg corpus ...")
        from nltk.corpus import gutenberg
        tokenized_sentences = []
        for s in gutenberg.sents('austen-emma.txt'):
            tokenized_sentences.append([self.sentence_start_token] + s[1:-1] + [self.sentence_end_token])
        print("Parsed %d sentences." % len(tokenized_sentences))

        if n > 0:
            tokenized_sentences = tokenized_sentences[:n]

        # count the word frequencies
        word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
        print("Found %d unique word tokens." % len(word_freq.items()))

        self.vocabulary_size = int(len(word_freq.items()) * 0.95)

        # get the most common words, treat other words as unknown
        vocab = word_freq.most_common(self.vocabulary_size - 1)
        print("Using vocabulary size %d." % self.vocabulary_size)
        print("The least frequent word is '%s' and appeared %d times." %
              (vocab[-1][0], vocab[-1][1]))
        self.index_to_word = [x[0] for x in vocab]
        self.index_to_word.append(self.unknown_token)
        self.word_to_index = {w: i for i, w in enumerate(self.index_to_word)}

        # replace all words not in our vocabulary with the unknown token
        for i, sent in enumerate(tokenized_sentences):
            tokenized_sentences[i] = [w if w in self.word_to_index
                                      else self.unknown_token for w in sent]

        # create training data
        x_train = np.asarray([[self.word_to_index[w] for w in sent[:-1]]
                             for sent in tokenized_sentences])
        y_train = np.asarray([[self.word_to_index[w] for w in sent[1:]]
                             for sent in tokenized_sentences])

        print()
        print("Example sentence: '%s'" % tokenized_sentences[0])
        print("By word indexes: '%s'" %
              [self.word_to_index[w] for w in tokenized_sentences[0]])

        return (x_train, y_train)
Developer: yelu, Project: blog, Lines: 47, Source: rnn_lm.py


Example 17: get_gutenberg_data

	# Method of a plotting helper class; gutenberg comes from nltk.corpus,
	# and self.probs is an instance attribute initialized elsewhere.
	def get_gutenberg_data(self):
		count = {}
		self.len_list = []
		#my_fileids = ['austen-sense.txt', 'austen-emma.txt', 'austen-persuasion.txt']
		#my_fileids = ['chesterton-ball.txt', 'chesterton-ball.txt', 'chesterton-thursday.txt']
		my_fileids = ['shakespeare-caesar.txt', 'shakespeare-hamlet.txt', 'shakespeare-macbeth.txt']
		for fileids in my_fileids:
			for sent in gutenberg.sents(fileids):
				l = len(sent)
				#if l < 3:
				#	continue
				self.len_list.append(l)
				if l in count:
					count[l] += 1
				else:
					count[l] = 1
		total = len(self.len_list)
		for i in range(100):
			if i in count:
				self.probs.append(count[i] / total)
			else:
				self.probs.append(0)
Developer: cindyxinyiwang, Project: NLP, Lines: 22, Source: lan_plot.py


Example 18: get_poem

import re

from nltk.corpus import gutenberg

# `words` (a CMUdict-based pronunciation lookup), `one_syllable` and
# `verse` (regular expressions) are defined elsewhere in the project.

def get_poem():
    """
    This function should extract hexametric sentences from Gutenberg texts, but it doesn't.
    Either hexametric sentences are too rare, or the absence of basic function words from CMUdict
    results in problems with matching the whole sentence.
    """
    outtext = []
    for corpus in gutenberg.fileids():
        text = gutenberg.sents(corpus)
        for sentence in text:
            transcription = ""
            discard = False
            for word in sentence:
                if word.lower() in words:
                    transcription += words[word.lower()]
                elif re.match(one_syllable, word.lower()):
                    # consider this word a "small", unstressed word
                    transcription += "A0A"
                else:
                    discard = True
            if re.match(verse, transcription) and not discard:
                print(sentence, transcription)
                outtext.append(" ".join(sentence))
    return "\n".join(outtext)
Developer: ojahnn, Project: NaPoGenMo15-HEXEGEN, Lines: 24, Source: hexegen.py


Example 19: __init__

    # Methods of a tuple subclass holding (label, child) pairs.
    def __init__(self, iterable):
        assert len(iterable) == 2
        tuple.__init__(self, iterable)
        self.label = iterable[0]
        self.child = iterable[1]

    def __new__(cls, *args, **kwargs):
        assert len(args) == 1
        assert len(args[0]) == 2
        return tuple.__new__(cls, args[0])


if __name__ == '__main__':
    import nltk
    from nltk.corpus import gutenberg
    import parser  # the project's own parser module

    def get_deps(s):
        return parser.cp.parse_trees(s, transform=parser.to_deps)

    # FreqGraph and logic are defined elsewhere in the project.
    G = FreqGraph()
    alice_sents = gutenberg.sents(fileids='carroll-alice.txt')
    for sent in alice_sents:
        dep = get_deps(' '.join(sent))
        try:
            term = logic.Term(next(dep))
            G.clique(term)
        except Exception:
            pass
    #G.ingest(get_deps('Barack is president'))
Developer: spacenut, Project: cs585, Lines: 31, Source: graph.py


Example 20: print_longest

from nltk.corpus import gutenberg


def print_longest():
    macbeth_sentences = gutenberg.sents('shakespeare-macbeth.txt')
    # print(macbeth_sentences)
    # print(macbeth_sentences[1037])
    longest_len = max(len(s) for s in macbeth_sentences)
    print([s for s in macbeth_sentences if len(s) == longest_len])
Developer: Paul-Lin, Project: misc, Lines: 6, Source: toturial.py



Note: The nltk.corpus.gutenberg.sents examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.

