
Python words.words Function Code Examples


This article compiles typical usage examples of the nltk.corpus.words.words function in Python. If you have been wondering what the words function does, how to call it, or what it looks like in real code, the hand-picked examples below may help.



The following presents 20 code examples of the words function, sorted by popularity by default.
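
Before the examples, here is a minimal sketch of the function itself. It is not taken from any of the projects below, and it assumes the NLTK "words" corpus has already been downloaded (for example via nltk.download('words')): words.words() simply returns a list of English words, which is why most of the examples below build a set from it before doing membership tests.

from nltk.corpus import words

# words.words() returns a plain list of English words;
# words.words('en-basic') returns a much smaller basic-English list
# (used in Examples 5 and 9 below).
english_vocab = set(w.lower() for w in words.words())

print(len(english_vocab))            # size of the word list
print('language' in english_vocab)   # expected: True
print('xyzzyq' in english_vocab)     # expected: False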

Example 1: add_sample

 def add_sample(self, sample):
     if not isinstance(sample, str):
         raise TypeError
     # Calling add_sample should replace existing sample.
     # To avoid appending new values onto existing lists:
     self.sample = sample
     self.misspelled_words = []
     self.tokenized_sample = []
     self.tagged_sample = {}
     sample = sample.replace('\n', " ")
     sample = sample.rstrip(" ")
     for char in punctuation.replace("'", ""):
         sample = sample.replace(char, "")
     tokens = word_tokenize(sample)
     for word in tokens:
         if word.lower() in words.words():
             self.tokenized_sample.append(word)
         elif word.capitalize() in names.words():
             continue
         elif "'" in word:
             self.tokenized_sample.append(word)
         elif LEMMATIZER.lemmatize(word.lower()) not in words.words():
             if STEMMER.stem(word.lower()) not in words.words():
                 self.misspelled_words.append(word)
         else:
             self.tokenized_sample.append(word)
     self.tagged_sample = pos_tag(tokens)
Developer ID: brythonick, Project: pyesol, Lines of code: 27, Source file: pyesol.py


Example 2: divide

def divide(s):
	first = ''
	for i in range(len(str(s))):
		first += s[i]
		print(first)
		if first in words.words() and s[i + 1:] in words.words():
			return ' '.join([first, s[i + 1:]])
	return False
Developer ID: agatanyc, Project: RC, Lines of code: 8, Source file: divide_string.py


Example 3: raw_files_to_labeled_features

def raw_files_to_labeled_features(raw_files, label_file):
    # Initialize spark
    conf = SparkConf().setAppName("SpamFilter").setMaster("local[*]")
    sc = SparkContext(conf=conf)

    # Get the set of words that we will be accepting as valid features
    valid_words = set(w.lower() for w in words.words())

    # Load training data and convert to our desired format
    raw_files = sc.wholeTextFiles(raw_files)

    # Extract a document of filtered words from each text file
    documents = raw_files.map(lambda x: (x[0], extract_words(x[1], valid_words)))

    # Calculate TF-IDF values for each document
    tfidf = calculate_tfidf(documents)

    # Load labels
    labels = sc.parallelize(load_labels(label_file)).map(lambda x: x[0])

    # Append indexes to features and labels
    indexed_labels = labels.zipWithIndex().map(lambda x: (x[1],x[0]))
    indexed_features = tfidf.zipWithIndex().map(lambda x: (x[1],x[0]))

    # Join labels and features into tuples and return
    return indexed_labels.join(indexed_features).map(lambda x: x[1]).collect()
Developer ID: agharbin, Project: spam-filter-ml, Lines of code: 26, Source file: feature_extract.py


Example 4: get_vocab

def get_vocab():
    word_list = words.words()
    lowercased = [t.lower() for t in word_list]
    STEMMER = PorterStemmer()
    stemmed = [STEMMER.stem(w) for w in lowercased]
    vocab = list(set(stemmed))
    return vocab
Developer ID: nhu2000, Project: wiki-search, Lines of code: 7, Source file: kmeans_model.py


Example 5: _english_wordlist

 def _english_wordlist(self):
     try:
         wl = self._en_wordlist
     except AttributeError:
         from nltk.corpus import words
         wl = self._en_wordlist = set(words.words('en-basic'))
     return wl
Developer ID: digging-into-data-berkeley, Project: cheshire3, Lines of code: 7, Source file: extractor.py


Example 6: get_english_vocab

def get_english_vocab(lemmatize=False):
    vocab = (w.lower() for w in words.words())

    if lemmatize:
        stemmer = PorterStemmer()
        vocab = (stemmer.stem(w) for w in vocab)
    return set(vocab)
Developer ID: ned2, Project: okdata, Lines of code: 7, Source file: okreader.py


Example 7: __init__

 def __init__(self, dict_path = '/etc/dictionaries-common/words'):
      f = open(dict_path)
      
      # We use two dictionaries for better coverage
      d1 = set([w.lower() for w in f.read().split()])
      d2 = set([w.lower() for w in words.words()])
      
      self.words = set(d1.union(d2))
Developer ID: okkhoy, Project: gabe-and-joh, Lines of code: 8, Source file: dictionary.py


Example 8: unknown

def unknown(list):

    k = re.findall(r'(?<= )+[a-z]+\b', textString)       # Removes punctuation and capitalized words
    print(textString)
    for w in k:                                          # Gets all the words
        if(w not in words.words()):                      # If the website's words aren't in the NLTK word dictionary:
            unW.append(w)                                # Adds the word to the unknown list
    print (unW)                                          # Prints words that are not in the NLTK word dictionary
Developer ID: Plonski, Project: Information-Retrieval, Lines of code: 8, Source file: unknownWords.py


Example 9: __init__

 def __init__(self):
     self.stopwords = stopwords.words('english')
     self.uscities = set([w.lower() for w in gazetteers.words('uscities.txt')])
     self.usstates = set([w.lower() for w in gazetteers.words('usstates.txt')])
     self.countries = set([w.lower() for w in gazetteers.words('countries.txt')])
     self.basicwords = set(words.words('en-basic'))
     self.paragraph_tokens = []
     self.texts = []
Developer ID: yuedong111, Project: topical-spiders, Lines of code: 8, Source file: topic_dictionary.py


Example 10: extractingFromFolders

def extractingFromFolders():
    folder2 = os.path.expanduser('~\\My Documents\\Tara\\Ongoing\\CharacterCorpus\\Reference')
    fileresult = os.path.expanduser('~\\My Documents\\Tara\\Ongoing\\CharacterCorpus\\results.txt')
    refer = PlaintextCorpusReader(folder2, 'harrygrepster.txt')
    grepster = refer.words()
    results = open(fileresult, 'a')
    completeWords = wordlist.words()
    stoppers = stopwords.words()
    return grepster, results, completeWords, stoppers
Developer ID: taratemima, Project: Some-Python-Work, Lines of code: 9, Source file: grepsterTest.py


Example 11: __init__

    def __init__(self,
                 corpora_list=['all_plaintext.txt', 'big.txt'],
                 parse_args=(True, True, True, True, True)):

        #Set the parsing arguments
        self.remove_stopwords = parse_args[0]
        self.tag_numeric = parse_args[1]
        self.correct_spelling = parse_args[2]
        self.kill_nonwords = parse_args[3]
        self.stem = parse_args[4]

        #Alphabet
        self.alphabet = 'abcdefghijklmnopqrstuvwxyz'

        #Punctuation
        self.punc_dict = {ord(c): None for c in string.punctuation}

        #Reserved tags
        self.reserved_tags = ['numeric_type_hex',
                              'numeric_type_binary',
                              'numeric_type_octal',
                              'numeric_type_float',
                              'numeric_type_int',
                              'numeric_type_complex',
                              'numeric_type_roman',
                              'math_type']

        #Update the set of nltk words with the additional corpora
        self.all_words = set(words.words())
        self.all_words.update('a')
        self.all_words.update('i')
        self.all_words.update(self.reserved_tags)
        self.max_word_length = 20

        #Set up the stopwords, remove 'a' due to math issues
        self.stops = set(stopwords.words("english"))
        self.stops.remove('a')
        self.stops.remove('no')

        #Set up the stemmer
        self.st = SnowballStemmer('english')

        #Train the spelling corrector using all corpora
        train_text = ''
        for cfile in corpora_list:
            words_in_file = file(cfile).read()
            self.all_words.update(self.get_all_words(file(cfile).read()))
            train_text = train_text + words_in_file

        #Remove single character terms
        wordlist = list(self.all_words)
        wordlist = [i for i in wordlist if len(i) > 1]
        self.all_words = set(wordlist)
        self.all_words.update('a')
        self.all_words.update('i')

        self.NWORDS = self.train(self.get_all_words(train_text))
Developer ID: openstax, Project: research-eGrader, Lines of code: 57, Source file: WordUtility.py


Example 12: tokenize4

def tokenize4(text):
	wordnet_lemmatizer = WordNetLemmatizer()
	tokens             = word_tokenize(text)
	wordset            = set(words.words())
	tokens             = [wordnet_lemmatizer.lemmatize(token, NOUN) for token in tokens]
	tokens             = [wordnet_lemmatizer.lemmatize(token, VERB) for token in tokens]
	tokens             = [wordnet_lemmatizer.lemmatize(token, ADJ) for token in tokens]
	tokens             = [token for token in tokens if token in wordset]
	return tokens
Developer ID: SJinping, Project: WordProc, Lines of code: 9, Source file: wordProcBase.py


Example 13: unknown

def unknown(url):
    # get the HTML, as a string
    html = str(bs(urllib.urlopen(url).read()))
    # find all substrings
    substrings = set(re.findall(r'[a-z]+', html))
    # specify the wordlist
    wordlist = words.words()
    # return the words not in the wordlist
    return [word for word in substrings if word not in wordlist]
Developer ID: jonathanmonreal, Project: nltk-examples, Lines of code: 9, Source file: c3q21.py


Example 14: textParse

def textParse(file):
    processedText = ''
    with open(file, 'r') as f:
        lines = f.read().splitlines()
        for line in lines:
            wordsInLine = line.split(' ')
            for word in wordsInLine:
                # print '*'+word+'*'
                if word.lower() in words.words():
                    processedText += word + ' '
    return processedText
Developer ID: WodlBodl, Project: visionAssistant, Lines of code: 11, Source file: textProcess.py


Example 15: getReadabilityScore

def getReadabilityScore(tweet):
    w1 = tweet.split(" ")
    ASL1 = len(w1)
    AOV1 = 0
    l = 0
    for w in w1:
        l+=len(w)
        if(w not in words.words()):
            AOV1+=1
    ASW1 = l/float(ASL1)
    S1 = 206.835 - (1.015*ASL1) - (84.6*ASW1)- (10.5*AOV1)
    return S1
Developer ID: hackuser15, Project: 239AS, Lines of code: 12, Source file: Functions.py


Example 16: anagrams_for

def anagrams_for(word):
  # Print the real-word anagrams of `word` (assumes `import itertools` and
  # `from nltk.corpus import words` at module level, as in the source file):
  # 1. generate permutations of the word's letters (excluding the word itself)
  # 2. keep only the permutations that appear in words.words()
  wordlist = set(words.words())
  for letters in set(itertools.permutations(word, len(word))):
    candidate = ''.join(letters)
    if candidate != word and candidate in wordlist:
      print(candidate)
Developer ID: AnnaNican, Project: Algorithms, Lines of code: 13, Source file: anagrams_for.py


Example 17: precomputeFromNLTK

def precomputeFromNLTK():
    """
    precompute with nltk's corpus as wordbase
    """
    language = set()
    print(len(words.words()))
    for word in words.words():
        word = word.lower()
        sortW = "".join(char for char in sorted(word))
        if sortW[0] >= "a" and sortW[0] <= "z":
            word = word + ":" + sortW
            language.add(word)
    print("Loaded %d words from NLTK wordnet" % (len(language)))
    buckets = [set() for x in xrange(25)]
    for word in language:
        buckets[len(word) / 2].add(word)
    count = 0
    for word in language:
        if count % 1000 == 0:
            print("Done for %d words" % count)
        count += 1
        sortedW = word.split(":")[1]
        if sortedW not in nltkHashMap:
            nltkHashMap[sortedW] = set()
            for word2 in buckets[len(sortedW)]:
                sortedW2 = word2.split(":")[1]
                if sortedW == sortedW2:
                    nltkHashMap[sortedW].add(word2.split(":")[0])
    file = open(nltkAnagramsFile, "w")
    file.truncate()
    count = 0
    for anagrams, listOfAnagrams in nltkHashMap.items():
        if count % 1000 == 0:
            print("%d anagram lists written" % count)
            file.flush()
        count += 1
        file.write("%s:%s\n" % (anagrams, listOfAnagrams))
    file.close()
    print("Precomputation with NLTK done")
Developer ID: harunrashidanver, Project: TextProcessor, Lines of code: 39, Source file: TextProcessor.py


Example 18: exercise_unusual_words

def exercise_unusual_words():
    text = gutenberg.words("austen-sense.txt")

    # Extract the vocabulary of the text: alphabetic tokens only, lowercased
    text_vocab = set(w.lower() for w in text if w.isalpha())

    # Extract the vocabulary of the English word list
    english_vocab = set(w.lower() for w in words.words())

    # Find the unusual (possibly misspelled) words in the text
    unusual_vocab = text_vocab.difference(english_vocab)

    print sorted(unusual_vocab)
Developer ID: BurnellLiu, Project: LiuProject, Lines of code: 13, Source file: chapter_02.py


Example 19: unknown

def unknown(url):
	"""Takes a URL as its argument and returns a list of unknown words that occur on that webpage."""
	
	# gets the text of the page
	html = request.urlopen(url).read().decode('utf8')
	raw = BeautifulSoup(html).get_text()
	junk = set(words.words())
	# finds the lower case words by searching for a word boundary plus one or more lower case letters
	lower_case_words = re.findall(r'\b[a-z]+', raw)

	# searches through the list of lower case words and gets rid of those not in the words corpus.
	unknowns = [word for word in lower_case_words if word not in junk]
	print(unknowns)
Developer ID: shubh29, Project: Exercise-21-nltk, Lines of code: 13, Source file: UnknownWords.py


Example 20: __init__

    def __init__(self):

        self.dict_anagrams = {}

        for t in words.words():
            word = str.lower(str(t))
            word = word.replace('-',' ')

            alpha_count = get_alphaCount(word)

            if alpha_count in self.dict_anagrams:
                self.dict_anagrams[alpha_count].add(word)
            else:
                self.dict_anagrams[alpha_count] = {word}
Developer ID: shiladityasen, Project: ICS_SearchEngine, Lines of code: 14, Source file: utils.py



Note: The nltk.corpus.words.words function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their developers; copyright in the code remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce this article without permission.

