
Python metrics.f_measure Function Code Examples


This article collects typical usage examples of the Python function nltk.metrics.f_measure. If you have been wondering exactly how to use f_measure, or what calling it looks like in practice, the curated examples below should help.



The following 18 code examples of the f_measure function are shown, ordered by popularity by default.
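Before the examples, a quick orientation: in NLTK, precision, recall, and f_measure all take two sets, a reference set of correct item ids and a test set of observed item ids. f_measure(reference, test, alpha=0.5) returns the weighted harmonic mean 1/(alpha/P + (1-alpha)/R), and can return None when one of the sets is empty. A minimal sketch (the sets here are made up for illustration):

from nltk.metrics import precision, recall, f_measure

reference = {1, 2, 3, 4}    # item ids that truly belong to the class
test = {2, 3, 4, 5, 6}      # item ids a classifier assigned to the class

p = precision(reference, test)   # |ref & test| / |test| = 3/5 = 0.6
r = recall(reference, test)      # |ref & test| / |ref|  = 3/4 = 0.75
f = f_measure(reference, test)   # 1/(0.5/p + 0.5/r) = 2pr/(p+r) = 2/3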

Example 1: validate

	def validate(self, validation_set):
		if self.classifier is None:
			raise Exception("self.classifier is None")
		reference=defaultdict(set)
		observed=defaultdict(set)
		observed['neutral']=set()

		for i, (tweet, label) in enumerate(validation_set):
			reference[label].add(i)
			observation=self.classify(tweet)
			observed[observation].add(i)
		# classify.accuracy expects the gold (featureset, label) pairs, not the observed sets
		acc=classify.accuracy(self.classifier, validation_set)
		posp=precision(reference['positive'],observed['positive'])
		posr=recall(reference['positive'], observed['positive'])
		posf=f_measure(reference['positive'], observed['positive'])
		negp=precision(reference['negative'],observed['negative'])
		negr=recall(reference['negative'], observed['negative'])
		negf=f_measure(reference['negative'], observed['negative'])
		
		print "accuracy: %s" % acc
		print "pos precision: %s" % posp
		print "pos recall: %s" % posr
		print "pos f-measure: %s" % posf
		print "neg precision: %s" % negp
		print "neg recall: %s" % negr
		print "neg f-measure: %s" % negf
		return (acc, posp, posr, posf, negp, negr, negf)
Developer: anov | Project: honors | Lines: 27 | Source: classifier.py


Example 2: print_precision_recall

from collections import defaultdict
from nltk.metrics import precision, recall, f_measure

def print_precision_recall(classifier, test_dict):
    refsets = defaultdict(set)
    testsets = defaultdict(set)
    for i, (feats, label) in enumerate(test_dict):
        refsets[label].add(i)
        observed = classifier.classify(feats)
        testsets[observed].add(i)
    print 'pos precision:', precision(refsets['positive'], testsets['positive'])
    print 'pos recall:', recall(refsets['positive'], testsets['positive'])
    print 'pos F-measure:', f_measure(refsets['positive'], testsets['positive'])
    print 'neg precision:', precision(refsets['negative'], testsets['negative'])
    print 'neg recall:', recall(refsets['negative'], testsets['negative'])
    print 'neg F-measure:', f_measure(refsets['negative'], testsets['negative'])
Developer: gleicon | Project: sentiment_analysis | Lines: 13 | Source: filters.py


Example 3: benchmarking

 # note: the mutable default arguments accumulate across calls to benchmarking
 def benchmarking(self, classifier, _test_set, all_f_measure=[], all_precision=[], all_recall=[]):
     from nltk import classify
     accuracy = classify.accuracy(classifier, _test_set)
     
     print("accuracy:",accuracy)
     
     from nltk.metrics import precision
     from nltk.metrics import recall
     from nltk.metrics import f_measure
     
     import collections
     refsets = collections.defaultdict(set)
     testsets = collections.defaultdict(set)
     for i, (feats, label) in enumerate(_test_set):
         refsets[label].add(i)
         observed = classifier.classify(feats)
         testsets[observed].add(i)
         
     # 'class' is a placeholder key; substitute a real label from your data
     prec=precision(refsets['class'], testsets['class'])
     rec=recall(refsets['class'], testsets['class'])
     f1=f_measure(refsets['class'], testsets['class'])
     print('precision:', prec)
     print('recall:', rec)
     print('F-measure:', f1)
             
     all_f_measure.append(f1)
     all_precision.append(prec)
     all_recall.append(rec)
     print('========Show top 10 most informative features========')
     classifier.show_most_informative_features(10)
Developer: jerrygaoLondon | Project: oke-extractor | Lines: 30 | Source: okeConceptRecogniser.py


Example 4: eval_stats

def eval_stats(results):
	'''
	Compute recall, precision, and f-measure from passed results.
	The expected format for results is a dictionary whose keys=<name of article>
	and values=tuple (<test category>, <reference category>, <scores>), where:
	test=category suggested by classifier, reference=pre-classified gold
	category, scores=can be None or dictionary whose keys=category names and
	values=matching score for this article.
	'''
	# Calculate number of correct matches
	correct = 0
	missed = defaultdict(tuple)
	for article_name, (suggested, real, scores) in results.iteritems():
		if suggested==real:
			correct += 1
		else:
			missed[article_name] = (suggested, real)
	success_ratio = correct / float(len(results))
	print "Ratio: %0.3f" % success_ratio
	
	# Print wrong matches
	for name, (suggested, real) in missed.iteritems():
		print "%s\t%s\t%s" % (name, suggested, real)
	
	# Create sets of references / test classification for evaluation
	cat_ref = defaultdict(set)
	cat_test= defaultdict(set)
	for name, (test_category, ref_category, scores) in results.iteritems():
		cat_ref[ref_category].add(name) 		# gold-tagged categories
		cat_test[test_category].add(name) 	# suggested categories

	# Precision, recall, f-measure, support (num of reference articles in
	# each category) for each category
	print "\nCategory\tPrecision\tRecall\tF-measure\tSupport" 
	measures = defaultdict(tuple)
	for category in cat_ref.keys():
		cat_prec = metrics.precision(cat_ref[category], cat_test[category])
		cat_rec = metrics.recall(cat_ref[category], cat_test[category])
		cat_f = metrics.f_measure(cat_ref[category], cat_test[category])
		cat_support = len(cat_ref[category])
		measures[category] = (cat_prec, cat_rec, cat_f, cat_support)
		print "%s\t%0.3f\t%0.3f\t%0.3f\t%d" % \
		(category, cat_prec, cat_rec, cat_f, cat_support)
	
	# Calculate precision, recall, f-measure for the entire corpus:
	# a support-weighted average over the separate categories, i.e.
	# sum(metric_i * support_i) / sum(support_i)
	avg_prec = weighted_average([(cat_measure[0], cat_measure[3]) for \
		cat_measure in measures.values()])
	avg_rec = weighted_average([(cat_measure[1], cat_measure[3]) for \
		cat_measure in measures.values()])
	avg_f = weighted_average([(cat_measure[2], cat_measure[3]) for \
		cat_measure in measures.values()])
	total_support = sum([cat_support[3] for cat_support in measures.values()])
	
	print "%s\t%0.3f\t%0.3f\t%0.3f\t%d" % ("Total", avg_prec, avg_rec, avg_f, total_support)
Developer: campustimes | Project: pnlp-final-project | Lines: 56 | Source: eval_class.py
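The weighted_average helper called above is not included in the excerpt. A minimal sketch consistent with how it is called (a list of (value, weight) pairs, with each category's support as the weight) might be:

def weighted_average(pairs):
	# hypothetical reconstruction: pairs is a list of (value, weight) tuples
	total_weight = sum(weight for _, weight in pairs)
	return sum(value * weight for value, weight in pairs) / float(total_weight)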


Example 5: evaluate_features

def evaluate_features(feature_extractor, N, only_acc=False):
    from nltk.corpus import movie_reviews
    from nltk.classify import NaiveBayesClassifier as naive
    from nltk.classify.util import accuracy
    from nltk.metrics import precision, recall, f_measure
    from sys import stdout
    
    negative = movie_reviews.fileids('neg')
    positive = movie_reviews.fileids('pos')
    negfeats = [(feature_extractor(movie_reviews.sents(fileids=[f])),
                 'neg') for f in negative]

    posfeats = [(feature_extractor(movie_reviews.sents(fileids=[f])),
                 'pos') for f in positive]
    # stratifiedSamples (defined elsewhere in the source file) splits the
    # feature sets into stratified train/test samples
    negtrain, negtest = stratifiedSamples(negfeats, N)
    postrain, postest = stratifiedSamples(posfeats, N)

    trainfeats = negtrain + postrain
    testfeats = negtest + postest
    classifier = naive.train(trainfeats)
    if only_acc: return accuracy(classifier, testfeats)
    print 'accuracy: {}'.format(accuracy(classifier, testfeats))

    # Precision, Recall, F-measure
    from collections import defaultdict
    refsets = defaultdict(set)
    testsets = defaultdict(set)

    for i, (feats, label) in enumerate(testfeats):
        refsets[label].add(i)
        observed = classifier.classify(feats)
        testsets[observed].add(i)
        
    print 'pos precision:', precision(refsets['pos'], testsets['pos'])
    print 'pos recall:', recall(refsets['pos'], testsets['pos'])
    print 'pos F-measure:', f_measure(refsets['pos'], testsets['pos'])
    print 'neg precision:', precision(refsets['neg'], testsets['neg'])
    print 'neg recall:', recall(refsets['neg'], testsets['neg'])
    print 'neg F-measure:', f_measure(refsets['neg'], testsets['neg'])
    stdout.flush()
    classifier.show_most_informative_features()
    return classifier
Developer: lxmonk | Project: nlg12_hw2 | Lines: 42 | Source: hw2.py


Example 6: calcAllClassesFMeasure

from nltk.metrics import f_measure

def calcAllClassesFMeasure(classSet, refsets, testsets):
    fSum = 0.0
    denominator = 0
    for category in classSet:
        num = f_measure(refsets[category], testsets[category])
        if num is None:
            continue
        fSum += num
        denominator += 1
    
    return fSum/denominator
Developer: peeceeprashant | Project: SharedTask | Lines: 11 | Source: explicit_sense_perceptron_predict.py
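A hypothetical call, assuming refsets and testsets map labels to sets of item ids as in the earlier examples (the labels here are made up). Note that the function raises ZeroDivisionError if f_measure returns None for every class:

refsets = {'Cause': {0, 1, 2}, 'Contrast': {3, 4}}
testsets = {'Cause': {0, 1, 3}, 'Contrast': {2, 4}}

# per-class F: Cause = 2/3, Contrast = 1/2; macro average = 7/12 ≈ 0.583
print(calcAllClassesFMeasure({'Cause', 'Contrast'}, refsets, testsets))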


Example 7: word_similarity_dict

    def word_similarity_dict(self, word):
        """
        Return a dictionary mapping from words to 'similarity scores,'
        indicating how often these two words occur in the same
        context.
        """
        word = self._key(word)
        word_contexts = set(self._word_to_contexts[word])

        scores = {}
        for w, w_contexts in self._word_to_contexts.items():
            scores[w] = f_measure(word_contexts, set(w_contexts))

        return scores
Developer: prz3m | Project: kind2anki | Lines: 14 | Source: text.py
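Here f_measure serves as a set-similarity score: with the default alpha=0.5 it reduces to 2PR/(P+R), which is symmetric in the two context sets. A small illustration with made-up context tuples:

from nltk.metrics import f_measure

contexts_a = {('the', 'dog'), ('a', 'cat'), ('my', 'pet')}
contexts_b = {('the', 'dog'), ('a', 'cat'), ('his', 'toy'), ('her', 'book')}

# overlap = 2, so P = 2/4, R = 2/3, and F = 2PR/(P+R) = 4/7 ≈ 0.571
print(f_measure(contexts_a, contexts_b))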


Example 8: measure

import collections
from nltk import metrics

def measure(classifier, testfeats, alpha=0.5):
    refsets = collections.defaultdict(set)
    testsets = collections.defaultdict(set)
    
    for i, (feats, label) in enumerate(testfeats):
        refsets[label].add(i)
        observed = classifier.classify(feats)
        testsets[observed].add(i)
    
    precisions = {}
    recalls = {}
    f_measures = {}
    for label in classifier.labels():
        precisions[label] = metrics.precision(refsets[label], testsets[label])
        recalls[label] = metrics.recall(refsets[label], testsets[label])
        f_measures[label] = metrics.f_measure(refsets[label], testsets[label], alpha)

    return precisions, recalls, f_measures
Developer: hitesh915 | Project: SentimentAnalysis | Lines: 18 | Source: sentiment.py
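The alpha argument threaded through here shifts the balance between precision and recall: f_measure computes 1/(alpha/P + (1-alpha)/R), so alpha above 0.5 weights precision more heavily and alpha below 0.5 weights recall. A quick illustration with made-up sets:

from nltk.metrics import f_measure

ref, test = {1, 2, 3, 4}, {3, 4, 5, 6, 7, 8}   # P = 1/3, R = 1/2

print(f_measure(ref, test, alpha=0.5))   # 0.4 (balanced harmonic mean)
print(f_measure(ref, test, alpha=0.9))   # ~0.345 (dragged down by the low precision)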


Example 9: set

#!/usr/bin/python
import nltk
from nltk.metrics import precision, recall, f_measure

reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()
test    = 'DET VB VB DET NN NN NN IN DET NN'.split()
reference_set = set(reference)
test_set = set(test)

print "Precision: "
print precision(reference_set, test_set)

print "\n"

print "Recall: "
print recall(reference_set, test_set)

print "\n"

print "F_Measure: "
print f_measure(reference_set, test_set)

Developer: amanelis | Project: tweethose-nltk | Lines: 21 | Source: nltk.metrics.py
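Note that converting the tag sequences to sets discards token positions and counts, so this measures overlap of the tag inventories rather than per-token tagging accuracy. Here the intersection is {DET, NN, VB, IN}, giving precision 4/4 = 1.0, recall 4/5 = 0.8, and F-measure 1/(0.5/1.0 + 0.5/0.8) ≈ 0.889.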


Example 10: precision

			refsets, testsets = scoring.multi_ref_test_sets(classifier, test_feats)
		else:
			refsets, testsets = scoring.ref_test_sets(classifier, test_feats)
		
		for label in labels:
			ref = refsets[label]
			test = testsets[label]
			
			if not args.no_precision:
				print '%s precision: %f' % (label, precision(ref, test) or 0)
			
			if not args.no_recall:
				print '%s recall: %f' % (label, recall(ref, test) or 0)
			
			if not args.no_fmeasure:
				print '%s f-measure: %f' % (label, f_measure(ref, test) or 0)

if args.show_most_informative and args.algorithm != 'DecisionTree' and not (args.multi and args.binary):
	print '%d most informative features' % args.show_most_informative
	classifier.show_most_informative_features(args.show_most_informative)

##############
## pickling ##
##############

if not args.no_pickle:
	if args.filename:
		fname = os.path.expanduser(args.filename)
	else:
		name = '%s_%s.pickle' % (args.corpus, args.algorithm)
		fname = os.path.join(os.path.expanduser('~/nltk_data/classifiers'), name)
Developer: jrivero | Project: nltk-trainer | Lines: 31 | Source: train_classifier.py


Example 11: enumerate

#script to validate coding
import cPickle as pickle
import sys
from nltk.metrics import accuracy, ConfusionMatrix, precision, recall, f_measure
from collections import defaultdict
import classifier

if __name__=='__main__':
	validation_pickle=sys.argv[1]
	classifier_pickle=sys.argv[2]
	validation_set=pickle.load(open(validation_pickle, 'rb'))
	c=pickle.load(open(classifier_pickle, 'rb'))
	
	reference=defaultdict(set)
	observed=defaultdict(set)
	ref_labels=[]
	obs_labels=[]
	for i, (tweet, label) in enumerate(validation_set):
		reference[label].add(i)
		ref_labels.append(label)
		observation=c.classify(tweet)
		observed[observation].add(i)
		obs_labels.append(observation)
	
	# nltk.metrics.accuracy expects two parallel label sequences
	print "accuracy: %s" % accuracy(ref_labels, obs_labels)
	print "pos precision: %s" % precision(reference['positive'], observed['positive'])
	print "pos recall: %s" % recall(reference['positive'], observed['positive'])
	print "pos f-measure: %s" % f_measure(reference['positive'], observed['positive'])
	print "neg precision: %s" % precision(reference['negative'], observed['negative'])
	print "neg recall: %s" % recall(reference['negative'], observed['negative'])
	print "neg f-measure: %s" % f_measure(reference['negative'], observed['negative'])
	
Developer: anov | Project: honors | Lines: 27 | Source: validate.py


Example 12: avaliate_new_classifier

def avaliate_new_classifier(featureSet):
	# assumes module-level imports: nltk, random, time, collections,
	# plus precision, recall, f_measure from nltk.metrics
	print("Let's train the classifier now!")
	print("\n")
	#random.shuffle(featureSet)

	#Each one has 197
	positive_tweets = featureSet[:196]

	#Shuffle so we don't keep testing on only the same last items
	random.shuffle(positive_tweets)

	#print(featureSet[7185])
	#To take 7185 pos and 7185 neg, but the negatives number 7213
	negative_tweets = featureSet[196:293]
	random.shuffle(negative_tweets)

	neutral_tweets = featureSet[293:]
	random.shuffle(neutral_tweets)

	#Now split each class into a reference set and a test set
	pos_cutoff = len(positive_tweets)*3/4
	neg_cutoff = len(negative_tweets)*3/4
	neu_cutoff = len(neutral_tweets)*3/4

	# 75% of the tweets go to the reference (training) set, the rest to the test set
	pos_references = positive_tweets[:pos_cutoff]
	pos_tests = positive_tweets[pos_cutoff:]

	neg_references = negative_tweets[:neg_cutoff]
	neg_tests = negative_tweets[neg_cutoff:]

	neu_references = neutral_tweets[:neu_cutoff]
	neu_tests = neutral_tweets[neu_cutoff:]

	#Training and test sets for computing accuracy
	training_set = pos_references + neg_references + neu_references
	testing_set = pos_tests + neg_tests + neu_tests

	start_time = time.time()

	global classifier
	print("Comecou a treina-lo agora!")

	#training_set2 = [(t,l) for (t,l,twe) in training_set]

	classifier = nltk.NaiveBayesClassifier.train(training_set)
	#testing_set2 = [(t,l) for (t,l,twe) in testing_set]
	print("Naive Bayes Algo accuracy:", (nltk.classify.accuracy(classifier, testing_set)) * 100)
	classifier.show_most_informative_features(30)

	refsets = collections.defaultdict(set)
	testsets = collections.defaultdict(set)

	for i, (feats, label) in enumerate(testing_set):
	    refsets[label].add(i)
	    observed = classifier.classify(feats)
	    testsets[observed].add(i)
	 
	print 'pos precision:', precision(refsets['pos'], testsets['pos'])
	print 'pos recall:', recall(refsets['pos'], testsets['pos'])
	print 'pos F-measure:', f_measure(refsets['pos'], testsets['pos'])

	print 'neg precision:', precision(refsets['neg'], testsets['neg'])
	print 'neg recall:', recall(refsets['neg'], testsets['neg'])
	print 'neg F-measure:', f_measure(refsets['neg'], testsets['neg'])

	print 'neutral precision:', precision(refsets['neutral'], testsets['neutral'])
	print 'neutral recall:', recall(refsets['neutral'], testsets['neutral'])
	print 'neutral F-measure:', f_measure(refsets['neutral'], testsets['neutral'])


	print("--- Classifier executed in %s seconds ---" % (time.time() - start_time))
Developer: CarlosRafael22 | Project: Estudos-NLTK | Lines: 72 | Source: sentimentAnalysis.py


Example 13: f_measure

 def f_measure(self):
     return f_measure(self._reference, self._test)
Developer: chloebt | Project: educe | Lines: 2 | Source: showscores.py


Example 14: avaliate_classifiers

def avaliate_classifiers(featureSet):
	# assumes module-level imports: nltk, random, time, collections,
	# plus precision, recall, f_measure from nltk.metrics
	print("Let's train the classifier now!")
	print("\n")
	#random.shuffle(featureSet)

	#Computes recall and precision.
	# You need to build 2 sets for each classification label:
	# a reference set of correct values, and a test set of observed values.

	#The first 6686 + 500 (day 14) tweets are positive; the rest (6757 + 500 (day 14)) are negative
	positive_tweets = featureSet[:7185]

	#Shuffle so we don't keep testing on only the same last items
	random.shuffle(positive_tweets)

	#print(featureSet[7185])
	#To take 7185 pos and 7185 neg, but the negatives number 7213
	negative_tweets = featureSet[7185:14372]
	random.shuffle(negative_tweets)

	#Now split each class into a reference set and a test set
	pos_cutoff = len(positive_tweets)*3/4
	neg_cutoff = len(negative_tweets)*3/4

	# 75% of the tweets go to the reference (training) set, the rest to the test set
	pos_references = positive_tweets[:pos_cutoff]
	pos_tests = positive_tweets[pos_cutoff:]

	neg_references = negative_tweets[:neg_cutoff]
	neg_tests = negative_tweets[neg_cutoff:]

	#Training and test sets for computing accuracy
	training_set = pos_references + neg_references
	testing_set = pos_tests + neg_tests

	start_time = time.time()

	global classifier
	print("Comecou a treina-lo agora!")

	#training_set2 = [(t,l) for (t,l,twe) in training_set]

	classifier = nltk.NaiveBayesClassifier.train(training_set)
	#testing_set2 = [(t,l) for (t,l,twe) in testing_set]
	print("Naive Bayes Algo accuracy:", (nltk.classify.accuracy(classifier, testing_set)) * 100)
	classifier.show_most_informative_features(30)

	refsets = collections.defaultdict(set)
	testsets = collections.defaultdict(set)

	# NOTE: with this loop left commented out, refsets/testsets stay empty and
	# the precision/recall/f_measure calls below all print None.
	# for i, (feats, label, l) in enumerate(testing_set):
	#     refsets[label].add(i)
	#     observed = classifier.classify(feats)
	#     testsets[observed].add(i)
	#     print("--"*200)
	#     print()
	#     print("Classified as: ",observed)
	#     print()
	#     print(l)
	#     print()
	#     print("--"*200)
	#     raw_input("Press any key to continue:")
	 
	print 'pos precision:', precision(refsets['pos'], testsets['pos'])
	print 'pos recall:', recall(refsets['pos'], testsets['pos'])
	print 'pos F-measure:', f_measure(refsets['pos'], testsets['pos'])
	print 'neg precision:', precision(refsets['neg'], testsets['neg'])
	print 'neg recall:', recall(refsets['neg'], testsets['neg'])
	print 'neg F-measure:', f_measure(refsets['neg'], testsets['neg'])


	print("--- Classifier executed in %s seconds ---" % (time.time() - start_time))
Developer: CarlosRafael22 | Project: Estudos-NLTK | Lines: 72 | Source: sentimentAnalysis.py


Example 15: int

        stop = int(len(texts) * args.fraction)

        for t in texts[:stop]:
            feat = bag_of_words(norm_words(t))
            feats.append(feat)
            test_feats.append((feat, label))

    print "accuracy:", accuracy(classifier, test_feats)
    refsets, testsets = scoring.ref_test_sets(classifier, test_feats)

    for label in labels:
        ref = refsets[label]
        test = testsets[label]
        print "%s precision: %f" % (label, precision(ref, test) or 0)
        print "%s recall: %f" % (label, recall(ref, test) or 0)
        print "%s f-measure: %f" % (label, f_measure(ref, test) or 0)
else:
    if args.instances == "sents":
        texts = categorized_corpus.sents()
        total = len(texts)
    elif args.instances == "paras":
        texts = (itertools.chain(*para) for para in categorized_corpus.paras())
        total = len(categorized_corpus.paras())  # paras is a method; call it to get the list
    elif args.instances == "files":
        texts = (categorized_corpus.words(fileids=[fid]) for fid in categorized_corpus.fileids())
        total = len(categorized_corpus.fileids())

    stop = int(total * args.fraction)
    feats = (bag_of_words(norm_words(i)) for i in itertools.islice(texts, stop))

label_counts = collections.defaultdict(int)
Developer: berkeley-food-recommendations | Project: nltk-trainer | Lines: 31 | Source: analyze_classifier_coverage.py


Example 16: print

    trainfeats = negfeats[:4000] + posfeats[:4000]
    testfeats = negfeats[4000:] + posfeats[4000:]
    print("train on %d instances, test on %d instances" % (len(trainfeats), len(testfeats)))
    classifier = NaiveBayesClassifier.train(trainfeats)
    refsets = collections.defaultdict(set)
    testsets = collections.defaultdict(set)
    for i, (feats, label) in enumerate(testfeats):
        refsets[label].add(i)
        observed = classifier.classify(feats)
        testsets[observed].add(i)

    # cross validation  3-fold
    feats = negfeats + posfeats
    M = math.floor(len(feats) / 3)
    result = []
    for n in range(3):
        val_set = feats[n * M :][:M]
        train_set = feats[(n + 1) * M :] + feats[: n * M]
        classifier = nltk.NaiveBayesClassifier.train(train_set)
        result.append("{:.4f}".format(round(nltk.classify.accuracy(classifier, val_set) * 100, 4)))

    print("cross_validation:", result)

    print("pos precision:", precision(refsets["pos"], testsets["pos"]))
    print("pos recall:", recall(refsets["pos"], testsets["pos"]))
    print("pos F-measure:", f_measure(refsets["pos"], testsets["pos"]))
    print("neg precision:", precision(refsets["neg"], testsets["neg"]))
    print("neg recall:", recall(refsets["neg"], testsets["neg"]))
    print("neg F-measure:", f_measure(refsets["neg"], testsets["neg"]))
    classifier.show_most_informative_features()
Developer: efrenaguilar95 | Project: Yelp_Analyzer | Lines: 30 | Source: nbClassifierV2.py


Example 17: cross_fold

def cross_fold(instances, trainf, testf, folds=10, trace=1, metrics=True, informative=0):
	if folds < 2:
		raise ValueError('must have at least 2 folds')
	# ensure isn't an exhaustible iterable
	instances = list(instances)
	# randomize so get an even distribution, in case labeled instances are
	# ordered by label
	random.shuffle(instances)
	l = len(instances)
	step = l / folds
	
	if trace:
		print('step %d over %d folds of %d instances' % (step, folds, l))
	
	accuracies = []
	precisions = collections.defaultdict(list)
	recalls = collections.defaultdict(list)
	f_measures = collections.defaultdict(list)
	
	for f in range(folds):
		if trace:
			print('\nfold %d' % (f+1))
			print('-----%s' % ('-'*len('%s' % (f+1))))
		
		start = f * step
		end = start + step
		train_instances = instances[:start] + instances[end:]
		test_instances = instances[start:end]
		
		if trace:
			print('training on %d:%d + %d:%d' % (0, start, end, l))
		
		obj = trainf(train_instances)
		
		if trace:
			print('testing on %d:%d' % (start, end))
		
		if metrics:
			refsets, testsets = ref_test_sets(obj, test_instances)
			
			for key in set(refsets.keys() + testsets.keys()):
				ref = refsets[key]
				test = testsets[key]
				p = precision(ref, test) or 0
				r = recall(ref, test) or 0
				f = f_measure(ref, test) or 0
				precisions[key].append(p)
				recalls[key].append(r)
				f_measures[key].append(f)
				
				if trace:
					print('%s precision: %f' % (key, p))
					print('%s recall: %f' % (key, r))
					print('%s f-measure: %f' % (key, f))
		
		accuracy = testf(obj, test_instances)
		
		if trace:
			print('accuracy: %f' % accuracy)
		
		accuracies.append(accuracy)
		
		if trace and informative and hasattr(obj, 'show_most_informative_features'):
			obj.show_most_informative_features(informative)
	
	if trace:
		print('\nmean and variance across folds')
		print('------------------------------')
		print('accuracy mean: %f' % (sum(accuracies) / folds))
		print('accuracy variance: %f' % array(accuracies).var())
		
		for key, ps in iteritems(precisions):
			print('%s precision mean: %f' % (key, sum(ps) / folds))
			print('%s precision variance: %f' % (key, array(ps).var()))
		
		for key, rs in iteritems(recalls):
			print('%s recall mean: %f' % (key, sum(rs) / folds))
			print('%s recall variance: %f' % (key, array(rs).var()))
		
		for key, fs in iteritems(f_measures):
			print('%s f_measure mean: %f' % (key, sum(fs) / folds))
			print('%s f_measure variance: %f' % (key, array(fs).var()))
	
	return accuracies, precisions, recalls, f_measures
Developer: Herka | Project: nltk-trainer | Lines: 84 | Source: scoring.py
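A hypothetical way to drive cross_fold with an NLTK classifier, assuming instances is a list of (featureset, label) pairs; per the code above, trainf receives the training instances and must return a classifier, and testf receives that object plus the test instances and must return an accuracy:

from nltk.classify import NaiveBayesClassifier
from nltk.classify.util import accuracy

accs, precs, recs, fms = cross_fold(
	instances,                         # hypothetical (featureset, label) pairs
	trainf=NaiveBayesClassifier.train,
	testf=accuracy,
	folds=10,
)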


Example 18: ConfusionMatrix

    print 'Dictionary : ', dictionary.get_name(), '\n'
    print ConfusionMatrix(gold_standard,results).pp()
    print 'Accuracy: ', accuracy(gold_standard,results)
    for c in [0,1,-1]:
        print 'Metrics for class ', c
        gold = set()
        test = set()
        for i,x in enumerate(gold_standard):
            if x == c:
                gold.add(i)
        for i,x in enumerate(results):
            if x == c:
                test.add(i)
        print 'Precision: ', precision(gold, test)
        print 'Recall   : ', recall(gold, test)
        print 'F_measure: ', f_measure(gold, test)
    print '\n\n'


#################### Sentences classification ##########################

# Not reported in the paper because LIWC doesn't have neutral class

positive_sents = [reli.words_sentence_pos(s) for s in reli.sents(polarity='positive')]
negative_sents = [reli.words_sentence_pos(s) for s in reli.sents(polarity='negative')]
neutral_sents = [reli.words_sentence_pos(s) for s in reli.sents(polarity='neutral')]


print '#########################################################################'
print '###################### Sentences classification #########################'
print '#########################################################################'
Developer: Jewelryland | Project: STIL_LIWC_Evaluation | Lines: 31 | Source: Experiments.py



Note: The nltk.metrics.f_measure examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.

