This article collects typical usage examples of the Scala class edu.stanford.nlp.pipeline.StanfordCoreNLP. If you are wondering what the StanfordCoreNLP class does in Scala, how to use it, or simply want to see working examples, the hand-picked class examples below may help.
Nine code examples of the StanfordCoreNLP class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Scala code examples.
Example 1: SentimentAnalyzer
// Set the package name and import the dependent classes
package com.knoldus.streaming.core

import java.util.Properties

import edu.stanford.nlp.ling.CoreAnnotations
import edu.stanford.nlp.neural.rnn.RNNCoreAnnotations
import edu.stanford.nlp.pipeline.{Annotation, StanfordCoreNLP}
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations

import scala.collection.convert.wrapAll._

object SentimentAnalyzer {

  private val props = new Properties()
  props.setProperty("annotators", "tokenize, ssplit, parse, sentiment")
  private val pipeline: StanfordCoreNLP = new StanfordCoreNLP(props)

  def getSentiment(input: String): String = {
    if (Option(input).isDefined && !input.trim.isEmpty) {
      val annotation: Annotation = pipeline.process(input)
      val (_, sentiment) =
        annotation.get(classOf[CoreAnnotations.SentencesAnnotation])
          .map { sentence => (sentence, sentence.get(classOf[SentimentCoreAnnotations.SentimentAnnotatedTree])) }
          .map { case (sentence, tree) => (sentence.toString, getSentimentType(RNNCoreAnnotations.getPredictedClass(tree))) }
          .maxBy { case (sentence, _) => sentence.length }
      sentiment
    } else {
      throw new IllegalArgumentException("Text should not be empty or null")
    }
  }

  private def getSentimentType(sentiment: Int): String = sentiment match {
    case x if x == 3 || x == 4 => "positive"
    case x if x == 0 || x == 1 => "negative"
    case 2 => "neutral"
  }
}
Developer ID: knoldus, Project: real-time-stream-processing-engine, Lines of code: 40, Source file: SentimentAnalyzer.scala
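A minimal usage sketch for this example (the input sentence below is made up for illustration): getSentiment classifies each sentence, keeps the label of the longest sentence, and returns "positive", "negative" or "neutral".

// hypothetical call; the English models are loaded when the object is first initialised, so the first call is slow
val label: String = SentimentAnalyzer.getSentiment("The movie was absolutely wonderful.")
// expected to be "positive"; a null or blank input throws IllegalArgumentException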
Example 2: SentimentAnalyzer
// Set the package name and import the dependent classes
package com.knoldus.nlp

import java.util.Properties

import edu.stanford.nlp.ling.CoreAnnotations
import edu.stanford.nlp.neural.rnn.RNNCoreAnnotations
import edu.stanford.nlp.pipeline.{Annotation, StanfordCoreNLP}
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations

import scala.collection.convert.wrapAll._

object SentimentAnalyzer {

  private val props = new Properties()
  props.setProperty("annotators", "tokenize, ssplit, parse, sentiment")
  private val pipeline: StanfordCoreNLP = new StanfordCoreNLP(props)

  def getSentiment(input: String): String = {
    if (Option(input).isDefined && !input.trim.isEmpty) {
      val annotation: Annotation = pipeline.process(input)
      val (_, sentiment) =
        annotation.get(classOf[CoreAnnotations.SentencesAnnotation])
          .map { sentence => (sentence, sentence.get(classOf[SentimentCoreAnnotations.SentimentAnnotatedTree])) }
          .map { case (sentence, tree) => (sentence.toString, getSentimentType(RNNCoreAnnotations.getPredictedClass(tree))) }
          .maxBy { case (sentence, _) => sentence.length }
      sentiment
    } else {
      throw new IllegalArgumentException("Text should not be empty or null")
    }
  }

  private def getSentimentType(sentiment: Int): String = sentiment match {
    case x if x == 3 || x == 4 => "positive"
    case x if x == 0 || x == 1 => "negative"
    case 2 => "neutral"
  }
}
Developer ID: knoldus, Project: spark-streaming-meetup, Lines of code: 39, Source file: SentimentAnalyzer.scala
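This copy of the analyzer comes from a Spark Streaming project, so a hedged sketch of how it might be wired into a stream follows; the DStream tweetTexts and its contents are assumptions for illustration, not code from the project.

// assumed: tweetTexts is a DStream[String] containing tweet bodies collected elsewhere in the job
// because SentimentAnalyzer is an object, the CoreNLP pipeline is created once per executor JVM
// instead of being serialised with the closure
val labelled = tweetTexts.map(text => (text, SentimentAnalyzer.getSentiment(text)))
labelled.print()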
Example 3: SNerPipeline
// Set the package name and import the dependent classes
package org.aj.ner

import java.util.Properties

import edu.stanford.nlp.pipeline.StanfordCoreNLP

object SNerPipeline {

  type Pipeline = StanfordCoreNLP

  def apply(regexnerPath: Option[String] = None, nerPath: Option[String] = None): Pipeline = {
    val props = new Properties()

    regexnerPath match {
      case Some(value) =>
        props.put("annotators", "tokenize, ssplit, pos, lemma, ner, regexner")
        if (!new java.io.File(value).exists)
          throw new IllegalArgumentException(s"File '${value}' for regexner.mapping does not exist!")
        props.put("regexner.mapping", value)
      case _ =>
        props.put("annotators", "tokenize, ssplit, pos, lemma, ner")
    }

    nerPath match {
      case Some(value) => props.put("ner.model", value)
      case _ => ()
    }

    new StanfordCoreNLP(props)
  }
}
Developer ID: ajmnsk, Project: ner, Lines of code: 33, Source file: SNerPipeline.scala
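A usage sketch for the factory (the mapping-file path is a hypothetical placeholder): without arguments it builds a plain NER pipeline; with regexnerPath it adds the regexner annotator and points regexner.mapping at the given rules file, failing fast if the file does not exist.

// default pipeline: tokenize, ssplit, pos, lemma, ner
val pipeline: SNerPipeline.Pipeline = SNerPipeline()

// pipeline that also applies rules from a TSV mapping file (path chosen for illustration only)
val rulePipeline = SNerPipeline(regexnerPath = Some("/path/to/regexner-rules.tab"))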
Example 4: SentimentAnalyzer
// Set the package name and import the dependent classes
import java.util.Properties

import edu.stanford.nlp.ling.CoreAnnotations
import edu.stanford.nlp.neural.rnn.RNNCoreAnnotations
import edu.stanford.nlp.pipeline.{Annotation, StanfordCoreNLP}
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations

import scala.collection.JavaConversions._

object SentimentAnalyzer {

  val props = new Properties()
  props.setProperty("annotators", "tokenize, ssplit, parse, sentiment")
  val pipeline: StanfordCoreNLP = new StanfordCoreNLP(props)

  def getSentimentScore(text: String): Double = {
    val scoreList = getSentimentScoreList(text)
    val scores = scoreList.map(_._2)
    scores.sum.toDouble / scores.length
  }

  def getSentimentScoreList(text: String): List[(String, Int)] = {
    val annotation: Annotation = pipeline.process(text)
    val sentences = annotation.get(classOf[CoreAnnotations.SentencesAnnotation]).filterNot(x => x.toString.trim.isEmpty)
    sentences
      .map(sentence => (sentence, sentence.get(classOf[SentimentCoreAnnotations.SentimentAnnotatedTree])))
      .map { case (sentence, tree) => (sentence.toString, RNNCoreAnnotations.getPredictedClass(tree)) }
      .toList
  }
}
Developer ID: siddhartha-chandra, Project: scalaML, Lines of code: 32, Source file: SentimentAnalyzer.scala
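A usage sketch (the input text and predicted classes are illustrative only): getSentimentScoreList returns one 0-4 class per non-empty sentence, and getSentimentScore averages those classes into a Double.

// per-sentence classes, e.g. List(("I loved it.", 3), ("The ending was terrible.", 1))
val perSentence = SentimentAnalyzer.getSentimentScoreList("I loved it. The ending was terrible.")

// the average of the classes above, e.g. 2.0; text with no sentences would yield NaN (0.0 / 0)
val average: Double = SentimentAnalyzer.getSentimentScore("I loved it. The ending was terrible.")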
Example 5: SentimentAnalyzer
// Set the package name and import the dependent classes
package term.project.SentimentalStats

import java.util.Properties

import Sentiment.Sentiment
import edu.stanford.nlp.ling.CoreAnnotations
import edu.stanford.nlp.neural.rnn.RNNCoreAnnotations
import edu.stanford.nlp.pipeline.{Annotation, StanfordCoreNLP}
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations

import scala.collection.convert.wrapAll._

object SentimentAnalyzer {

  val props = new Properties
  props.setProperty("annotators", "tokenize, ssplit, parse, sentiment")
  val pipeline: StanfordCoreNLP = new StanfordCoreNLP(props)

  def mainSentiment(input: String): Sentiment = Option(input) match {
    case Some(text) if text.nonEmpty => extractSentiment(text)
    case _ => throw new IllegalArgumentException("input can't be null or empty")
  }

  def sentiment(input: String): List[(String, Sentiment)] = Option(input) match {
    case Some(text) if text.nonEmpty => extractSentiments(text)
    case _ => throw new IllegalArgumentException("input can't be null or empty")
  }

  private def extractSentiment(text: String): Sentiment = {
    val (_, sentiment) = extractSentiments(text)
      .maxBy { case (sentence, _) => sentence.length }
    sentiment
  }

  def extractSentiments(text: String): List[(String, Sentiment)] = {
    val annotation: Annotation = pipeline.process(text)
    val sentences = annotation.get(classOf[CoreAnnotations.SentencesAnnotation])
    sentences
      .map(sentence => (sentence, sentence.get(classOf[SentimentCoreAnnotations.SentimentAnnotatedTree])))
      .map { case (sentence, tree) => (sentence.toString, Sentiment.toSentiment(RNNCoreAnnotations.getPredictedClass(tree))) }
      .toList
  }
}
Developer ID: 4102, Project: Sentimental-Stats, Lines of code: 45, Source file: SentimentAnalyzer.scala
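This object depends on a Sentiment enumeration from the same project (imported as Sentiment.Sentiment); an enumeration along the lines of the one shown in Example 9 is assumed here. A short usage sketch with an illustrative input:

// overall label of the longest sentence, e.g. Sentiment.POSITIVE
val overall: Sentiment = SentimentAnalyzer.mainSentiment("Great win tonight. The defense struggled.")

// per-sentence labels, e.g. List(("Great win tonight.", POSITIVE), ("The defense struggled.", NEGATIVE))
val bySentence = SentimentAnalyzer.sentiment("Great win tonight. The defense struggled.")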
Example 6: SentimentAnalyzer
// Set the package name and import the dependent classes
package pl.pcejrowski

import java.util
import java.util.Properties

import edu.stanford.nlp.ling.CoreAnnotations
import edu.stanford.nlp.neural.rnn.RNNCoreAnnotations
import edu.stanford.nlp.pipeline.{Annotation, StanfordCoreNLP}
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations
import edu.stanford.nlp.util.CoreMap

import scala.collection.convert.wrapAll._

object SentimentAnalyzer {

  val props = new Properties()
  props.setProperty("annotators", "tokenize, ssplit, parse, sentiment")
  val pipeline: StanfordCoreNLP = new StanfordCoreNLP(props)

  def mainSentiment(input: String): Int = Option(input) match {
    case Some(text) if !text.isEmpty => extractSentiment(text)
    case _ => throw new IllegalArgumentException("input can't be null or empty")
  }

  private def extractSentiment(text: String): Int = {
    val (_, sentiment) = extractSentiments(text)
      .maxBy { case (sentence, _) => sentence.length }
    sentiment
  }

  private def extractSentiments(text: String): List[(String, Int)] = {
    val annotation: Annotation = pipeline.process(text)
    val sentences: util.List[CoreMap] = annotation.get(classOf[CoreAnnotations.SentencesAnnotation])
    sentences
      .map(sentence => (sentence, sentence.get(classOf[SentimentCoreAnnotations.SentimentAnnotatedTree])))
      .map { case (sentence, tree) => (sentence.toString, RNNCoreAnnotations.getPredictedClass(tree)) }
      .toList
  }
}
Developer ID: pcejrowski, Project: spark-twitter-sentiment, Lines of code: 42, Source file: SentimentAnalyzer.scala
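Unlike most of the other examples, this variant returns the raw CoreNLP class index rather than a label. A small sketch of how the 0-4 scale is commonly read (the comment reflects the mapping used elsewhere in this article, not code from this project):

// 0 = very negative, 1 = negative, 2 = neutral, 3 = positive, 4 = very positive
val score: Int = SentimentAnalyzer.mainSentiment("The service was painfully slow.")
// a value of 0 or 1 would be expected for this illustrative input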
Example 7: SentimentAnalyzer
// Set the package name and import the dependent classes
package io.purush.spark.twitter.streaming

import java.util.Properties

import edu.stanford.nlp.ling.CoreAnnotations
import edu.stanford.nlp.neural.rnn.RNNCoreAnnotations
import edu.stanford.nlp.pipeline.{Annotation, StanfordCoreNLP}
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations
import io.purush.spark.twitter.streaming.Sentiment._

import scala.collection.convert.wrapAll._

object SentimentAnalyzer {

  val props = new Properties()
  // corenlp pipeline annotator configuration
  props.setProperty("annotators", "tokenize, ssplit, parse, sentiment")
  val pipeline: StanfordCoreNLP = new StanfordCoreNLP(props)

  def mainSentiment(input: String): Sentiment = Option(input) match {
    case Some(text) if !text.isEmpty => extractSentiment(text)
    case _ => throw new IllegalArgumentException("Input can't be empty")
  }

  def extractSentiment(text: String): Sentiment = {
    val (_, sentiment) = extractSentiments(text)
      .maxBy { case (sentence, _) => sentence.length }
    sentiment
  }

  def extractSentiments(text: String): List[(String, Sentiment)] = {
    val annotation: Annotation = pipeline.process(text)
    val sentences = annotation.get(classOf[CoreAnnotations.SentencesAnnotation])
    sentences
      .map(sentence => (sentence, sentence.get(classOf[SentimentCoreAnnotations.SentimentAnnotatedTree])))
      .map { case (sentence, tree) => (sentence.toString, Sentiment.toSentiment(RNNCoreAnnotations.getPredictedClass(tree))) }
      .toList
  }
}
Developer ID: purukaushik, Project: twitter-spark-streaming, Lines of code: 39, Source file: SentimentAnalyzer.scala
Example 8: getSentiment
// Set the package name and import the dependent classes
package com.knoldus.core

import java.util.Properties

import edu.stanford.nlp.ling.CoreAnnotations
import edu.stanford.nlp.neural.rnn.RNNCoreAnnotations
import edu.stanford.nlp.pipeline.{Annotation, StanfordCoreNLP}
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations

import scala.collection.convert.wrapAll._

trait SentimentAnalyzer {

  private val props = new Properties()
  props.setProperty("annotators", "tokenize, ssplit, parse, sentiment")
  private val pipeline: StanfordCoreNLP = new StanfordCoreNLP(props)

  def getSentiment(input: String): String = {
    if (Option(input).isDefined && !input.trim.isEmpty) {
      val annotation: Annotation = pipeline.process(input)
      val (_, sentiment) =
        annotation.get(classOf[CoreAnnotations.SentencesAnnotation])
          .map { sentence => (sentence, sentence.get(classOf[SentimentCoreAnnotations.SentimentAnnotatedTree])) }
          .map { case (sentence, tree) => (sentence.toString, getSentimentType(RNNCoreAnnotations.getPredictedClass(tree))) }
          .maxBy { case (sentence, _) => sentence.length }
      sentiment
    } else {
      throw new IllegalArgumentException("Text should not be empty or null")
    }
  }

  private def getSentimentType(sentiment: Int): String = sentiment match {
    case x if x == 3 || x == 4 => "positive"
    case x if x == 0 || x == 1 => "negative"
    case 2 => "neutral"
  }
}
Developer ID: knoldus, Project: tweet-processing-engine, Lines of code: 40, Source file: SentimentAnalyzer.scala
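Because this version is a trait rather than an object, every class that mixes it in builds its own props and pipeline when it is instantiated. A minimal mixin sketch (TweetProcessor is a hypothetical name, not part of the project):

// hypothetical consumer mixing in the trait and reusing getSentiment
object TweetProcessor extends SentimentAnalyzer {
  def label(tweet: String): String = getSentiment(tweet)
}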
Example 9: SentimentAnalyzer
// Set the package name and import the dependent classes
package com.whimsicalbees.nlp

import java.util.Properties

import com.whimsicalbees.nlp.Sentiment.Sentiment
import edu.stanford.nlp.ling.CoreAnnotations
import edu.stanford.nlp.neural.rnn.RNNCoreAnnotations
import edu.stanford.nlp.pipeline.{Annotation, StanfordCoreNLP}
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations

import scala.collection.JavaConverters._
import scala.collection.JavaConversions._

object SentimentAnalyzer {

  val props = new Properties()
  props.setProperty("annotators", "tokenize, ssplit, parse, sentiment")
  val pipeline: StanfordCoreNLP = new StanfordCoreNLP(props)

  def mainSentiment(input: String): Sentiment = Option(input) match {
    case Some(text) if !text.isEmpty => extractSentiment(text)
    case _ => throw new IllegalArgumentException("input can't be null or empty")
  }

  def sentiment(input: String): List[(String, Sentiment)] = Option(input) match {
    case Some(text) if !text.isEmpty => extractSentiments(text)
    case _ => throw new IllegalArgumentException("input can't be null or empty")
  }

  private def extractSentiment(text: String): Sentiment = {
    val (_, sentiment) = extractSentiments(text)
      .maxBy { case (sentence, _) => sentence.length }
    sentiment
  }

  def extractSentiments(text: String): List[(String, Sentiment)] = {
    val annotation: Annotation = pipeline.process(text)
    val sentences = annotation.get(classOf[CoreAnnotations.SentencesAnnotation])
    sentences
      .map(sentence => (sentence, sentence.get(classOf[SentimentCoreAnnotations.SentimentAnnotatedTree])))
      .map { case (sentence, tree) => (sentence.toString, Sentiment.toSentiment(RNNCoreAnnotations.getPredictedClass(tree))) }
      .toList
  }
}

object Sentiment extends Enumeration {
  type Sentiment = Value
  val POSITIVE, NEGATIVE, NEUTRAL = Value

  def toSentiment(sentiment: Int): Sentiment = sentiment match {
    case x if x == 0 || x == 1 => Sentiment.NEGATIVE
    case 2 => Sentiment.NEUTRAL
    case x if x == 3 || x == 4 => Sentiment.POSITIVE
  }
}
Developer ID: WhimsicalBees, Project: meet-what-you-tweet, Lines of code: 57, Source file: SentimentAnalyzer.scala
Note: the edu.stanford.nlp.pipeline.StanfordCoreNLP class examples in this article were collected from source-code and documentation hosting platforms such as GitHub/MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's License. Do not republish without permission.