本文整理汇总了Java中edu.stanford.nlp.util.Generics类的典型用法代码示例。如果您正苦于以下问题:Java Generics类的具体用法?Java Generics怎么用?Java Generics使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Generics类属于edu.stanford.nlp.util包,在下文中一共展示了Generics类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: DepParseInfo
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
 * Builds the dependency-parse bookkeeping for a sentence: records each root's
 * parent (0) and label ("root") and recursively renders/records the rest of
 * the graph via {@code recToString}, including nodes unreachable from any root.
 *
 * @param dependencies the semantic graph to traverse
 */
public DepParseInfo(SemanticGraph dependencies) {
  Collection<IndexedWord> roots = dependencies.getRoots();
  if (roots.isEmpty()) {
    // Shouldn't happen for a well-formed graph, but bail out gracefully.
    return;
  }
  StringBuilder rendering = new StringBuilder();
  Set<IndexedWord> visited = Generics.newHashSet();
  for (IndexedWord rootWord : roots) {
    depParents.put(rootWord.index(), 0);
    depLabels.put(rootWord.index(), "root");
    rendering.append("-> ").append(rootWord).append(" (root)\n");
    recToString(rootWord, rendering, 1, visited, dependencies, depLabels, depParents);
  }
  // Process any nodes the root traversal never reached (disconnected pieces).
  Set<IndexedWord> remaining = Generics.newHashSet(dependencies.vertexSet());
  remaining.removeAll(visited);
  while (!remaining.isEmpty()) {
    IndexedWord orphan = remaining.iterator().next();
    rendering.append(orphan).append("\n");
    recToString(orphan, rendering, 1, visited, dependencies, depLabels, depParents);
    remaining.removeAll(visited);
  }
}
开发者ID:dkmfbk,项目名称:pikes,代码行数:27,代码来源:DepParseInfo.java
示例2: getArc
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
* Slow implementation.
*/
/**
 * Finds the unique arc from {@code source} to {@code target}.
 * Slow implementation (materializes the intersection of two arc sets).
 *
 * @param source the source node of the arc
 * @param target the target node of the arc
 * @return the single arc between the nodes, or {@code null} if none exists
 * @throws RuntimeException if more than one such arc exists (data-structure corruption)
 */
public Arc getArc(Object source, Object target) {
  Set arcsFromSource = arcsBySource.get(source);
  Set arcsToTarget = arcsByTarget.get(target);
  // Guard: a node absent from the index maps yields null here; the previous
  // code would throw NullPointerException on result.addAll(null).
  if (arcsFromSource == null || arcsToTarget == null) {
    return null;
  }
  HashSet result = Generics.newHashSet();
  result.addAll(arcsFromSource);
  result.retainAll(arcsToTarget); // intersection
  if (result.isEmpty()) {
    return null;
  }
  if (result.size() > 1) {
    throw new RuntimeException("Problem in TransducerGraph data structures.");
  }
  // Exactly one element remains; return it.
  Iterator iterator = result.iterator();
  return (Arc) iterator.next();
}
开发者ID:FabianFriedrich,项目名称:Text2Process,代码行数:20,代码来源:TransducerGraph.java
示例3: arcLabelsToNode
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
* Finds all arcs between this node and <code>destNode</code>,
* and returns the <code>Set</code> of <code>Object</code>s which
* label those arcs. If no such arcs exist, returns an empty
* <code>Set</code>.
*
* @param destNode the destination node
* @return the <code>Set</code> of <code>Object</code>s which
* label arcs between this node and <code>destNode</code>
*/
/**
 * Finds all arcs between this node and <code>destNode</code>,
 * and returns the <code>Set</code> of <code>Object</code>s which
 * label those arcs. If no such arcs exist, returns an empty
 * <code>Set</code>.
 *
 * @param destNode the destination node
 * @return the <code>Set</code> of <code>Object</code>s which
 * label arcs between this node and <code>destNode</code>
 */
@SuppressWarnings("unchecked")
public Set<Class<? extends GrammaticalRelationAnnotation>> arcLabelsToNode(TreeGraphNode destNode) {
  Set<Class<? extends GrammaticalRelationAnnotation>> arcLabels = Generics.newHashSet();
  CyclicCoreLabel cl = label();
  // javac doesn't compile properly if generics are fully specified (but eclipse does...)
  for (Class<?> rawKey : cl.keySet()) {
    Class<? extends CoreAnnotation> key = (Class<? extends CoreAnnotation>) rawKey;
    Object val = cl.get(key);
    // instanceof is false for null, so no separate null check on val is needed.
    if (val instanceof Set && ((Set) val).contains(destNode)) {
      if (key != null) {
        arcLabels.add((Class<? extends GrammaticalRelationAnnotation>) key);
      }
    }
  }
  return arcLabels;
}
开发者ID:FabianFriedrich,项目名称:Text2Process,代码行数:27,代码来源:TreeGraphNode.java
示例4: getDeps
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
* The constructor builds a list of typed dependencies using
* information from a <code>GrammaticalStructure</code>.
*
* @param getExtra If true, the list of typed dependencies will contain extra ones.
* If false, the list of typed dependencies will respect the tree structure.
*/
/**
 * Builds the list of typed dependencies from this GrammaticalStructure.
 *
 * @param getExtra if true, the returned list also contains extra dependencies;
 *                 if false, the list respects the tree structure
 * @param f        filter applied when collecting the extra dependencies
 * @return the sorted list of typed dependencies
 */
private List<TypedDependency> getDeps(boolean getExtra, Filter<TypedDependency> f) {
  List<TypedDependency> typedDeps = Generics.newArrayList();
  for (Dependency<Label, Label, Object> dependency : dependencies()) {
    TreeGraphNode governor = (TreeGraphNode) dependency.governor();
    TreeGraphNode dependent = (TreeGraphNode) dependency.dependent();
    GrammaticalRelation relation = getGrammaticalRelation(governor, dependent);
    typedDeps.add(new TypedDependency(relation, governor, dependent));
  }
  if (getExtra) {
    // getDep appends the extra (non-tree) dependencies to typedDeps.
    TreeGraphNode rootNode = root();
    getDep(rootNode, typedDeps, f);
  }
  Collections.sort(typedDeps);
  return typedDeps;
}
开发者ID:FabianFriedrich,项目名称:Text2Process,代码行数:27,代码来源:GrammaticalStructure.java
示例5: getAllDependents
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
* Returns all the dependencies of a certain node.
*
* @param node The node to return dependents for
* @return map of dependencies
*/
/**
 * Returns all the dependencies of a certain node.
 *
 * @param node The node to return dependents for
 * @return map of dependencies, keyed by GrammaticalRelationAnnotation subclasses only
 */
@SuppressWarnings("unchecked")
private static Map<Class<? extends CoreAnnotation>, Set<TreeGraphNode>> getAllDependents(TreeGraphNode node) {
  Map<Class<? extends CoreAnnotation>, Set<TreeGraphNode>> newMap = Generics.newHashMap();
  for (Class<?> o : node.label.keySet()) {
    // Only GrammaticalRelationAnnotation keys are relevant. Test the type
    // explicitly rather than calling asSubclass and catching the resulting
    // ClassCastException — exceptions should not drive control flow.
    if (GrammaticalRelationAnnotation.class.isAssignableFrom(o)) {
      // javac doesn't compile properly if generics are fully specified (but eclipse does...)
      newMap.put((Class<? extends CoreAnnotation>) o,
          (Set<TreeGraphNode>) node.label.get((Class<? extends CoreAnnotation>) o));
    }
  }
  return newMap;
}
开发者ID:FabianFriedrich,项目名称:Text2Process,代码行数:22,代码来源:GrammaticalStructure.java
示例6: UCPtransform
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
* Transforms t if it contains an UCP, it will change the UCP tag
* into the phrasal tag of the first word of the UCP
* (UCP (JJ electronic) (, ,) (NN computer) (CC and) (NN building))
* will become
* (ADJP (JJ electronic) (, ,) (NN computer) (CC and) (NN building))
*
* @param t a tree to be transformed
* @return t transformed
*/
/**
 * Transforms t if it contains an UCP, it will change the UCP tag
 * into the phrasal tag of the first word of the UCP
 * (UCP (JJ electronic) (, ,) (NN computer) (CC and) (NN building))
 * will become
 * (ADJP (JJ electronic) (, ,) (NN computer) (CC and) (NN building))
 *
 * @param t a tree to be transformed; may be null
 * @return t transformed, or null if t was null
 */
public static Tree UCPtransform(Tree t) {
  // Null guard, matching the other CoordinationTransformer variant of this
  // method: t.firstChild() would otherwise throw NullPointerException.
  if (t == null) {
    return null;
  }
  Tree firstChild = t.firstChild();
  if (firstChild != null) {
    List<Pair<TregexPattern,TsurgeonPattern>> ops = Generics.newArrayList();
    for (int i = 0; i < operations.length; i++) {
      for (TregexPattern pattern : matchPatterns[i]) {
        ops.add(Generics.newPair(pattern, operations[i]));
      }
    }
    return Tsurgeon.processPatternsOnTree(ops, t);
  } else {
    return t;
  }
}
开发者ID:FabianFriedrich,项目名称:Text2Process,代码行数:27,代码来源:CoordinationTransformer.java
示例7: getGovMaxChains
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
 * Recursively collects all dependency chains of length at most {@code depth}
 * starting at {@code gov}, following the governor-to-dependents map.
 *
 * @param govToDepMap map from a governor node to its outgoing typed dependencies
 * @param gov         the node at which chains start
 * @param depth       maximum remaining chain length
 * @return the set of maximal chains (each a list of typed dependencies)
 */
private static Set<List<TypedDependency>> getGovMaxChains(Map<TreeGraphNode,List<TypedDependency>> govToDepMap, TreeGraphNode gov, int depth) {
  Set<List<TypedDependency>> chains = Generics.newHashSet();
  List<TypedDependency> outgoing = govToDepMap.get(gov);
  if (depth <= 0 || outgoing == null) {
    return chains;
  }
  for (TypedDependency dependency : outgoing) {
    TreeGraphNode dependent = dependency.dep();
    if (dependent == null) {
      continue;
    }
    Set<List<TypedDependency>> subChains = getGovMaxChains(govToDepMap, dependent, depth - 1);
    if (subChains.isEmpty()) {
      // Leaf of the recursion: the chain is just this single dependency.
      chains.add(Arrays.asList(dependency));
    } else {
      // Prepend this dependency to every chain found below it.
      for (List<TypedDependency> subChain : subChains) {
        List<TypedDependency> chain = new ArrayList<TypedDependency>(subChain.size() + 1);
        chain.add(dependency);
        chain.addAll(subChain);
        chains.add(chain);
      }
    }
  }
  return chains;
}
开发者ID:benblamey,项目名称:stanford-nlp,代码行数:24,代码来源:Dependencies.java
示例8: printFullFeatureMatrix
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
* prints the full feature matrix in tab-delimited form. These can be BIG
* matrices, so be careful! [Can also use printFullFeatureMatrixWithValues]
*/
/**
 * prints the full feature matrix in tab-delimited form. These can be BIG
 * matrices, so be careful! [Can also use printFullFeatureMatrixWithValues]
 */
public void printFullFeatureMatrix(PrintWriter pw) {
  String sep = "\t";
  // Header row: every feature name, tab-separated.
  for (int f = 0; f < featureIndex.size(); f++) {
    pw.print(sep + featureIndex.get(f));
  }
  pw.println();
  // One row per datum: label followed by a 1/0 indicator per feature.
  for (int d = 0; d < labels.length; d++) {
    pw.print(labelIndex.get(d));
    Set<Integer> activeFeatures = Generics.newHashSet();
    for (int feature : data[d]) {
      activeFeatures.add(Integer.valueOf(feature));
    }
    for (int f = 0; f < featureIndex.size(); f++) {
      pw.print(activeFeatures.contains(Integer.valueOf(f)) ? sep + "1" : sep + "0");
    }
    pw.println();
  }
}
开发者ID:benblamey,项目名称:stanford-nlp,代码行数:28,代码来源:RVFDataset.java
示例9: arcLabelsToNode
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
* Finds all arcs between this node and <code>destNode</code>,
* and returns the <code>Set</code> of <code>Object</code>s which
* label those arcs. If no such arcs exist, returns an empty
* <code>Set</code>.
*
* @param destNode the destination node
* @return the <code>Set</code> of <code>Object</code>s which
* label arcs between this node and <code>destNode</code>
*/
/**
 * Finds all arcs between this node and {@code destNode}, and returns the
 * set of annotation classes that label those arcs. Returns an empty set
 * if no such arcs exist.
 *
 * @param destNode the destination node
 * @return the set of GrammaticalRelationAnnotation classes labelling arcs
 *         between this node and {@code destNode}
 */
public Set<Class<? extends GrammaticalRelationAnnotation>> arcLabelsToNode(TreeGraphNode destNode) {
  Set<Class<? extends GrammaticalRelationAnnotation>> labels = Generics.newHashSet();
  CoreLabel coreLabel = label();
  for (Class key : coreLabel.keySet()) {
    // Only GrammaticalRelationAnnotation keys can label arcs; skip the rest.
    if (key != null && GrammaticalRelationAnnotation.class.isAssignableFrom(key)) {
      Class<? extends GrammaticalRelationAnnotation> arcKey = ErasureUtils.uncheckedCast(key);
      Set<TreeGraphNode> targets = coreLabel.get(arcKey);
      if (targets != null && targets.contains(destNode)) {
        labels.add(arcKey);
      }
    }
  }
  return labels;
}
开发者ID:jaimeguzman,项目名称:data_mining,代码行数:26,代码来源:TreeGraphNode.java
示例10: RegexNERSequenceClassifier
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
* Make a new instance of this classifier. The ignoreCase option allows case-insensitive
* regular expression matching, provided with the idea that the provided file might just
* be a manual list of the possible entities for each type.
* @param mapping
* @param ignoreCase
*/
/**
 * Make a new instance of this classifier. The ignoreCase option allows case-insensitive
 * regular expression matching, provided with the idea that the provided file might just
 * be a manual list of the possible entities for each type.
 *
 * @param mapping path to the mapping file of regex entries
 * @param ignoreCase whether regular-expression matching is case-insensitive
 * @param overwriteMyLabels whether labels this classifier produces may be overwritten;
 *                          when true, all entry types are collected into myLabels
 * @param validPosRegex POS pattern restricting matches; null or empty means no restriction
 */
public RegexNERSequenceClassifier(String mapping, boolean ignoreCase, boolean overwriteMyLabels, String validPosRegex) {
  super(new Properties());
  // isEmpty() states the "no POS restriction" case more clearly than equals("").
  if (validPosRegex != null && !validPosRegex.isEmpty()) {
    validPosPattern = Pattern.compile(validPosRegex);
  } else {
    validPosPattern = null;
  }
  entries = readEntries(mapping, ignoreCase);
  this.ignoreCase = ignoreCase;
  this.overwriteMyLabels = overwriteMyLabels;
  myLabels = Generics.newHashSet();
  if (this.overwriteMyLabels) {
    for (Entry entry : entries) {
      myLabels.add(entry.type);
    }
  }
}
开发者ID:benblamey,项目名称:stanford-nlp,代码行数:25,代码来源:RegexNERSequenceClassifier.java
示例11: printFullFeatureMatrix
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
* prints the full feature matrix in tab-delimited form. These can be BIG
* matrices, so be careful!
*/
/**
 * prints the full feature matrix in tab-delimited form. These can be BIG
 * matrices, so be careful!
 */
public void printFullFeatureMatrix(PrintWriter pw) {
  String sep = "\t";
  // Header row: every feature name, tab-separated.
  for (int col = 0; col < featureIndex.size(); col++) {
    pw.print(sep + featureIndex.get(col));
  }
  pw.println();
  // One row per datum: label followed by a 1/0 indicator per feature.
  for (int row = 0; row < labels.length; row++) {
    pw.print(labelIndex.get(row));
    Set<Integer> present = Generics.newHashSet();
    for (int feature : data[row]) {
      present.add(Integer.valueOf(feature));
    }
    for (int col = 0; col < featureIndex.size(); col++) {
      if (present.contains(Integer.valueOf(col))) {
        pw.print(sep + '1');
      } else {
        pw.print(sep + '0');
      }
    }
  }
}
开发者ID:benblamey,项目名称:stanford-nlp,代码行数:27,代码来源:Dataset.java
示例12: calculatePrecision
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
@Override
protected void calculatePrecision(Document doc) {
int pDen = 0;
int pNum = 0;
Map<Integer, Mention> goldMentions = doc.allGoldMentions;
for(CorefCluster c : doc.corefClusters.values()){
if(c.corefMentions.size()==0) continue;
pDen += c.corefMentions.size()-1;
pNum += c.corefMentions.size();
Set<CorefCluster> partitions = Generics.newHashSet();
for (Mention predictedMention : c.corefMentions){
if(!goldMentions.containsKey(predictedMention.mentionID)) { // twinless goldmention
pNum--;
} else {
partitions.add(doc.goldCorefClusters.get(goldMentions.get(predictedMention.mentionID).goldCorefClusterID));
}
}
pNum -= partitions.size();
}
assert(pDen == (doc.allPredictedMentions.size()-doc.corefClusters.values().size()));
precisionDenSum += pDen;
precisionNumSum += pNum;
}
开发者ID:benblamey,项目名称:stanford-nlp,代码行数:26,代码来源:ScorerMUC.java
示例13: fromJavaUtilLogging
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
 * Builds a handler that forwards Redwood output channels to a
 * {@code java.util.logging} Logger, mapping each Redwood channel to the
 * closest java.util.logging Level (default channel maps to INFO).
 *
 * @param logger the java.util.logging logger to forward to
 * @return a configured RedirectOutputHandler
 */
public static RedirectOutputHandler<Logger, Level> fromJavaUtilLogging(Logger logger) {
  Map<Object, Level> channelMapping = Generics.newHashMap();
  channelMapping.put(Redwood.WARN, Level.WARNING);
  channelMapping.put(Redwood.DBG, Level.FINE);
  channelMapping.put(Redwood.ERR, Level.SEVERE);
  try {
    return new RedirectOutputHandler<Logger, Level>(
        logger,
        Logger.class.getMethod("log", Level.class, String.class),
        channelMapping,
        Level.INFO
    );
  } catch (NoSuchMethodException e) {
    // Logger.log(Level, String) always exists; reaching here means a broken JVM.
    throw new IllegalStateException(e);
  }
}
开发者ID:jaimeguzman,项目名称:data_mining,代码行数:17,代码来源:RedirectOutputHandler.java
示例14: init
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
@SuppressWarnings("unchecked")
private void init() {
ruleMap = Generics.newHashMap();
int numStates = index.size();
rulesWithParent = new List[numStates];
rulesWithLC = new List[numStates];
rulesWithRC = new List[numStates];
ruleSetWithLC = new Set[numStates];
ruleSetWithRC = new Set[numStates];
for (int s = 0; s < numStates; s++) {
rulesWithParent[s] = new ArrayList<BinaryRule>();
rulesWithLC[s] = new ArrayList<BinaryRule>();
rulesWithRC[s] = new ArrayList<BinaryRule>();
ruleSetWithLC[s] = Generics.newHashSet();
ruleSetWithRC[s] = Generics.newHashSet();
}
}
开发者ID:benblamey,项目名称:stanford-nlp,代码行数:18,代码来源:BinaryGrammar.java
示例15: readTagCount
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/** A TagCount object's fields are read from the file. They are read from
* the current position and the file is not closed afterwards.
*/
/** A TagCount object's fields are read from the file. They are read from
 * the current position and the file is not closed afterwards.
 *
 * @param rf the stream to read from, positioned at a serialized TagCount
 * @return the deserialized TagCount
 * @throws RuntimeIOException if reading fails
 */
public static TagCount readTagCount(DataInputStream rf) {
  try {
    TagCount result = new TagCount();
    int numTags = rf.readInt();
    result.map = Generics.newHashMap(numTags);
    for (int i = 0; i < numTags; i++) {
      String tag = rf.readUTF();
      int count = rf.readInt();
      // NULL_SYMBOL is the on-disk placeholder for a null tag.
      result.map.put(NULL_SYMBOL.equals(tag) ? null : tag, count);
    }
    // Rebuild the derived caches from the freshly loaded map.
    result.getTagsCache = result.map.keySet().toArray(new String[result.map.keySet().size()]);
    result.sumCache = result.calculateSumCache();
    return result;
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  }
}
开发者ID:jaimeguzman,项目名称:data_mining,代码行数:25,代码来源:TagCount.java
示例16: findDocType
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/** Find document type: Conversation or article */
private DocType findDocType(Dictionaries dict) {
boolean speakerChange = false;
Set<Integer> discourseWithIorYou = Generics.newHashSet();
for(CoreMap sent : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
for(CoreLabel w : sent.get(CoreAnnotations.TokensAnnotation.class)) {
int utterIndex = w.get(CoreAnnotations.UtteranceAnnotation.class);
if(utterIndex!=0) speakerChange = true;
if(speakerChange && utterIndex==0) return DocType.ARTICLE;
if(dict.firstPersonPronouns.contains(w.get(CoreAnnotations.TextAnnotation.class).toLowerCase())
|| dict.secondPersonPronouns.contains(w.get(CoreAnnotations.TextAnnotation.class).toLowerCase())) {
discourseWithIorYou.add(utterIndex);
}
if(maxUtter < utterIndex) maxUtter = utterIndex;
}
}
if(!speakerChange) return DocType.ARTICLE;
return DocType.CONVERSATION; // in conversation, utter index keep increasing.
}
开发者ID:benblamey,项目名称:stanford-nlp,代码行数:21,代码来源:Document.java
示例17: UCPtransform
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
* Transforms t if it contains an UCP, it will change the UCP tag
* into the phrasal tag of the first word of the UCP
* (UCP (JJ electronic) (, ,) (NN computer) (CC and) (NN building))
* will become
* (ADJP (JJ electronic) (, ,) (NN computer) (CC and) (NN building))
*
* @param t a tree to be transformed
* @return t transformed
*/
public static Tree UCPtransform(Tree t) {
if (t == null) {
return null;
}
Tree firstChild = t.firstChild();
if (firstChild != null) {
List<Pair<TregexPattern,TsurgeonPattern>> ops = Generics.newArrayList();
for (int i = 0; i < operations.length; i++) {
for (TregexPattern pattern : matchPatterns[i]) {
ops.add(Generics.newPair(pattern, operations[i]));
}
}
return Tsurgeon.processPatternsOnTree(ops, t);
} else {
return t;
}
}
开发者ID:benblamey,项目名称:stanford-nlp,代码行数:30,代码来源:CoordinationTransformer.java
示例18: toString
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
* Recursive depth first traversal. Returns a structured representation of the
* dependency graph.
*
* Example:
* <p/>
*
* <pre>
* /-> need-3 (root)
* -> We-0 (nsubj)
* -> do-1 (aux)
* -> n't-2 (neg)
* -> badges-6 (dobj)
* -> no-4 (det)
* -> stinking-5 (amod)
* </pre>
*/
/**
 * Recursive depth first traversal. Returns a structured representation of the
 * dependency graph.
 *
 * Example:
 * <p/>
 *
 * <pre>
 *  /-> need-3 (root)
 *    -> We-0 (nsubj)
 *    -> do-1 (aux)
 *    -> n't-2 (neg)
 *    -> badges-6 (dobj)
 *      -> no-4 (det)
 *      -> stinking-5 (amod)
 * </pre>
 */
@Override
public String toString() {
  Collection<IndexedWord> roots = getRoots();
  if (roots.isEmpty()) {
    // Shouldn't happen; fall back to the "readable" rendering.
    return toString("readable");
  }
  StringBuilder out = new StringBuilder();
  Set<IndexedWord> visited = Generics.newHashSet();
  for (IndexedWord root : roots) {
    out.append("-> ").append(root).append(" (root)\n");
    recToString(root, out, 1, visited);
  }
  // Render any nodes disconnected from all roots.
  Set<IndexedWord> leftover = Generics.newHashSet(vertexSet());
  leftover.removeAll(visited);
  while (!leftover.isEmpty()) {
    IndexedWord next = leftover.iterator().next();
    out.append(next).append("\n");
    recToString(next, out, 1, visited);
    leftover.removeAll(visited);
  }
  return out.toString();
}
开发者ID:benblamey,项目名称:stanford-nlp,代码行数:42,代码来源:SemanticGraph.java
示例19: requirementsSatisfied
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/** Returns the union of the requirements satisfied by every annotator in this pipeline. */
@Override
public Set<Requirement> requirementsSatisfied() {
  Set<Requirement> combined = Generics.newHashSet();
  for (Annotator component : annotators) {
    combined.addAll(component.requirementsSatisfied());
  }
  return combined;
}
开发者ID:begab,项目名称:kpe,代码行数:9,代码来源:SzTEAnnotationPipeline.java
示例20: evalPrecision
import edu.stanford.nlp.util.Generics; //导入依赖的package包/类
/**
 * Computes per-equivalence-class guessed and correct counts for precision:
 * each guess is counted as guessed, and as correct if a matching gold item
 * exists (the match is then consumed so it cannot be credited twice).
 *
 * @param guesses the predicted items
 * @param golds   the reference items
 * @return pair of (guessed counts, correct counts), keyed by equivalence class
 */
Pair<ClassicCounter<OUT>, ClassicCounter<OUT>> evalPrecision(Collection<IN> guesses, Collection<IN> golds) {
  // Bag evaluation keeps duplicates; set evaluation collapses them.
  Collection<IN> workingGuesses;
  Collection<IN> workingGolds;
  if (bagEval) {
    workingGuesses = new ArrayList<IN>(guesses.size());
    workingGolds = new ArrayList<IN>(golds.size());
  } else {
    workingGuesses = new HashSet<IN>(guesses.size());
    workingGolds = new HashSet<IN>(golds.size());
  }
  workingGuesses.addAll(guesses);
  workingGolds.addAll(golds);
  ClassicCounter<OUT> guessedCounts = new ClassicCounter<OUT>();
  ClassicCounter<OUT> correctCounts = new ClassicCounter<OUT>();
  for (IN guess : workingGuesses) {
    OUT equivalenceClass = eq.equivalenceClass(guess);
    guessedCounts.incrementCount(equivalenceClass);
    if (checker.contained(guess, workingGolds)) {
      correctCounts.incrementCount(equivalenceClass);
      // Consume the matched gold item so it is not credited again.
      removeItem(guess, workingGolds, checker);
    } else if (verbose) {
      System.out.println("Eval missed " + guess);
    }
  }
  return Generics.newPair(guessedCounts, correctCounts);
}
开发者ID:FabianFriedrich,项目名称:Text2Process,代码行数:30,代码来源:EquivalenceClassEval.java
注:本文中的edu.stanford.nlp.util.Generics类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论