Java TreeFactory Class Code Examples

This article collects typical usage examples of the Java class edu.stanford.nlp.trees.TreeFactory. If you are wondering what TreeFactory is for, how to use it, or what real-world code that uses it looks like, the curated class examples below should help.



The TreeFactory class belongs to the edu.stanford.nlp.trees package. Twenty code examples of the class are shown below, sorted by popularity by default.
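
Before diving into the twenty examples, here is a minimal standalone sketch (not taken from any of the projects below) of the core TreeFactory API: newLeaf builds leaves and newTreeNode builds labelled internal nodes. The class and method names are from the Stanford CoreNLP edu.stanford.nlp.trees package; the sentence content is invented for illustration.

import java.util.Arrays;

import edu.stanford.nlp.trees.LabeledScoredTreeFactory;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeFactory;

public class TreeFactoryBasics {
  public static void main(String[] args) {
    // LabeledScoredTreeFactory is the concrete factory used throughout the examples below.
    TreeFactory tf = new LabeledScoredTreeFactory();

    // Leaves hold words; preterminal nodes hold POS tags; newTreeNode builds internal nodes.
    Tree dt = tf.newTreeNode("DT", Arrays.asList(tf.newLeaf("the")));
    Tree nn = tf.newTreeNode("NN", Arrays.asList(tf.newLeaf("cat")));
    Tree np = tf.newTreeNode("NP", Arrays.asList(dt, nn));

    // Tree.toString() uses Penn Treebank bracket notation: (NP (DT the) (NN cat))
    System.out.println(np);
  }
}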

Example 1: match

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
@Override
protected boolean match(ExtractorMatcher matcher,
    TregexMatcher tregexMatcher, Tree tree) {

  boolean result = super.match(matcher, tregexMatcher, tree);
  if (result) {
    List<Tree> children = matcher.getMatch().getChildrenAsList();
    int triggerIndex = children.indexOf(matcher.trigger);
    int argIndex = children.indexOf(matcher.argument);

    matcher.matched = tregexMatcher.getMatch().deepCopy();
    children = matcher.matched.getChildrenAsList();

    matcher.trigger = children.get(triggerIndex);

    TreeFactory tf = matcher.matched.treeFactory();
    matcher.argument = tf.newTreeNode(
        "NP",
        children.subList(argIndex, children.size()));
  }
  return result;
}
 
Developer: leebird, Project: legonlp, Lines: 23, Source: VadjArg.java
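
This example and the next one share the same pattern: deep-copy the matched subtree, then regroup a slice of its children under a new "NP" node using the tree's own TreeFactory. The standalone sketch below isolates just that pattern under invented tree content; the Tregex/ExtractorMatcher machinery from the project is left out.

import java.util.Arrays;
import java.util.List;

import edu.stanford.nlp.trees.LabeledScoredTreeFactory;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeFactory;

public class RegroupChildrenDemo {
  public static void main(String[] args) {
    TreeFactory tf = new LabeledScoredTreeFactory();
    Tree vp = tf.newTreeNode("VP", Arrays.asList(
        tf.newTreeNode("VBZ", Arrays.asList(tf.newLeaf("inhibits"))),
        tf.newTreeNode("NN", Arrays.asList(tf.newLeaf("gene"))),
        tf.newTreeNode("NN", Arrays.asList(tf.newLeaf("expression")))));

    // Work on a deep copy so the original matched tree stays untouched.
    Tree copy = vp.deepCopy();
    List<Tree> children = copy.getChildrenAsList();

    // Regroup everything after the verb under a fresh NP node,
    // using the factory that belongs to the copied tree.
    Tree argument = copy.treeFactory().newTreeNode("NP",
        children.subList(1, children.size()));
    System.out.println(argument); // (NP (NN gene) (NN expression))
  }
}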


Example 2: match

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
@Override
protected boolean match(ExtractorMatcher matcher,
    TregexMatcher tregexMatcher, Tree tree) {

  boolean result = super.match(matcher, tregexMatcher, tree);
  if (result) {
    List<Tree> children = matcher.getMatch().getChildrenAsList();

    int argIndex = children.indexOf(matcher.argument);

    matcher.matched = tregexMatcher.getMatch().deepCopy();
    children = matcher.matched.getChildrenAsList();

    matcher.trigger = matcher.matched;

    TreeFactory tf = matcher.matched.treeFactory();
    matcher.argument = tf
        .newTreeNode("NP", children.subList(0, argIndex + 1));
  }
  return result;
}
 
Developer: leebird, Project: legonlp, Lines: 22, Source: ArgLocus.java


Example 3: fixNonUnaryRoot

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
private Tree fixNonUnaryRoot(Tree t, TreeFactory tf) {
  List<Tree> kids = t.getChildrenAsList();
  if(kids.size() == 2 && t.firstChild().isPhrasal() && tlp.isSentenceFinalPunctuationTag(t.lastChild().value())) {
    List<Tree> grandKids = t.firstChild().getChildrenAsList();
    grandKids.add(t.lastChild());
    t.firstChild().setChildren(grandKids);
    kids.remove(kids.size() - 1);
    t.setChildren(kids);
    t.setValue(tlp.startSymbol());
    
  } else {
    t.setValue(nonUnaryRoot);
    t = tf.newTreeNode(tlp.startSymbol(), Collections.singletonList(t));
  }
  return t;
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 17, Source: NegraPennTreeNormalizer.java


Example 4: stringToTree

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
/**
 * Build the Tree object from its string form.
 */
public static Tree stringToTree(String str, TreeFactory tf) {
  try {
    PennTreeReader ptr = new PennTreeReader(new BufferedReader(new StringReader(str)), tf);
    Tree parseTree = ptr.readTree();
    return parseTree;
  } catch( Exception ex ) { ex.printStackTrace(); }
  return null;
}
 
Developer: nchambers, Project: probschemas, Lines: 12, Source: TreeOperator.java
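
As a quick illustration of the helper above, the sketch below parses a bracketed string back into a Tree and serializes it again. TreeOperator.stringToTree is the method from Example 4 and is assumed to be on the classpath; the sample parse string is invented for this illustration.

import edu.stanford.nlp.trees.LabeledScoredTreeFactory;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeFactory;

public class StringToTreeDemo {
  public static void main(String[] args) {
    TreeFactory tf = new LabeledScoredTreeFactory();
    // Invented parse string, purely for illustration.
    String parse = "(S (NP (DT The) (NN dog)) (VP (VBD barked)) (. .))";

    // TreeOperator.stringToTree is the helper from Example 4 (assumed importable here).
    Tree tree = TreeOperator.stringToTree(parse, tf);
    if (tree != null) {
      System.out.println("leaves: " + tree.yield()); // the word sequence
      System.out.println("penn:   " + tree);         // serialized back to bracket form
    }
  }
}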


Example 5: stringsToTrees

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
/**
 * Build Tree objects from a collection of parses in string form.
 */
public static List<Tree> stringsToTrees(Collection<String> strings) {
  if( strings != null ) {
    List<Tree> trees = new ArrayList<Tree>();
    TreeFactory tf = new LabeledScoredTreeFactory();
    for( String str : strings )
      trees.add(stringToTree(str, tf));
    return trees;
  }
  else return null;
}
 
Developer: nchambers, Project: probschemas, Lines: 14, Source: TreeOperator.java


Example 6: main

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
/**
 * @param args
 */
public static void main(String[] args) {
  TreeFactory tf = new LabeledScoredTreeFactory();
  List<Tree> trees = new ArrayList<Tree>();
  trees.add(TreeOperator.stringToTree("(TOP (S (NP (person (NNP Dave))) (VP (VBD left) (NP (NP (DT the) (NN job) (JJ first) (NN thing)) (PP (IN in) (NP (DT the) (NN morning))))) (. .)) )", tf));
  trees.add(TreeOperator.stringToTree("(TOP (S (NP (PRP He)) (VP (VP (VBD drank) (NP (NP (NNS lots)) (PP (IN of) (NP (NN coffee))))) (CC and) (VP (VBD picked) (NP (PRP her)) (PRT (RP up)))) (. .)) )", tf));

  CorefStanford coref = new CorefStanford();
  coref.processParses(trees);
}
 
Developer: nchambers, Project: schemas, Lines: 13, Source: CorefStanford.java


Example 7: analyzeParsesNoCoref

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
/**
 * @desc Process parses for verbs
 * @param parses The list of parses in String form
 */
private void analyzeParsesNoCoref( Vector<String> parses ) {
  TreeFactory tf = new LabeledScoredTreeFactory();
  Vector<String> verbs = new Vector();

  // Save the verbs in each parse
  int sentence = 0;
  for( String parse : parses ) {
    try {
      // Read the parse
      PennTreeReader ptr = new PennTreeReader(new BufferedReader(new StringReader(parse)), tf);
      Tree ansTree = ptr.readTree();

      // Look for the verbs in the tree
      if( ansTree != null ) {

        Vector<Tree> parseVerbs = TreeOperator.verbTreesFromTree(ansTree);
        for( Tree verb : parseVerbs )
          //	    verbs.add(verb.firstChild().firstChild().value());
          verbs.add(verb.firstChild().value());

      }
    } catch( Exception ex ) { ex.printStackTrace(); }
    sentence++;
  }

  // do the pair counts
  countPairs(verbs);
}
 
Developer: nchambers, Project: schemas, Lines: 33, Source: BasicEventAnalyzer.java


Example 8: analyzeParses

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
private void analyzeParses( GigaDoc doc, Collection<String> parses, Collection<EntityMention> entities ) {
  if( entities != null ) {
    TreeFactory tf = new LabeledScoredTreeFactory();

    // Read in all the parse trees
    Tree trees[] = new Tree[parses.size()];
    int i = 0;
    for( String parse : parses ) {
      try {
        PennTreeReader ptr = new PennTreeReader(new BufferedReader(new StringReader(parse)), tf);
        trees[i] = ptr.readTree();
      } catch( Exception ex ) { ex.printStackTrace(); }
      i++;
    }

    // Convert all entity spans from character spans to word-based
    for( EntityMention mention : entities ) {
      mention.convertCharSpanToIndex(TreeOperator.toRaw(trees[mention.sentenceID()-1]));
      doc.addEntity(mention);
      //	mentions[mention.sentenceID()-1].add(mention);
    }

    // Save the verbs in each parse
    int sid = 0, eid = 0;
    for( Tree tree : trees ) {
      if( tree != null ) {
        // Look for the verbs in the tree
        Vector<Tree> parseVerbs = TreeOperator.verbTreesFromTree(tree);
        for( Tree verb : parseVerbs ) {
          //            System.out.println("  verb: " + verb + " index: " + (TreeOperator.wordIndex(tree,verb)+1));
          doc.addEvent(new WordEvent(eid, verb.firstChild().value(), TreeOperator.wordIndex(tree,verb)+1, sid+1));
          eid++;
        }
        sid++;
      }
    }
  }
}
 
Developer: nchambers, Project: schemas, Lines: 39, Source: ParsedToCoref.java


Example 9: getStanfordContituencyTree

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
public Tree getStanfordContituencyTree() {
    TreeFactory tf = new LabeledScoredTreeFactory();
    StringReader r = new StringReader(getParseText());
    TreeReader tr = new PennTreeReader(r, tf);
    try {
        return tr.readTree();
    } catch (IOException e) {
        throw new RuntimeException("Error: IOException should not be thrown by StringReader");
    }
}
 
Developer: mgormley, Project: agiga, Lines: 11, Source: StanfordAgigaSentence.java


Example 10: match

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
@Override
protected boolean match(ExtractorMatcher matcher,
    TregexMatcher tregexMatcher, Tree tree) {

  boolean result = super.match(matcher, tregexMatcher, tree);
  if (result) {
    List<Tree> children = matcher.getMatch().getChildrenAsList();
    int triggerIndex = children.indexOf(matcher.trigger);
    int argIndex = children.indexOf(matcher.argument);

    matcher.matched = tregexMatcher.getMatch();
    TreeFactory tf = tregexMatcher.getMatch().treeFactory();

    matcher.matched = tregexMatcher.getMatch().deepCopy(tf);
    children = matcher.matched.getChildrenAsList();

    matcher.trigger = tf.newTreeNode(
        "NP",
        children.subList(triggerIndex, children.size()));

    matcher.argument = tf
        .newTreeNode("NP", children.subList(0, argIndex + 1));

    while (matcher.matched.numChildren() != 0) {
      matcher.matched.removeChild(0);
    }
    matcher.matched.addChild(matcher.argument);
    matcher.matched.addChild(matcher.trigger);
  }
  return result;
}
 
Developer: leebird, Project: legonlp, Lines: 34, Source: ArgVnorm.java


Example 11: match

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
@Override
protected boolean match(ExtractorMatcher matcher,
    TregexMatcher tregexMatcher, Tree tree) {

  Tree leaf = tregexMatcher.getMatch().getChild(0);
  if (!leaf.isLeaf()) {
    return false;
  }

  OffsetLabel label = (OffsetLabel) leaf.label();
  int dash = label.value().lastIndexOf('-');
  if (dash == -1 || dash == 0) {
    return false;
  }

  matcher.matched = leaf.parent(tree).deepCopy();

  TreeFactory tf = matcher.matched.treeFactory();

  OffsetLabel newLeafLabel = new OffsetLabel(label.value()
      .substring(dash + 1));
  newLeafLabel.setBeginPosition(dash + 1 + label.beginPosition());
  newLeafLabel.setEndPosition(label.endPosition());
  
  leaf = tf.newLeaf(newLeafLabel);
  matcher.trigger = tf.newTreeNode("NP", Collections.singletonList(leaf));
  
  newLeafLabel = new OffsetLabel(label.value()
      .substring(0, dash));
  newLeafLabel.setBeginPosition(label.beginPosition());
  newLeafLabel.setEndPosition(label.beginPosition() + dash);
  
  leaf = tf.newLeaf(newLeafLabel);
  matcher.argument = tf.newTreeNode("NP", Collections.singletonList(leaf));

  matcher.matched.removeChild(0);
  matcher.matched.addChild(matcher.argument);
  matcher.matched.addChild(matcher.trigger);
  return true;
}
 
Developer: leebird, Project: legonlp, Lines: 41, Source: ArgDashExpressing.java


Example 12: transformRoot

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
Tree transformRoot(Tree tree, TreeFactory tf) {
  // XXXX TODO: use tlp and don't assume 1 daughter of ROOT!
  // leave the root intact
  // if (tlp.isStartSymbol(tlp.basicCategory(tree.label().value())))
  if (tree.label().toString().startsWith("ROOT")) {
    return tf.newTreeNode(tree.label(), Collections.singletonList(transformNode(tree.children()[0], tf)));
  }
  return transformNode(tree, tf);
}
 
Developer: FabianFriedrich, Project: Text2Process, Lines: 10, Source: CollinsPuncTransformer.java


Example 13: normalizeWholeTree

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
/**
 * Normalize a whole tree.
 * TueBa-D/Z adaptation. Fixes trees with non-unary roots, does nothing else.
 */
@Override
public Tree normalizeWholeTree(Tree tree, TreeFactory tf) {
  if (tree.label().value().equals(root) && tree.children().length > 1) {
    Tree underRoot = tree.treeFactory().newTreeNode(root, tree.getChildrenAsList());
    tree.setChildren(new Tree[1]);
    tree.setChild(0, underRoot);

  }
  // we just want the non-unary root fixed.
  return tree;
}
 
Developer: FabianFriedrich, Project: Text2Process, Lines: 16, Source: TueBaDZPennTreeNormalizer.java
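
To make the effect of the TueBa-D/Z fix concrete, the following standalone sketch reproduces the same non-unary-root repair with a plain LabeledScoredTreeFactory. The node labels and the "ROOT" symbol are illustrative only, not taken from the normalizer's actual configuration.

import java.util.Arrays;
import java.util.Collections;

import edu.stanford.nlp.trees.LabeledScoredTreeFactory;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeFactory;

public class NonUnaryRootDemo {
  public static void main(String[] args) {
    TreeFactory tf = new LabeledScoredTreeFactory();

    // A root with two daughters, i.e. a non-unary root: (ROOT (NP (NN Hallo)) ($. .))
    Tree np = tf.newTreeNode("NP",
        Arrays.asList(tf.newTreeNode("NN", Arrays.asList(tf.newLeaf("Hallo")))));
    Tree punct = tf.newTreeNode("$.", Arrays.asList(tf.newLeaf(".")));
    Tree root = tf.newTreeNode("ROOT", Arrays.asList(np, punct));

    // The same repair as in Example 13: push all daughters under one new node
    // so that the root ends up with exactly one child.
    if (root.children().length > 1) {
      Tree underRoot = tf.newTreeNode("ROOT", root.getChildrenAsList());
      root.setChildren(Collections.singletonList(underRoot));
    }
    System.out.println(root); // (ROOT (ROOT (NP (NN Hallo)) ($. .)))
  }
}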


Example 14: xTree

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
/**
 * Construct a fall through tree in case we can't parse this sentence
 * @param words
 * @return a tree with X for all the internal nodes
 */
public static Tree xTree(List<? extends HasWord> words) {
  TreeFactory lstf = new LabeledScoredTreeFactory();
  List<Tree> lst2 = new ArrayList<Tree>();
  for (HasWord obj : words) {
    String s = obj.word();
    Tree t = lstf.newLeaf(s);
    Tree t2 = lstf.newTreeNode("X", Collections.singletonList(t));
    lst2.add(t2);
  }
  return lstf.newTreeNode("X", lst2);
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 17, Source: ParserAnnotatorUtils.java
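
A hedged usage sketch for the helper above: it builds the word list with edu.stanford.nlp.ling.Word (which implements HasWord) and prints the flat X-labelled fall-through tree. The call assumes xTree is reachable as the static method shown in Example 14; the import location of ParserAnnotatorUtils is an assumption and may need adjusting.

import java.util.ArrayList;
import java.util.List;

import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.ling.Word;
import edu.stanford.nlp.pipeline.ParserAnnotatorUtils; // assumed location of the class from Example 14
import edu.stanford.nlp.trees.Tree;

public class XTreeDemo {
  public static void main(String[] args) {
    // edu.stanford.nlp.ling.Word implements HasWord, so it can feed xTree directly.
    List<HasWord> words = new ArrayList<HasWord>();
    for (String w : new String[] {"No", "parse", "was", "found", "."}) {
      words.add(new Word(w));
    }

    // Fall-through tree: every word sits under its own X node, all under a root X.
    Tree fallback = ParserAnnotatorUtils.xTree(words);
    System.out.println(fallback); // (X (X No) (X parse) (X was) (X found) (X .))
  }
}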


Example 15: normalizeWholeTree

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
@Override
public Tree normalizeWholeTree(Tree tree, TreeFactory tf) {
  tree = tree.prune(hebrewEmptyFilter, tf).spliceOut(aOverAFilter, tf);

  //Add start symbol so that the root has only one sub-state. Escape any enclosing brackets.
  //If the "tree" consists entirely of enclosing brackets e.g. ((())) then this method
  //will return null. In this case, readers e.g. PennTreeReader will try to read the next tree.
  while(tree != null && (tree.value() == null || tree.value().equals("")) && tree.numChildren() <= 1)
    tree = tree.firstChild();

  if(tree != null && !tree.value().equals(tlp.startSymbol()))
    tree = tf.newTreeNode(tlp.startSymbol(), Collections.singletonList(tree));

  return tree;
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 16, Source: HebrewTreeNormalizer.java


Example 16: normalizeWholeTree

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
/**
 * Normalize a whole tree -- one can assume that this is the
 * root.  This implementation deletes empty elements (ones with
 * nonterminal tag label starting with '*T') from the tree.  It
 * does work for a null tree.
 */
@Override
public Tree normalizeWholeTree(Tree tree, TreeFactory tf) {
  // add an extra root to non-unary roots
  if(tree.value() == null)
    tree = fixNonUnaryRoot(tree, tf);
  else if(!tree.value().equals(tlp.startSymbol()))
    tree = tf.newTreeNode(tlp.startSymbol(), Collections.singletonList(tree));

  tree = tree.prune(emptyFilter, tf).spliceOut(aOverAFilter, tf);

  // insert NPs in PPs if you're supposed to do that
  if (insertNPinPP) {
    insertNPinPPall(tree);
  }

  for(Tree t : tree) {
    if(t.isLeaf() || t.isPreTerminal()) continue;
    if(t.value() == null || t.value().equals("")) t.setValue("DUMMY");

    // there's also a '--' category
    if(t.value().matches("--.*")) continue;

    // fix a bug in the ACL08 German tiger treebank
    String cat = t.value();
    if(cat == null || cat.equals("")) {
      if (t.numChildren() == 3 && t.firstChild().label().value().equals("NN") && t.getChild(1).label().value().equals("$.")) {
        System.err.println("Correcting treebank error: giving phrase label DL to " + t);
        t.label().setValue("DL");
      }
    }
  }

  return tree;
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 41, Source: NegraPennTreeNormalizer.java


Example 17: calculateIDFofVerbs

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
/**
 * Counts all *verbs* in the parse trees and tracks IDF counts.
 */
private void calculateIDFofVerbs( Vector<String> parses ) {
  TreeFactory tf = new LabeledScoredTreeFactory();
  HashMap<String,Boolean> seen = new HashMap(100);
  HashMap<String,Boolean> seenLemmas = new HashMap(100);

  // Loop over each parse tree
  for( String parse : parses ) {
    try {
      PennTreeReader ptr = new PennTreeReader(new BufferedReader(new StringReader(parse)), tf);
      Tree ansTree = ptr.readTree();

      if( ansTree != null ) {

        // Look for the verbs in the tree
        Vector<Tree> parseVerbs = TreeOperator.verbTreesFromTree(ansTree);
        for( Tree verb : parseVerbs ) {
          String wordString = verb.firstChild().value().toLowerCase();

          // Look for a particle
          String particle = findParticleInTree(verb.parent(ansTree));
          if( particle != null ) wordString += "_" + particle.toLowerCase();

          // Don't count numerals or weird starting punctuation tokens
          if( wordString.matches("[a-zA-Z].*") && !wordString.matches(".*\\d.*") )  {
            // Count the documents the word appears in, for IDF score
            if( !seen.containsKey(wordString) ) {
              seen.put(wordString, true);
              // add the count of documents
              idf.increaseDocCount(wordString);
            }

            // Count the documents the lemma appears in, for IDF score
            String lemmaString = verbToLemma(wordString);
            if( lemmaString == null ) lemmaString = wordString;
            if( !seenLemmas.containsKey(lemmaString) ) {
              seenLemmas.put(lemmaString, true);
              // add the count of documents
              idfLemmas.increaseDocCount(lemmaString);
            }

            // Increment word frequency
            idf.increaseTermFrequency(wordString);
            idfLemmas.increaseTermFrequency(lemmaString);
          }
        }
      }

    } catch( Exception ex ) { ex.printStackTrace(); }
  }
}
 
Developer: nchambers, Project: schemas, Lines: 54, Source: BasicEventAnalyzer.java


Example 18: match

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
@Override
protected boolean match(ExtractorMatcher matcher,
    TregexMatcher tregexMatcher, Tree tree) {

  Tree leaf = tregexMatcher.getMatch().getChild(0);
  if (!leaf.isLeaf()) {
    return false;
  }

  OffsetLabel label = (OffsetLabel) leaf.label();
  if (label.value().contains("-RRB-")) {
    return false;
  }
  Matcher m = p.matcher(label.value());
  if (!m.find()) {
    return false;
  }

  matcher.matched = leaf.parent(tree).deepCopy();
  
  TreeFactory tf = matcher.matched.treeFactory();

  OffsetLabel newLeafLabel = new OffsetLabel(m.group(2));
  newLeafLabel.setBeginPosition(m.start(2) + label.beginPosition());
  newLeafLabel.setEndPosition(m.end(2) + label.beginPosition());
  
  leaf = tf.newLeaf(newLeafLabel);
  matcher.trigger = tf.newTreeNode(matcher.matched.value(), Collections.singletonList(leaf));
  
  newLeafLabel = new OffsetLabel(m.group(1));
  newLeafLabel.setBeginPosition(m.start(1) + label.beginPosition());
  newLeafLabel.setEndPosition(m.end(1) + label.beginPosition());
  
  leaf = tf.newLeaf(newLeafLabel);
  matcher.argument = tf.newTreeNode(matcher.matched.value(), Collections.singletonList(leaf));

  matcher.matched.removeChild(0);
  matcher.matched.addChild(matcher.argument);
  matcher.matched.addChild(matcher.trigger);
  return true;
}
 
Developer: leebird, Project: legonlp, Lines: 42, Source: ArgDashVnorm.java


Example 19: treeFactory

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
@Override
public TreeFactory treeFactory() {
  return OffsetTreeFactory.instance();
}
 
Developer: leebird, Project: legonlp, Lines: 5, Source: OffsetTree.java


Example 20: FragDiscardingPennTreeReader

import edu.stanford.nlp.trees.TreeFactory; // import the required package/class
public FragDiscardingPennTreeReader(Reader in, TreeFactory tf, TreeNormalizer tn, Tokenizer<String> tk) {
  super(in, tf, tn, tk);
}
 
Developer: FabianFriedrich, Project: Text2Process, Lines: 4, Source: FragDiscardingPennTreeReader.java



Note: The edu.stanford.nlp.trees.TreeFactory examples above were collected from open-source projects hosted on GitHub, MSDocs, and similar source/documentation platforms. The snippets come from projects contributed by their respective open-source authors; copyright remains with those authors, and any redistribution or reuse should follow each project's license. Please do not republish this article without permission.

