This article collects typical usage examples of the Java class org.apache.lucene.analysis.synonym.SolrSynonymParser. If you have been wondering what SolrSynonymParser does, how to use it, or where to find working examples, the curated code samples below should help.
The SolrSynonymParser class belongs to the org.apache.lucene.analysis.synonym package. Six code examples of the class are shown below, sorted by popularity by default.
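Before the examples, here is a minimal, self-contained sketch of the workflow that all six examples share: build an Analyzer for tokenizing the terms inside the rules, feed Solr-format synonym rules to a SolrSynonymParser, and compile them into a SynonymMap. The class name SolrSynonymParserDemo and the inline rule string are invented for illustration only; the sketch assumes a Lucene 4.x-era API where SolrSynonymParser.parse(Reader) is available (as in Examples 5 and 6 below), while older releases use add(Reader) instead (as in Example 2).

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.synonym.SolrSynonymParser;
import org.apache.lucene.analysis.synonym.SynonymMap;
import org.apache.lucene.util.Version;

public class SolrSynonymParserDemo {
  public static void main(String[] args) throws Exception {
    // Analyzer used to tokenize the terms inside the synonym rules themselves
    Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);

    // dedup=true drops duplicate rules; expand=true makes "couch,sofa,divan" map in all directions
    SolrSynonymParser parser = new SolrSynonymParser(true, true, analyzer);

    // Hypothetical inline rules; in practice the Reader usually wraps a synonyms.txt resource
    parser.parse(new StringReader("couch,sofa,divan\nteh => the"));

    // Compile the rules into an FST-backed SynonymMap, ready to be consumed by a synonym filter
    SynonymMap map = parser.build();
    System.out.println("Parsed " + map.words.size() + " distinct terms");
  }
}

The examples that follow differ mainly in where the rules come from (a ResourceLoader or a SolrCore resource) and in which Analyzer is used to tokenize the rule entries.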
Example 1: inform
import org.apache.lucene.analysis.synonym.SolrSynonymParser; // import the required package/class
@Override
public void inform(ResourceLoader loader) throws IOException {
  final TokenizerFactory factory = tokenizerFactory == null ? null : loadTokenizerFactory(loader, tokenizerFactory);
  Analyzer analyzer = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
      Tokenizer tokenizer = factory == null ? new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader) : factory.create(reader);
      TokenStream stream = ignoreCase ? new LowerCaseFilter(Version.LUCENE_CURRENT, tokenizer) : tokenizer;
      return new TokenStreamComponents(tokenizer, stream);
    }
  };
  try {
    String formatClass = format;
    if (format == null || format.equals("solr")) {
      formatClass = SolrSynonymParser.class.getName();
    } else if (format.equals("wordnet")) {
      formatClass = WordnetSynonymParser.class.getName();
    }
    // TODO: expose dedup as a parameter?
    map = loadSynonyms(loader, formatClass, true, analyzer);
  } catch (ParseException e) {
    throw new IOException("Error parsing synonyms file:", e);
  }
}
Developer: europeana, Project: search, Lines of code: 27, Source file: FSTSynonymFilterFactory.java
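The inform method above only builds the SynonymMap; this page does not show how FSTSynonymFilterFactory applies it at analysis time. As a hedged sketch (not copied from the project), the factory's create method typically wraps the incoming stream in a SynonymFilter, reusing the map and ignoreCase fields from this example plus an additional import of org.apache.lucene.analysis.synonym.SynonymFilter; an empty map (null FST) means there is nothing to do:

@Override
public TokenStream create(TokenStream input) {
  // No synonym rules were parsed: pass the original token stream through unchanged
  return map.fst == null ? input : new SynonymFilter(input, map, ignoreCase);
}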
Example 2: loadSolrSynonyms
import org.apache.lucene.analysis.synonym.SolrSynonymParser; // import the required package/class
/**
 * Load synonyms from the solr format, "format=solr".
 */
private SynonymMap loadSolrSynonyms(ResourceLoader loader, boolean dedup, Analyzer analyzer) throws IOException, ParseException {
  final boolean expand = getBoolean("expand", true);
  String synonyms = args.get("synonyms");
  if (synonyms == null)
    throw new IllegalArgumentException("Missing required argument 'synonyms'.");
  CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder()
      .onMalformedInput(CodingErrorAction.REPORT)
      .onUnmappableCharacter(CodingErrorAction.REPORT);
  SolrSynonymParser parser = new SolrSynonymParser(dedup, expand, analyzer);
  File synonymFile = new File(synonyms);
  if (synonymFile.exists()) {
    decoder.reset();
    parser.add(new InputStreamReader(loader.openResource(synonyms), decoder));
  } else {
    List<String> files = splitFileNames(synonyms);
    for (String file : files) {
      decoder.reset();
      parser.add(new InputStreamReader(loader.openResource(file), decoder));
    }
  }
  return parser.build();
}
Developer: pkarmstr, Project: NYBC, Lines of code: 28, Source file: FSTSynonymFilterFactory.java
Example 3: inform
import org.apache.lucene.analysis.synonym.SolrSynonymParser; // import the required package/class
@Override
public void inform(final ResourceLoader loader) throws IOException {
  if (synonymFiles == null) {
    map = null;
    return;
  }
  final Analyzer analyzer = getAnalyzer(ignoreCase);
  try {
    String formatClass = format;
    if (format == null || format.equals("solr")) {
      formatClass = SolrSynonymParser.class.getName();
    } else if (format.equals("wordnet")) {
      formatClass = WordnetSynonymParser.class.getName();
    }
    // TODO: expose dedup as a parameter?
    map = loadSynonyms(loader, formatClass, true, analyzer, true,
        synonymFiles); // always expand=true in NGramSynonymTokenizer
  } catch (final ParseException e) {
    throw new IOException("Error parsing synonyms file:", e);
  }
}
Developer: codelibs, Project: fess-solr-plugin, Lines of code: 24, Source file: NGramSynonymTokenizerFactory.java
Example 4: inform
import org.apache.lucene.analysis.synonym.SolrSynonymParser; // import the required package/class
@Override
public void inform(ResourceLoader loader) throws IOException {
  final TokenizerFactory factory = tokenizerFactory == null ? null : loadTokenizerFactory(loader, tokenizerFactory);
  Analyzer analyzer = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
      Tokenizer tokenizer = factory == null ? new WhitespaceTokenizer(Version.LUCENE_43, reader) : factory.create(reader);
      TokenStream stream = ignoreCase ? new LowerCaseFilter(Version.LUCENE_43, tokenizer) : tokenizer;
      return new TokenStreamComponents(tokenizer, stream);
    }
  };
  try {
    String formatClass = format;
    if (format == null || format.equals("solr")) {
      formatClass = SolrSynonymParser.class.getName();
    } else if (format.equals("wordnet")) {
      formatClass = WordnetSynonymParser.class.getName();
    }
    // TODO: expose dedup as a parameter?
    map = loadSynonyms(loader, formatClass, true, analyzer);
  } catch (ParseException e) {
    throw new IOException("Error parsing synonyms file:", e);
  }
}
Developer: jimaguere, Project: Maskana-Gestor-de-Conocimiento, Lines of code: 27, Source file: FSTSynonymFilterFactory.java
Example 5: inform
import org.apache.lucene.analysis.synonym.SolrSynonymParser; // import the required package/class
@Override
public void inform( SolrCore core ) {
  if (initParams != null) {
    SolrResourceLoader resourceLoader = core.getResourceLoader( );
    synonymsFile = (String)initParams.get( "synonyms" );
    if (synonymsFile != null) {
      Analyzer analyzer = new Analyzer() {
        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
          Tokenizer tokenizer = new KeywordTokenizer();
          return new TokenStreamComponents(tokenizer, tokenizer );
        }
      };
      try {
        SolrSynonymParser parser = new SolrSynonymParser(true, true, analyzer);
        CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder().onMalformedInput(CodingErrorAction.REPORT)
            .onUnmappableCharacter(CodingErrorAction.REPORT);
        parser.parse(new InputStreamReader( resourceLoader.openResource(synonymsFile), decoder));
        this.synonyms = parser.build( );
      }
      catch ( Exception e ) {
        // ???
        Log.warn( "Parsing Synonyms Got Exception " + e );
      }
    }
    String stopwordsFile = (String)initParams.get( "stopwords" );
    if (stopwordsFile != null) {
      this.stopwords = new HashSet<String>( );
      try {
        BufferedReader br = new BufferedReader( new InputStreamReader( resourceLoader.openResource( stopwordsFile )));
        String line = null;
        while ((line = br.readLine( )) != null) {
          stopwords.add( line.toLowerCase( ) );
        }
        br.close( );
      }
      catch ( IOException ioe ) {
        Log.warn( "Adding Stopwords Got Exception " + ioe );
      }
    }
  }
  core.registerFirstSearcherListener( this );
  core.registerNewSearcherListener( this );
}
Developer: lucidworks, Project: query-autofiltering-component, Lines of code: 50, Source file: QueryAutoFilteringComponent.java
Example 6: inform
import org.apache.lucene.analysis.synonym.SolrSynonymParser; // import the required package/class
@Override
public void inform( SolrCore core ) {
  if (initParams != null) {
    SolrResourceLoader resourceLoader = core.getResourceLoader( );
    synonymsFile = (String)initParams.get( "synonyms" );
    if (synonymsFile != null) {
      Analyzer analyzer = new Analyzer() {
        @Override
        protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
          Tokenizer tokenizer = new KeywordTokenizer( reader );
          return new TokenStreamComponents(tokenizer, tokenizer );
        }
      };
      try {
        SolrSynonymParser parser = new SolrSynonymParser(true, true, analyzer);
        CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder().onMalformedInput(CodingErrorAction.REPORT)
            .onUnmappableCharacter(CodingErrorAction.REPORT);
        parser.parse(new InputStreamReader( resourceLoader.openResource(synonymsFile), decoder));
        this.synonyms = parser.build( );
      }
      catch ( Exception e ) {
        // ???
        Log.warn( "Parsing Synonyms Got Exception " + e );
      }
    }
    String stopwordsFile = (String)initParams.get( "stopwords" );
    if (stopwordsFile != null) {
      this.stopwords = new HashSet<String>( );
      try {
        BufferedReader br = new BufferedReader( new InputStreamReader( resourceLoader.openResource( stopwordsFile )));
        String line = null;
        while ((line = br.readLine( )) != null) {
          stopwords.add( line.toLowerCase( ) );
        }
        br.close( );
      }
      catch ( IOException ioe ) {
        Log.warn( "Adding Stopwords Got Exception " + ioe );
      }
    }
  }
  core.registerFirstSearcherListener( this );
  core.registerNewSearcherListener( this );
}
Developer: lucidworks, Project: query-autofiltering-component, Lines of code: 50, Source file: QueryAutoFilteringComponent.java
Note: The org.apache.lucene.analysis.synonym.SolrSynonymParser examples in this article were collected from open-source projects hosted on GitHub and other code and documentation platforms. The snippets were selected from projects contributed by open-source developers; copyright of the source code remains with the original authors. For redistribution and use, please refer to the license of the corresponding project; do not reproduce without permission.