This article collects and summarizes typical usage examples of the Java class org.elasticsearch.index.analysis.CharFilterFactory. If you are wondering what CharFilterFactory is for, or how to use it, the curated class code examples below should help.
The CharFilterFactory class belongs to the org.elasticsearch.index.analysis package. Twenty code examples of the class are shown below, sorted by popularity by default. You can upvote any example you like or find useful; your feedback helps the site recommend better Java code examples.
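Before the numbered examples, here is a minimal, hedged sketch of what a custom CharFilterFactory implementation typically looks like. The class name AmpersandCharFilterFactory and the character mapping are illustrative assumptions, not taken from any example below, and the constructor signature assumes the Elasticsearch 5.x-era analysis API used throughout this article.

import java.io.Reader;
import org.apache.lucene.analysis.charfilter.MappingCharFilter;
import org.apache.lucene.analysis.charfilter.NormalizeCharMap;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AbstractCharFilterFactory;

// Illustrative sketch only: rewrites "&" to " and " before tokenization.
public class AmpersandCharFilterFactory extends AbstractCharFilterFactory {

    private final NormalizeCharMap normMap;

    public AmpersandCharFilterFactory(IndexSettings indexSettings, String name) {
        super(indexSettings, name);
        NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
        builder.add("&", " and ");
        this.normMap = builder.build();
    }

    @Override
    public Reader create(Reader reader) {
        // A CharFilterFactory's single job: wrap the incoming Reader with a CharFilter.
        return new MappingCharFilter(normMap, reader);
    }
}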
Example 1: TraditionalChineseConvertCharFilterFactory
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
public TraditionalChineseConvertCharFilterFactory(final IndexSettings indexSettings, final Environment env, final String name,
final Settings settings, final FessAnalysisService fessAnalysisService) {
super(indexSettings, name);
final Class<?> charFilterFactoryClass = fessAnalysisService.loadClass(FACTORY);
if (charFilterFactoryClass != null) {
if (logger.isInfoEnabled()) {
logger.info("{} is found.", FACTORY);
}
charFilterFactory = AccessController.doPrivileged((PrivilegedAction<CharFilterFactory>) () -> {
try {
final Constructor<?> constructor =
charFilterFactoryClass.getConstructor(IndexSettings.class, Environment.class, String.class, Settings.class);
return (CharFilterFactory) constructor.newInstance(indexSettings, env, name, settings);
} catch (final Exception e) {
throw new ElasticsearchException("Failed to load " + FACTORY, e);
}
});
} else if (logger.isDebugEnabled()) {
logger.debug("{} is not found.", FACTORY);
}
}
Author: codelibs, Project: elasticsearch-analysis-fess, Lines: 23, Source: TraditionalChineseConvertCharFilterFactory.java
Example 2: JapaneseIterationMarkCharFilterFactory
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
public JapaneseIterationMarkCharFilterFactory(final IndexSettings indexSettings, final Environment env, final String name,
final Settings settings, final FessAnalysisService fessAnalysisService) {
super(indexSettings, name);
for (final String factoryClass : FACTORIES) {
final Class<?> charFilterFactoryClass = fessAnalysisService.loadClass(factoryClass);
if (charFilterFactoryClass != null) {
if (logger.isInfoEnabled()) {
logger.info("{} is found.", factoryClass);
}
charFilterFactory = AccessController.doPrivileged((PrivilegedAction<CharFilterFactory>) () -> {
try {
final Constructor<?> constructor =
charFilterFactoryClass.getConstructor(IndexSettings.class, Environment.class, String.class, Settings.class);
return (CharFilterFactory) constructor.newInstance(indexSettings, env, name, settings);
} catch (final Exception e) {
throw new ElasticsearchException("Failed to load " + factoryClass, e);
}
});
break;
} else if (logger.isDebugEnabled()) {
logger.debug("{} is not found.", factoryClass);
}
}
}
Author: codelibs, Project: elasticsearch-analysis-fess, Lines: 26, Source: JapaneseIterationMarkCharFilterFactory.java
Example 3: charFilterFactory
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
public static CharFilterFactory charFilterFactory(String name) throws IOException {
Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put("path.home", System.getProperty("path.home", "/tmp"))
.build();
Environment environment = new Environment(settings);
AnalysisRegistry analysisRegistry = analysisService(settings);
IndexMetaData indexMetaData = IndexMetaData.builder("test")
.settings(settings)
.numberOfShards(1)
.numberOfReplicas(1)
.build();
IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);
Map<String, CharFilterFactory> map = analysisRegistry.buildCharFilterFactories(indexSettings);
return map.containsKey(name) ? map.get(name) :
analysisRegistry.getCharFilterProvider(name).get(environment, name);
}
Author: jprante, Project: elasticsearch-plugin-bundle, Lines: 18, Source: MapperTestUtils.java
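A hedged usage sketch for the helper above, assuming it is called from a test class and that the "icu_normalizer" char filter is registered; the input string is illustrative only.

// Obtain the factory by name and wrap a plain Reader with it.
CharFilterFactory factory = MapperTestUtils.charFilterFactory("icu_normalizer");
Reader filtered = factory.create(new StringReader("Ｅｌａｓｔｉｃｓｅａｒｃｈ"));
// The filtered Reader can then be handed to a Tokenizer via setReader(...).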
Example 4: testDefaultsIcuAnalysis
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
@Test
public void testDefaultsIcuAnalysis() throws IOException {
CharFilterFactory charFilterFactory = charFilterFactory("icu_normalizer");
assertThat(charFilterFactory, instanceOf(IcuNormalizerCharFilterFactory.class));
TokenizerFactory tf = tokenizerFactory("icu_tokenizer");
assertThat(tf, instanceOf(IcuTokenizerFactory.class));
TokenFilterFactory filterFactory = tokenFilterFactory("icu_normalizer");
assertThat(filterFactory, instanceOf(IcuNormalizerTokenFilterFactory.class));
filterFactory = tokenFilterFactory("icu_folding");
assertThat(filterFactory, instanceOf(IcuFoldingTokenFilterFactory.class));
filterFactory = tokenFilterFactory("icu_transform");
assertThat(filterFactory, instanceOf(IcuTransformTokenFilterFactory.class));
Analyzer analyzer = MapperTestUtils.analyzer("icu_collation");
assertThat(analyzer, instanceOf(NamedAnalyzer.class));
}
Author: jprante, Project: elasticsearch-plugin-bundle, Lines: 22, Source: IcuAnalysisTests.java
Example 5: TestAnalysis
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
public TestAnalysis(IndexAnalyzers indexAnalyzers,
Map<String, TokenFilterFactory> tokenFilter,
Map<String, TokenizerFactory> tokenizer,
Map<String, CharFilterFactory> charFilter) {
this.indexAnalyzers = indexAnalyzers;
this.tokenFilter = tokenFilter;
this.tokenizer = tokenizer;
this.charFilter = charFilter;
}
Author: justor, Project: elasticsearch_my, Lines: 10, Source: ESTestCase.java
Example 6: AnalysisModule
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
public AnalysisModule(Environment environment, List<AnalysisPlugin> plugins) throws IOException {
NamedRegistry<AnalysisProvider<CharFilterFactory>> charFilters = setupCharFilters(plugins);
NamedRegistry<org.apache.lucene.analysis.hunspell.Dictionary> hunspellDictionaries = setupHunspellDictionaries(plugins);
hunspellService = new HunspellService(environment.settings(), environment, hunspellDictionaries.getRegistry());
NamedRegistry<AnalysisProvider<TokenFilterFactory>> tokenFilters = setupTokenFilters(plugins, hunspellService);
NamedRegistry<AnalysisProvider<TokenizerFactory>> tokenizers = setupTokenizers(plugins);
NamedRegistry<AnalysisProvider<AnalyzerProvider<?>>> analyzers = setupAnalyzers(plugins);
NamedRegistry<AnalysisProvider<AnalyzerProvider<?>>> normalizers = setupNormalizers(plugins);
analysisRegistry = new AnalysisRegistry(environment, charFilters.getRegistry(), tokenFilters.getRegistry(), tokenizers
.getRegistry(), analyzers.getRegistry(), normalizers.getRegistry());
}
Author: justor, Project: elasticsearch_my, Lines: 12, Source: AnalysisModule.java
Example 7: setupCharFilters
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
private NamedRegistry<AnalysisProvider<CharFilterFactory>> setupCharFilters(List<AnalysisPlugin> plugins) {
NamedRegistry<AnalysisProvider<CharFilterFactory>> charFilters = new NamedRegistry<>("char_filter");
charFilters.register("html_strip", HtmlStripCharFilterFactory::new);
charFilters.register("pattern_replace", requriesAnalysisSettings(PatternReplaceCharFilterFactory::new));
charFilters.register("mapping", requriesAnalysisSettings(MappingCharFilterFactory::new));
charFilters.extractAndRegister(plugins, AnalysisPlugin::getCharFilters);
return charFilters;
}
Author: justor, Project: elasticsearch_my, Lines: 9, Source: AnalysisModule.java
Example 8: createStackedTokenStream
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
private static TokenStream createStackedTokenStream(String source, CharFilterFactory[] charFilterFactories, TokenizerFactory tokenizerFactory, TokenFilterFactory[] tokenFilterFactories, int current) {
Reader reader = new FastStringReader(source);
for (CharFilterFactory charFilterFactory : charFilterFactories) {
reader = charFilterFactory.create(reader);
}
Tokenizer tokenizer = tokenizerFactory.create();
tokenizer.setReader(reader);
TokenStream tokenStream = tokenizer;
for (int i = 0; i < current; i++) {
tokenStream = tokenFilterFactories[i].create(tokenStream);
}
return tokenStream;
}
Author: justor, Project: elasticsearch_my, Lines: 14, Source: TransportAnalyzeAction.java
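A hedged sketch of how the resulting TokenStream might be consumed; the source text and the factory arrays are illustrative, and inside TransportAnalyzeAction the stream is actually consumed by the _analyze API itself.

// Standard Lucene consumption loop: reset, iterate, end, close.
TokenStream ts = createStackedTokenStream("Hello World", charFilterFactories,
        tokenizerFactory, tokenFilterFactories, tokenFilterFactories.length);
CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
ts.reset();
while (ts.incrementToken()) {
    System.out.println(term.toString());
}
ts.end();
ts.close();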
Example 9: build
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
@Override
public void build(final Map<String, TokenizerFactory> tokenizers,
final Map<String, CharFilterFactory> charFilters,
final Map<String, TokenFilterFactory> tokenFilters) {
List<CharFilterFactory> myCharFilters = new ArrayList<>();
String[] charFilterNames = analyzerSettings.getAsArray("char_filter");
for (String charFilterName : charFilterNames) {
CharFilterFactory charFilter = charFilters.get(charFilterName);
if (charFilter == null) {
throw new IllegalArgumentException("hyphen analyzer [" + name()
+ "] failed to find char_filter under name [" + charFilterName + "]");
}
myCharFilters.add(charFilter);
}
List<TokenFilterFactory> myTokenFilters = new ArrayList<>();
myTokenFilters.add(tokenFilterFactory);
String[] tokenFilterNames = analyzerSettings.getAsArray("filter");
for (String tokenFilterName : tokenFilterNames) {
TokenFilterFactory tokenFilter = tokenFilters.get(tokenFilterName);
if (tokenFilter == null) {
throw new IllegalArgumentException("hyphen analyzer [" + name()
+ "] failed to find filter under name [" + tokenFilterName + "]");
}
myTokenFilters.add(tokenFilter);
}
int positionOffsetGap = analyzerSettings.getAsInt("position_offset_gap", 0);
int offsetGap = analyzerSettings.getAsInt("offset_gap", -1);
this.customAnalyzer = new CustomAnalyzer(tokenizerFactory,
myCharFilters.toArray(new CharFilterFactory[myCharFilters.size()]),
myTokenFilters.toArray(new TokenFilterFactory[myTokenFilters.size()]),
positionOffsetGap,
offsetGap
);
}
Author: jprante, Project: elasticsearch-analysis-hyphen, Lines: 35, Source: HyphenAnalyzerProvider.java
Example 10: getCharFilters
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
@Override
public Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> getCharFilters() {
Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> factories = new HashMap<>();
factories.put("chinese-simplifying",
(indexSettings, environment, name, settings) ->
new ChineseSimplifyingCharFilterFactory(indexSettings, name));
return factories;
}
Author: dowenliu-xyz, Project: elasticsearch-analysis-char-filter-chinese-simplifying, Lines: 11, Source: AnalysisChineseSimplifyingPlugin.java
Example 11: getCharFilters
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
@Override
public Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> getCharFilters() {
return Collections.singletonMap("benz_stop", (indexSettings, environment, name, settings) -> new AbstractCharFilterFactory(indexSettings, name) {
@Override
public Reader create(Reader tokenStream) {
return new BenzCjkCharFilter(tokenStream);
}
});
}
Author: wxingyl, Project: elasticsearch-analysis-benz, Lines: 11, Source: AnalysisBenzPlugin.java
Example 12: getCharFilters
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
@Override
public Map<String, AnalysisProvider<CharFilterFactory>> getCharFilters() {
final Map<String, AnalysisProvider<CharFilterFactory>> extra = new HashMap<>();
extra.put("fess_japanese_iteration_mark",
(indexSettings, env, name, settings) -> new JapaneseIterationMarkCharFilterFactory(indexSettings, env, name, settings,
pluginComponent.getFessAnalysisService()));
extra.put("fess_traditional_chinese_convert",
(indexSettings, env, name, settings) -> new TraditionalChineseConvertCharFilterFactory(indexSettings, env, name, settings,
pluginComponent.getFessAnalysisService()));
return extra;
}
Author: codelibs, Project: elasticsearch-analysis-fess, Lines: 12, Source: FessAnalysisPlugin.java
Example 13: getCharFilters
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
@Override
public Map<String, AnalysisProvider<CharFilterFactory>> getCharFilters() {
final Map<String, AnalysisProvider<CharFilterFactory>> extra = new HashMap<>();
extra.put("iteration_mark", IterationMarkCharFilterFactory::new);
extra.put("prolonged_sound_mark", ProlongedSoundMarkCharFilterFactory::new);
extra.put("reloadable_kuromoji_iteration_mark", KuromojiIterationMarkCharFilterFactory::new);
return extra;
}
Author: codelibs, Project: elasticsearch-analysis-ja, Lines: 9, Source: JaPlugin.java
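A hedged sketch of index settings that wire the "iteration_mark" char filter registered above into a custom analyzer; the analyzer name "my_analyzer" is illustrative, and putArray assumes the pre-6.x Settings API that these examples also use (compare getAsArray in the build() examples).

// Illustrative only: reference the registered char filter from a custom analyzer.
Settings analysisSettings = Settings.builder()
        .put("index.analysis.analyzer.my_analyzer.type", "custom")
        .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
        .putArray("index.analysis.analyzer.my_analyzer.char_filter", "iteration_mark")
        .build();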
Example 14: newDocumentMapperParser
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
public static DocumentMapperParser newDocumentMapperParser(Settings customSettings, String index) throws IOException {
Settings settings = Settings.builder()
.put("path.home", System.getProperty("path.home", System.getProperty("user.dir")))
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(customSettings)
.build();
Environment environment = new Environment(settings);
ReferencePlugin referencePlugin = new ReferencePlugin();
AnalysisModule analysisModule = new AnalysisModule(environment, Collections.emptyList());
IndicesModule indicesModule = new IndicesModule(Collections.singletonList(referencePlugin));
MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
AnalysisRegistry analysisRegistry = analysisModule.getAnalysisRegistry();
IndexMetaData indexMetaData = IndexMetaData.builder(index)
.settings(settings)
.numberOfShards(1)
.numberOfReplicas(1)
.build();
IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);
SimilarityService similarityService = new SimilarityService(indexSettings, SimilarityService.BUILT_IN);
Map<String, CharFilterFactory> charFilterFactoryMap = analysisRegistry.buildCharFilterFactories(indexSettings);
Map<String, TokenFilterFactory> tokenFilterFactoryMap = analysisRegistry.buildTokenFilterFactories(indexSettings);
Map<String, TokenizerFactory> tokenizerFactoryMap = analysisRegistry.buildTokenizerFactories(indexSettings);
Map<String, AnalyzerProvider<?>> analyzerProviderMap = analysisRegistry.buildAnalyzerFactories(indexSettings);
Map<String, AnalyzerProvider<?>> normalizerProviderMap = analysisRegistry.buildNormalizerFactories(indexSettings);
IndexAnalyzers indexAnalyzers = analysisRegistry.build(indexSettings,
analyzerProviderMap,
normalizerProviderMap,
tokenizerFactoryMap,
charFilterFactoryMap,
tokenFilterFactoryMap);
MapperService mapperService = new MapperService(indexSettings, indexAnalyzers, NamedXContentRegistry.EMPTY,
similarityService, mapperRegistry, null);
return new DocumentMapperParser(indexSettings, mapperService, indexAnalyzers, NamedXContentRegistry.EMPTY,
similarityService, mapperRegistry, null);
}
Author: jprante, Project: elasticsearch-analysis-reference, Lines: 36, Source: MapperTestUtils.java
Example 15: getCharFilters
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
@Override
public Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> getCharFilters() {
Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> extra = new LinkedHashMap<>();
if (settings.getAsBoolean("plugins.xbib.icu.enabled", true)) {
extra.put("icu_normalizer", IcuNormalizerCharFilterFactory::new);
extra.put("icu_folding", IcuFoldingCharFilterFactory::new);
}
return extra;
}
Author: jprante, Project: elasticsearch-plugin-bundle, Lines: 10, Source: BundlePlugin.java
Example 16: build
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
@Override
public void build(final Map<String, TokenizerFactory> tokenizers,
final Map<String, CharFilterFactory> charFilters,
final Map<String, TokenFilterFactory> tokenFilters) {
List<CharFilterFactory> myCharFilters = new ArrayList<>();
String[] charFilterNames = analyzerSettings.getAsArray("char_filter");
for (String charFilterName : charFilterNames) {
CharFilterFactory charFilter = charFilters.get(charFilterName);
if (charFilter == null) {
throw new IllegalArgumentException("Sortform Analyzer [" + name() + "] failed to find char_filter under name [" + charFilterName + "]");
}
myCharFilters.add(charFilter);
}
List<TokenFilterFactory> myTokenFilters = new ArrayList<>();
String[] tokenFilterNames = analyzerSettings.getAsArray("filter");
for (String tokenFilterName : tokenFilterNames) {
TokenFilterFactory tokenFilter = tokenFilters.get(tokenFilterName);
if (tokenFilter == null) {
throw new IllegalArgumentException("Sortform Analyzer [" + name() + "] failed to find filter under name [" + tokenFilterName + "]");
}
myTokenFilters.add(tokenFilter);
}
int positionOffsetGap = analyzerSettings.getAsInt("position_offset_gap", 0);
int offsetGap = analyzerSettings.getAsInt("offset_gap", -1);
this.customAnalyzer = new CustomAnalyzer(tokenizerFactory,
myCharFilters.toArray(new CharFilterFactory[myCharFilters.size()]),
myTokenFilters.toArray(new TokenFilterFactory[myTokenFilters.size()]),
positionOffsetGap,
offsetGap
);
}
Author: jprante, Project: elasticsearch-plugin-bundle, Lines: 32, Source: SortformAnalyzerProvider.java
Example 17: newDocumentMapperParser
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
public static DocumentMapperParser newDocumentMapperParser(Settings customSettings, String index) throws IOException {
Settings settings = Settings.builder()
.put("path.home", System.getProperty("path.home", "/tmp"))
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(customSettings)
.build();
Environment environment = new Environment(settings);
BundlePlugin bundlePlugin = new BundlePlugin(settings);
AnalysisModule analysisModule = new AnalysisModule(environment, Collections.singletonList(bundlePlugin));
IndicesModule indicesModule = new IndicesModule(Collections.singletonList(bundlePlugin));
MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
AnalysisRegistry analysisRegistry = analysisModule.getAnalysisRegistry();
IndexMetaData indexMetaData = IndexMetaData.builder(index)
.settings(settings)
.numberOfShards(1)
.numberOfReplicas(1)
.build();
IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);
SimilarityService similarityService = new SimilarityService(indexSettings, SimilarityService.BUILT_IN);
Map<String, CharFilterFactory> charFilterFactoryMap = analysisRegistry.buildCharFilterFactories(indexSettings);
Map<String, TokenFilterFactory> tokenFilterFactoryMap = analysisRegistry.buildTokenFilterFactories(indexSettings);
Map<String, TokenizerFactory> tokenizerFactoryMap = analysisRegistry.buildTokenizerFactories(indexSettings);
Map<String, AnalyzerProvider<?>> analyzerProviderMap = analysisRegistry.buildAnalyzerFactories(indexSettings);
Map<String, AnalyzerProvider<?>> normalizerProviderMap = analysisRegistry.buildNormalizerFactories(indexSettings);
IndexAnalyzers indexAnalyzers = analysisRegistry.build(indexSettings,
analyzerProviderMap,
normalizerProviderMap,
tokenizerFactoryMap,
charFilterFactoryMap,
tokenFilterFactoryMap);
MapperService mapperService = new MapperService(indexSettings, indexAnalyzers, NamedXContentRegistry.EMPTY,
similarityService, mapperRegistry, null);
return new DocumentMapperParser(indexSettings, mapperService, indexAnalyzers, NamedXContentRegistry.EMPTY,
similarityService, mapperRegistry, null);
}
Author: jprante, Project: elasticsearch-plugin-bundle, Lines: 36, Source: MapperTestUtils.java
Example 18: build
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
@Override
public void build(AnalysisService analysisService) {
List<CharFilterFactory> charFilters = newArrayList();
String[] charFilterNames = analyzerSettings.getAsArray("char_filter");
for (String charFilterName : charFilterNames) {
CharFilterFactory charFilter = analysisService.charFilter(charFilterName);
if (charFilter == null) {
throw new IllegalArgumentException("hyphen analyzer [" + name() + "] failed to find char_filter under name [" + charFilterName + "]");
}
charFilters.add(charFilter);
}
List<TokenFilterFactory> tokenFilters = newArrayList();
String[] tokenFilterNames = analyzerSettings.getAsArray("filter");
for (String tokenFilterName : tokenFilterNames) {
TokenFilterFactory tokenFilter = analysisService.tokenFilter(tokenFilterName);
if (tokenFilter == null) {
throw new IllegalArgumentException("hyphen analyzer [" + name() + "] failed to find filter under name [" + tokenFilterName + "]");
}
tokenFilters.add(tokenFilter);
}
int positionOffsetGap = analyzerSettings.getAsInt("position_offset_gap", 0);
int offsetGap = analyzerSettings.getAsInt("offset_gap", -1);
this.customAnalyzer = new CustomAnalyzer(tokenizerFactory,
charFilters.toArray(new CharFilterFactory[charFilters.size()]),
tokenFilters.toArray(new TokenFilterFactory[tokenFilters.size()]),
positionOffsetGap,
offsetGap
);
}
Author: jprante, Project: elasticsearch-analysis-german, Lines: 34, Source: HyphenAnalyzerProvider.java
Example 19: build
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
@Override
public void build(AnalysisService analysisService) {
List<CharFilterFactory> charFilters = newArrayList();
String[] charFilterNames = analyzerSettings.getAsArray("char_filter");
for (String charFilterName : charFilterNames) {
CharFilterFactory charFilter = analysisService.charFilter(charFilterName);
if (charFilter == null) {
throw new IllegalArgumentException("Sortform Analyzer [" + name() + "] failed to find char_filter under name [" + charFilterName + "]");
}
charFilters.add(charFilter);
}
List<TokenFilterFactory> tokenFilters = newArrayList();
String[] tokenFilterNames = analyzerSettings.getAsArray("filter");
for (String tokenFilterName : tokenFilterNames) {
TokenFilterFactory tokenFilter = analysisService.tokenFilter(tokenFilterName);
if (tokenFilter == null) {
throw new IllegalArgumentException("Sortform Analyzer [" + name() + "] failed to find filter under name [" + tokenFilterName + "]");
}
tokenFilters.add(tokenFilter);
}
int positionOffsetGap = analyzerSettings.getAsInt("position_offset_gap", 0);
int offsetGap = analyzerSettings.getAsInt("offset_gap", -1);
this.customAnalyzer = new CustomAnalyzer(tokenizerFactory,
charFilters.toArray(new CharFilterFactory[charFilters.size()]),
tokenFilters.toArray(new TokenFilterFactory[tokenFilters.size()]),
positionOffsetGap,
offsetGap
);
}
Author: jprante, Project: elasticsearch-analysis-german, Lines: 34, Source: SortformAnalyzerProvider.java
Example 20: getCharFilters
import org.elasticsearch.index.analysis.CharFilterFactory; // import the required package/class
/**
* Override to add additional {@link CharFilter}s.
*/
default Map<String, AnalysisProvider<CharFilterFactory>> getCharFilters() {
return emptyMap();
}
Author: justor, Project: elasticsearch_my, Lines: 7, Source: AnalysisPlugin.java
Note: The org.elasticsearch.index.analysis.CharFilterFactory class examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; please consult each project's license before using or redistributing the code. Do not reproduce this article without permission.