This article collects typical usage examples of the Java class org.apache.lucene.util.BytesRefHash. If you are wondering what BytesRefHash does, how to use it, or what real-world code built on it looks like, the hand-picked examples below should help.
BytesRefHash belongs to the org.apache.lucene.util package. Eighteen code examples of the class are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java samples.
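Before the examples, here is a minimal, self-contained sketch of the core BytesRefHash workflow: deduplicating byte sequences and handing back a compact integer id per unique term. It assumes the Lucene 4.x-era API that the snippets below are written against; the class name and the term strings are illustrative only.

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;

public class BytesRefHashBasics {
  public static void main(String[] args) {
    // Backed by a default ByteBlockPool; every distinct byte sequence gets a stable int id.
    BytesRefHash hash = new BytesRefHash();

    int fooId = hash.add(new BytesRef("foo"));   // >= 0: id of the newly added term
    int barId = hash.add(new BytesRef("bar"));
    int dup   = hash.add(new BytesRef("foo"));   // < 0: already present, encoded as -(id + 1)
    System.out.println("foo=" + fooId + " bar=" + barId + " duplicate add returned " + dup);

    // find() looks a term up without inserting it; -1 means absent.
    System.out.println("find(baz) = " + hash.find(new BytesRef("baz")));

    // get() resolves an id back to its bytes, filling a reusable scratch BytesRef.
    BytesRef scratch = new BytesRef();
    hash.get(barId, scratch);
    System.out.println(barId + " -> " + scratch.utf8ToString() + ", size = " + hash.size());
  }
}

Every example that follows builds on this idea of a deduplicating term set with integer ids, whether for indexing (TermsHashPerField, the DocValues writers) or for querying (TermsQuery, MemoryIndex).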
Example 1: TermsHashPerField

import org.apache.lucene.util.BytesRefHash; // import the required package/class

/** streamCount: how many streams this field stores per term.
 *  E.g. doc(+freq) is 1 stream, prox+offset is a second. */
public TermsHashPerField(int streamCount, FieldInvertState fieldState, TermsHash termsHash, TermsHashPerField nextPerField, FieldInfo fieldInfo) {
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  this.termsHash = termsHash;
  bytesUsed = termsHash.bytesUsed;
  this.fieldState = fieldState;
  this.streamCount = streamCount;
  numPostingInt = 2 * streamCount;
  this.fieldInfo = fieldInfo;
  this.nextPerField = nextPerField;
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
}

Author: lamsfoundation, Project: lams, Lines: 19, Source: TermsHashPerField.java
Example 2: readFromBytes

import org.apache.lucene.util.BytesRefHash; // import the required package/class

private void readFromBytes(BytesRef bytes) {
  // Read pruned flag
  this.setIsPruned(bytes.bytes[bytes.offset++] == 1);
  // Read size of the set
  int size = Bytes.readInt(bytes);
  // Read terms
  bytesUsed = Counter.newCounter();
  pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
  set = new BytesRefHash(pool);
  BytesRef reusable = new BytesRef();
  for (int i = 0; i < size; i++) {
    Bytes.readBytesRef(bytes, reusable);
    set.add(reusable);
  }
}

Author: sirensolutions, Project: siren-join, Lines: 19, Source: BytesRefTermsSet.java
Example 3: binarySearch

import org.apache.lucene.util.BytesRefHash; // import the required package/class

private final int binarySearch(BytesRef b, BytesRef bytesRef, int low,
    int high, BytesRefHash hash, int[] ords, Comparator<BytesRef> comparator) {
  int mid = 0;
  while (low <= high) {
    mid = (low + high) >>> 1;
    hash.get(ords[mid], bytesRef);
    final int cmp = comparator.compare(bytesRef, b);
    if (cmp < 0) {
      low = mid + 1;
    } else if (cmp > 0) {
      high = mid - 1;
    } else {
      return mid;
    }
  }
  assert comparator.compare(bytesRef, b) != 0;
  return -(low + 1);
}

Author: europeana, Project: search, Lines: 19, Source: MemoryIndex.java
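To make the contract of Example 3 concrete, the following hedged, stand-alone re-creation of the same pattern (not MemoryIndex code; the Lucene 4.x API, class name, and terms are assumptions for illustration) shows how the ords array is produced by sort() and how the return value is read: a non-negative result is the position inside ords, a negative result encodes the insertion point as -(insertionPoint) - 1.

import java.util.Comparator;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;

public class SortedOrdsLookupSketch {
  // Illustrative stand-in for Example 3's private helper: probes the sorted ords array.
  static int binarySearch(BytesRef key, BytesRef scratch, int low, int high,
                          BytesRefHash hash, int[] ords, Comparator<BytesRef> comparator) {
    while (low <= high) {
      int mid = (low + high) >>> 1;
      hash.get(ords[mid], scratch);            // resolve the term id to its bytes
      int cmp = comparator.compare(scratch, key);
      if (cmp < 0) {
        low = mid + 1;
      } else if (cmp > 0) {
        high = mid - 1;
      } else {
        return mid;                            // position inside ords
      }
    }
    return -(low + 1);                         // absent: encodes the insertion point
  }

  public static void main(String[] args) {
    BytesRefHash terms = new BytesRefHash();
    terms.add(new BytesRef("apple"));
    terms.add(new BytesRef("cherry"));
    terms.add(new BytesRef("banana"));

    Comparator<BytesRef> cmp = BytesRef.getUTF8SortedAsUnicodeComparator();
    int[] ords = terms.sort(cmp);              // term ids in comparator order

    BytesRef scratch = new BytesRef();
    System.out.println(binarySearch(new BytesRef("banana"), scratch, 0, terms.size() - 1, terms, ords, cmp)); // found: 1
    System.out.println(binarySearch(new BytesRef("durian"), scratch, 0, terms.size() - 1, terms, ords, cmp)); // absent: -4
  }
}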
Example 4: TermsHashPerField

import org.apache.lucene.util.BytesRefHash; // import the required package/class

public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  this.termsHash = termsHash;
  bytesUsed = termsHash.bytesUsed;
  fieldState = docInverterPerField.fieldState;
  this.consumer = termsHash.consumer.addField(this, fieldInfo);
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
  streamCount = consumer.getStreamCount();
  numPostingInt = 2 * streamCount;
  this.fieldInfo = fieldInfo;
  if (nextTermsHash != null) {
    nextPerField = (TermsHashPerField) nextTermsHash.addField(docInverterPerField, fieldInfo);
  } else {
    nextPerField = null;
  }
}

Author: pkarmstr, Project: NYBC, Lines: 20, Source: TermsHashPerField.java
Example 5: SortedSetDocValuesWriter

import org.apache.lucene.util.BytesRefHash; // import the required package/class

public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
      BytesRefHash.DEFAULT_CAPACITY,
      new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.packedBuilder(PackedInts.COMPACT);
  pendingCounts = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}

Author: lamsfoundation, Project: lams, Lines: 14, Source: SortedSetDocValuesWriter.java
Example 6: SortedDocValuesWriter

import org.apache.lucene.util.BytesRefHash; // import the required package/class

public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
      BytesRefHash.DEFAULT_CAPACITY,
      new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}

Author: lamsfoundation, Project: lams, Lines: 13, Source: SortedDocValuesWriter.java
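Examples 5 and 6 share one construction pattern: the BytesRefHash sits on a ByteBlockPool whose allocator charges every block to the writer's Counter, and a DirectBytesStartArray is passed so the per-term start offsets are accounted against the same counter. Here is a hedged, self-contained sketch of that pattern outside any writer; the class name and terms are illustrative assumptions, not Lucene code.

import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.Counter;

public class TrackedBytesRefHashSketch {
  public static void main(String[] args) {
    // All pool allocations and the bytesStart array are charged to this counter,
    // which is how the DocValues writers above report their RAM usage.
    Counter bytesUsed = Counter.newCounter();

    BytesRefHash hash = new BytesRefHash(
        new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed)),
        BytesRefHash.DEFAULT_CAPACITY,
        new BytesRefHash.DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, bytesUsed));

    hash.add(new BytesRef("lucene"));
    hash.add(new BytesRef("solr"));

    System.out.println("terms = " + hash.size() + ", tracked bytes = " + bytesUsed.get());
  }
}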
Example 7: addAll

import org.apache.lucene.util.BytesRefHash; // import the required package/class

@Override
protected void addAll(TermsSet terms) {
  if (!(terms instanceof BytesRefTermsSet)) {
    throw new UnsupportedOperationException("Invalid type: BytesRefTermsSet expected.");
  }

  BytesRefHash input = ((BytesRefTermsSet) terms).set;
  BytesRef reusable = new BytesRef();
  for (int i = 0; i < input.size(); i++) {
    input.get(i, reusable);
    set.add(reusable);
  }
}

Author: sirensolutions, Project: siren-join, Lines: 14, Source: BytesRefTermsSet.java
Example 8: readFrom

import org.apache.lucene.util.BytesRefHash; // import the required package/class

@Override
public void readFrom(StreamInput in) throws IOException {
  this.setIsPruned(in.readBoolean());
  int size = in.readInt();
  bytesUsed = Counter.newCounter();
  pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
  set = new BytesRefHash(pool);
  for (long i = 0; i < size; i++) {
    set.add(in.readBytesRef());
  }
}

Author: sirensolutions, Project: siren-join, Lines: 14, Source: BytesRefTermsSet.java
Example 9: Info

import org.apache.lucene.util.BytesRefHash; // import the required package/class

public Info(BytesRefHash terms, SliceByteStartArray sliceArray, int numTokens, int numOverlapTokens, float boost, int lastPosition, int lastOffset, long sumTotalTermFreq) {
  this.terms = terms;
  this.sliceArray = sliceArray;
  this.numTokens = numTokens;
  this.numOverlapTokens = numOverlapTokens;
  this.boost = boost;
  this.sumTotalTermFreq = sumTotalTermFreq;
  this.lastPosition = lastPosition;
  this.lastOffset = lastOffset;
}

Author: europeana, Project: search, Lines: 11, Source: MemoryIndex.java
Example 10: TermsQuery

import org.apache.lucene.util.BytesRefHash; // import the required package/class

/**
 * @param field The field that should contain terms that are specified in the previous parameter
 * @param terms The terms that matching documents should have. The terms must be sorted by natural order.
 */
TermsQuery(String field, Query fromQuery, BytesRefHash terms) {
  super(field);
  this.fromQuery = fromQuery;
  this.terms = terms;
  ords = terms.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
}

Author: europeana, Project: search, Lines: 11, Source: TermsQuery.java
Example 11: SeekingTermSetTermsEnum

import org.apache.lucene.util.BytesRefHash; // import the required package/class

SeekingTermSetTermsEnum(TermsEnum tenum, BytesRefHash terms, int[] ords) {
  super(tenum);
  this.terms = terms;
  this.ords = ords;
  comparator = BytesRef.getUTF8SortedAsUnicodeComparator();
  lastElement = terms.size() - 1;
  lastTerm = terms.get(ords[lastElement], new BytesRef());
  seekTerm = terms.get(ords[upto], spare);
}

Author: europeana, Project: search, Lines: 10, Source: TermsQuery.java
Example 12: TermsIncludingScoreQuery

import org.apache.lucene.util.BytesRefHash; // import the required package/class

TermsIncludingScoreQuery(String field, boolean multipleValuesPerDocument, BytesRefHash terms, float[] scores, Query originalQuery) {
  this.field = field;
  this.multipleValuesPerDocument = multipleValuesPerDocument;
  this.terms = terms;
  this.scores = scores;
  this.originalQuery = originalQuery;
  this.ords = terms.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
  this.unwrittenOriginalQuery = originalQuery;
}

Author: europeana, Project: search, Lines: 10, Source: TermsIncludingScoreQuery.java
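Examples 10 through 12 rely on the same sort-then-iterate idiom: BytesRefHash.sort(comparator) returns the term ids (ords) in comparator order, and get() resolves each id back to its bytes, which is what SeekingTermSetTermsEnum later walks while seeking the wrapped TermsEnum. A hedged, self-contained sketch of that idiom (Lucene 4.x API assumed; class name and terms are illustrative):

import java.util.Comparator;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;

public class SortedTermsIterationSketch {
  public static void main(String[] args) {
    BytesRefHash terms = new BytesRefHash();
    terms.add(new BytesRef("delta"));
    terms.add(new BytesRef("alpha"));
    terms.add(new BytesRef("charlie"));

    Comparator<BytesRef> cmp = BytesRef.getUTF8SortedAsUnicodeComparator();
    int[] ords = terms.sort(cmp);          // term ids in UTF-8/unicode order
    // Per the javadoc, sort() is destructive: call clear() before reusing the hash for new adds.

    BytesRef spare = new BytesRef();
    for (int i = 0; i < terms.size(); i++) {
      terms.get(ords[i], spare);           // walk the terms in sorted order
      System.out.println(i + " -> " + spare.utf8ToString());
    }
  }
}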
Example 13: Info

import org.apache.lucene.util.BytesRefHash; // import the required package/class

public Info(BytesRefHash terms, SliceByteStartArray sliceArray, int numTokens, int numOverlapTokens, float boost, int lastPosition, long sumTotalTermFreq) {
  this.terms = terms;
  this.sliceArray = sliceArray;
  this.numTokens = numTokens;
  this.numOverlapTokens = numOverlapTokens;
  this.boost = boost;
  this.sumTotalTermFreq = sumTotalTermFreq;
  this.lastPosition = lastPosition;
}

Author: pkarmstr, Project: NYBC, Lines: 10, Source: MemoryIndex.java
Example 14: SeekingTermSetTermsEnum

import org.apache.lucene.util.BytesRefHash; // import the required package/class

SeekingTermSetTermsEnum(TermsEnum tenum, BytesRefHash terms) {
  super(tenum);
  this.terms = terms;
  lastElement = terms.size() - 1;
  ords = terms.sort(comparator = tenum.getComparator());
  lastTerm = terms.get(ords[lastElement], new BytesRef());
  seekTerm = terms.get(ords[upto], spare);
}

Author: pkarmstr, Project: NYBC, Lines: 10, Source: TermsQuery.java
Example 15: SortedSetDocValuesWriter

import org.apache.lucene.util.BytesRefHash; // import the required package/class

public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
      BytesRefHash.DEFAULT_CAPACITY,
      new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingLongBuffer();
  pendingCounts = new AppendingLongBuffer();
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}

Author: pkarmstr, Project: NYBC, Lines: 14, Source: SortedSetDocValuesWriter.java
Example 16: SortedDocValuesWriter

import org.apache.lucene.util.BytesRefHash; // import the required package/class

public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
      BytesRefHash.DEFAULT_CAPACITY,
      new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingLongBuffer();
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}

Author: pkarmstr, Project: NYBC, Lines: 13, Source: SortedDocValuesWriter.java
Example 17: SortedSetDocValuesWriter

import org.apache.lucene.util.BytesRefHash; // import the required package/class

public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
      BytesRefHash.DEFAULT_CAPACITY,
      new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingPackedLongBuffer(PackedInts.COMPACT);
  pendingCounts = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}

Author: yintaoxue, Project: read-open-source-code, Lines: 14, Source: SortedSetDocValuesWriter.java
Example 18: SortedDocValuesWriter

import org.apache.lucene.util.BytesRefHash; // import the required package/class

public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
      BytesRefHash.DEFAULT_CAPACITY,
      new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}

Author: yintaoxue, Project: read-open-source-code, Lines: 13, Source: SortedDocValuesWriter.java
Note: the org.apache.lucene.util.BytesRefHash examples in this article were collected from open-source projects hosted on GitHub, MSDocs, and similar source-code and documentation platforms. The snippets were contributed by the authors of the respective open-source projects, and copyright of the source code remains with those authors. Please consult each project's license before using or redistributing the code, and do not repost without permission.