This article collects typical usage examples of the Java class org.apache.lucene.util.ByteBlockPool. If you are unsure what ByteBlockPool is for, how to use it, or are looking for working examples, the curated code samples below should help.
The ByteBlockPool class belongs to the org.apache.lucene.util package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
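Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: a ByteBlockPool backed by a DirectTrackingAllocator and wrapped in a BytesRefHash to deduplicate terms while a Counter tracks the allocated bytes. It assumes only Lucene core on the classpath; the class name ByteBlockPoolSketch and the sample terms are made up for illustration.

import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.Counter;

public class ByteBlockPoolSketch {
  public static void main(String[] args) {
    // Counter that the allocator updates as the pool grabs new blocks.
    Counter bytesUsed = Counter.newCounter();
    // The pool hands out fixed-size byte blocks; DirectTrackingAllocator reports usage to the counter.
    ByteBlockPool pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
    // BytesRefHash stores deduplicated byte sequences inside the pool.
    BytesRefHash terms = new BytesRefHash(pool);
    int first = terms.add(new BytesRef("lucene"));   // >= 0: a new entry was added
    int second = terms.add(new BytesRef("lucene"));  // negative: (-id - 1) is the existing entry's id
    System.out.println("ids=" + first + "," + second
        + " size=" + terms.size() + " bytesUsed=" + bytesUsed.get());
  }
}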
Example 1: init
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

public void init(ByteBlockPool pool, int startIndex, int endIndex) {
  assert endIndex - startIndex >= 0;
  assert startIndex >= 0;
  assert endIndex >= 0;

  this.pool = pool;
  this.endIndex = endIndex;

  level = 0;
  bufferUpto = startIndex / ByteBlockPool.BYTE_BLOCK_SIZE;
  bufferOffset = bufferUpto * ByteBlockPool.BYTE_BLOCK_SIZE;
  buffer = pool.buffers[bufferUpto];
  upto = startIndex & ByteBlockPool.BYTE_BLOCK_MASK;

  final int firstSize = ByteBlockPool.LEVEL_SIZE_ARRAY[0];

  if (startIndex + firstSize >= endIndex) {
    // There is only this one slice to read
    limit = endIndex & ByteBlockPool.BYTE_BLOCK_MASK;
  } else {
    limit = upto + firstSize - 4;
  }
}
Author: lamsfoundation, Project: lams, Lines: 24, Source: ByteSliceReader.java
Example 2: readFromBytes
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

private void readFromBytes(BytesRef bytes) {
  // Read the pruned flag
  this.setIsPruned(bytes.bytes[bytes.offset++] == 1);
  // Read the size of the set
  int size = Bytes.readInt(bytes);
  // Read the terms into a pool-backed BytesRefHash
  bytesUsed = Counter.newCounter();
  pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
  set = new BytesRefHash(pool);
  BytesRef reusable = new BytesRef();
  for (int i = 0; i < size; i++) {
    Bytes.readBytesRef(bytes, reusable);
    set.add(reusable);
  }
}
Author: sirensolutions, Project: siren-join, Lines: 19, Source: BytesRefTermsSet.java
Example 3: TermsHash
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

public TermsHash(final DocumentsWriterPerThread docWriter, final TermsHashConsumer consumer,
    boolean trackAllocations, final TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.consumer = consumer;
  this.trackAllocations = trackAllocations;
  this.nextTermsHash = nextTermsHash;
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    primary = true;
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  } else {
    primary = false;
  }
}
Author: pkarmstr, Project: NYBC, Lines: 19, Source: TermsHash.java
Example 4: TermsHash
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

TermsHash(final DocumentsWriterPerThread docWriter, boolean trackAllocations, TermsHash nextTermsHash) {
  this.docState = docWriter.docState;
  this.trackAllocations = trackAllocations;
  this.nextTermsHash = nextTermsHash;
  this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
  intPool = new IntBlockPool(docWriter.intBlockAllocator);
  bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

  if (nextTermsHash != null) {
    // We are primary
    termBytePool = bytePool;
    nextTermsHash.termBytePool = bytePool;
  }
}
Author: lamsfoundation, Project: lams, Lines: 15, Source: TermsHash.java
Example 5: SortedSetDocValuesWriter
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
      BytesRefHash.DEFAULT_CAPACITY,
      new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.packedBuilder(PackedInts.COMPACT);
  pendingCounts = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Author: lamsfoundation, Project: lams, Lines: 14, Source: SortedSetDocValuesWriter.java
Example 6: SortedDocValuesWriter
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
      BytesRefHash.DEFAULT_CAPACITY,
      new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = PackedLongValues.deltaPackedBuilder(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Author: lamsfoundation, Project: lams, Lines: 13, Source: SortedDocValuesWriter.java
Example 7: add
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

public void add(int textStart) throws IOException {
  int termID = bytesHash.addByPoolOffset(textStart);
  if (termID >= 0) {      // New posting
    // First time we are seeing this token since we last
    // flushed the hash.
    // Init stream slices
    if (numPostingInt + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE) {
      intPool.nextBuffer();
    }
    if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt * ByteBlockPool.FIRST_LEVEL_SIZE) {
      bytePool.nextBuffer();
    }

    intUptos = intPool.buffer;
    intUptoStart = intPool.intUpto;
    intPool.intUpto += streamCount;

    postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;

    for (int i = 0; i < streamCount; i++) {
      final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
      intUptos[intUptoStart + i] = upto + bytePool.byteOffset;
    }
    postingsArray.byteStarts[termID] = intUptos[intUptoStart];

    newTerm(termID);
  } else {
    termID = (-termID) - 1;
    int intStart = postingsArray.intStarts[termID];
    intUptos = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
    intUptoStart = intStart & IntBlockPool.INT_BLOCK_MASK;
    addTerm(termID);
  }
}
Author: lamsfoundation, Project: lams, Lines: 37, Source: TermsHashPerField.java
Example 8: BytesRefArray
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

/**
 * Creates a new {@link BytesRefArray} with a counter to track allocated bytes
 */
public BytesRefArray(Counter bytesUsed) {
  this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
  pool.nextBuffer();
  bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
      + RamUsageEstimator.NUM_BYTES_INT);
  this.bytesUsed = bytesUsed;
}
Author: lamsfoundation, Project: lams, Lines: 12, Source: BytesRefArray.java
Example 9: readFrom
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

@Override
public void readFrom(StreamInput in) throws IOException {
  this.setIsPruned(in.readBoolean());
  int size = in.readInt();

  bytesUsed = Counter.newCounter();
  pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
  set = new BytesRefHash(pool);

  for (long i = 0; i < size; i++) {
    set.add(in.readBytesRef());
  }
}
Author: sirensolutions, Project: siren-join, Lines: 14, Source: BytesRefTermsSet.java
Example 10: randomByteBlockAllocator
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

private Allocator randomByteBlockAllocator() {
  if (random().nextBoolean()) {
    return new RecyclingByteBlockAllocator();
  } else {
    return new ByteBlockPool.DirectAllocator();
  }
}
Author: europeana, Project: search, Lines: 8, Source: MemoryIndexTest.java
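For context, the Allocator chosen above would typically be handed to the ByteBlockPool constructor, exactly as the other examples on this page do. The standalone sketch below mirrors that wiring; the class name AllocatorSketch and the java.util.Random with a fixed seed are assumptions for illustration (the original test uses LuceneTestCase's random()).

import java.util.Random;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.ByteBlockPool.Allocator;
import org.apache.lucene.util.RecyclingByteBlockAllocator;

public class AllocatorSketch {
  private static final Random RANDOM = new Random(42); // stand-in for LuceneTestCase.random()

  // Mirrors the example above: pick either a recycling or a direct allocator at random.
  private static Allocator randomByteBlockAllocator() {
    return RANDOM.nextBoolean()
        ? new RecyclingByteBlockAllocator()
        : new ByteBlockPool.DirectAllocator();
  }

  public static void main(String[] args) {
    // The allocator backs a ByteBlockPool, as in the constructors shown in the other examples.
    ByteBlockPool pool = new ByteBlockPool(randomByteBlockAllocator());
    pool.nextBuffer(); // allocate the first block before writing into pool.buffer
  }
}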
Example 11: SortedSetDocValuesWriter
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
      BytesRefHash.DEFAULT_CAPACITY,
      new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingLongBuffer();
  pendingCounts = new AppendingLongBuffer();
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Author: pkarmstr, Project: NYBC, Lines: 14, Source: SortedSetDocValuesWriter.java
Example 12: SortedDocValuesWriter
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
      BytesRefHash.DEFAULT_CAPACITY,
      new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingLongBuffer();
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Author: pkarmstr, Project: NYBC, Lines: 13, Source: SortedDocValuesWriter.java
Example 13: add
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

public void add(int textStart) throws IOException {
  int termID = bytesHash.addByPoolOffset(textStart);
  if (termID >= 0) {      // New posting
    // First time we are seeing this token since we last
    // flushed the hash.
    // Init stream slices
    if (numPostingInt + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE) {
      intPool.nextBuffer();
    }
    if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt * ByteBlockPool.FIRST_LEVEL_SIZE) {
      bytePool.nextBuffer();
    }

    intUptos = intPool.buffer;
    intUptoStart = intPool.intUpto;
    intPool.intUpto += streamCount;

    postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;

    for (int i = 0; i < streamCount; i++) {
      final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
      intUptos[intUptoStart + i] = upto + bytePool.byteOffset;
    }
    postingsArray.byteStarts[termID] = intUptos[intUptoStart];

    consumer.newTerm(termID);
  } else {
    termID = (-termID) - 1;
    int intStart = postingsArray.intStarts[termID];
    intUptos = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
    intUptoStart = intStart & IntBlockPool.INT_BLOCK_MASK;
    consumer.addTerm(termID);
  }
}
Author: pkarmstr, Project: NYBC, Lines: 36, Source: TermsHashPerField.java
Example 14: SortedSetDocValuesWriter
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
      BytesRefHash.DEFAULT_CAPACITY,
      new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingPackedLongBuffer(PackedInts.COMPACT);
  pendingCounts = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed() + pendingCounts.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Author: yintaoxue, Project: read-open-source-code, Lines: 14, Source: SortedSetDocValuesWriter.java
Example 15: SortedDocValuesWriter
import org.apache.lucene.util.ByteBlockPool; // import the required package/class

public SortedDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
  this.fieldInfo = fieldInfo;
  this.iwBytesUsed = iwBytesUsed;
  hash = new BytesRefHash(
      new ByteBlockPool(
          new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
      BytesRefHash.DEFAULT_CAPACITY,
      new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
  pending = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
  bytesUsed = pending.ramBytesUsed();
  iwBytesUsed.addAndGet(bytesUsed);
}
Author: yintaoxue, Project: read-open-source-code, Lines: 13, Source: SortedDocValuesWriter.java
Note: The org.apache.lucene.util.ByteBlockPool examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective developers, and copyright remains with the original authors; follow each project's license when using or redistributing the code, and do not republish without permission.