This article collects typical usage examples of the Java class org.apache.lucene.util.packed.PackedInts. If you are wondering what PackedInts does, how to use it, or where to find working examples, the curated class examples below should help.
The PackedInts class belongs to the org.apache.lucene.util.packed package. Twenty code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
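Before the examples, here is a minimal, self-contained sketch of the core PackedInts API, not taken from any of the projects below: PackedInts.bitsRequired computes how many bits are needed per value, and PackedInts.getMutable allocates a packed array whose memory/speed trade-off is controlled by an acceptableOverheadRatio constant such as PackedInts.COMPACT or PackedInts.FASTEST. The class name PackedIntsSketch and the literal values are made up for illustration.

import org.apache.lucene.util.packed.PackedInts;

public class PackedIntsSketch {
  public static void main(String[] args) {
    long maxValue = 999;                                   // largest value we need to store
    int bitsPerValue = PackedInts.bitsRequired(maxValue);  // 10 bits cover the range 0..999
    // COMPACT favours the smallest memory footprint; DEFAULT/FAST/FASTEST accept
    // more overhead in exchange for faster random access.
    PackedInts.Mutable packed = PackedInts.getMutable(1000, bitsPerValue, PackedInts.COMPACT);
    packed.set(42, 999L);
    System.out.println(packed.get(42));            // prints 999
    System.out.println(packed.getBitsPerValue());  // bits actually used per value
  }
}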
Example 1: getLeafCollector
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException {
  finishLeaf();
  context = ctx;
  // Record doc ids (as deltas) and bucket ordinals in packed form for later replay.
  docDeltas = PackedLongValues.packedBuilder(PackedInts.DEFAULT);
  buckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT);
  return new LeafBucketCollector() {
    int lastDoc = 0;

    @Override
    public void collect(int doc, long bucket) throws IOException {
      docDeltas.add(doc - lastDoc);
      buckets.add(bucket);
      lastDoc = doc;
      maxBucket = Math.max(maxBucket, bucket);
    }
  };
}
Developer: justor, Project: elasticsearch_my, Lines: 21, Source: BestBucketsDeferringCollector.java
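The builders above only accumulate values; reading them back happens on the frozen PackedLongValues. As a hedged, stand-alone sketch (class and variable names are illustrative, not from the Elasticsearch code above), building and iterating looks roughly like this:

import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PackedLongValues;

public class PackedLongValuesSketch {
  public static void main(String[] args) {
    PackedLongValues.Builder builder = PackedLongValues.packedBuilder(PackedInts.DEFAULT);
    builder.add(3);   // e.g. a doc-id delta
    builder.add(7);
    PackedLongValues values = builder.build();       // freeze into a compressed, read-only form
    for (PackedLongValues.Iterator it = values.iterator(); it.hasNext(); ) {
      System.out.println(it.next());                 // prints 3, then 7
    }
  }
}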
Example 2: addAddresses
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

private void addAddresses(FieldInfo field, Iterable<Number> values) throws IOException {
  meta.writeVInt(field.number);
  meta.writeByte(Lucene49DocValuesFormat.NUMERIC);
  meta.writeVInt(MONOTONIC_COMPRESSED);
  meta.writeLong(-1L);
  meta.writeLong(data.getFilePointer());
  meta.writeVLong(maxDoc);
  meta.writeVInt(PackedInts.VERSION_CURRENT);
  meta.writeVInt(BLOCK_SIZE);

  final MonotonicBlockPackedWriter writer = new MonotonicBlockPackedWriter(data, BLOCK_SIZE);
  long addr = 0;
  writer.add(addr);
  for (Number v : values) {
    addr += v.longValue();
    writer.add(addr);
  }
  writer.finish();
  meta.writeLong(data.getFilePointer());
}
Developer: lamsfoundation, Project: lams, Lines: 21, Source: Lucene49DocValuesConsumer.java
Example 3: addAddresses
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

private void addAddresses(FieldInfo field, Iterable<Number> values) throws IOException {
  meta.writeVInt(field.number);
  meta.writeByte(Lucene410DocValuesFormat.NUMERIC);
  meta.writeVInt(MONOTONIC_COMPRESSED);
  meta.writeLong(-1L);
  meta.writeLong(data.getFilePointer());
  meta.writeVLong(maxDoc);
  meta.writeVInt(PackedInts.VERSION_CURRENT);
  meta.writeVInt(BLOCK_SIZE);

  final MonotonicBlockPackedWriter writer = new MonotonicBlockPackedWriter(data, BLOCK_SIZE);
  long addr = 0;
  writer.add(addr);
  for (Number v : values) {
    addr += v.longValue();
    writer.add(addr);
  }
  writer.finish();
  meta.writeLong(data.getFilePointer());
}
Developer: lamsfoundation, Project: lams, Lines: 21, Source: Lucene410DocValuesConsumer.java
Example 4: pforDecompress
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

void pforDecompress(byte token) {
  final int bitsPerValue = token & 0x1F;
  if (bitsPerValue == 0) {
    Arrays.fill(nextDocs, 0);
  } else {
    DECODERS[bitsPerValue].decode(data, offset, nextDocs, 0, ITERATIONS[bitsPerValue]);
    offset += BYTE_BLOCK_COUNTS[bitsPerValue];
  }
  if ((token & HAS_EXCEPTIONS) != 0) {
    // there are exceptions
    final int numExceptions = data[offset++];
    final int bitsPerException = data[offset++];
    final int numIterations = (numExceptions + DECODERS[bitsPerException].byteValueCount() - 1) / DECODERS[bitsPerException].byteValueCount();
    DECODERS[bitsPerException].decode(data, offset, nextExceptions, 0, numIterations);
    offset += PackedInts.Format.PACKED.byteCount(PackedInts.VERSION_CURRENT, numExceptions, bitsPerException);
    for (int i = 0; i < numExceptions; ++i) {
      nextDocs[data[offset++]] |= nextExceptions[i] << bitsPerValue;
    }
  }
  for (int previousDoc = docID, i = 0; i < BLOCK_SIZE; ++i) {
    final int doc = previousDoc + 1 + nextDocs[i];
    previousDoc = nextDocs[i] = doc;
  }
}
Developer: lamsfoundation, Project: lams, Lines: 25, Source: PForDeltaDocIdSet.java
Example 5: getPageMemoryUsage
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

private long getPageMemoryUsage(PackedLongValues values, float acceptableOverheadRatio, int pageSize, long pageMinOrdinal, long pageMaxOrdinal) {
  int bitsRequired;
  long pageMemorySize = 0;
  PackedInts.FormatAndBits formatAndBits;
  if (pageMaxOrdinal == Long.MIN_VALUE) {
    // empty page - will use the null reader which just stores size
    pageMemorySize += RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_INT);
  } else {
    long pageMinValue = values.get(pageMinOrdinal);
    long pageMaxValue = values.get(pageMaxOrdinal);
    long pageDelta = pageMaxValue - pageMinValue;
    if (pageDelta != 0) {
      bitsRequired = pageDelta < 0 ? 64 : PackedInts.bitsRequired(pageDelta);
      formatAndBits = PackedInts.fastestFormatAndBits(pageSize, bitsRequired, acceptableOverheadRatio);
      pageMemorySize += formatAndBits.format.longCount(PackedInts.VERSION_CURRENT, pageSize, formatAndBits.bitsPerValue) * RamUsageEstimator.NUM_BYTES_LONG;
      pageMemorySize += RamUsageEstimator.NUM_BYTES_LONG; // min value per page storage
    } else {
      // empty page
      pageMemorySize += RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_INT);
    }
  }
  return pageMemorySize;
}
Developer: baidu, Project: Elasticsearch, Lines: 25, Source: PackedArrayIndexFieldData.java
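The size estimate above hinges on PackedInts.fastestFormatAndBits, which may round the requested bits-per-value up to a faster format depending on the acceptable overhead ratio. A minimal hedged sketch (class name and values are illustrative, not from the project above) of how the chosen width can differ:

import org.apache.lucene.util.packed.PackedInts;

public class FormatAndBitsSketch {
  public static void main(String[] args) {
    int valueCount = 1024;
    int bitsRequired = PackedInts.bitsRequired(1000); // 10 bits
    PackedInts.FormatAndBits compact = PackedInts.fastestFormatAndBits(valueCount, bitsRequired, PackedInts.COMPACT);
    PackedInts.FormatAndBits fastest = PackedInts.fastestFormatAndBits(valueCount, bitsRequired, PackedInts.FASTEST);
    // COMPACT keeps the exact bit width; FASTEST may widen it (e.g. to 16) for cheaper decoding.
    System.out.println(compact.format + " / " + compact.bitsPerValue + " bits");
    System.out.println(fastest.format + " / " + fastest.bitsPerValue + " bits");
  }
}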
Example 6: buildAutomaton
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

/**
 * Builds the final automaton from a list of entries.
 */
private FST<Object> buildAutomaton(BytesRefSorter sorter) throws IOException {
  // Build the automaton.
  final Outputs<Object> outputs = NoOutputs.getSingleton();
  final Object empty = outputs.getNoOutput();
  final Builder<Object> builder = new Builder<>(
      FST.INPUT_TYPE.BYTE1, 0, 0, true, true,
      shareMaxTailLength, outputs, false,
      PackedInts.DEFAULT, true, 15);

  BytesRefBuilder scratch = new BytesRefBuilder();
  BytesRef entry;
  final IntsRefBuilder scratchIntsRef = new IntsRefBuilder();
  int count = 0;
  BytesRefIterator iter = sorter.iterator();
  while ((entry = iter.next()) != null) {
    count++;
    if (scratch.get().compareTo(entry) != 0) {
      builder.add(Util.toIntsRef(entry, scratchIntsRef), empty);
      scratch.copyBytes(entry);
    }
  }
  return count == 0 ? null : builder.finish();
}
Developer: europeana, Project: search, Lines: 28, Source: FSTCompletionBuilder.java
Example 7: getDeletes
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

private PackedLongValues getDeletes(List<AtomicReader> readers) {
  PackedLongValues.Builder deletes = PackedLongValues.monotonicBuilder(PackedInts.COMPACT);
  int deleteCount = 0;
  for (AtomicReader reader : readers) {
    final int maxDoc = reader.maxDoc();
    final Bits liveDocs = reader.getLiveDocs();
    for (int i = 0; i < maxDoc; ++i) {
      if (liveDocs != null && !liveDocs.get(i)) {
        ++deleteCount;
      } else {
        deletes.add(deleteCount);
      }
    }
  }
  return deletes.build();
}
Developer: europeana, Project: search, Lines: 17, Source: SortingMergePolicy.java
Example 8: testDateCompression
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

public void testDateCompression() throws IOException {
  final Directory dir = new RAMDirectory();
  final IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
  final IndexWriter iwriter = new IndexWriter(dir, iwc);

  final long base = 13; // prime
  final long day = 1000L * 60 * 60 * 24;

  final Document doc = new Document();
  final NumericDocValuesField dvf = new NumericDocValuesField("dv", 0);
  doc.add(dvf);
  for (int i = 0; i < 300; ++i) {
    dvf.setLongValue(base + random().nextInt(1000) * day);
    iwriter.addDocument(doc);
  }
  iwriter.forceMerge(1);
  final long size1 = dirSize(dir);

  for (int i = 0; i < 50; ++i) {
    dvf.setLongValue(base + random().nextInt(1000) * day);
    iwriter.addDocument(doc);
  }
  iwriter.forceMerge(1);
  final long size2 = dirSize(dir);
  // make sure the new longs cost less than if they had only been packed
  assertTrue(size2 < size1 + (PackedInts.bitsRequired(day) * 50) / 8);
}
Developer: europeana, Project: search, Lines: 27, Source: BaseCompressingDocValuesFormatTestCase.java
Example 9: testInternalFinalState
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

public void testInternalFinalState() throws Exception {
  final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
  final boolean willRewrite = random().nextBoolean();
  final Builder<Long> builder = new Builder<>(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, willRewrite, PackedInts.DEFAULT, true, 15);
  builder.add(Util.toIntsRef(new BytesRef("stat"), new IntsRefBuilder()), outputs.getNoOutput());
  builder.add(Util.toIntsRef(new BytesRef("station"), new IntsRefBuilder()), outputs.getNoOutput());
  final FST<Long> fst = builder.finish();

  StringWriter w = new StringWriter();
  //Writer w = new OutputStreamWriter(new FileOutputStream("/x/tmp/out.dot"));
  Util.toDot(fst, w, false, false);
  w.close();
  //System.out.println(w.toString());

  // check for accept state at label t
  assertTrue(w.toString().indexOf("[label=\"t\" style=\"bold\"") != -1);
  // check for accept state at label n
  assertTrue(w.toString().indexOf("[label=\"n\" style=\"bold\"") != -1);
}
Developer: europeana, Project: search, Lines: 19, Source: TestFSTs.java
Example 10: TermsWriter
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

TermsWriter(FieldInfo fieldInfo) {
  this.fieldInfo = fieldInfo;
  noOutputs = NoOutputs.getSingleton();

  // This Builder is just used transiently to fragment
  // terms into "good" blocks; we don't save the
  // resulting FST:
  blockBuilder = new Builder<Object>(FST.INPUT_TYPE.BYTE1,
                                     0, 0, true,
                                     true, Integer.MAX_VALUE,
                                     noOutputs,
                                     new FindBlocks(), false,
                                     PackedInts.COMPACT,
                                     true, 15);

  postingsWriter.setField(fieldInfo);
}
Developer: pkarmstr, Project: NYBC, Lines: 19, Source: BlockTreeTermsWriter.java
Example 11: testInternalFinalState
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

public void testInternalFinalState() throws Exception {
  final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
  final boolean willRewrite = random().nextBoolean();
  final Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, willRewrite, PackedInts.DEFAULT, true, 15);
  builder.add(Util.toIntsRef(new BytesRef("stat"), new IntsRef()), outputs.getNoOutput());
  builder.add(Util.toIntsRef(new BytesRef("station"), new IntsRef()), outputs.getNoOutput());
  final FST<Long> fst = builder.finish();

  StringWriter w = new StringWriter();
  //Writer w = new OutputStreamWriter(new FileOutputStream("/x/tmp/out.dot"));
  Util.toDot(fst, w, false, false);
  w.close();
  //System.out.println(w.toString());

  // check for accept state at label t
  assertTrue(w.toString().indexOf("[label=\"t\" style=\"bold\"") != -1);
  // check for accept state at label n
  assertTrue(w.toString().indexOf("[label=\"n\" style=\"bold\"") != -1);
}
Developer: pkarmstr, Project: NYBC, Lines: 19, Source: TestFSTs.java
Example 12: testWritePerformance
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

@Test
public void testWritePerformance() throws Exception {
  System.out.println("Writing " + NumberFormat.getInstance().format(MAX_DOCS) + " values.");
  System.out.println("Float array bytes: " + NumberFormat.getInstance().format(RamUsageEstimator.sizeOf(new float[MAX_DOCS])));

  OffsetGrowableFloatWriter writer = new OffsetGrowableFloatWriter(OffsetGrowableFloatWriter.DEFAULT_PRECISION, 2, MAX_DOCS, PackedInts.DEFAULT);

  long start = System.currentTimeMillis();
  for (int i = 0; i < MAX_DOCS; i++) {
    float value = RandomUtils.nextFloat() * RandomUtils.nextFloat();
    int j = RandomUtils.nextInt(MAX_DOCS);
    writer.setFloat(j, value);
    assertEquals(value, writer.getFloat(j), OffsetGrowableFloatWriter.DEFAULT_PRECISION);
  }
  System.out.println("OffsetGrowableFloatWriter bytes: " + NumberFormat.getInstance().format(writer.ramBytesUsed()) + " in " + (System.currentTimeMillis() - start) + "ms ...");
}
Developer: shopping24, Project: solr-bmax-queryparser, Lines: 18, Source: OffsetGrowableFloatWriterTest.java
Example 13: addSortedSetField
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

@Override
public void addSortedSetField(FieldInfo field, Iterable<BytesRef> values, Iterable<Number> docToOrdCount, Iterable<Number> ords) throws IOException {
  meta.writeVInt(field.number);
  meta.writeByte(DiskDocValuesFormat.SORTED_SET);

  // write the ord -> byte[] as a binary field
  addBinaryField(field, values);

  // write the stream of ords as a numeric field
  // NOTE: we could return an iterator that delta-encodes these within a doc
  addNumericField(field, ords);

  // write the doc -> ord count as an absolute index to the stream
  meta.writeVInt(field.number);
  meta.writeByte(DiskDocValuesFormat.NUMERIC);
  meta.writeVInt(PackedInts.VERSION_CURRENT);
  meta.writeLong(data.getFilePointer());
  meta.writeVLong(maxDoc);
  meta.writeVInt(BLOCK_SIZE);

  final MonotonicBlockPackedWriter writer = new MonotonicBlockPackedWriter(data, BLOCK_SIZE);
  long addr = 0;
  for (Number v : docToOrdCount) {
    addr += v.longValue();
    writer.add(addr);
  }
  writer.finish();
}
Developer: apache, Project: incubator-blur, Lines: 27, Source: DiskDocValuesConsumer.java
Example 14: TermsWriter
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

TermsWriter(FieldInfo fieldInfo) {
  this.fieldInfo = fieldInfo;
  noOutputs = NoOutputs.getSingleton();

  // This Builder is just used transiently to fragment
  // terms into "good" blocks; we don't save the
  // resulting FST:
  blockBuilder = new Builder<Object>(FST.INPUT_TYPE.BYTE1,
                                     0, 0, true,
                                     true, Integer.MAX_VALUE,
                                     noOutputs,
                                     new FindBlocks(), false,
                                     PackedInts.COMPACT,
                                     true, 15);

  this.longsSize = postingsWriter.setField(fieldInfo);
}
Developer: yintaoxue, Project: read-open-source-code, Lines: 19, Source: BlockTreeTermsWriter.java
Example 15: buildAutomaton
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

/**
 * Builds the final automaton from a list of entries.
 */
private FST<Object> buildAutomaton(BytesRefSorter sorter) throws IOException {
  // Build the automaton.
  final Outputs<Object> outputs = NoOutputs.getSingleton();
  final Object empty = outputs.getNoOutput();
  final Builder<Object> builder = new Builder<Object>(
      FST.INPUT_TYPE.BYTE1, 0, 0, true, true,
      shareMaxTailLength, outputs, null, false,
      PackedInts.DEFAULT, true, 15);

  BytesRef scratch = new BytesRef();
  BytesRef entry;
  final IntsRef scratchIntsRef = new IntsRef();
  int count = 0;
  BytesRefIterator iter = sorter.iterator();
  while ((entry = iter.next()) != null) {
    count++;
    if (scratch.compareTo(entry) != 0) {
      builder.add(Util.toIntsRef(entry, scratchIntsRef), empty);
      scratch.copyBytes(entry);
    }
  }
  return count == 0 ? null : builder.finish();
}
Developer: yintaoxue, Project: read-open-source-code, Lines: 28, Source: FSTCompletionBuilder.java
Example 16: precisionFromThreshold
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

/**
 * Compute the required precision so that <code>count</code> distinct entries
 * would be counted with linear counting.
 */
public static int precisionFromThreshold(long count) {
  final long hashTableEntries = (long) Math.ceil(count / MAX_LOAD_FACTOR);
  int precision = PackedInts.bitsRequired(hashTableEntries * Integer.BYTES);
  precision = Math.max(precision, MIN_PRECISION);
  precision = Math.min(precision, MAX_PRECISION);
  return precision;
}
Developer: justor, Project: elasticsearch_my, Lines: 12, Source: HyperLogLogPlusPlus.java
Example 17: build
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

/**
 * Builds an {@link Ordinals} instance from the builder's current state.
 */
public Ordinals build() {
  final float acceptableOverheadRatio = PackedInts.DEFAULT;
  if (numMultiValuedDocs > 0 || MultiOrdinals.significantlySmallerThanSinglePackedOrdinals(maxDoc, numDocsWithValue, getValueCount(), acceptableOverheadRatio)) {
    // MultiOrdinals can be smaller than SinglePackedOrdinals for sparse fields
    return new MultiOrdinals(this, acceptableOverheadRatio);
  } else {
    return new SinglePackedOrdinals(this, acceptableOverheadRatio);
  }
}
Developer: justor, Project: elasticsearch_my, Lines: 13, Source: OrdinalsBuilder.java
Example 18: significantlySmallerThanSinglePackedOrdinals
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

/**
 * Return true if this impl is going to be smaller than {@link SinglePackedOrdinals} by at least 20%.
 */
public static boolean significantlySmallerThanSinglePackedOrdinals(int maxDoc, int numDocsWithValue, long numOrds, float acceptableOverheadRatio) {
  int bitsPerOrd = PackedInts.bitsRequired(numOrds);
  bitsPerOrd = PackedInts.fastestFormatAndBits(numDocsWithValue, bitsPerOrd, acceptableOverheadRatio).bitsPerValue;

  // Compute the worst-case number of bits per value for offsets, e.g. if no docs have a value at the
  // beginning of the block and all docs have one at the end of the block
  final float avgValuesPerDoc = (float) numDocsWithValue / maxDoc;
  final int maxDelta = (int) Math.ceil(OFFSETS_PAGE_SIZE * (1 - avgValuesPerDoc) * avgValuesPerDoc);
  int bitsPerOffset = PackedInts.bitsRequired(maxDelta) + 1; // +1 because of the sign
  bitsPerOffset = PackedInts.fastestFormatAndBits(maxDoc, bitsPerOffset, acceptableOverheadRatio).bitsPerValue;

  final long expectedMultiSizeInBytes = (long) numDocsWithValue * bitsPerOrd + (long) maxDoc * bitsPerOffset;
  final long expectedSingleSizeInBytes = (long) maxDoc * bitsPerOrd;
  return expectedMultiSizeInBytes < 0.8f * expectedSingleSizeInBytes;
}
Developer: justor, Project: elasticsearch_my, Lines: 18, Source: MultiOrdinals.java
Example 19: buildOrdinalMap
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

private static OrdinalMap buildOrdinalMap(AtomicParentChildFieldData[] atomicFD, String parentType) throws IOException {
  final SortedDocValues[] ordinals = new SortedDocValues[atomicFD.length];
  for (int i = 0; i < ordinals.length; ++i) {
    ordinals[i] = atomicFD[i].getOrdinalsValues(parentType);
  }
  return OrdinalMap.build(null, ordinals, PackedInts.DEFAULT);
}
Developer: justor, Project: elasticsearch_my, Lines: 8, Source: ParentChildIndexFieldData.java
Example 20: loadBytesFixedSorted
import org.apache.lucene.util.packed.PackedInts; // import the required package/class

private SortedDocValues loadBytesFixedSorted(FieldInfo field, IndexInput data, IndexInput index) throws IOException {
  CodecUtil.checkHeader(data, Lucene40DocValuesFormat.BYTES_FIXED_SORTED_CODEC_NAME_DAT,
                        Lucene40DocValuesFormat.BYTES_FIXED_SORTED_VERSION_START,
                        Lucene40DocValuesFormat.BYTES_FIXED_SORTED_VERSION_CURRENT);
  CodecUtil.checkHeader(index, Lucene40DocValuesFormat.BYTES_FIXED_SORTED_CODEC_NAME_IDX,
                        Lucene40DocValuesFormat.BYTES_FIXED_SORTED_VERSION_START,
                        Lucene40DocValuesFormat.BYTES_FIXED_SORTED_VERSION_CURRENT);

  final int fixedLength = data.readInt();
  final int valueCount = index.readInt();

  PagedBytes bytes = new PagedBytes(16);
  bytes.copy(data, fixedLength * (long) valueCount);
  final PagedBytes.Reader bytesReader = bytes.freeze(true);
  final PackedInts.Reader reader = PackedInts.getReader(index);
  ramBytesUsed.addAndGet(bytesReader.ramBytesUsed() + reader.ramBytesUsed());

  return correctBuggyOrds(new SortedDocValues() {
    @Override
    public int getOrd(int docID) {
      return (int) reader.get(docID);
    }

    @Override
    public BytesRef lookupOrd(int ord) {
      final BytesRef term = new BytesRef();
      bytesReader.fillSlice(term, fixedLength * (long) ord, fixedLength);
      return term;
    }

    @Override
    public int getValueCount() {
      return valueCount;
    }
  });
}
Developer: lamsfoundation, Project: lams, Lines: 37, Source: Lucene40DocValuesReader.java
Note: the org.apache.lucene.util.packed.PackedInts class examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors, and any distribution or use should follow the corresponding project's license. Do not republish without permission.