This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext. If you have been wondering what HFileBlockDefaultEncodingContext is for and how to use it, the hand-picked examples below should help.
The HFileBlockDefaultEncodingContext class belongs to the org.apache.hadoop.hbase.io.encoding package. Twenty code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
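For orientation before the examples: a minimal sketch of constructing the context directly. The class and demo names below other than HFileBlockDefaultEncodingContext's own API are our assumptions, and the three-argument constructor shown matches the HBase 1.x-era examples on this page (Examples 1, 4, 8); older releases used a (Compression.Algorithm, DataBlockEncoding, byte[]) form as in Examples 10 and 12.
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

public class EncodingContextDemo { // hypothetical demo class, not from the examples
  public static void main(String[] args) {
    // A context for FAST_DIFF-encoded data blocks; passing null instead of an
    // encoding yields the pass-through context used for defaultBlockEncodingCtx
    // in Examples 1, 4 and 8 below.
    HFileContext fileContext = new HFileContextBuilder().build();
    HFileBlockDefaultEncodingContext ctx = new HFileBlockDefaultEncodingContext(
        DataBlockEncoding.FAST_DIFF, HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
    System.out.println(ctx.getDataBlockEncoding());
  }
}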
Example 1: Writer
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
/**
 * @param dataBlockEncoder data block encoding algorithm to use
 */
public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext) {
  this.dataBlockEncoder = dataBlockEncoder != null
      ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null,
      HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
  dataBlockEncodingCtx = this.dataBlockEncoder
      .newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
  if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) {
    throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
        " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " +
        fileContext.getBytesPerChecksum());
  }
  baosInMemory = new ByteArrayOutputStream();
  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i)
    prevOffsetByType[i] = -1;
  this.fileContext = fileContext;
}
Author: fengchen8086 | Project: ditb | Lines: 26 | Source: HFileBlock.java
Example 2: createBlockOnDisk
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
private HFileBlock createBlockOnDisk(List<KeyValue> kvs, HFileBlock block, boolean useTags)
    throws IOException {
  int size;
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER,
      block.getHFileContext());
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  baos.write(block.getDummyHeaderForVersion());
  DataOutputStream dos = new DataOutputStream(baos);
  blockEncoder.startBlockEncoding(context, dos);
  for (KeyValue kv : kvs) {
    blockEncoder.encode(kv, context, dos);
  }
  BufferGrabbingByteArrayOutputStream stream = new BufferGrabbingByteArrayOutputStream();
  baos.writeTo(stream);
  blockEncoder.endBlockEncoding(context, dos, stream.getBuffer(), BlockType.DATA);
  byte[] encodedBytes = baos.toByteArray();
  size = encodedBytes.length - block.getDummyHeaderForVersion().length;
  return new HFileBlock(context.getBlockType(), size, size, -1, ByteBuffer.wrap(encodedBytes),
      HFileBlock.FILL_HEADER, 0, block.getOnDiskDataSizeWithHeader(), block.getHFileContext());
}
Author: fengchen8086 | Project: ditb | Lines: 23 | Source: TestHFileDataBlockEncoder.java
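BufferGrabbingByteArrayOutputStream is a test helper that is not shown on this page; endBlockEncoding needs the raw backing byte array rather than a copy. A minimal sketch consistent with its use above, intended as an inner class of the test (an assumption, not the verbatim HBase class):
// Captures the backing array handed to write(byte[], int, int) when
// baos.writeTo(stream) is called, so the caller can read it without copying.
private static class BufferGrabbingByteArrayOutputStream extends ByteArrayOutputStream {
  private byte[] ourBytes;

  @Override
  public synchronized void write(byte[] b, int off, int len) {
    this.ourBytes = b;
  }

  public byte[] getBuffer() {
    return ourBytes;
  }
}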
Example 3: startBlockEncoding
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
@Override
public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOutputStream out)
    throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException(this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context.");
  }
  HFileBlockDefaultEncodingContext encodingCtx =
      (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding(out);
  PrefixTreeEncoder builder = EncoderFactory.checkOut(out, encodingCtx.getHFileContext()
      .isIncludesMvcc());
  PrefixTreeEncodingState state = new PrefixTreeEncodingState();
  state.builder = builder;
  blkEncodingCtx.setEncodingState(state);
}
Author: fengchen8086 | Project: ditb | Lines: 19 | Source: PrefixTreeCodec.java
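EncoderFactory.checkOut suggests a pooled-encoder pattern: the matching endBlockEncoding is expected to flush the builder and check it back in. A hedged sketch of that counterpart, reconstructed from the pattern above rather than copied from this page (signature and body are assumptions):
@Override
public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out,
    byte[] uncompressedBytesWithHeader) throws IOException {
  PrefixTreeEncodingState state = (PrefixTreeEncodingState) encodingCtx.getEncodingState();
  PrefixTreeEncoder builder = state.builder;
  builder.flush();                 // finish writing the prefix-tree structure
  EncoderFactory.checkIn(builder); // return the pooled encoder
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}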
Example 4: Writer
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
/**
 * @param dataBlockEncoder data block encoding algorithm to use
 */
public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext) {
  this.dataBlockEncoder = dataBlockEncoder != null
      ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null,
      HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
  dataBlockEncodingCtx = this.dataBlockEncoder
      .newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
  if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) {
    throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
        " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " +
        fileContext.getBytesPerChecksum());
  }
  baosInMemory = new ByteArrayOutputStream();
  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i)
    prevOffsetByType[i] = -1;
  this.fileContext = fileContext;
}
Author: tenggyut | Project: HIndex | Lines: 26 | Source: HFileBlock.java
Example 5: Writer
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
/**
 * @param compressionAlgorithm compression algorithm to use
 * @param dataBlockEncoder data block encoding algorithm to use
 */
public Writer(Compression.Algorithm compressionAlgorithm,
    HFileDataBlockEncoder dataBlockEncoder, boolean includesMemstoreTS, boolean includesTag) {
  compressAlgo = compressionAlgorithm == null ? NONE : compressionAlgorithm;
  this.dataBlockEncoder = dataBlockEncoder != null
      ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  meta = new HFileContextBuilder()
      .withHBaseCheckSum(false)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(includesTag)
      .withCompression(compressionAlgorithm)
      .build();
  defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null, DUMMY_HEADER, meta);
  dataBlockEncodingCtx =
      this.dataBlockEncoder.newDataBlockEncodingContext(DUMMY_HEADER, meta);
  baosInMemory = new ByteArrayOutputStream();
  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i)
    prevOffsetByType[i] = -1;
}
Author: tenggyut | Project: HIndex | Lines: 28 | Source: TestHFileBlockCompatibility.java
Example 6: encodeKeyValues
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
/**
 * Copied from BufferedDataBlockEncoder. Almost definitely can be improved, but I'm not familiar
 * enough with the concept of the HFileBlockEncodingContext.
 */
@Override
public void encodeKeyValues(ByteBuffer in,
    HFileBlockEncodingContext blkEncodingCtx) throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException(this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context.");
  }
  HFileBlockDefaultEncodingContext encodingCtx
      = (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding();
  DataOutputStream dataOut = encodingCtx.getOutputStreamForEncoder();
  internalEncodeKeyValues(dataOut, in, encodingCtx.getHFileContext().isIncludesMvcc(),
      encodingCtx.getHFileContext().isIncludesTags());
  // do I need to check this, or will it always be DataBlockEncoding.PREFIX_TREE?
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}
Author: tenggyut | Project: HIndex | Lines: 27 | Source: PrefixTreeCodec.java
Example 7: startBlockEncoding
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
@Override
public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx,
    DataOutputStream out) throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException(this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the "
        + "encoding context.");
  }
  HFileBlockDefaultEncodingContext encodingCtx =
      (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding(out);
  NoneEncoder encoder = new NoneEncoder(out, encodingCtx);
  NoneEncodingState state = new NoneEncodingState();
  state.encoder = encoder;
  blkEncodingCtx.setEncodingState(state);
}
Author: apache | Project: hbase | Lines: 18 | Source: NoOpDataBlockEncoder.java
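NoneEncodingState is not shown on this page; it is a small holder that parks the per-block NoneEncoder on the encoding context so later encode(...) calls can retrieve it. A minimal sketch consistent with its use in Example 7 (an assumption about the inner class, not verbatim source):
// State object stored via blkEncodingCtx.setEncodingState(...); EncodingState
// is org.apache.hadoop.hbase.io.encoding.EncodingState.
private static class NoneEncodingState extends EncodingState {
  NoneEncoder encoder = null;
}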
Example 8: Writer
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
/**
 * @param dataBlockEncoder data block encoding algorithm to use
 */
public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext) {
  if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) {
    throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
        " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " +
        fileContext.getBytesPerChecksum());
  }
  this.dataBlockEncoder = dataBlockEncoder != null ?
      dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  this.dataBlockEncodingCtx = this.dataBlockEncoder
      .newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
  // TODO: This should be lazily instantiated since we usually do NOT need this default encoder
  this.defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null,
      HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
  // TODO: Set BAOS initial size. Use fileContext.getBlocksize() and add for header/checksum
  baosInMemory = new ByteArrayOutputStream();
  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i) {
    prevOffsetByType[i] = UNSET;
  }
  // TODO: Why fileContext saved away when we have dataBlockEncoder and/or
  // defaultDataBlockEncoder?
  this.fileContext = fileContext;
}
Author: apache | Project: hbase | Lines: 27 | Source: HFileBlock.java
Example 9: createBlockOnDisk
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
private HFileBlock createBlockOnDisk(List<KeyValue> kvs, HFileBlock block, boolean useTags)
    throws IOException {
  int size;
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER,
      block.getHFileContext());
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  baos.write(block.getDummyHeaderForVersion());
  DataOutputStream dos = new DataOutputStream(baos);
  blockEncoder.startBlockEncoding(context, dos);
  for (KeyValue kv : kvs) {
    blockEncoder.encode(kv, context, dos);
  }
  blockEncoder.endBlockEncoding(context, dos, baos.getBuffer(), BlockType.DATA);
  byte[] encodedBytes = baos.toByteArray();
  size = encodedBytes.length - block.getDummyHeaderForVersion().length;
  return new HFileBlock(context.getBlockType(), size, size, -1, ByteBuffer.wrap(encodedBytes),
      HFileBlock.FILL_HEADER, 0, block.getOnDiskDataSizeWithHeader(), -1,
      block.getHFileContext());
}
Author: apache | Project: hbase | Lines: 22 | Source: TestHFileDataBlockEncoder.java
Example 10: Writer
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
/**
 * @param compressionAlgorithm compression algorithm to use
 * @param dataBlockEncoder data block encoding algorithm to use
 */
public Writer(Compression.Algorithm compressionAlgorithm,
    HFileDataBlockEncoder dataBlockEncoder, boolean includesMemstoreTS) {
  compressAlgo = compressionAlgorithm == null ? NONE : compressionAlgorithm;
  this.dataBlockEncoder = dataBlockEncoder != null
      ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  defaultBlockEncodingCtx =
      new HFileBlockDefaultEncodingContext(compressionAlgorithm, null, DUMMY_HEADER);
  dataBlockEncodingCtx =
      this.dataBlockEncoder.newDataBlockEncodingContext(compressionAlgorithm, DUMMY_HEADER);
  baosInMemory = new ByteArrayOutputStream();
  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i)
    prevOffsetByType[i] = -1;
  this.includesMemstoreTS = includesMemstoreTS;
}
Author: cloud-software-foundation | Project: c5 | Lines: 26 | Source: TestHFileBlockCompatibility.java
Example 11: encodeKeyValues
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
/**
 * Copied from BufferedDataBlockEncoder. Almost definitely can be improved, but I'm not familiar
 * enough with the concept of the HFileBlockEncodingContext.
 */
@Override
public void encodeKeyValues(ByteBuffer in, boolean includesMvccVersion,
    HFileBlockEncodingContext blkEncodingCtx) throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException(this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context.");
  }
  HFileBlockDefaultEncodingContext encodingCtx
      = (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding();
  DataOutputStream dataOut = encodingCtx.getOutputStreamForEncoder();
  internalEncodeKeyValues(dataOut, in, includesMvccVersion);
  // do I need to check this, or will it always be DataBlockEncoding.PREFIX_TREE?
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}
Author: cloud-software-foundation | Project: c5 | Lines: 26 | Source: PrefixTreeCodec.java
Example 12: Writer
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
/**
 * @param compressionAlgorithm compression algorithm to use
 * @param dataBlockEncoder data block encoding algorithm to use
 */
public Writer(Compression.Algorithm compressionAlgorithm,
    HFileDataBlockEncoder dataBlockEncoder, boolean includesMemstoreTS) {
  compressAlgo = compressionAlgorithm == null ? NONE : compressionAlgorithm;
  this.dataBlockEncoder = dataBlockEncoder != null
      ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  defaultBlockEncodingCtx =
      new HFileBlockDefaultEncodingContext(compressionAlgorithm, null, DUMMY_HEADER);
  dataBlockEncodingCtx =
      this.dataBlockEncoder.newOnDiskDataBlockEncodingContext(
          compressionAlgorithm, DUMMY_HEADER);
  baosInMemory = new ByteArrayOutputStream();
  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i)
    prevOffsetByType[i] = -1;
  this.includesMemstoreTS = includesMemstoreTS;
}
Author: daidong | Project: DominoHBase | Lines: 26 | Source: TestHFileBlockCompatibility.java
Example 13: newDataBlockEncodingContext
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
@Override
public HFileBlockEncodingContext newDataBlockEncodingContext(
    byte[] dummyHeader, HFileContext fileContext) {
  DataBlockEncoder encoder = encoding.getEncoder();
  if (encoder != null) {
    return encoder.newDataBlockEncodingContext(encoding, dummyHeader, fileContext);
  }
  return new HFileBlockDefaultEncodingContext(null, dummyHeader, fileContext);
}
Author: fengchen8086 | Project: ditb | Lines: 10 | Source: HFileDataBlockEncoderImpl.java
Example 14: Writer
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext meta) {
  super(dataBlockEncoder, meta);
  compressAlgo = meta.getCompression() == null ? NONE : meta.getCompression();
  this.dataBlockEncoder = dataBlockEncoder != null ? dataBlockEncoder
      : NoOpDataBlockEncoder.INSTANCE;
  defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null, DUMMY_HEADER, meta);
  dataBlockEncodingCtx = this.dataBlockEncoder.newDataBlockEncodingContext(DUMMY_HEADER, meta);
  baosInMemory = new ByteArrayOutputStream();
  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i)
    prevOffsetByType[i] = -1;
}
Author: fengchen8086 | Project: ditb | Lines: 14 | Source: TestHFileBlockCompatibility.java
Example 15: newDataBlockEncodingContext
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
@Override
public HFileBlockEncodingContext newDataBlockEncodingContext(
    DataBlockEncoding encoding, byte[] header, HFileContext meta) {
  if (DataBlockEncoding.PREFIX_TREE != encoding) {
    // I'm not sure why encoding is in the interface. Each encoder implementation should
    // probably know its encoding type.
    throw new IllegalArgumentException("only DataBlockEncoding.PREFIX_TREE supported");
  }
  return new HFileBlockDefaultEncodingContext(encoding, header, meta);
}
Author: fengchen8086 | Project: ditb | Lines: 11 | Source: PrefixTreeCodec.java
Example 16: beforeWriteToDisk
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
/**
 * Precondition: a non-encoded buffer. Postcondition: on-disk encoding.
 *
 * The encoded results can be stored in {@link HFileBlockEncodingContext}.
 *
 * @throws IOException
 */
@Override
public void beforeWriteToDisk(ByteBuffer in,
    HFileBlockEncodingContext encodeCtx,
    BlockType blockType) throws IOException {
  if (encoding == DataBlockEncoding.NONE) {
    // there is no need to encode the block before writing it to disk
    ((HFileBlockDefaultEncodingContext) encodeCtx).compressAfterEncodingWithBlockType(
        in.array(), blockType);
    return;
  }
  encodeBufferToHFileBlockBuffer(in, encoding, encodeCtx);
}
Author: tenggyut | Project: HIndex | Lines: 20 | Source: HFileDataBlockEncoderImpl.java
Example 17: beforeWriteToDisk
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
@Override
public void beforeWriteToDisk(ByteBuffer in,
    HFileBlockEncodingContext encodeCtx, BlockType blockType)
    throws IOException {
  if (!(encodeCtx.getClass().getName().equals(
      HFileBlockDefaultEncodingContext.class.getName()))) {
    throw new IOException(this.getClass().getName() + " only accepts " +
        HFileBlockDefaultEncodingContext.class.getName() + ".");
  }
  HFileBlockDefaultEncodingContext defaultContext =
      (HFileBlockDefaultEncodingContext) encodeCtx;
  defaultContext.compressAfterEncodingWithBlockType(in.array(), blockType);
}
Author: tenggyut | Project: HIndex | Lines: 15 | Source: NoOpDataBlockEncoder.java
Example 18: createBlockOnDisk
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
private HFileBlock createBlockOnDisk(HFileBlock block, boolean useTags) throws IOException {
  int size;
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      blockEncoder.getDataBlockEncoding(),
      HConstants.HFILEBLOCK_DUMMY_HEADER, block.getHFileContext());
  context.setDummyHeader(block.getDummyHeaderForVersion());
  blockEncoder.beforeWriteToDisk(block.getBufferWithoutHeader(), context, block.getBlockType());
  byte[] encodedBytes = context.getUncompressedBytesWithHeader();
  size = encodedBytes.length - block.getDummyHeaderForVersion().length;
  return new HFileBlock(context.getBlockType(), size, size, -1,
      ByteBuffer.wrap(encodedBytes), HFileBlock.FILL_HEADER, 0,
      block.getOnDiskDataSizeWithHeader(), block.getHFileContext());
}
Author: tenggyut | Project: HIndex | Lines: 14 | Source: TestHFileDataBlockEncoder.java
Example 19: writeBlock
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
private void writeBlock(List<Cell> kvs, HFileContext fileContext, boolean useTags)
    throws IOException {
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER,
      fileContext);
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER);
  DataOutputStream dos = new DataOutputStream(baos);
  blockEncoder.startBlockEncoding(context, dos);
  for (Cell kv : kvs) {
    blockEncoder.encode(kv, context, dos);
  }
}
Author: apache | Project: hbase | Lines: 15 | Source: TestHFileDataBlockEncoder.java
Example 20: beforeWriteToDisk
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; // import the required package/class
/**
 * Precondition: a non-encoded buffer. Postcondition: on-disk encoding.
 *
 * The encoded results can be stored in {@link HFileBlockEncodingContext}.
 *
 * @throws IOException
 */
@Override
public void beforeWriteToDisk(ByteBuffer in,
    boolean includesMemstoreTS,
    HFileBlockEncodingContext encodeCtx,
    BlockType blockType) throws IOException {
  if (encoding == DataBlockEncoding.NONE) {
    // there is no need to encode the block before writing it to disk
    ((HFileBlockDefaultEncodingContext) encodeCtx).compressAfterEncodingWithBlockType(
        in.array(), blockType);
    return;
  }
  encodeBufferToHFileBlockBuffer(in, encoding,
      includesMemstoreTS, encodeCtx);
}
Author: cloud-software-foundation | Project: c5 | Lines: 22 | Source: HFileDataBlockEncoderImpl.java
Note: The org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original authors; copyright remains with those authors, and any distribution or use is subject to each project's license. Do not republish without permission.