This article collects typical usage examples of the Java class org.apache.hadoop.hbase.codec.Codec. If you have been wondering what exactly the Codec class does and how to use it, the curated examples below should help.
The Codec class belongs to the org.apache.hadoop.hbase.codec package. 18 code examples are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
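Every example below follows the same round trip: obtain a Codec.Encoder over an output stream, write Cells, flush, then obtain a Codec.Decoder over the matching input stream and iterate with advance()/current(). As a warm-up, here is a minimal, self-contained sketch of that pattern, assuming an HBase release contemporary with these examples (where KeyValueCodec lives in the same org.apache.hadoop.hbase.codec package):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.codec.KeyValueCodec;
import org.apache.hadoop.hbase.util.Bytes;

public class CodecRoundTrip {
  public static void main(String[] args) throws IOException {
    Codec codec = new KeyValueCodec();

    // Encode one Cell into an in-memory buffer.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    Codec.Encoder encoder = codec.getEncoder(baos);
    encoder.write(new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v")));
    encoder.flush();

    // Decode it back: advance() returns false once the stream is exhausted.
    Codec.Decoder decoder =
        codec.getDecoder(new ByteArrayInputStream(baos.toByteArray()));
    while (decoder.advance()) {
      Cell cell = decoder.current();
      System.out.println(Bytes.toString(CellUtil.cloneRow(cell))); // prints "r"
    }
  }
}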
Example 1: doCodec
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
static void doCodec(final Codec codec, final Cell[] cells, final int cycles, final int count,
    final int initialBufferSize)
    throws IOException {
  byte[] bytes = null;
  Cell[] cellsDecoded = null;
  for (int i = 0; i < cycles; i++) {
    ByteArrayOutputStream baos = new ByteArrayOutputStream(initialBufferSize);
    Codec.Encoder encoder = codec.getEncoder(baos);
    bytes = runEncoderTest(i, initialBufferSize, baos, encoder, cells);
  }
  for (int i = 0; i < cycles; i++) {
    ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
    Codec.Decoder decoder = codec.getDecoder(bais);
    cellsDecoded = CodecPerformance.runDecoderTest(i, count, decoder);
  }
  verifyCells(cells, cellsDecoded);
}
Author: fengchen8086 | Project: ditb | Lines: 18 | Source: CodecPerformance.java
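runEncoderTest, runDecoderTest, and verifyCells are helpers defined elsewhere in CodecPerformance and are not shown here. A hypothetical driver for doCodec might look like the sketch below; the cell contents, cycle count, and 4 KB initial buffer are illustrative values, not taken from the original benchmark.

// Hypothetical driver for the benchmark above (illustrative values only);
// the helpers it relies on live elsewhere in CodecPerformance.
static void runDoCodecExample() throws IOException {
  Cell[] cells = new Cell[] {
      new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v1")),
      new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v2")),
  };
  // 10 encode/decode cycles, expecting cells.length cells back per decode,
  // starting from a 4 KB buffer.
  doCodec(new KeyValueCodec(), cells, 10, cells.length, 4 * 1024);
}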
Example 2: timerTests
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
private static void timerTests(final IPCUtil util, final int count, final int size,
    final Codec codec, final CompressionCodec compressor)
    throws IOException {
  final int cycles = 1000;
  StopWatch timer = new StopWatch();
  timer.start();
  for (int i = 0; i < cycles; i++) {
    timerTest(util, timer, count, size, codec, compressor, false);
  }
  timer.stop();
  LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + false +
      ", count=" + count + ", size=" + size + ", took=" + timer.getTime() + "ms");
  timer.reset();
  timer.start();
  for (int i = 0; i < cycles; i++) {
    timerTest(util, timer, count, size, codec, compressor, true);
  }
  timer.stop();
  LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + true +
      ", count=" + count + ", size=" + size + ", took=" + timer.getTime() + "ms");
}
Author: fengchen8086 | Project: ditb | Lines: 22 | Source: TestIPCUtil.java
Example 3: getConnection
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
/**
 * Get a connection from the pool, or create a new one and add it to the
 * pool. Connections to a given host/port are reused.
 */
protected Connection getConnection(User ticket, Call call, InetSocketAddress addr,
    final Codec codec, final CompressionCodec compressor)
    throws IOException {
  if (!running.get()) throw new StoppedRpcClientException();
  Connection connection;
  ConnectionId remoteId =
      new ConnectionId(ticket, call.md.getService().getName(), addr);
  synchronized (connections) {
    connection = connections.get(remoteId);
    if (connection == null) {
      connection = createConnection(remoteId, this.codec, this.compressor);
      connections.put(remoteId, connection);
    }
  }
  return connection;
}
Author: grokcoder | Project: pbase | Lines: 22 | Source: RpcClientImpl.java
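The method above is a classic get-or-create pool keyed by ConnectionId, with map access guarded by a coarse lock. On Java 8+ the same pattern can be written atomically with ConcurrentHashMap.computeIfAbsent; the generic sketch below illustrates the idea and is not the HBase API (Key, Conn, and Factory are placeholder names):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Generic illustration of the pool-or-create pattern used by getConnection;
// Key and Conn stand in for ConnectionId and Connection.
class ConnectionPool<Key, Conn> {
  private final ConcurrentMap<Key, Conn> connections = new ConcurrentHashMap<>();

  interface Factory<K, C> { C create(K key); }

  // Atomically returns the existing connection for key, or creates one.
  Conn getOrCreate(Key key, Factory<Key, Conn> factory) {
    return connections.computeIfAbsent(key, factory::create);
  }
}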
Example 4: testEmptyWorks
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testEmptyWorks() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  MessageCodec cmc = new MessageCodec();
  Codec.Encoder encoder = cmc.getEncoder(dos);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  assertEquals(0, offset);
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = cmc.getDecoder(dis);
  assertFalse(decoder.advance());
  dis.close();
  assertEquals(0, cis.getCount());
}
Author: tenggyut | Project: HIndex | Lines: 19 | Source: TestCellMessageCodec.java
Example 5: testOne
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  MessageCodec cmc = new MessageCodec();
  Codec.Encoder encoder = cmc.getEncoder(dos);
  final KeyValue kv =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  encoder.write(kv);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = cmc.getDecoder(dis);
  assertTrue(decoder.advance()); // First read should pull in the KV
  assertFalse(decoder.advance()); // Second read should trip over the end-of-stream marker and return false
  dis.close();
  assertEquals(offset, cis.getCount());
}
Author: tenggyut | Project: HIndex | Lines: 22 | Source: TestCellMessageCodec.java
Example 6: getConnection
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
protected Connection getConnection(User ticket, Call call, InetSocketAddress addr,
    int rpcTimeout, final Codec codec, final CompressionCodec compressor)
    throws IOException, InterruptedException {
  if (!running.get()) throw new StoppedRpcClientException();
  Connection connection;
  ConnectionId remoteId =
      new ConnectionId(ticket, call.md.getService().getName(), addr, rpcTimeout);
  synchronized (connections) {
    connection = connections.get(remoteId);
    if (connection == null) {
      connection = createConnection(remoteId, this.codec, this.compressor);
      connections.put(remoteId, connection);
    }
  }
  connection.addCall(call);
  // We don't invoke the method below inside the "synchronized (connections)"
  // block above, because if the server happens to be slow it would take longer
  // to establish a connection, and that would slow the entire system down.
  // Moreover, if the connection is currently being created, many threads will
  // wait here, since setupIOstreams is synchronized. If the connection fails
  // with a timeout, they all fail simultaneously. This is checked in
  // setupIOstreams.
  connection.setupIOstreams();
  return connection;
}
Author: tenggyut | Project: HIndex | Lines: 27 | Source: RpcClient.java
Example 7: testEmptyWorks
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testEmptyWorks() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  KeyValueCodec kvc = new KeyValueCodec();
  Codec.Encoder encoder = kvc.getEncoder(dos);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  assertEquals(0, offset);
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = kvc.getDecoder(dis);
  assertFalse(decoder.advance());
  dis.close();
  assertEquals(0, cis.getCount());
}
Author: tenggyut | Project: HIndex | Lines: 20 | Source: TestKeyValueCodec.java
Example 8: testOne
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  KeyValueCodec kvc = new KeyValueCodec();
  Codec.Encoder encoder = kvc.getEncoder(dos);
  final KeyValue kv =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  final long length = kv.getLength() + Bytes.SIZEOF_INT;
  encoder.write(kv);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  assertEquals(length, offset);
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = kvc.getDecoder(dis);
  assertTrue(decoder.advance()); // First read should pull in the KV
  // Second read should trip over the end-of-stream marker and return false
  assertFalse(decoder.advance());
  dis.close();
  assertEquals(length, cis.getCount());
}
Author: tenggyut | Project: HIndex | Lines: 26 | Source: TestKeyValueCodec.java
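The assertEquals(length, offset) check pins down KeyValueCodec's wire format: each cell is written as a 4-byte length prefix (Bytes.SIZEOF_INT) followed by the serialized KeyValue bytes. Under that assumption, a single frame produced by the test above could also be read back by hand, reusing the baos buffer from the test:

// Manual decode of a single KeyValueCodec frame, assuming the
// int-length-prefix format that the assertion above verifies.
DataInputStream in =
    new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
int len = in.readInt();            // the Bytes.SIZEOF_INT prefix
byte[] buf = new byte[len];
in.readFully(buf);                 // exactly kv.getLength() bytes
KeyValue decoded = new KeyValue(buf, 0, len);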
Example 9: testEmptyWorks
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testEmptyWorks() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  Codec codec = new CellCodec();
  Codec.Encoder encoder = codec.getEncoder(dos);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  assertEquals(0, offset);
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = codec.getDecoder(dis);
  assertFalse(decoder.advance());
  dis.close();
  assertEquals(0, cis.getCount());
}
Author: tenggyut | Project: HIndex | Lines: 20 | Source: TestCellCodec.java
Example 10: testOne
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  Codec codec = new CellCodec();
  Codec.Encoder encoder = codec.getEncoder(dos);
  final KeyValue kv =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  kv.setMvccVersion(Long.MAX_VALUE);
  encoder.write(kv);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = codec.getDecoder(dis);
  assertTrue(decoder.advance()); // First read should pull in the KV
  // Second read should trip over the end-of-stream marker and return false
  assertFalse(decoder.advance());
  dis.close();
  assertEquals(offset, cis.getCount());
}
Author: tenggyut | Project: HIndex | Lines: 25 | Source: TestCellCodec.java
Example 11: buildCellBlock
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
private boolean buildCellBlock(final Codec codec, final CompressionCodec compressor,
    final CellScanner cellScanner, OutputStreamSupplier supplier) throws IOException {
  if (cellScanner == null) {
    return false;
  }
  if (codec == null) {
    throw new CellScannerButNoCodecException();
  }
  int bufferSize = cellBlockBuildingInitialBufferSize;
  encodeCellsTo(supplier.get(bufferSize), cellScanner, codec, compressor);
  if (LOG.isTraceEnabled() && bufferSize < supplier.size()) {
    LOG.trace("Buffer grew from initial bufferSize=" + bufferSize + " to " + supplier.size()
        + "; up hbase.ipc.cellblock.building.initial.buffersize?");
  }
  return true;
}
Author: apache | Project: hbase | Lines: 17 | Source: CellBlockBuilder.java
Example 12: encodeCellsTo
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec,
    CompressionCodec compressor) throws IOException {
  Compressor poolCompressor = null;
  try {
    if (compressor != null) {
      if (compressor instanceof Configurable) {
        ((Configurable) compressor).setConf(this.conf);
      }
      poolCompressor = CodecPool.getCompressor(compressor);
      os = compressor.createOutputStream(os, poolCompressor);
    }
    Codec.Encoder encoder = codec.getEncoder(os);
    while (cellScanner.advance()) {
      encoder.write(cellScanner.current());
    }
    encoder.flush();
  } catch (BufferOverflowException | IndexOutOfBoundsException e) {
    throw new DoNotRetryIOException(e);
  } finally {
    os.close();
    if (poolCompressor != null) {
      CodecPool.returnCompressor(poolCompressor);
    }
  }
}
Author: apache | Project: hbase | Lines: 26 | Source: CellBlockBuilder.java
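Note the borrow/return discipline around Hadoop's CodecPool: the Compressor is taken from the pool before wrapping the stream and returned in the finally block so pooled (possibly native) resources are never leaked. The same discipline applies whenever you wrap a stream yourself; here is a minimal standalone sketch using Hadoop's GzipCodec (the codec choice is illustrative):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class PooledCompressionExample {
  public static void main(String[] args) throws IOException {
    CompressionCodec codec =
        ReflectionUtils.newInstance(GzipCodec.class, new Configuration());
    Compressor compressor = CodecPool.getCompressor(codec); // borrow from the pool
    try {
      OutputStream os =
          codec.createOutputStream(new ByteArrayOutputStream(), compressor);
      os.write("hello".getBytes(StandardCharsets.UTF_8));
      os.close(); // finishes the gzip stream
    } finally {
      CodecPool.returnCompressor(compressor); // always return to the pool
    }
  }
}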
Example 13: buildCellBlockStream
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
/**
 * Puts CellScanner Cells into a cell block using passed in <code>codec</code> and/or
 * <code>compressor</code>.
 * @param codec to use for encoding
 * @param compressor to use for encoding
 * @param cellScanner to encode
 * @param pool Pool of ByteBuffers to make use of.
 * @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using
 *         passed in <code>codec</code> and/or <code>compressor</code>; the returned buffer has
 *         been flipped and is ready for reading. Use limit to find total size. If
 *         <code>pool</code> was not null, then this returned ByteBuffer came from there and
 *         should be returned to the pool when done.
 * @throws IOException if encoding the cells fails
 */
public ByteBufferListOutputStream buildCellBlockStream(Codec codec, CompressionCodec compressor,
    CellScanner cellScanner, ByteBufferPool pool) throws IOException {
  if (cellScanner == null) {
    return null;
  }
  if (codec == null) {
    throw new CellScannerButNoCodecException();
  }
  assert pool != null;
  ByteBufferListOutputStream bbos = new ByteBufferListOutputStream(pool);
  encodeCellsTo(bbos, cellScanner, codec, compressor);
  if (bbos.size() == 0) {
    bbos.releaseResources();
    return null;
  }
  return bbos;
}
Author: apache | Project: hbase | Lines: 32 | Source: CellBlockBuilder.java
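A hypothetical caller of buildCellBlockStream could look like the following sketch. Everything in scope (builder, codec, compressor, scanner, pool, channel) is assumed, and getByteBuffers() is assumed to be the accessor for the stream's buffer list:

// Hedged usage sketch for buildCellBlockStream; all names in scope are assumed.
ByteBufferListOutputStream bbos =
    builder.buildCellBlockStream(codec, compressor, scanner, pool);
if (bbos != null) {
  try {
    for (ByteBuffer bb : bbos.getByteBuffers()) { // buffers are flipped, ready to read
      channel.write(bb);
    }
  } finally {
    bbos.releaseResources(); // return pooled buffers when done, per the javadoc
  }
}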
Example 14: timerTests
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
private static void timerTests(final CellBlockBuilder builder, final int count, final int size,
    final Codec codec, final CompressionCodec compressor) throws IOException {
  final int cycles = 1000;
  StopWatch timer = new StopWatch();
  timer.start();
  for (int i = 0; i < cycles; i++) {
    timerTest(builder, timer, count, size, codec, compressor, false);
  }
  timer.stop();
  LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + false + ", count="
      + count + ", size=" + size + ", took=" + timer.getTime() + "ms");
  timer.reset();
  timer.start();
  for (int i = 0; i < cycles; i++) {
    timerTest(builder, timer, count, size, codec, compressor, true);
  }
  timer.stop();
  LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + true + ", count="
      + count + ", size=" + size + ", took=" + timer.getTime() + "ms");
}
Author: apache | Project: hbase | Lines: 21 | Source: TestCellBlockBuilder.java
Example 15: testOne
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  Codec codec = new CellCodec();
  Codec.Encoder encoder = codec.getEncoder(dos);
  final KeyValue kv =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  encoder.write(kv);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = codec.getDecoder(dis);
  assertTrue(decoder.advance()); // First read should pull in the KV
  // Second read should trip over the end-of-stream marker and return false
  assertFalse(decoder.advance());
  dis.close();
  assertEquals(offset, cis.getCount());
}
Author: cloud-software-foundation | Project: c5 | Lines: 24 | Source: TestCellCodec.java
Example 16: readFromCells
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
/**
 * Reads WALEdit from cells.
 * @param cellDecoder Cell decoder.
 * @param expectedCount Expected cell count.
 * @return Number of KVs read.
 */
public int readFromCells(Codec.Decoder cellDecoder, int expectedCount) throws IOException {
  cells.clear();
  cells.ensureCapacity(expectedCount);
  while (cells.size() < expectedCount && cellDecoder.advance()) {
    cells.add(cellDecoder.current());
  }
  return cells.size();
}
Author: fengchen8086 | Project: ditb | Lines: 15 | Source: WALEdit.java
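A hedged usage sketch, modeled on how a WAL reader would drain one edit's cells from a cell block and verify it got them all; walEdit, codec, in, and expectedCount are assumed to be in scope:

// Hedged usage sketch: decode one WALEdit's cells and verify the count
// (throws java.io.EOFException on a short read).
Codec.Decoder decoder = codec.getDecoder(in);
int actual = walEdit.readFromCells(decoder, expectedCount);
if (actual != expectedCount) {
  throw new EOFException("Read " + actual + " cells, expected " + expectedCount);
}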
Example 17: createRpcClientNoCodec
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Override
protected AsyncRpcClient createRpcClientNoCodec(Configuration conf) {
  setConf(conf);
  return new AsyncRpcClient(conf) {
    @Override
    Codec getCodec() {
      return null;
    }
  };
}
Author: fengchen8086 | Project: ditb | Lines: 13 | Source: TestAsyncIPC.java
Example 18: createRpcClientNoCodec
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Override
protected RpcClientImpl createRpcClientNoCodec(Configuration conf) {
  return new RpcClientImpl(conf, HConstants.CLUSTER_ID_DEFAULT) {
    @Override
    Codec getCodec() {
      return null;
    }
  };
}
Author: fengchen8086 | Project: ditb | Lines: 10 | Source: TestIPC.java
Note: The org.apache.hadoop.hbase.codec.Codec examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and any distribution or use must follow the corresponding project's license. Please do not reproduce without permission.