This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto. If you have been wondering what the BlockOpResponseProto class does, how to use it, or where to find examples of it in real code, the curated class examples below should help.
The BlockOpResponseProto class belongs to the org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos package. Twenty code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Java code examples.
Example 1: checkBlockOpStatus
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
public static void checkBlockOpStatus(
    BlockOpResponseProto response,
    String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error"
          + ", status message " + response.getMessage()
          + ", " + logInfo
      );
    } else {
      throw new IOException(
          "Got error"
          + ", status message " + response.getMessage()
          + ", " + logInfo
      );
    }
  }
}
Developer: naver, Project: hadoop, Lines of code: 20, Source: DataTransferProtoUtil.java
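For context, here is a minimal caller-side sketch of how this helper is typically used: parse one vint-length-prefixed response off the datanode stream, then let checkBlockOpStatus throw on any non-SUCCESS status. This assumes the Hadoop 2.7-era class locations used in these examples (DataTransferProtoUtil in org.apache.hadoop.hdfs.protocol.datatransfer, PBHelper in org.apache.hadoop.hdfs.protocolPB); the stream and log string are placeholders, not code from the examples above.
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

public class BlockOpStatusCheckSketch {
  // Read one response and fail fast; mirrors the client pattern in
  // Example 2 (inferChecksumTypeByReading) below.
  static BlockOpResponseProto readAndCheck(DataInputStream in, String logInfo)
      throws IOException {
    BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo); // throws on error
    return reply;
  }
}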
Example 2: inferChecksumTypeByReading
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);
  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);
    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
Developer: naver, Project: hadoop, Lines of code: 33, Source: DFSClient.java
Example 3: transferRbw
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));
  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();
  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Developer: naver, Project: hadoop, Lines of code: 21, Source: DFSTestUtil.java
Example 4: checkBlockOpStatus
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
public static void checkBlockOpStatus(
    BlockOpResponseProto response,
    String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error"
          + ", status message " + response.getMessage()
          + ", " + logInfo
      );
    } else {
      throw new IOException(
          "Got error"
          + ", status=" + response.getStatus().name()
          + ", status message " + response.getMessage()
          + ", " + logInfo
      );
    }
  }
}
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 21, Source: DataTransferProtoUtil.java
Example 5: inferChecksumTypeByReading
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.getSocketTimeout(), lb);
  try {
    DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(pair.out, smallBufferSize));
    DataInputStream in = new DataInputStream(pair.in);
    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
    return PBHelperClient.convert(
        reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtilsClient.cleanup(null, pair.in, pair.out);
  }
}
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 35, Source: DFSClient.java
Example 6: transferRbw
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DataStreamer.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));
  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();
  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 21, Source: DFSTestUtil.java
Example 7: checkSuccess
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block "
          + block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block "
          + block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 22, Source: RemoteBlockReader2.java
Example 8: transferRbw
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));
  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]});
  out.flush();
  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Developer: ict-carch, Project: hadoop-plus, Lines of code: 20, Source: DFSTestUtil.java
Example 9: replaceBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(
      destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
      source.getStorageID(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());
  BlockOpResponseProto proto =
      BlockOpResponseProto.parseDelimitedFrom(reply);
  return proto.getStatus() == Status.SUCCESS;
}
Developer: ict-carch, Project: hadoop-plus, Lines of code: 19, Source: TestBlockReplacement.java
Example 10: replaceBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(
      destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, StorageType.DEFAULT,
      BlockTokenSecretManager.DUMMY_TOKEN,
      source.getDatanodeUuid(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());
  BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
  // skip intermediate IN_PROGRESS responses until the final status arrives
  while (proto.getStatus() == Status.IN_PROGRESS) {
    proto = BlockOpResponseProto.parseDelimitedFrom(reply);
  }
  return proto.getStatus() == Status.SUCCESS;
}
Developer: yncxcw, Project: FlexMap, Lines of code: 22, Source: TestBlockReplacement.java
Example 11: checkSuccess
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
static void checkSuccess(BlockOpResponseProto status, Socket sock,
    ExtendedBlock block, String file) throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self=" +
          sock.getLocalSocketAddress() + ", remote=" +
          sock.getRemoteSocketAddress() + ", for file " + file +
          ", for pool " + block.getBlockPoolId() + " block " +
          block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self=" +
          sock.getLocalSocketAddress() + ", remote=" +
          sock.getRemoteSocketAddress() + ", for file " + file +
          ", for pool " + block.getBlockPoolId() + " block " +
          block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
Developer: hopshadoop, Project: hops, Lines of code: 20, Source: RemoteBlockReader2.java
Example 12: transferRbw
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
/**
 * For {@link TestTransferRbw}
 */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
    final DFSClient dfsClient, final DatanodeInfo... datanodes)
    throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream
      .createSocketForPipeline(datanodes[0], datanodes.length, dfsClient);
  final long writeTimeout =
      dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(
      new BufferedOutputStream(NetUtils.getOutputStream(s, writeTimeout),
          HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));
  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]});
  out.flush();
  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Developer: hopshadoop, Project: hops, Lines of code: 24, Source: DFSTestUtil.java
Example 13: replaceBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
      HdfsServerConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
      source.getStorageID(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());
  BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
  return proto.getStatus() == Status.SUCCESS;
}
Developer: hopshadoop, Project: hops, Lines of code: 18, Source: TestBlockReplacement.java
Example 14: replaceBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(
      destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
      source.getDatanodeUuid(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());
  BlockOpResponseProto proto =
      BlockOpResponseProto.parseDelimitedFrom(reply);
  return proto.getStatus() == Status.SUCCESS;
}
Developer: Seagate, Project: hadoop-on-lustre2, Lines of code: 19, Source: TestBlockReplacement.java
Example 15: checkSuccess
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  String logInfo = "for OP_READ_BLOCK"
      + ", self=" + peer.getLocalAddressString()
      + ", remote=" + peer.getRemoteAddressString()
      + ", for file " + file
      + ", for pool " + block.getBlockPoolId()
      + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
  DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
}
Developer: naver, Project: hadoop, Lines of code: 13, Source: RemoteBlockReader2.java
Example 16: receiveResponse
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
/** Receive a block copy response from the input stream */
private void receiveResponse(DataInputStream in) throws IOException {
  BlockOpResponseProto response =
      BlockOpResponseProto.parseFrom(vintPrefixed(in));
  while (response.getStatus() == Status.IN_PROGRESS) {
    // read intermediate responses
    response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
  }
  String logInfo = "block move is failed";
  DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
}
Developer: naver, Project: hadoop, Lines of code: 12, Source: Dispatcher.java
Example 17: writeResponse
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
private static void writeResponse(Status status, String message, OutputStream out)
    throws IOException {
  BlockOpResponseProto.Builder response = BlockOpResponseProto.newBuilder()
      .setStatus(status);
  if (message != null) {
    response.setMessage(message);
  }
  response.build().writeDelimitedTo(out);
  out.flush();
}
Developer: naver, Project: hadoop, Lines of code: 11, Source: DataXceiver.java
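The writeDelimitedTo call above frames the message with a protobuf varint length prefix, which is exactly what the client-side parseDelimitedFrom (Example 3) and PBHelper.vintPrefixed plus parseFrom (Example 2) expect to read back. A self-contained round-trip sketch of that framing, assuming only that hadoop-hdfs and its generated protos are on the classpath; the in-memory streams stand in for the real socket:
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class DelimitedFramingDemo {
  public static void main(String[] args) throws IOException {
    // "Server" side: write a varint-length-prefixed response,
    // as writeResponse does above.
    ByteArrayOutputStream wire = new ByteArrayOutputStream();
    BlockOpResponseProto.newBuilder()
        .setStatus(Status.SUCCESS)
        .build()
        .writeDelimitedTo(wire);
    // "Client" side: read it back with the matching delimited parser.
    BlockOpResponseProto reply = BlockOpResponseProto.parseDelimitedFrom(
        new ByteArrayInputStream(wire.toByteArray()));
    System.out.println(reply.getStatus()); // prints SUCCESS
  }
}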
Example 18: writeSuccessWithChecksumInfo
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
private void writeSuccessWithChecksumInfo(BlockSender blockSender,
    DataOutputStream out) throws IOException {
  ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
      .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
      .setChunkOffset(blockSender.getOffset())
      .build();
  BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setReadOpChecksumInfo(ckInfo)
      .build();
  response.writeDelimitedTo(out);
  out.flush();
}
Developer: naver, Project: hadoop, Lines of code: 16, Source: DataXceiver.java
Example 19: checkAccess
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
private void checkAccess(OutputStream out, final boolean reply,
    final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> t,
    final Op op,
    final BlockTokenSecretManager.AccessMode mode) throws IOException {
  if (datanode.isBlockTokenEnabled) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Checking block access token for block '" + blk.getBlockId()
          + "' with mode '" + mode + "'");
    }
    try {
      datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode);
    } catch (InvalidToken e) {
      try {
        if (reply) {
          BlockOpResponseProto.Builder resp = BlockOpResponseProto.newBuilder()
              .setStatus(ERROR_ACCESS_TOKEN);
          if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
            DatanodeRegistration dnR =
                datanode.getDNRegistrationForBP(blk.getBlockPoolId());
            // NB: Unconditionally using the xfer addr w/o hostname
            resp.setFirstBadLink(dnR.getXferAddr());
          }
          resp.build().writeDelimitedTo(out);
          out.flush();
        }
        LOG.warn("Block token verification failed: op=" + op
            + ", remoteAddress=" + remoteAddress
            + ", message=" + e.getLocalizedMessage());
        throw e;
      } finally {
        IOUtils.closeStream(out);
      }
    }
  }
}
Developer: naver, Project: hadoop, Lines of code: 37, Source: DataXceiver.java
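On the client side, the ERROR_ACCESS_TOKEN status written here is what checkBlockOpStatus (Examples 1 and 4) turns into an InvalidBlockTokenException, which read paths typically catch in order to refetch a block token and retry. A hedged end-to-end sketch of that mapping, with in-memory streams standing in for the real socket and an illustrative message string:
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class AccessTokenErrorDemo {
  public static void main(String[] args) throws IOException {
    // Datanode side (cf. checkAccess above): reply with ERROR_ACCESS_TOKEN.
    ByteArrayOutputStream wire = new ByteArrayOutputStream();
    BlockOpResponseProto.newBuilder()
        .setStatus(Status.ERROR_ACCESS_TOKEN)
        .setMessage("block token expired") // illustrative message text
        .build()
        .writeDelimitedTo(wire);
    // Client side: checkBlockOpStatus maps the status to
    // InvalidBlockTokenException (a subclass of IOException).
    BlockOpResponseProto reply = BlockOpResponseProto.parseDelimitedFrom(
        new ByteArrayInputStream(wire.toByteArray()));
    try {
      DataTransferProtoUtil.checkBlockOpStatus(reply, "demo read");
    } catch (IOException e) {
      System.out.println("caught " + e.getClass().getSimpleName());
    }
  }
}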
Example 20: sendResponse
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; // import the required package/class
private void sendResponse(Status status, String firstBadLink,
    String message,
    DataOutputStream out)
    throws IOException {
  Builder builder = BlockOpResponseProto.newBuilder().setStatus(status);
  if (firstBadLink != null) {
    builder.setFirstBadLink(firstBadLink);
  }
  if (message != null) {
    builder.setMessage(message);
  }
  builder.build().writeDelimitedTo(out);
}
Developer: naver, Project: hadoop, Lines of code: 15, Source: TestDataTransferProtocol.java
Note: the org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto class examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are drawn from open-source projects contributed by the community; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not repost without permission.