This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.StorageBlockReport. If you are wondering what the StorageBlockReport class is for, how to use it, or what it looks like in real code, the curated class examples below may help.
The StorageBlockReport class belongs to the org.apache.hadoop.hdfs.server.protocol package. A total of 16 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Java code examples.
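Before the individual examples, here is a minimal sketch of the pattern most of them share: build one StorageBlockReport per DatanodeStorage and hand the whole array to DatanodeProtocol.blockReport(). The helper name sendEmptyBlockReport and its parameters are illustrative only (they do not appear in the examples), and the exact blockReport()/BlockReportContext signatures differ slightly across the Hadoop versions shown below.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

/** Hypothetical helper: send an empty block report for a single storage. */
static void sendEmptyBlockReport(DatanodeProtocol nameNodeProto,
    DatanodeRegistration dnRegistration, String blockPoolId) throws IOException {
  // One StorageBlockReport entry per DatanodeStorage (i.e. per volume).
  DatanodeStorage storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  StorageBlockReport[] reports = {
      // Empty block list here; a real datanode builds it from its replica map.
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  // BlockReportContext(totalRpcs, curRpc, reportId) describes how this RPC fits
  // into a possibly split block report; here it is a single, complete report.
  nameNodeProto.blockReport(dnRegistration, blockPoolId, reports,
      new BlockReportContext(1, 0, System.nanoTime()));
}

Older branches, such as Examples 13 and 16 below, instead pass a long[] produced by BlockListAsLongs.getBlockListAsLongs() and call blockReport() without a BlockReportContext argument.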
Example 1: register
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
void register() throws IOException {
// get versions from the namenode
nsInfo = nameNodeProto.versionRequest();
dnRegistration = new DatanodeRegistration(
new DatanodeID(DNS.getDefaultIP("default"),
DNS.getDefaultHost("default", "default"),
DataNode.generateUuid(), getNodePort(dnIdx),
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
new DataStorage(nsInfo),
new ExportedBlockKeys(), VersionInfo.getVersion());
// register datanode
dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
//first block reports
storage = new DatanodeStorage(DatanodeStorage.generateUuid());
final StorageBlockReport[] reports = {
new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
};
nameNodeProto.blockReport(dnRegistration,
nameNode.getNamesystem().getBlockPoolId(), reports,
new BlockReportContext(1, 0, System.nanoTime()));
}
Developer: naver, Project: hadoop, Lines: 24, Source: NNThroughputBenchmark.java
Example 2: waitForBlockReport
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
private void waitForBlockReport(final DatanodeProtocolClientSideTranslatorPB mockNN)
throws Exception {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
Mockito.verify(mockNN).blockReport(
Mockito.<DatanodeRegistration>anyObject(),
Mockito.eq(FAKE_BPID),
Mockito.<StorageBlockReport[]>anyObject(),
Mockito.<BlockReportContext>anyObject());
return true;
} catch (Throwable t) {
LOG.info("waiting on block report: " + t.getMessage());
return false;
}
}
}, 500, 10000);
}
Developer: naver, Project: hadoop, Lines: 20, Source: TestBPOfferService.java
Example 3: waitForBlockReport
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
private void waitForBlockReport(
final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
Mockito.verify(mockNN).blockReport(
Mockito.eq(datanodeRegistration),
Mockito.eq(POOL_ID),
Mockito.<StorageBlockReport[]>anyObject(),
Mockito.<BlockReportContext>anyObject());
return true;
} catch (Throwable t) {
LOG.info("waiting on block report: " + t.getMessage());
return false;
}
}
}, 500, 100000);
}
Developer: naver, Project: hadoop, Lines: 20, Source: TestDatanodeProtocolRetryPolicy.java
Example 4: blockReport_03
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
/**
* Test writes a file and closes it.
* A block report is generated with a bad GS for a single block.
* Block report is forced and the check for # of corrupted blocks is performed.
*
* @throws IOException in case of an error
*/
@Test(timeout=300000)
public void blockReport_03() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
writeFile(METHOD_NAME, FILE_SIZE, filePath);
// all blocks belong to the same file, hence same BP
DataNode dn = cluster.getDataNodes().get(DN_N0);
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
sendBlockReports(dnR, poolId, reports);
printStats();
assertThat("Wrong number of corrupt blocks",
cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
assertThat("Wrong number of PendingDeletion blocks",
cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
}
Developer: naver, Project: hadoop, Lines: 27, Source: BlockReportTestBase.java
Example 5: blockReport_06
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
/**
* Test creates a file and closes it.
* The second datanode is started in the cluster.
* As soon as the replication process is completed, the test runs a
* block report and checks that no under-replicated blocks are left.
*
* @throws IOException in case of an error
*/
@Test(timeout=300000)
public void blockReport_06() throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
writeFile(METHOD_NAME, FILE_SIZE, filePath);
startDNandWait(filePath, true);
// all blocks belong to the same file, hence same BP
DataNode dn = cluster.getDataNodes().get(DN_N1);
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
sendBlockReports(dnR, poolId, reports);
printStats();
assertEquals("Wrong number of PendingReplication Blocks",
0, cluster.getNamesystem().getUnderReplicatedBlocks());
}
Developer: naver, Project: hadoop, Lines: 28, Source: BlockReportTestBase.java
Example 6: verifyCapturedArguments
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
private void verifyCapturedArguments(
ArgumentCaptor<StorageBlockReport[]> captor,
int expectedReportsPerCall,
int expectedTotalBlockCount) {
List<StorageBlockReport[]> listOfReports = captor.getAllValues();
int numBlocksReported = 0;
for (StorageBlockReport[] reports : listOfReports) {
assertThat(reports.length, is(expectedReportsPerCall));
for (StorageBlockReport report : reports) {
BlockListAsLongs blockList = report.getBlocks();
numBlocksReported += blockList.getNumberOfBlocks();
}
}
assert(numBlocksReported >= expectedTotalBlockCount);
}
Developer: naver, Project: hadoop, Lines: 19, Source: TestDnRespectsBlockReportSplitThreshold.java
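Examples 6 and 10 (verifyCapturedArguments) both assume that a Mockito ArgumentCaptor has already recorded every blockReport() RPC made against a mocked NameNode translator. A hedged sketch of that wiring is shown here; mockNN, expectedRpcCalls, expectedReportsPerCall and expectedTotalBlockCount are assumed test fixtures rather than names taken from the original source.

import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;

ArgumentCaptor<StorageBlockReport[]> captor =
    ArgumentCaptor.forClass(StorageBlockReport[].class);
// Verify the expected number of blockReport() RPCs and capture each report array.
Mockito.verify(mockNN, Mockito.times(expectedRpcCalls)).blockReport(
    Mockito.<DatanodeRegistration>anyObject(),
    Mockito.anyString(),
    captor.capture(),
    Mockito.<BlockReportContext>anyObject());
// The captured arrays can then be checked against the split threshold.
verifyCapturedArguments(captor, expectedReportsPerCall, expectedTotalBlockCount);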
Example 7: register
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
void register() throws IOException {
// get versions from the namenode
nsInfo = nameNodeProto.versionRequest();
dnRegistration = new DatanodeRegistration(
new DatanodeID(DNS.getDefaultIP("default"),
DNS.getDefaultHost("default", "default"),
DataNode.generateUuid(), getNodePort(dnIdx),
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
new DataStorage(nsInfo),
new ExportedBlockKeys(), VersionInfo.getVersion());
// register datanode
dnRegistration = dataNodeProto.registerDatanode(dnRegistration);
dnRegistration.setNamespaceInfo(nsInfo);
//first block reports
storage = new DatanodeStorage(DatanodeStorage.generateUuid());
final StorageBlockReport[] reports = {
new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
};
dataNodeProto.blockReport(dnRegistration, bpid, reports,
new BlockReportContext(1, 0, System.nanoTime(), 0L));
}
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: NNThroughputBenchmark.java
Example 8: blockReport
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
String poolId, StorageBlockReport[] reports, BlockReportContext context)
throws IOException {
BlockReportRequestProto.Builder builder = BlockReportRequestProto
.newBuilder().setRegistration(PBHelper.convert(registration))
.setBlockPoolId(poolId);
for (StorageBlockReport r : reports) {
StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
.newBuilder().setStorage(PBHelper.convert(r.getStorage()));
long[] blocks = r.getBlocks();
for (int i = 0; i < blocks.length; i++) {
reportBuilder.addBlocks(blocks[i]);
}
builder.addReports(reportBuilder.build());
}
builder.setContext(PBHelper.convert(context));
BlockReportResponseProto resp;
try {
resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 27, Source: DatanodeProtocolClientSideTranslatorPB.java
Example 9: register
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
void register() throws IOException {
// get versions from the namenode
nsInfo = nameNodeProto.versionRequest();
dnRegistration = new DatanodeRegistration(
new DatanodeID(DNS.getDefaultIP("default"),
DNS.getDefaultHost("default", "default"),
DataNode.generateUuid(), getNodePort(dnIdx),
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
new DataStorage(nsInfo),
new ExportedBlockKeys(), VersionInfo.getVersion());
// register datanode
dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
//first block reports
storage = new DatanodeStorage(DatanodeStorage.generateUuid());
final StorageBlockReport[] reports = {
new StorageBlockReport(storage,
new BlockListAsLongs(null, null).getBlockListAsLongs())
};
nameNodeProto.blockReport(dnRegistration,
nameNode.getNamesystem().getBlockPoolId(), reports,
new BlockReportContext(1, 0, System.nanoTime()));
}
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 25, Source: NNThroughputBenchmark.java
Example 10: verifyCapturedArguments
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
private void verifyCapturedArguments(
ArgumentCaptor<StorageBlockReport[]> captor,
int expectedReportsPerCall,
int expectedTotalBlockCount) {
List<StorageBlockReport[]> listOfReports = captor.getAllValues();
int numBlocksReported = 0;
for (StorageBlockReport[] reports : listOfReports) {
assertThat(reports.length, is(expectedReportsPerCall));
for (StorageBlockReport report : reports) {
BlockListAsLongs blockList = new BlockListAsLongs(report.getBlocks());
numBlocksReported += blockList.getNumberOfBlocks();
}
}
assert(numBlocksReported >= expectedTotalBlockCount);
}
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 19, Source: TestDnRespectsBlockReportSplitThreshold.java
Example 11: blockReport
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
String poolId, StorageBlockReport[] reports) throws IOException {
verifyRequest(nodeReg);
BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
if(blockStateChangeLog.isDebugEnabled()) {
blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
+ "from " + nodeReg + " " + blist.getNumberOfBlocks()
+ " blocks");
}
namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
if (nn.getFSImage().isUpgradeFinalized() && !nn.isStandbyState())
return new FinalizeCommand(poolId);
return null;
}
Developer: ict-carch, Project: hadoop-plus, Lines: 17, Source: NameNodeRpcServer.java
Example 12: blockReport
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
String poolId, StorageBlockReport[] reports) throws IOException {
BlockReportRequestProto.Builder builder = BlockReportRequestProto
.newBuilder().setRegistration(PBHelper.convert(registration))
.setBlockPoolId(poolId);
for (StorageBlockReport r : reports) {
StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
.newBuilder().setStorage(PBHelper.convert(r.getStorage()));
long[] blocks = r.getBlocks();
for (int i = 0; i < blocks.length; i++) {
reportBuilder.addBlocks(blocks[i]);
}
builder.addReports(reportBuilder.build());
}
BlockReportResponseProto resp;
try {
resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
Developer: ict-carch, Project: hadoop-plus, Lines: 25, Source: DatanodeProtocolClientSideTranslatorPB.java
Example 13: register
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
void register() throws IOException {
// get versions from the namenode
nsInfo = nameNodeProto.versionRequest();
dnRegistration = new DatanodeRegistration(
new DatanodeID(DNS.getDefaultIP("default"),
DNS.getDefaultHost("default", "default"),
"", getNodePort(dnIdx),
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
new DataStorage(nsInfo, ""),
new ExportedBlockKeys(), VersionInfo.getVersion());
DataNode.setNewStorageID(dnRegistration);
// register datanode
dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
//first block reports
storage = new DatanodeStorage(dnRegistration.getStorageID());
final StorageBlockReport[] reports = {
new StorageBlockReport(storage,
new BlockListAsLongs(null, null).getBlockListAsLongs())
};
nameNodeProto.blockReport(dnRegistration,
nameNode.getNamesystem().getBlockPoolId(), reports);
}
Developer: ict-carch, Project: hadoop-plus, Lines: 24, Source: NNThroughputBenchmark.java
Example 14: waitForBlockReport
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
private void waitForBlockReport(final DatanodeProtocolClientSideTranslatorPB mockNN)
throws Exception {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
Mockito.verify(mockNN).blockReport(
Mockito.<DatanodeRegistration>anyObject(),
Mockito.eq(FAKE_BPID),
Mockito.<StorageBlockReport[]>anyObject());
return true;
} catch (Throwable t) {
LOG.info("waiting on block report: " + t.getMessage());
return false;
}
}
}, 500, 10000);
}
Developer: ict-carch, Project: hadoop-plus, Lines: 19, Source: TestBPOfferService.java
Example 15: blockReport_06
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
/**
* Test creates a file and closes it.
* The second datanode is started in the cluster.
* As soon as the replication process is completed, the test runs a
* block report and checks that no under-replicated blocks are left.
*
* @throws IOException in case of an error
*/
@Test
public void blockReport_06() throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
startDNandWait(filePath, true);
// all blocks belong to the same file, hence same BP
DataNode dn = cluster.getDataNodes().get(DN_N1);
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] report = { new StorageBlockReport(
new DatanodeStorage(dnR.getStorageID()),
new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
printStats();
assertEquals("Wrong number of PendingReplication Blocks",
0, cluster.getNamesystem().getUnderReplicatedBlocks());
}
Developer: ict-carch, Project: hadoop-plus, Lines: 30, Source: TestBlockReport.java
Example 16: register
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; // import the required package/class
void register() throws IOException {
// get versions from the namenode
nsInfo = nameNodeProto.versionRequest();
dnRegistration = new DatanodeRegistration(
new DatanodeID(DNS.getDefaultIP("default"),
DNS.getDefaultHost("default", "default"),
DataNode.generateUuid(), getNodePort(dnIdx),
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
new DataStorage(nsInfo),
new ExportedBlockKeys(), VersionInfo.getVersion());
// register datanode
dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
//first block reports
storage = new DatanodeStorage(DatanodeStorage.generateUuid());
final StorageBlockReport[] reports = {
new StorageBlockReport(storage,
new BlockListAsLongs(null, null).getBlockListAsLongs())
};
nameNodeProto.blockReport(dnRegistration,
nameNode.getNamesystem().getBlockPoolId(), reports);
}
Developer: yncxcw, Project: FlexMap, Lines: 24, Source: NNThroughputBenchmark.java
Note: The org.apache.hadoop.hdfs.server.protocol.StorageBlockReport class examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms. The code snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution and use should follow the corresponding project's license. Do not republish without permission.