This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator. If you have been wondering what BlockIterator is for and how to use it, the curated examples below should help.
BlockIterator is a nested class of org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks. Five code examples are shown below, sorted by popularity by default.
Example 1: listCorruptFileBlocks
import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator; // import the required package/class
/**
 * @param path Restrict corrupt files to this portion of the namespace.
 * @param startBlockAfter Support for continuation; the returned list is
 *          ordered by block id, and startBlockAfter tells where to resume.
 * @return a list in which each entry describes a corrupt file/block
 * @throws AccessControlException
 * @throws IOException
 */
Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
    String startBlockAfter) throws AccessControlException, IOException {
  readLock();
  try {
    checkSuperuserPrivilege();
    long startBlockId = 0;
    // return a limited number of corrupt files per call
    int count = 0;
    ArrayList<CorruptFileBlockInfo> corruptFiles =
        new ArrayList<CorruptFileBlockInfo>();
    if (startBlockAfter != null) {
      startBlockId = Block.filename2id(startBlockAfter);
    }
    BlockIterator blkIterator = blockManager.getCorruptReplicaBlockIterator();
    while (blkIterator.hasNext()) {
      Block blk = blkIterator.next();
      INode inode = blockManager.getINode(blk);
      // only report blocks that have no live replicas left
      if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
        String src = FSDirectory.getFullPathName(inode);
        if (((startBlockAfter == null) || (blk.getBlockId() > startBlockId))
            && (src.startsWith(path))) {
          corruptFiles.add(new CorruptFileBlockInfo(src, blk));
          count++;
          if (count >= DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED) {
            break;
          }
        }
      }
    }
    LOG.info("list corrupt file blocks returned: " + count);
    return corruptFiles;
  } finally {
    readUnlock();
  }
}
Author: cumulusyebl, Project: cumulus, Lines: 44, Source: FSNamesystem.java
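For orientation, here is a small driver sketch (an addition, not part of the scraped source) showing how a caller might page through the results of example 1 via the startBlockAfter cookie. It would have to live in the same package, since the listed method is package-private. The block field on CorruptFileBlockInfo and the Block.getBlockName() accessor are assumptions based on Hadoop sources of this vintage, and BATCH is a hypothetical stand-in for DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED, which is private to FSNamesystem.

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.util.Collection;

import org.apache.hadoop.hdfs.protocol.Block;

class CorruptBlockPager {
  // Hypothetical stand-in for DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED.
  private static final int BATCH = 100;

  static void listAll(FSNamesystem ns, String path) throws IOException {
    String cookie = null; // null => start from the lowest block id
    while (true) {
      Collection<FSNamesystem.CorruptFileBlockInfo> batch =
          ns.listCorruptFileBlocks(path, cookie);
      Block last = null;
      for (FSNamesystem.CorruptFileBlockInfo info : batch) {
        System.out.println(info); // "<blockname>\t<path>" in most versions
        last = info.block;        // assumed accessible field
      }
      if (last == null || batch.size() < BATCH) {
        break; // a short (or empty) batch means the queue is exhausted
      }
      cookie = last.getBlockName(); // "blk_<id>", parsed by filename2id
    }
  }
}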
Example 2: getCorruptReplicaBlockIterator
import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator; // import the required package/class
/**
 * Return an iterator over the set of blocks for which there are no
 * live replicas (the corrupt-replicas queue).
 */
BlockIterator getCorruptReplicaBlockIterator() {
  return neededReplications
      .iterator(UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
}
Author: rhli, Project: hadoop-EAR, Lines: 8, Source: FSNamesystem.java
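As a minimal usage sketch (again an addition, not from the scraped source): a helper that drains the iterator to count how many blocks currently sit in the corrupt-replicas queue. It is written as if it lived inside FSNamesystem, since the iterator accessor and the locking methods are package-internal, and it follows the same read-lock discipline as examples 1 and 3.

/** Sketch only: count the entries in the corrupt-replicas queue. */
int countCorruptReplicaBlocks() {
  readLock();
  try {
    int n = 0;
    BlockIterator it = getCorruptReplicaBlockIterator();
    while (it.hasNext()) {
      it.next(); // we only need the count, not the Block itself
      n++;
    }
    return n;
  } finally {
    readUnlock();
  }
}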
Example 3: listCorruptFileBlocks
import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator; // import the required package/class
/**
 * @param path Restrict corrupt files to this portion of the namespace.
 * @param cookieTab a one-element array used as a continuation cookie:
 *          cookieTab[0] holds the number of iterator entries already
 *          consumed and is updated before the method returns.
 * @param decommissioningOnly if set, the blocks returned will be the ones
 *          that only have replicas on nodes that are being decommissioned
 * @return a list in which each entry describes a corrupt file/block
 * @throws IOException
 */
Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
    String[] cookieTab, boolean decommissioningOnly) throws IOException {
  readLock();
  synchronized (neededReplications) {
    try {
      if (!isPopulatingReplQueues()) {
        throw new IOException("Cannot run listCorruptFileBlocks because "
            + "replication queues have not been initialized.");
      }
      // return a limited number of corrupt files per call
      int count = 0;
      ArrayList<CorruptFileBlockInfo> corruptFiles =
          new ArrayList<CorruptFileBlockInfo>();
      BlockIterator blkIterator = null;
      if (decommissioningOnly) {
        blkIterator = neededReplications.iterator(0);
      } else {
        blkIterator = getCorruptReplicaBlockIterator();
      }
      if (cookieTab == null) {
        cookieTab = new String[] { null };
      }
      // skip the entries already returned by previous calls
      int skip = getIntCookie(cookieTab[0]);
      for (int i = 0; i < skip && blkIterator.hasNext(); i++) {
        blkIterator.next();
      }
      while (blkIterator.hasNext()) {
        Block blk = blkIterator.next();
        INode inode = blocksMap.getINode(blk);
        skip++;
        if (inode != null) {
          try {
            String src = FSDirectory.getFullPathName(inode);
            if (src != null && src.startsWith(path)) {
              NumberReplicas num = countNodes(blk);
              if (num.liveReplicas == 0) {
                if (decommissioningOnly && num.decommissionedReplicas > 0 ||
                    !decommissioningOnly && num.decommissionedReplicas == 0) {
                  corruptFiles.add(new CorruptFileBlockInfo(src, blk));
                  count++;
                  if (count >= maxCorruptFilesReturned) {
                    break;
                  }
                }
              }
            }
          } catch (IOException ioe) {
            // the inode may have already been deleted; ignore it
            LOG.info("Invalid inode", ioe);
          }
        }
      }
      cookieTab[0] = String.valueOf(skip);
      LOG.info("list corrupt file blocks under " + path + ": " + count);
      return corruptFiles;
    } finally {
      readUnlock();
    }
  }
}
Author: rhli, Project: hadoop-EAR, Lines: 77, Source: FSNamesystem.java
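The cookieTab parameter in examples 3 and 5 implements a simple skip-count protocol: the caller passes a one-element String array, the method reads the number of iterator entries to skip from cookieTab[0], and writes the updated count back before returning. A hedged driver sketch follows (ns, the path "/user", and the println handling are illustrative placeholders; the code would need to sit in the same package):

// Driver sketch: page through all corrupt blocks under /user using
// the cookieTab skip-count protocol.
static void drain(FSNamesystem ns) throws IOException {
  String[] cookieTab = new String[] { null }; // null cookie => skip nothing
  Collection<FSNamesystem.CorruptFileBlockInfo> batch;
  do {
    batch = ns.listCorruptFileBlocks("/user", cookieTab, false);
    for (FSNamesystem.CorruptFileBlockInfo info : batch) {
      System.out.println(info); // application-specific handling goes here
    }
    // cookieTab[0] now carries the updated skip count; reuse it as-is.
  } while (!batch.isEmpty());
}

Because skip advances past every inspected entry, matching or not, an empty batch reliably signals that the iterator is exhausted.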
Example 4: getCorruptReplicaBlockIterator
import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator; // import the required package/class
/**
 * Return an iterator over the set of blocks for which there are no
 * live replicas (the corrupt-replicas queue).
 */
BlockIterator getCorruptReplicaBlockIterator() {
  return neededReplications
      .iterator(UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
}
Author: cumulusyebl, Project: cumulus, Lines: 8, Source: BlockManager.java
Example 5: listCorruptFileBlocks
import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator; // import the required package/class
/**
 * @param path Restrict corrupt files to this portion of the namespace.
 * @param cookieTab a one-element array used as a continuation cookie:
 *          cookieTab[0] holds the number of iterator entries already
 *          consumed and is updated before the method returns.
 * @param decommissioningOnly if set, the blocks returned will be the ones
 *          that only have replicas on nodes that are being decommissioned
 * @return a list in which each entry describes a corrupt file/block
 * @throws AccessControlException if the caller is not the superuser
 * @throws IOException
 */
Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
    String[] cookieTab, boolean decommissioningOnly) throws IOException {
  readLock();
  synchronized (neededReplications) {
    try {
      if (!isPopulatingReplQueues()) {
        throw new IOException("Cannot run listCorruptFileBlocks because "
            + "replication queues have not been initialized.");
      }
      checkSuperuserPrivilege();
      // return a limited number of corrupt files per call
      int count = 0;
      ArrayList<CorruptFileBlockInfo> corruptFiles =
          new ArrayList<CorruptFileBlockInfo>();
      BlockIterator blkIterator = null;
      if (decommissioningOnly) {
        blkIterator = neededReplications.iterator(0);
      } else {
        blkIterator = getCorruptReplicaBlockIterator();
      }
      if (cookieTab == null) {
        cookieTab = new String[] { null };
      }
      // skip the entries already returned by previous calls
      int skip = getIntCookie(cookieTab[0]);
      for (int i = 0; i < skip && blkIterator.hasNext(); i++) {
        blkIterator.next();
      }
      while (blkIterator.hasNext()) {
        Block blk = blkIterator.next();
        INode inode = blocksMap.getINode(blk);
        skip++;
        if (inode != null) {
          String src = FSDirectory.getFullPathName(inode);
          if (src.startsWith(path)) {
            NumberReplicas num = countNodes(blk);
            if (num.liveReplicas == 0) {
              if (decommissioningOnly && num.decommissionedReplicas > 0 ||
                  !decommissioningOnly && num.decommissionedReplicas == 0) {
                corruptFiles.add(new CorruptFileBlockInfo(src, blk));
                count++;
                if (count >= maxCorruptFilesReturned) {
                  break;
                }
              }
            }
          }
        }
      }
      cookieTab[0] = String.valueOf(skip);
      LOG.info("list corrupt file blocks under " + path + ": " + count);
      return corruptFiles;
    } finally {
      readUnlock();
    }
  }
}
Author: iVCE, Project: RDFS, Lines: 73, Source: FSNamesystem.java
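Examples 3 and 5 also depend on a getIntCookie helper that the listing does not include. A plausible reconstruction, inferred from how the cookie is used above (treat it as an assumption, not the verbatim Hadoop code): parse the cookie defensively and fall back to zero.

/**
 * Parse the continuation cookie into a non-negative skip count.
 * A null or malformed cookie means "start from the beginning".
 */
static int getIntCookie(String cookie) {
  int c = 0;
  if (cookie != null) {
    try {
      c = Integer.parseInt(cookie);
    } catch (NumberFormatException e) {
      c = 0; // treat a garbled cookie as a fresh start
    }
  }
  return Math.max(0, c);
}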
Note: the org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator examples in this article were collected from open-source projects on GitHub, MSDocs, and similar code and documentation hosting platforms. The snippets are copyright their original authors; consult the corresponding project's license before using or redistributing them, and do not republish without permission.