This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff. If you are wondering what the FileDiff class does, how to use it, or want to see it in real code, the curated examples below should help.
The FileDiff class belongs to the org.apache.hadoop.hdfs.server.namenode.snapshot package. Nine code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: getBlocks
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff; // import the required package/class
/** @return blocks of the file corresponding to the snapshot. */
public BlockInfo[] getBlocks(int snapshot) {
  if (snapshot == CURRENT_STATE_ID || getDiffs() == null) {
    return getBlocks();
  }
  // find blocks stored in snapshot diffs (for truncate)
  FileDiff diff = getDiffs().getDiffById(snapshot);
  // note that currently FileDiff can only store contiguous blocks
  BlockInfo[] snapshotBlocks = diff == null ? getBlocks() : diff.getBlocks();
  if (snapshotBlocks != null) {
    return snapshotBlocks;
  }
  // Blocks are not in the current snapshot
  // Find next snapshot with blocks present or return current file blocks
  snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot);
  return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks;
}
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 18 | Source: INodeFile.java
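The fallback order above (exact snapshot diff, then the first later snapshot that recorded blocks, then the live block list) can be illustrated with a self-contained sketch. The SnapshotBlocks class and its TreeMap-based diff store are hypothetical stand-ins for FileDiffList, and the diff-search semantics of getDiffById are simplified here:

import java.util.NavigableMap;
import java.util.TreeMap;

// Hypothetical stand-in for FileDiffList: snapshot id -> blocks recorded
// by that diff (null means the diff recorded no block change).
class SnapshotBlocks {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE;
  private final NavigableMap<Integer, long[]> diffs = new TreeMap<>();
  private final long[] currentBlocks;

  SnapshotBlocks(long[] currentBlocks) { this.currentBlocks = currentBlocks; }

  void addDiff(int snapshotId, long[] blocks) { diffs.put(snapshotId, blocks); }

  /** Mirrors the fallback order of INodeFile#getBlocks(int snapshot). */
  long[] getBlocks(int snapshotId) {
    if (snapshotId == CURRENT_STATE_ID || diffs.isEmpty()) {
      return currentBlocks;                       // live state requested
    }
    long[] blocks = diffs.get(snapshotId);        // blocks recorded by this diff
    if (blocks != null) {
      return blocks;
    }
    // findLaterSnapshotBlocks: the first later diff that recorded blocks
    for (long[] later : diffs.tailMap(snapshotId, false).values()) {
      if (later != null) {
        return later;
      }
    }
    return currentBlocks;                         // no diff recorded blocks
  }
}

The key point is that a snapshot read never fails: if no diff recorded the blocks, the current state is the answer.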
Example 2: getBlocks
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff; // import the required package/class
/** @return blocks of the file corresponding to the snapshot. */
public BlockInfoContiguous[] getBlocks(int snapshot) {
  if (snapshot == CURRENT_STATE_ID || getDiffs() == null) {
    return getBlocks();
  }
  FileDiff diff = getDiffs().getDiffById(snapshot);
  BlockInfoContiguous[] snapshotBlocks =
      diff == null ? getBlocks() : diff.getBlocks();
  if (snapshotBlocks != null) {
    return snapshotBlocks;
  }
  // Blocks are not in the current snapshot
  // Find next snapshot with blocks present or return current file blocks
  snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot);
  return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks;
}
Developer: naver | Project: hadoop | Lines: 15 | Source: INodeFile.java
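This appears to be the same lookup as Example 1, taken from an earlier Hadoop codebase where block metadata is typed as BlockInfoContiguous; the later trunk code in Example 1 generalizes it to the BlockInfo hierarchy. The return type changes accordingly, but the fallback order is identical.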
Example 3: computeFileSize
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff; // import the required package/class
/**
 * Compute the file size of the current file if snapshotId is
 * CURRENT_STATE_ID; otherwise, get the file size recorded in the
 * given snapshot.
 */
public final long computeFileSize(int snapshotId) {
  FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (snapshotId != CURRENT_STATE_ID && sf != null) {
    final FileDiff d = sf.getDiffs().getDiffById(snapshotId);
    if (d != null) {
      return d.getFileSize();
    }
  }
  return computeFileSize(true, false);
}
Developer: naver | Project: hadoop | Lines: 15 | Source: INodeFile.java
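For example (hypothetical numbers): if a file was 1 GB when snapshot s5 was taken and was later truncated to 100 MB, the truncate created a diff recording the 1 GB size, so computeFileSize(s5) returns 1 GB while computeFileSize(CURRENT_STATE_ID) computes 100 MB from the live blocks.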
Example 4: storagespaceConsumedNoReplication
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff; // import the required package/class
public final long storagespaceConsumedNoReplication() {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    return computeFileSize(true, true);
  }
  // Collect all distinct blocks
  long size = 0;
  Set<Block> allBlocks = new HashSet<Block>(Arrays.asList(getBlocks()));
  List<FileDiff> diffs = sf.getDiffs().asList();
  for (FileDiff diff : diffs) {
    BlockInfoContiguous[] diffBlocks = diff.getBlocks();
    if (diffBlocks != null) {
      allBlocks.addAll(Arrays.asList(diffBlocks));
    }
  }
  for (Block block : allBlocks) {
    size += block.getNumBytes();
  }
  // check if the last block is under construction
  BlockInfoContiguous lastBlock = getLastBlock();
  if (lastBlock != null &&
      lastBlock instanceof BlockInfoContiguousUnderConstruction) {
    size += getPreferredBlockSize() - lastBlock.getNumBytes();
  }
  return size;
}
Developer: naver | Project: hadoop | Lines: 28 | Source: INodeFile.java
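The HashSet union above depends on Block's identity-based equals/hashCode, so a block shared between the current file and a snapshot diff is charged only once. A minimal, self-contained sketch of the same accounting; the Block record here is a hypothetical stand-in for org.apache.hadoop.hdfs.protocol.Block:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DistinctBlockSpace {
  // Hypothetical stand-in: equality by block id, as with the HDFS Block class.
  record Block(long id, long numBytes) {
    @Override public boolean equals(Object o) {
      return o instanceof Block b && b.id == id;
    }
    @Override public int hashCode() { return Long.hashCode(id); }
  }

  public static void main(String[] args) {
    List<Block> current = List.of(new Block(1, 128), new Block(2, 64));
    List<Block> snapshotDiff = List.of(new Block(1, 128), new Block(3, 128));

    Set<Block> all = new HashSet<>(current);
    all.addAll(snapshotDiff);   // block 1 is shared; added only once

    long size = all.stream().mapToLong(Block::numBytes).sum();
    System.out.println(size);   // 320, not 448
  }
}

Without the set, the shared block would be counted twice (448 instead of 320 bytes), inflating the quota charged to snapshotted files.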
Example 5: computeQuotaDeltaForTruncate
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff; // import the required package/class
/**
 * Compute the quota usage change for a truncate operation.
 * @param newLength the length after truncation
 * @return the quota usage delta (not considering the replication factor)
 */
long computeQuotaDeltaForTruncate(final long newLength) {
  final BlockInfoContiguous[] blocks = getBlocks();
  if (blocks == null || blocks.length == 0) {
    return 0;
  }
  int n = 0;
  long size = 0;
  for (; n < blocks.length && newLength > size; n++) {
    size += blocks[n].getNumBytes();
  }
  final boolean onBoundary = size == newLength;
  long truncateSize = 0;
  for (int i = (onBoundary ? n : n - 1); i < blocks.length; i++) {
    truncateSize += blocks[i].getNumBytes();
  }
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiff diff = sf.getDiffs().getLast();
    BlockInfoContiguous[] sblocks = diff != null ? diff.getBlocks() : null;
    if (sblocks != null) {
      for (int i = (onBoundary ? n : n - 1); i < blocks.length
          && i < sblocks.length && blocks[i].equals(sblocks[i]); i++) {
        truncateSize -= blocks[i].getNumBytes();
      }
    }
  }
  return onBoundary ? -truncateSize : (getPreferredBlockSize() - truncateSize);
}
Developer: naver | Project: hadoop | Lines: 37 | Source: INodeFile.java
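A worked example with hypothetical numbers: given three blocks of 128, 128 and 64 MB, a preferred block size of 128 MB, no snapshot feature, and newLength = 200 MB, the first loop stops after two blocks (size = 256 MB, not on a block boundary), so truncateSize = 128 + 64 = 192 MB and the method returns 128 − 192 = −64 MB. The file shrinks by the 64 MB third block, while the partially truncated second block is still charged at the full preferred size because it becomes under construction during truncate recovery.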
Example 6: storagespaceConsumedContiguous
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff; // import the required package/class
public final QuotaCounts storagespaceConsumedContiguous(
    BlockStoragePolicy bsp) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  final Iterable<BlockInfo> blocks;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    blocks = Arrays.asList(getBlocks());
  } else {
    // Collect all distinct blocks
    Set<BlockInfo> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
    List<FileDiff> diffs = sf.getDiffs().asList();
    for (FileDiff diff : diffs) {
      BlockInfo[] diffBlocks = diff.getBlocks();
      if (diffBlocks != null) {
        allBlocks.addAll(Arrays.asList(diffBlocks));
      }
    }
    blocks = allBlocks;
  }
  final short replication = getPreferredBlockReplication();
  for (BlockInfo b : blocks) {
    long blockSize = b.isComplete() ? b.getNumBytes() :
        getPreferredBlockSize();
    counts.addStorageSpace(blockSize * replication);
    if (bsp != null) {
      List<StorageType> types = bsp.chooseStorageTypes(replication);
      for (StorageType t : types) {
        if (t.supportTypeQuota()) {
          counts.addTypeSpace(t, blockSize);
        }
      }
    }
  }
  return counts;
}
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 37 | Source: INodeFile.java
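The per-type accounting at the end can be sketched independently of HDFS: the policy picks one storage type per replica, and only types that enforce quotas are charged. The enum and the ONE_SSD-style placement below are hypothetical simplifications of StorageType and BlockStoragePolicy#chooseStorageTypes:

import java.util.EnumMap;
import java.util.List;
import java.util.Map;

public class TypeQuotaSketch {
  // Hypothetical simplification of org.apache.hadoop.fs.StorageType:
  // transient storage (RAM_DISK) is exempt from type quotas, as in HDFS.
  enum StorageType {
    DISK(true), SSD(true), RAM_DISK(false);
    final boolean supportsTypeQuota;
    StorageType(boolean q) { supportsTypeQuota = q; }
  }

  public static void main(String[] args) {
    long blockSize = 128L << 20;  // one complete 128 MB block
    // Hypothetical ONE_SSD-style placement: one SSD replica, two on DISK.
    List<StorageType> chosen = List.of(StorageType.SSD,
        StorageType.DISK, StorageType.DISK);

    long storageSpace = blockSize * chosen.size();  // raw bytes, all replicas
    Map<StorageType, Long> typeSpace = new EnumMap<>(StorageType.class);
    for (StorageType t : chosen) {
      if (t.supportsTypeQuota) {
        typeSpace.merge(t, blockSize, Long::sum);   // charged per replica of that type
      }
    }
    System.out.println(storageSpace + " raw; per-type: " + typeSpace);
  }
}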
Example 7: addSnapshotFeature
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff; // import the required package/class
private static void addSnapshotFeature(INodeFile file, BlockInfo[] blocks) {
  FileDiff diff = mock(FileDiff.class);
  when(diff.getBlocks()).thenReturn(blocks);
  FileDiffList diffList = new FileDiffList();
  @SuppressWarnings("unchecked")
  ArrayList<FileDiff> diffs = (ArrayList<FileDiff>) Whitebox
      .getInternalState(diffList, "diffs");
  diffs.add(diff);
  FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffList);
  file.addFeature(sf);
}
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 12 | Source: TestTruncateQuotaUpdate.java
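A hypothetical variant of the same stubbing pattern, for tests that also need the snapshot-time file size read by computeFileSize (Example 3); it assumes the same Mockito static imports as the helper above:

// Hypothetical helper: stubs both accessors that snapshot-aware code reads.
private static FileDiff mockDiff(BlockInfo[] blocks, long fileSize) {
  FileDiff diff = mock(FileDiff.class);
  when(diff.getBlocks()).thenReturn(blocks);     // blocks recorded by the diff
  when(diff.getFileSize()).thenReturn(fileSize); // file length at snapshot time
  return diff;
}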
Example 8: computeQuotaUsage
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff; // import the required package/class
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  long nsDelta = 1;
  final long dsDelta;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiffList fileDiffList = sf.getDiffs();
    int last = fileDiffList.getLastSnapshotId();
    List<FileDiff> diffs = fileDiffList.asList();
    if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
        || last == Snapshot.CURRENT_STATE_ID) {
      nsDelta += diffs.size();
      dsDelta = diskspaceConsumed();
    } else if (last < lastSnapshotId) {
      dsDelta = computeFileSize(true, false) * getFileReplication();
    } else {
      int sid = fileDiffList.getSnapshotById(lastSnapshotId);
      dsDelta = diskspaceConsumed(sid);
    }
  } else {
    dsDelta = diskspaceConsumed();
  }
  counts.add(Quota.NAMESPACE, nsDelta);
  counts.add(Quota.DISKSPACE, dsDelta);
  return counts;
}
Developer: yncxcw | Project: FlexMap | Lines: 29 | Source: INodeFile.java
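As a worked example with hypothetical numbers: counting up to the current state, a file with three snapshot diffs contributes nsDelta = 1 + 3 = 4 namespace units (the inode plus one per diff), and dsDelta is the full diskspaceConsumed() across all replicas; counting only up to an older snapshot id instead takes the disk usage from that snapshot's state.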
Example 9: computeQuotaDeltaForTruncate
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff; // import the required package/class
/**
 * Compute the quota usage change for a truncate operation.
 * @param newLength the length after truncation
 * TODO: properly handle striped blocks (HDFS-7622)
 */
void computeQuotaDeltaForTruncate(
    long newLength, BlockStoragePolicy bsps,
    QuotaCounts delta) {
  final BlockInfo[] blocks = getBlocks();
  if (blocks == null || blocks.length == 0) {
    return;
  }
  long size = 0;
  for (BlockInfo b : blocks) {
    size += b.getNumBytes();
  }
  BlockInfo[] sblocks = null;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiff diff = sf.getDiffs().getLast();
    sblocks = diff != null ? diff.getBlocks() : null;
  }
  for (int i = blocks.length - 1; i >= 0 && size > newLength;
      size -= blocks[i].getNumBytes(), --i) {
    BlockInfo bi = blocks[i];
    long truncatedBytes;
    if (size - newLength < bi.getNumBytes()) {
      // Record a full block, as the last block will be copied during
      // truncate recovery
      truncatedBytes = bi.getNumBytes() - getPreferredBlockSize();
    } else {
      truncatedBytes = bi.getNumBytes();
    }
    // The block exists in a snapshot; add back the truncated bytes,
    // since the snapshot still references the old block
    if (sblocks != null && i < sblocks.length && bi.equals(sblocks[i])) {
      truncatedBytes -= bi.getNumBytes();
    }
    delta.addStorageSpace(-truncatedBytes * bi.getReplication());
    if (bsps != null) {
      List<StorageType> types = bsps.chooseStorageTypes(bi.getReplication());
      for (StorageType t : types) {
        if (t.supportTypeQuota()) {
          delta.addTypeSpace(t, -truncatedBytes);
        }
      }
    }
  }
}
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 55 | Source: INodeFile.java
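A worked trace with hypothetical numbers shows the snapshot cancellation: with blocks of 128, 128 and 64 MB all recorded in the latest snapshot diff, preferred block size 128 MB, and newLength = 100 MB, the 64 MB and second 128 MB blocks each compute truncatedBytes that the sblocks check cancels back to zero; for the first block (truncated mid-block), truncatedBytes is 128 − 128 = 0 before the check and −128 MB after it, so delta grows by 128 MB times the replication factor. In other words, truncating a snapshotted file charges a full new block for the copy-on-truncate of the last remaining block, while the snapshot keeps the old blocks alive.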
Note: the org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff examples in this article are collected from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from projects contributed by open-source developers; copyright remains with the original authors, and redistribution or reuse should follow each project's license. Do not republish without permission.