This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList. If you are wondering what the FileDiffList class is used for, how to use it, or are looking for concrete examples, the curated code samples below may help.
The FileDiffList class belongs to the org.apache.hadoop.hdfs.server.namenode.snapshot package. Ten code examples of the FileDiffList class are shown below, sorted by popularity by default.
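Before the individual examples, here is a minimal, hypothetical sketch of the access pattern the examples below rely on: a FileDiffList is normally not constructed directly but obtained from an INodeFile through its FileWithSnapshotFeature. The class name FileDiffListSketch and the method summarizeDiffs are illustrative assumptions and not part of Hadoop; only the calls to getFileWithSnapshotFeature(), getDiffs(), asList(), getLastSnapshotId() and getFileSize() appear in the examples that follow.

import java.util.List;

import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;

public class FileDiffListSketch {

  // Hypothetical helper (assumption, not Hadoop code): walks the snapshot
  // diffs of a file, if any, and returns the number of diffs seen.
  static int summarizeDiffs(INodeFile file) {
    // A file only carries a FileDiffList when it has the snapshot feature.
    FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
    if (sf == null) {
      return 0; // the file has never been captured in a snapshot
    }
    FileDiffList diffs = sf.getDiffs();
    System.out.println("latest snapshot id: " + diffs.getLastSnapshotId());
    // Each FileDiff records, among other things, the file size at the time
    // of one snapshot.
    List<FileDiff> list = diffs.asList();
    for (FileDiff d : list) {
      System.out.println("file size in diff: " + d.getFileSize());
    }
    return list.size();
  }
}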
Example 1: computeQuotaUsage
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; // import the required package/class
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  // Every file contributes one namespace entry.
  long nsDelta = 1;
  final long dsDelta;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiffList fileDiffList = sf.getDiffs();
    int last = fileDiffList.getLastSnapshotId();
    if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
        || last == Snapshot.CURRENT_STATE_ID) {
      // Quota is charged for the current state of the file.
      dsDelta = diskspaceConsumed();
    } else if (last < lastSnapshotId) {
      // The last diff is older than the given snapshot; use the current file size.
      dsDelta = computeFileSize(true, false) * getFileReplication();
    } else {
      // Charge the disk space as of the given snapshot.
      int sid = fileDiffList.getSnapshotById(lastSnapshotId);
      dsDelta = diskspaceConsumed(sid);
    }
  } else {
    dsDelta = diskspaceConsumed();
  }
  counts.add(Quota.NAMESPACE, nsDelta);
  counts.add(Quota.DISKSPACE, dsDelta);
  return counts;
}
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 27, Source: INodeFile.java
Example 2: computeContentSummary4Snapshot
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; // import the required package/class
private void computeContentSummary4Snapshot(final Content.Counts counts) {
  // file length and diskspace only counted for the latest state of the file
  // i.e. either the current state or the last snapshot
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    final FileDiffList diffs = sf.getDiffs();
    final int n = diffs.asList().size();
    counts.add(Content.FILE, n);
    if (n > 0 && sf.isCurrentFileDeleted()) {
      counts.add(Content.LENGTH, diffs.getLast().getFileSize());
    }
    if (sf.isCurrentFileDeleted()) {
      final long lastFileSize = diffs.getLast().getFileSize();
      counts.add(Content.DISKSPACE, lastFileSize * getBlockReplication());
    }
  }
}
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 19, Source: INodeFile.java
Example 3: addSnapshotFeature
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; // import the required package/class
public FileWithSnapshotFeature addSnapshotFeature(FileDiffList diffs) {
  Preconditions.checkState(!isWithSnapshot(),
      "File is already with snapshot");
  FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
  this.addFeature(sf);
  return sf;
}
Developer: naver, Project: hadoop, Lines: 8, Source: INodeFile.java
Example 4: getDiffs
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; // import the required package/class
public FileDiffList getDiffs() {
  FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (sf != null) {
    return sf.getDiffs();
  }
  return null;
}
Developer: naver, Project: hadoop, Lines: 8, Source: INodeFile.java
Example 5: computeContentSummary
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; // import the required package/class
@Override
public final ContentSummaryComputationContext computeContentSummary(
    final ContentSummaryComputationContext summary) {
  final ContentCounts counts = summary.getCounts();
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  long fileLen = 0;
  if (sf == null) {
    fileLen = computeFileSize();
    counts.addContent(Content.FILE, 1);
  } else {
    final FileDiffList diffs = sf.getDiffs();
    final int n = diffs.asList().size();
    counts.addContent(Content.FILE, n);
    if (n > 0 && sf.isCurrentFileDeleted()) {
      fileLen = diffs.getLast().getFileSize();
    } else {
      fileLen = computeFileSize();
    }
  }
  counts.addContent(Content.LENGTH, fileLen);
  counts.addContent(Content.DISKSPACE, storagespaceConsumed());
  if (getStoragePolicyID() != ID_UNSPECIFIED) {
    BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
        getPolicy(getStoragePolicyID());
    List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
    for (StorageType t : storageTypes) {
      if (!t.supportTypeQuota()) {
        continue;
      }
      counts.addTypeSpace(t, fileLen);
    }
  }
  return summary;
}
Developer: naver, Project: hadoop, Lines: 36, Source: INodeFile.java
Example 6: addSnapshotFeature
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; // import the required package/class
private static void addSnapshotFeature(INodeFile file, BlockInfo[] blocks) {
  FileDiff diff = mock(FileDiff.class);
  when(diff.getBlocks()).thenReturn(blocks);
  FileDiffList diffList = new FileDiffList();
  @SuppressWarnings("unchecked")
  ArrayList<FileDiff> diffs = ((ArrayList<FileDiff>) Whitebox.getInternalState(
      diffList, "diffs"));
  // Inject the mocked diff into the private "diffs" list of the FileDiffList.
  diffs.add(diff);
  FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffList);
  file.addFeature(sf);
}
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 12, Source: TestTruncateQuotaUpdate.java
Example 7: computeQuotaUsage
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; // import the required package/class
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  long nsDelta = 1;
  final long dsDelta;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiffList fileDiffList = sf.getDiffs();
    int last = fileDiffList.getLastSnapshotId();
    List<FileDiff> diffs = fileDiffList.asList();
    if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
        || last == Snapshot.CURRENT_STATE_ID) {
      nsDelta += diffs.size();
      dsDelta = diskspaceConsumed();
    } else if (last < lastSnapshotId) {
      dsDelta = computeFileSize(true, false) * getFileReplication();
    } else {
      int sid = fileDiffList.getSnapshotById(lastSnapshotId);
      dsDelta = diskspaceConsumed(sid);
    }
  } else {
    dsDelta = diskspaceConsumed();
  }
  counts.add(Quota.NAMESPACE, nsDelta);
  counts.add(Quota.DISKSPACE, dsDelta);
  return counts;
}
Developer: yncxcw, Project: FlexMap, Lines: 29, Source: INodeFile.java
Example 8: INodeFile
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; // import the required package/class
public INodeFile(INodeFile that, FileDiffList diffs) {
  this(that);
  Preconditions.checkArgument(!that.isWithSnapshot());
  this.addSnapshotFeature(diffs);
}
Developer: naver, Project: hadoop, Lines: 6, Source: INodeFile.java
Example 9: computeQuotaUsage
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; // import the required package/class
@Override
public final QuotaCounts computeQuotaUsage(
    BlockStoragePolicySuite bsps, byte blockStoragePolicyId,
    QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  long nsDelta = 1;
  final long ssDeltaNoReplication;
  short replication;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiffList fileDiffList = sf.getDiffs();
    int last = fileDiffList.getLastSnapshotId();
    if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
        || last == Snapshot.CURRENT_STATE_ID) {
      ssDeltaNoReplication = storagespaceConsumedNoReplication();
      replication = getBlockReplication();
    } else if (last < lastSnapshotId) {
      ssDeltaNoReplication = computeFileSize(true, false);
      replication = getFileReplication();
    } else {
      int sid = fileDiffList.getSnapshotById(lastSnapshotId);
      ssDeltaNoReplication = storagespaceConsumedNoReplication(sid);
      replication = getReplication(sid);
    }
  } else {
    ssDeltaNoReplication = storagespaceConsumedNoReplication();
    replication = getBlockReplication();
  }
  counts.addNameSpace(nsDelta);
  counts.addStorageSpace(ssDeltaNoReplication * replication);
  if (blockStoragePolicyId != ID_UNSPECIFIED) {
    BlockStoragePolicy bsp = bsps.getPolicy(blockStoragePolicyId);
    List<StorageType> storageTypes = bsp.chooseStorageTypes(replication);
    for (StorageType t : storageTypes) {
      if (!t.supportTypeQuota()) {
        continue;
      }
      counts.addTypeSpace(t, ssDeltaNoReplication);
    }
  }
  return counts;
}
Developer: naver, Project: hadoop, Lines: 45, Source: INodeFile.java
Example 10: computeQuotaUsage
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; // import the required package/class
@Override
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    byte blockStoragePolicyId, boolean useCache, int lastSnapshotId) {
  final QuotaCounts counts = new QuotaCounts.Builder().nameSpace(1).build();
  final BlockStoragePolicy bsp = (blockStoragePolicyId ==
      BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) ? null :
      bsps.getPolicy(blockStoragePolicyId);
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    counts.add(storagespaceConsumed(bsp));
    return counts;
  }
  FileDiffList fileDiffList = sf.getDiffs();
  int last = fileDiffList.getLastSnapshotId();
  if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
      || last == Snapshot.CURRENT_STATE_ID) {
    counts.add(storagespaceConsumed(bsp));
    return counts;
  }
  final long ssDeltaNoReplication;
  short replication;
  if (isStriped()) {
    return computeQuotaUsageWithStriped(bsp, counts);
  }
  if (last < lastSnapshotId) {
    ssDeltaNoReplication = computeFileSize(true, false);
    replication = getFileReplication();
  } else {
    int sid = fileDiffList.getSnapshotById(lastSnapshotId);
    ssDeltaNoReplication = computeFileSize(sid);
    replication = getFileReplication(sid);
  }
  counts.addStorageSpace(ssDeltaNoReplication * replication);
  if (bsp != null) {
    List<StorageType> storageTypes = bsp.chooseStorageTypes(replication);
    for (StorageType t : storageTypes) {
      if (!t.supportTypeQuota()) {
        continue;
      }
      counts.addTypeSpace(t, ssDeltaNoReplication);
    }
  }
  return counts;
}
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 51, Source: INodeFile.java
Note: The org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList examples in this article were collected from source code and documentation hosting platforms such as GitHub/MSDocs. The snippets are taken from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors. Please refer to the corresponding project's License before distributing or reusing the code; do not reproduce without permission.