This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.compactions.CompactSelection. If you are wondering what CompactSelection is for, how to use it, or are looking for usage examples, the curated class examples below may help.
The CompactSelection class belongs to the org.apache.hadoop.hbase.regionserver.compactions package. Five code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: doMinorCompact
import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection; // import the required package/class
private void doMinorCompact() throws IOException {
  // hfilePath = lcc/AAA/.tmp/BBB/
  // destLCIndexDir = lcc/AAA/.lctmp/BBB.lccindex
  // every qualifier must be merged separately!
  // merge B1-Q1, B2-Q1, B3-Q1 into B-target-Q1
  // should not use
  for (Entry<Path, List<Path>> entry : allIdxPaths.entrySet()) {
    List<StoreFile> lccSFList = new ArrayList<StoreFile>();
    List<Path> statPathList = new ArrayList<Path>();
    for (Path lcIdxPath : entry.getValue()) {
      StoreFile lccSF =
          new StoreFile(store.localfs, lcIdxPath, store.conf, store.cacheConf, store
              .getFamily().getBloomFilterType(), store.getDataBlockEncoder());
      lccSF.createReader();
      lccSFList.add(lccSF);
      statPathList.add(new Path(lcIdxPath.getParent(), lcIdxPath.getName()
          + LCCIndexConstant.LC_STAT_FILE_SUFFIX));
    }
    CompactSelection lccIndexFilesToCompactCS = new CompactSelection(store.conf, lccSFList);
    CompactionRequest lccCR =
        new CompactionRequest(request.getHRegion(), store, lccIndexFilesToCompactCS,
            request.isMajor(), request.getPriority());
    long maxId = StoreFile.getMaxSequenceIdInList(lccSFList, true);
    Path destPath = new Path(destLCIndexDir, entry.getKey().getName());
    // compact the stat file first, then compact the store file
    store.compactor.mWinterCompactStatFile(store.localfs, statPathList, new Path(
        destLCIndexDir, entry.getKey().getName() + LCCIndexConstant.LC_STAT_FILE_SUFFIX));
    StoreFile.Writer writer = store.compactor.lcIdxCompact(lccCR, maxId, destPath, true);
    for (StoreFile sf : lccSFList) {
      sf.closeReader(true);
    }
    if (printForDebug) {
      System.out.println("winter minor compact flush to: " + writer.getPath());
    }
  }
}
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 37, Source: CompactJobQueue.java
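For orientation, here is a minimal sketch of the CompactSelection plumbing that Example 1 relies on, using only the API surface visible on this page: the (Configuration, List<StoreFile>) constructor, getFilesToCompact(), getSubList(), and emptyFileList(). The helper method name and its minFiles/maxFiles parameters are hypothetical, not part of the original project.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;

public class CompactSelectionSketch {
  // Hypothetical helper: wrap candidate files, keep at most maxFiles of the
  // oldest ones, and clear the selection when too few files remain.
  static CompactSelection selectOldest(Configuration conf, List<StoreFile> candidates,
      int minFiles, int maxFiles) {
    CompactSelection selection = new CompactSelection(conf, candidates);
    int count = selection.getFilesToCompact().size();
    // getSubList narrows the selection to the half-open index range [0, n)
    selection = selection.getSubList(0, Math.min(count, maxFiles));
    if (selection.getFilesToCompact().size() < minFiles) {
      // an emptied file list signals the caller to skip compaction
      selection.emptyFileList();
    }
    return selection;
  }
}

Example 1 then feeds such a selection into a CompactionRequest together with the region, store, major/minor flag, and priority.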
Example 2: defaultCompactionSelection
import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection; // import the required package/class
private CompactSelection defaultCompactionSelection(CompactSelection compactSelection) {
  // we're doing a minor compaction, let's see what files are applicable
  int start = 0;
  double r = compactSelection.getCompactSelectionRatio();
  // get store file sizes for incremental compacting selection.
  int countOfFiles = compactSelection.getFilesToCompact().size();
  long[] fileSizes = new long[countOfFiles];
  long[] sumSize = new long[countOfFiles];
  for (int i = countOfFiles - 1; i >= 0; --i) {
    StoreFile file = compactSelection.getFilesToCompact().get(i);
    fileSizes[i] = file.getReader().length();
    // calculate the sum of fileSizes[i, i + maxFilesToCompact - 1) for the algorithm
    int tooFar = i + this.maxFilesToCompact - 1;
    sumSize[i] =
        fileSizes[i] + ((i + 1 < countOfFiles) ? sumSize[i + 1] : 0)
            - ((tooFar < countOfFiles) ? fileSizes[tooFar] : 0);
  }
  /*
   * Start at the oldest file and stop when you find the first file that meets compaction
   * criteria: (1) a recently-flushed, small file (i.e. <= minCompactSize), OR (2) within the
   * compactRatio of sum(newer_files). Given normal skew, any newer files will also meet this
   * criteria.
   *
   * Additional note: if fileSizes.size() >> maxFilesToCompact, we will recurse on compact().
   * Consider the oldest files first to avoid a situation where we always compact
   * [end-threshold, end); then the last file becomes an aggregate of the previous compactions.
   */
  while (countOfFiles - start >= this.minFilesToCompact
      && fileSizes[start] > Math.max(minCompactSize, (long) (sumSize[start + 1] * r))) {
    ++start;
  }
  int end = Math.min(countOfFiles, start + this.maxFilesToCompact);
  long totalSize = fileSizes[start] + ((start + 1 < countOfFiles) ? sumSize[start + 1] : 0);
  compactSelection = compactSelection.getSubList(start, end);
  // if we don't have enough files to compact, just wait
  if (compactSelection.getFilesToCompact().size() < this.minFilesToCompact) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Skipped compaction of " + this + ". Only " + (end - start)
          + " file(s) of size " + StringUtils.humanReadableInt(totalSize)
          + " have met compaction criteria.");
    }
    compactSelection.emptyFileList();
    return compactSelection;
  }
  return compactSelection;
}
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 49, Source: Store.java
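To make the ratio test concrete, here is a self-contained sketch (plain Java, no HBase dependencies) that runs the same sliding-window arithmetic on synthetic file sizes; minFiles, maxFiles, minCompactSize, and ratio are hypothetical stand-ins for the Store fields used above.

public class RatioSelectionDemo {
  public static void main(String[] args) {
    // oldest file first, mirroring the getFilesToCompact() ordering
    long[] fileSizes = { 900, 500, 120, 100, 80 };
    int minFiles = 3, maxFiles = 4;
    long minCompactSize = 16;
    double ratio = 1.2;

    int n = fileSizes.length;
    long[] sumSize = new long[n];
    // sumSize[i] = sum of fileSizes over the window [i, i + maxFiles - 1), built right to left
    for (int i = n - 1; i >= 0; --i) {
      int tooFar = i + maxFiles - 1;
      sumSize[i] = fileSizes[i]
          + ((i + 1 < n) ? sumSize[i + 1] : 0)
          - ((tooFar < n) ? fileSizes[tooFar] : 0);
    }

    // skip old files that are too large relative to the newer files behind them
    int start = 0;
    while (n - start >= minFiles
        && fileSizes[start] > Math.max(minCompactSize, (long) (sumSize[start + 1] * ratio))) {
      ++start;
    }
    int end = Math.min(n, start + maxFiles);

    // prints "would compact files [2, 5)": the 900 and 500 files are skipped
    System.out.println("would compact files [" + start + ", " + end + ")");
  }
}

With these numbers, 900 > 1.2 * (500 + 120 + 100) fails only after the first two files are skipped, so the selection settles on the three small, newer files, which is exactly the behavior the block comment in Example 2 describes.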
Example 3: defaultCompactionSelection
import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection; // import the required package/class
private CompactSelection defaultCompactionSelection(CompactSelection compactSelection) {
  // we're doing a minor compaction, let's see what files are applicable
  int start = 0;
  double r = compactSelection.getCompactSelectionRatio();
  // get store file sizes for incremental compacting selection.
  int countOfFiles = compactSelection.getFilesToCompact().size();
  long[] fileSizes = new long[countOfFiles];
  long[] sumSize = new long[countOfFiles];
  for (int i = countOfFiles - 1; i >= 0; --i) {
    StoreFile file = compactSelection.getFilesToCompact().get(i);
    fileSizes[i] = file.getReader().length();
    // calculate the sum of fileSizes[i, i + maxFilesToCompact - 1) for the algorithm
    int tooFar = i + this.maxFilesToCompact - 1;
    sumSize[i] = fileSizes[i]
        + ((i + 1 < countOfFiles) ? sumSize[i + 1] : 0)
        - ((tooFar < countOfFiles) ? fileSizes[tooFar] : 0);
  }
  /* Start at the oldest file and stop when you find the first file that
   * meets compaction criteria:
   * (1) a recently-flushed, small file (i.e. <= minCompactSize)
   * OR
   * (2) within the compactRatio of sum(newer_files)
   * Given normal skew, any newer files will also meet this criteria.
   *
   * Additional note:
   * If fileSizes.size() >> maxFilesToCompact, we will recurse on
   * compact(). Consider the oldest files first to avoid a
   * situation where we always compact [end-threshold, end). Then, the
   * last file becomes an aggregate of the previous compactions.
   */
  while (countOfFiles - start >= this.minFilesToCompact &&
      fileSizes[start] >
          Math.max(minCompactSize, (long) (sumSize[start + 1] * r))) {
    ++start;
  }
  int end = Math.min(countOfFiles, start + this.maxFilesToCompact);
  long totalSize = fileSizes[start]
      + ((start + 1 < countOfFiles) ? sumSize[start + 1] : 0);
  compactSelection = compactSelection.getSubList(start, end);
  // if we don't have enough files to compact, just wait
  if (compactSelection.getFilesToCompact().size() < this.minFilesToCompact) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Skipped compaction of " + this
          + ". Only " + (end - start) + " file(s) of size "
          + StringUtils.humanReadableInt(totalSize)
          + " have met compaction criteria.");
    }
    compactSelection.emptyFileList();
    return compactSelection;
  }
  return compactSelection;
}
Author: wanhao, Project: IRIndex, Lines: 57, Source: Store.java
Example 4: compactSelection
import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection; // import the required package/class
/**
 * Algorithm to choose which files to compact; see
 * {@link #compactSelection(java.util.List, int)}.
 * @param candidates the store files eligible for compaction
 * @return the selection of files to compact
 * @throws IOException
 */
CompactSelection compactSelection(List<StoreFile> candidates) throws IOException {
  return compactSelection(candidates, NO_PRIORITY);
}
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 10, Source: Store.java
Example 5: compactSelection
import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection; // import the required package/class
/**
 * Algorithm to choose which files to compact; see
 * {@link #compactSelection(java.util.List, int)}.
 * @param candidates the store files eligible for compaction
 * @return the selection of files to compact
 * @throws IOException
 */
CompactSelection compactSelection(List<StoreFile> candidates) throws IOException {
  return compactSelection(candidates, NO_PRIORITY);
}
Author: wanhao, Project: IRIndex, Lines: 10, Source: Store.java
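As a usage note: the single-argument overload in Examples 4 and 5 simply delegates to the two-argument form with NO_PRIORITY. A hedged sketch of a caller follows; since compactSelection has package-private visibility in HBase 0.94, this (hypothetical) caller class must live in the org.apache.hadoop.hbase.regionserver package.

package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;

class CompactSelectionCaller {
  // Hypothetical helper: returns true if the store selected any files to compact.
  static boolean hasCompactionWork(Store store, List<StoreFile> candidates)
      throws IOException {
    // delegates to compactSelection(candidates, NO_PRIORITY), as shown above
    CompactSelection selection = store.compactSelection(candidates);
    // an empty file list means the store decided to wait (see Examples 2 and 3)
    return !selection.getFilesToCompact().isEmpty();
  }
}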
Note: The org.apache.hadoop.hbase.regionserver.compactions.CompactSelection class examples in this article are collected from GitHub, MSDocs, and other source-code and documentation hosting platforms; the snippets were selected from open-source projects contributed by their respective developers. Copyright of the source code belongs to the original authors; consult each project's license before redistributing or reusing the code. Do not repost without permission.