This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor. If you have been struggling with questions such as: What exactly does the Java DefaultCompactor class do? How is DefaultCompactor used? Where can I find examples of it in use? then the curated class examples here may help.
The DefaultCompactor class belongs to the org.apache.hadoop.hbase.regionserver.compactions package. Seven code examples of the class are shown below, sorted by popularity by default.
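Before the examples, here is a minimal sketch of the pattern they all share: obtain the store's compactor from its StoreEngine, downcast it to DefaultCompactor, and run a test compaction. This is only a sketch under assumptions taken from the examples below (an HBase version in which HStore.storeEngine is visible to test code and DefaultCompactor.compactForTesting exists); the helper class and method names are hypothetical.

import java.io.IOException;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;

// Hypothetical helper illustrating the pattern used by Examples 1-7.
// Note: storeEngine is package-visible, so a class like this would need to
// live in org.apache.hadoop.hbase.regionserver, as the test classes below do.
final class DefaultCompactorSketch {
  private DefaultCompactorSketch() {}

  // Compacts the store's current files in one pass and returns the paths of
  // the new file(s); compactForTesting leaves them in the store's tmp
  // directory without committing them.
  static List<Path> compactAll(HStore store, boolean isMajor) throws IOException {
    Collection<StoreFile> storeFiles = store.getStorefiles();
    // The default store engine's compactor is a DefaultCompactor.
    DefaultCompactor tool = (DefaultCompactor) store.storeEngine.getCompactor();
    return tool.compactForTesting(storeFiles, isMajor);
  }
}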
Example 1: testCompactionWithCorruptResult
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; // import the required package/class
@Test
public void testCompactionWithCorruptResult() throws Exception {
  int nfiles = 10;
  for (int i = 0; i < nfiles; i++) {
    createStoreFile(r);
  }
  HStore store = (HStore) r.getStore(COLUMN_FAMILY);
  Collection<StoreFile> storeFiles = store.getStorefiles();
  DefaultCompactor tool = (DefaultCompactor) store.storeEngine.getCompactor();
  tool.compactForTesting(storeFiles, false);
  // Now let's corrupt the compacted file.
  FileSystem fs = store.getFileSystem();
  // default compaction policy created one and only one new compacted file
  Path dstPath = store.getRegionFileSystem().createTempName();
  FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short) 3, (long) 1024, null);
  stream.writeChars("CORRUPT FILE!!!!");
  stream.close();
  Path origPath =
      store.getRegionFileSystem().commitStoreFile(Bytes.toString(COLUMN_FAMILY), dstPath);
  try {
    ((HStore) store).moveFileIntoPlace(origPath);
  } catch (Exception e) {
    // Completing the compaction should fail and the corrupt file should remain
    // in the 'tmp' directory.
    assert (fs.exists(origPath));
    assert (!fs.exists(dstPath));
    System.out.println("testCompactionWithCorruptResult Passed");
    return;
  }
  fail("testCompactionWithCorruptResult failed since no exception was "
      + "thrown while completing a corrupt file");
}
Developer ID: fengchen8086, Project: ditb, Lines of code: 36, Source: TestCompaction.java
Example 2: testCompactionWithCorruptResult
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; // import the required package/class
@Test
public void testCompactionWithCorruptResult() throws Exception {
  int nfiles = 10;
  for (int i = 0; i < nfiles; i++) {
    createStoreFile(r);
  }
  HStore store = (HStore) r.getStore(COLUMN_FAMILY);
  Collection<StoreFile> storeFiles = store.getStorefiles();
  DefaultCompactor tool = (DefaultCompactor) store.storeEngine.getCompactor();
  tool.compactForTesting(storeFiles, false);
  // Now let's corrupt the compacted file.
  FileSystem fs = store.getFileSystem();
  // default compaction policy created one and only one new compacted file
  Path dstPath = store.getRegionFileSystem().createTempName();
  FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short) 3, (long) 1024, null);
  stream.writeChars("CORRUPT FILE!!!!");
  stream.close();
  Path origPath = store.getRegionFileSystem().commitStoreFile(
      Bytes.toString(COLUMN_FAMILY), dstPath);
  try {
    ((HStore) store).moveFileIntoPlace(origPath);
  } catch (Exception e) {
    // Completing the compaction should fail and the corrupt file should remain
    // in the 'tmp' directory.
    assert (fs.exists(origPath));
    assert (!fs.exists(dstPath));
    System.out.println("testCompactionWithCorruptResult Passed");
    return;
  }
  fail("testCompactionWithCorruptResult failed since no exception was "
      + "thrown while completing a corrupt file");
}
Developer ID: grokcoder, Project: pbase, Lines of code: 36, Source: TestCompaction.java
Example 3: testCompactionWithCorruptResult
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; // import the required package/class
@Test
public void testCompactionWithCorruptResult() throws Exception {
  int nfiles = 10;
  for (int i = 0; i < nfiles; i++) {
    createStoreFile(r);
  }
  HStore store = r.getStore(COLUMN_FAMILY);
  Collection<HStoreFile> storeFiles = store.getStorefiles();
  DefaultCompactor tool = (DefaultCompactor) store.storeEngine.getCompactor();
  tool.compactForTesting(storeFiles, false);
  // Now let's corrupt the compacted file.
  FileSystem fs = store.getFileSystem();
  // default compaction policy created one and only one new compacted file
  Path dstPath = store.getRegionFileSystem().createTempName();
  FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short) 3, 1024L, null);
  stream.writeChars("CORRUPT FILE!!!!");
  stream.close();
  Path origPath = store.getRegionFileSystem().commitStoreFile(
      Bytes.toString(COLUMN_FAMILY), dstPath);
  try {
    ((HStore) store).moveFileIntoPlace(origPath);
  } catch (Exception e) {
    // Completing the compaction should fail and the corrupt file should remain
    // in the 'tmp' directory.
    assertTrue(fs.exists(origPath));
    assertFalse(fs.exists(dstPath));
    System.out.println("testCompactionWithCorruptResult Passed");
    return;
  }
  fail("testCompactionWithCorruptResult failed since no exception was "
      + "thrown while completing a corrupt file");
}
Developer ID: apache, Project: hbase, Lines of code: 36, Source: TestCompaction.java
Example 4: compactRecentForTestingAssumingDefaultPolicy
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; // import the required package/class
/**
 * This method tries to compact N recent files for testing. Note that because compacting "recent"
 * files only makes sense for some policies, e.g. the default one, it assumes the default policy is
 * used. It doesn't use a policy, but instead builds a compaction candidate list by itself.
 *
 * @param N Number of files.
 */
public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException {
  List<StoreFile> filesToCompact;
  boolean isMajor;
  this.lock.readLock().lock();
  try {
    synchronized (filesCompacting) {
      filesToCompact = Lists.newArrayList(storeEngine.getStoreFileManager().getStorefiles());
      if (!filesCompacting.isEmpty()) {
        // exclude all files older than the newest file we're currently
        // compacting. this allows us to preserve contiguity (HBASE-2856)
        StoreFile last = filesCompacting.get(filesCompacting.size() - 1);
        int idx = filesToCompact.indexOf(last);
        Preconditions.checkArgument(idx != -1);
        filesToCompact.subList(0, idx + 1).clear();
      }
      int count = filesToCompact.size();
      if (N > count) {
        throw new RuntimeException("Not enough files");
      }
      filesToCompact = filesToCompact.subList(count - N, count);
      isMajor = (filesToCompact.size() == storeEngine.getStoreFileManager().getStorefileCount());
      filesCompacting.addAll(filesToCompact);
      Collections.sort(filesCompacting, StoreFile.Comparators.SEQ_ID);
    }
  } finally {
    this.lock.readLock().unlock();
  }
  try {
    // Ready to go. Have list of files to compact.
    List<Path> newFiles = ((DefaultCompactor) this.storeEngine.getCompactor())
        .compactForTesting(filesToCompact, isMajor);
    for (Path newFile : newFiles) {
      // Move the compaction into place.
      StoreFile sf = moveFileIntoPlace(newFile);
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCompact(this, sf, null);
      }
      replaceStoreFiles(filesToCompact, Lists.newArrayList(sf));
      completeCompaction(filesToCompact, true);
    }
  } finally {
    synchronized (filesCompacting) {
      filesCompacting.removeAll(filesToCompact);
    }
  }
}
Developer ID: fengchen8086, Project: ditb, Lines of code: 57, Source: HStore.java
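For context, here is a caller-side sketch of how a test might drive the method above. It is hedged: it leans on the harness of Examples 1-3, so the HRegion r, the COLUMN_FAMILY constant, and the createStoreFile helper are assumed to exist, and getStorefilesCount() is assumed to be the store-file count accessor of the same HBase versions.

@Test
public void compactThreeMostRecentFiles() throws Exception {
  // Build four store files so one file is left out of the compaction.
  for (int i = 0; i < 4; i++) {
    createStoreFile(r); // harness helper from Examples 1-3 (assumed)
  }
  HStore store = (HStore) r.getStore(COLUMN_FAMILY);
  // Compact only the 3 newest of the 4 files; because 3 < 4, isMajor is
  // computed as false inside and this runs as a minor compaction.
  store.compactRecentForTestingAssumingDefaultPolicy(3);
  // The 3 inputs collapse into 1 new file, leaving the oldest untouched.
  assertEquals(2, store.getStorefilesCount());
}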
Example 5: compactRecentForTestingAssumingDefaultPolicy
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; // import the required package/class
/**
 * This method tries to compact N recent files for testing.
 * Note that because compacting "recent" files only makes sense for some policies,
 * e.g. the default one, it assumes the default policy is used. It doesn't use a policy,
 * but instead builds a compaction candidate list by itself.
 *
 * @param N Number of files.
 */
public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException {
  List<StoreFile> filesToCompact;
  boolean isMajor;
  this.lock.readLock().lock();
  try {
    synchronized (filesCompacting) {
      filesToCompact = Lists.newArrayList(storeEngine.getStoreFileManager().getStorefiles());
      if (!filesCompacting.isEmpty()) {
        // exclude all files older than the newest file we're currently
        // compacting. this allows us to preserve contiguity (HBASE-2856)
        StoreFile last = filesCompacting.get(filesCompacting.size() - 1);
        int idx = filesToCompact.indexOf(last);
        Preconditions.checkArgument(idx != -1);
        filesToCompact.subList(0, idx + 1).clear();
      }
      int count = filesToCompact.size();
      if (N > count) {
        throw new RuntimeException("Not enough files");
      }
      filesToCompact = filesToCompact.subList(count - N, count);
      isMajor = (filesToCompact.size() == storeEngine.getStoreFileManager().getStorefileCount());
      filesCompacting.addAll(filesToCompact);
      Collections.sort(filesCompacting, StoreFile.Comparators.SEQ_ID);
    }
  } finally {
    this.lock.readLock().unlock();
  }
  try {
    // Ready to go. Have list of files to compact.
    List<Path> newFiles = ((DefaultCompactor) this.storeEngine.getCompactor())
        .compactForTesting(filesToCompact, isMajor);
    for (Path newFile : newFiles) {
      // Move the compaction into place.
      StoreFile sf = moveFileIntoPlace(newFile);
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCompact(this, sf, null);
      }
      replaceStoreFiles(filesToCompact, Lists.newArrayList(sf));
      completeCompaction(filesToCompact, true);
    }
  } finally {
    synchronized (filesCompacting) {
      filesCompacting.removeAll(filesToCompact);
    }
  }
}
Developer ID: grokcoder, Project: pbase, Lines of code: 58, Source: HStore.java
Example 6: compactRecentForTestingAssumingDefaultPolicy
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; // import the required package/class
/**
 * This method tries to compact N recent files for testing.
 * Note that because compacting "recent" files only makes sense for some policies,
 * e.g. the default one, it assumes the default policy is used. It doesn't use a policy,
 * but instead builds a compaction candidate list by itself.
 * @param N Number of files.
 */
public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException {
  List<StoreFile> filesToCompact;
  boolean isMajor;
  this.lock.readLock().lock();
  try {
    synchronized (filesCompacting) {
      filesToCompact = Lists.newArrayList(storeEngine.getStoreFileManager().getStorefiles());
      if (!filesCompacting.isEmpty()) {
        // exclude all files older than the newest file we're currently
        // compacting. this allows us to preserve contiguity (HBASE-2856)
        StoreFile last = filesCompacting.get(filesCompacting.size() - 1);
        int idx = filesToCompact.indexOf(last);
        Preconditions.checkArgument(idx != -1);
        filesToCompact.subList(0, idx + 1).clear();
      }
      int count = filesToCompact.size();
      if (N > count) {
        throw new RuntimeException("Not enough files");
      }
      filesToCompact = filesToCompact.subList(count - N, count);
      isMajor = (filesToCompact.size() == storeEngine.getStoreFileManager().getStorefileCount());
      filesCompacting.addAll(filesToCompact);
      Collections.sort(filesCompacting, StoreFile.Comparators.SEQ_ID);
    }
  } finally {
    this.lock.readLock().unlock();
  }
  try {
    // Ready to go. Have list of files to compact.
    List<Path> newFiles = ((DefaultCompactor) this.storeEngine.getCompactor())
        .compactForTesting(filesToCompact, isMajor);
    for (Path newFile : newFiles) {
      // Move the compaction into place.
      StoreFile sf = moveFileIntoPlace(newFile);
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCompact(this, sf, null);
      }
      replaceStoreFiles(filesToCompact, Lists.newArrayList(sf));
      completeCompaction(filesToCompact);
    }
  } finally {
    synchronized (filesCompacting) {
      filesCompacting.removeAll(filesToCompact);
    }
  }
}
Developer ID: tenggyut, Project: HIndex, Lines of code: 57, Source: HStore.java
Example 7: compactRecentForTestingAssumingDefaultPolicy
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; // import the required package/class
/**
 * This method tries to compact N recent files for testing.
 * Note that because compacting "recent" files only makes sense for some policies,
 * e.g. the default one, it assumes the default policy is used. It doesn't use a policy,
 * but instead builds a compaction candidate list by itself.
 * @param N Number of files.
 */
@VisibleForTesting
public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException {
  List<HStoreFile> filesToCompact;
  boolean isMajor;
  this.lock.readLock().lock();
  try {
    synchronized (filesCompacting) {
      filesToCompact = Lists.newArrayList(storeEngine.getStoreFileManager().getStorefiles());
      if (!filesCompacting.isEmpty()) {
        // exclude all files older than the newest file we're currently
        // compacting. this allows us to preserve contiguity (HBASE-2856)
        HStoreFile last = filesCompacting.get(filesCompacting.size() - 1);
        int idx = filesToCompact.indexOf(last);
        Preconditions.checkArgument(idx != -1);
        filesToCompact.subList(0, idx + 1).clear();
      }
      int count = filesToCompact.size();
      if (N > count) {
        throw new RuntimeException("Not enough files");
      }
      filesToCompact = filesToCompact.subList(count - N, count);
      isMajor = (filesToCompact.size() == storeEngine.getStoreFileManager().getStorefileCount());
      filesCompacting.addAll(filesToCompact);
      Collections.sort(filesCompacting,
          storeEngine.getStoreFileManager().getStoreFileComparator());
    }
  } finally {
    this.lock.readLock().unlock();
  }
  try {
    // Ready to go. Have list of files to compact.
    List<Path> newFiles = ((DefaultCompactor) this.storeEngine.getCompactor())
        .compactForTesting(filesToCompact, isMajor);
    for (Path newFile : newFiles) {
      // Move the compaction into place.
      HStoreFile sf = moveFileIntoPlace(newFile);
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCompact(this, sf, null, null, null);
      }
      replaceStoreFiles(filesToCompact, Collections.singletonList(sf));
      completeCompaction(filesToCompact);
    }
  } finally {
    synchronized (filesCompacting) {
      filesCompacting.removeAll(filesToCompact);
    }
  }
}
Developer ID: apache, Project: hbase, Lines of code: 59, Source: HStore.java
Note: The org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor class examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs, and the snippets were selected from open-source projects contributed by their respective authors. Copyright of the source code remains with the original authors; refer to each project's License before redistributing or using it. Do not reproduce without permission.