本文整理汇总了Java中org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker类的典型用法代码示例。如果您正苦于以下问题:Java HFileCorruptionChecker类的具体用法?Java HFileCorruptionChecker怎么用?Java HFileCorruptionChecker使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
HFileCorruptionChecker类属于org.apache.hadoop.hbase.util.hbck包,在下文中一共展示了HFileCorruptionChecker类的16个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: testQuarantineCorruptHFile
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * This creates a table and then corrupts an hfile. Hbck should quarantine the file.
 */
@Test(timeout=180000)
public void testQuarantineCorruptHFile() throws Exception {
  TableName table = TableName.valueOf(name.getMethodName());
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    admin.flush(table); // flush is async.
    FileSystem fs = FileSystem.get(conf);
    Path hfile = getFlushedHFile(fs, table);
    // Mess it up by leaving a hole in the assignment, meta, and hdfs data
    admin.disableTable(table);
    // create new corrupt file called deadbeef (valid hfile name)
    Path corrupt = new Path(hfile.getParent(), "deadbeef");
    TestHFile.truncateFile(fs, hfile, corrupt);
    LOG.info("Created corrupted file " + corrupt);
    HBaseFsck.debugLsr(conf, FSUtils.getRootDir(conf));
    // we cannot enable here because enable never finished due to the corrupt region.
    HBaseFsck res = HbckTestingUtil.doHFileQuarantine(conf, table);
    // JUnit convention: expected value first, actual second (was reversed, which
    // produces misleading "expected X but was Y" failure messages).
    assertEquals(0, res.getRetCode());
    HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
    assertEquals(5, hfcc.getHFilesChecked());
    assertEquals(1, hfcc.getCorrupted().size());
    assertEquals(0, hfcc.getFailures().size());
    assertEquals(1, hfcc.getQuarantined().size());
    assertEquals(0, hfcc.getMissing().size());
    // Its been fixed, verify that we can enable.
    admin.enableTable(table);
  } finally {
    cleanupTable(table);
  }
}
开发者ID:fengchen8086,项目名称:ditb,代码行数:40,代码来源:TestHBaseFsck.java
示例2: testQuarantineMissingHFile
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * This creates a table and simulates the race situation where a concurrent compaction or split
 * has removed an hfile after the corruption checker learned about it.
 */
@Test(timeout=180000)
public void testQuarantineMissingHFile() throws Exception {
  TableName table = TableName.valueOf(name.getMethodName());
  // inject a fault in the hfcc created.
  final FileSystem fs = FileSystem.get(conf);
  HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) {
    @Override
    public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
        throws IOException {
      return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
        final AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false);
        @Override
        protected void checkHFile(Path p) throws IOException {
          // Delete exactly one hfile out from under the checker to simulate the race.
          if (attemptedFirstHFile.compareAndSet(false, true)) {
            assertTrue(fs.delete(p, true)); // make sure delete happened.
          }
          super.checkHFile(p);
        }
      };
    }
  };
  try {
    doQuarantineTest(table, hbck, 4, 0, 0, 0, 1); // 4 attempted, but 1 missing.
  } finally {
    // Close in a finally so hbck resources are released even when an assertion fails.
    hbck.close();
  }
}
开发者ID:fengchen8086,项目名称:ditb,代码行数:29,代码来源:TestHBaseFsck.java
示例3: testQuarantineMissingFamdir
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * This creates a table and simulates the race situation where a concurrent compaction or split
 * has removed an colfam dir before the corruption checker got to it.
 */
// Disabled because fails sporadically. Is this test right? Timing-wise, there could be no
// files in a column family on initial creation -- as suggested by Matteo.
@Ignore @Test(timeout=180000)
public void testQuarantineMissingFamdir() throws Exception {
  TableName table = TableName.valueOf(name.getMethodName());
  // inject a fault in the hfcc created.
  final FileSystem fs = FileSystem.get(conf);
  HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) {
    @Override
    public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
        throws IOException {
      return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
        final AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false);
        @Override
        protected void checkColFamDir(Path p) throws IOException {
          // Delete exactly one column-family dir out from under the checker.
          if (attemptedFirstHFile.compareAndSet(false, true)) {
            assertTrue(fs.delete(p, true)); // make sure delete happened.
          }
          super.checkColFamDir(p);
        }
      };
    }
  };
  try {
    doQuarantineTest(table, hbck, 3, 0, 0, 0, 1);
  } finally {
    // Close in a finally so hbck resources are released even when an assertion fails.
    hbck.close();
  }
}
开发者ID:fengchen8086,项目名称:ditb,代码行数:30,代码来源:TestHBaseFsck.java
示例4: testQuarantineMissingRegionDir
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * This creates a table and simulates the race situation where a concurrent compaction or split
 * has removed a region dir before the corruption checker got to it.
 */
@Test(timeout=180000)
public void testQuarantineMissingRegionDir() throws Exception {
  TableName table = TableName.valueOf(name.getMethodName());
  // inject a fault in the hfcc created.
  final FileSystem fs = FileSystem.get(conf);
  HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) {
    @Override
    public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
        throws IOException {
      return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
        final AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false);
        @Override
        protected void checkRegionDir(Path p) throws IOException {
          // Delete exactly one region dir out from under the checker.
          if (attemptedFirstHFile.compareAndSet(false, true)) {
            assertTrue(fs.delete(p, true)); // make sure delete happened.
          }
          super.checkRegionDir(p);
        }
      };
    }
  };
  try {
    doQuarantineTest(table, hbck, 3, 0, 0, 0, 1);
  } finally {
    // Close in a finally so hbck resources are released even when an assertion fails.
    hbck.close();
  }
}
开发者ID:fengchen8086,项目名称:ditb,代码行数:29,代码来源:TestHBaseFsck.java
示例5: testQuarantineMissingHFile
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * This creates a table and simulates the race situation where a concurrent compaction or split
 * has removed an hfile after the corruption checker learned about it.
 */
@Test(timeout=180000)
public void testQuarantineMissingHFile() throws Exception {
  String table = name.getMethodName();
  ExecutorService exec = new ScheduledThreadPoolExecutor(10);
  // inject a fault in the hfcc created.
  final FileSystem fs = FileSystem.get(conf);
  HBaseFsck hbck = new HBaseFsck(conf, exec) {
    @Override
    public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
        throws IOException {
      return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
        // The checker may visit hfiles from multiple executor threads; an atomic
        // compareAndSet guarantees exactly one file is deleted (a plain boolean
        // flag could race and delete more than one).
        final AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false);
        @Override
        protected void checkHFile(Path p) throws IOException {
          if (attemptedFirstHFile.compareAndSet(false, true)) {
            assertTrue(fs.delete(p, true)); // make sure delete happened.
          }
          super.checkHFile(p);
        }
      };
    }
  };
  doQuarantineTest(table, hbck, 4, 0, 0, 0, 1); // 4 attempted, but 1 missing.
}
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:27,代码来源:TestHBaseFsck.java
示例6: testQuarantineMissingFamdir
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * This creates a table and simulates the race situation where a concurrent compaction or split
 * has removed an colfam dir before the corruption checker got to it.
 */
@Test(timeout=180000)
public void testQuarantineMissingFamdir() throws Exception {
  String table = name.getMethodName();
  ExecutorService exec = new ScheduledThreadPoolExecutor(10);
  // inject a fault in the hfcc created.
  final FileSystem fs = FileSystem.get(conf);
  HBaseFsck hbck = new HBaseFsck(conf, exec) {
    @Override
    public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
        throws IOException {
      return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
        // Atomic flag: the checker may run on multiple executor threads, and we
        // must delete exactly one column-family dir.
        final AtomicBoolean attemptedFirstFamDir = new AtomicBoolean(false);
        @Override
        protected void checkColFamDir(Path p) throws IOException {
          if (attemptedFirstFamDir.compareAndSet(false, true)) {
            assertTrue(fs.delete(p, true)); // make sure delete happened.
          }
          super.checkColFamDir(p);
        }
      };
    }
  };
  doQuarantineTest(table, hbck, 3, 0, 0, 0, 1);
}
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:27,代码来源:TestHBaseFsck.java
示例7: testQuarantineMissingRegionDir
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * This creates a table and simulates the race situation where a concurrent compaction or split
 * has removed a region dir before the corruption checker got to it.
 */
@Test(timeout=180000)
public void testQuarantineMissingRegionDir() throws Exception {
  String table = name.getMethodName();
  ExecutorService exec = new ScheduledThreadPoolExecutor(10);
  // inject a fault in the hfcc created.
  final FileSystem fs = FileSystem.get(conf);
  HBaseFsck hbck = new HBaseFsck(conf, exec) {
    @Override
    public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
        throws IOException {
      return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
        // Atomic flag: the checker may run on multiple executor threads, and we
        // must delete exactly one region dir.
        final AtomicBoolean attemptedFirstRegionDir = new AtomicBoolean(false);
        @Override
        protected void checkRegionDir(Path p) throws IOException {
          if (attemptedFirstRegionDir.compareAndSet(false, true)) {
            assertTrue(fs.delete(p, true)); // make sure delete happened.
          }
          super.checkRegionDir(p);
        }
      };
    }
  };
  doQuarantineTest(table, hbck, 3, 0, 0, 0, 1);
}
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:27,代码来源:TestHBaseFsck.java
示例8: testQuarantineMissingHFile
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * This creates a table and simulates the race situation where a concurrent compaction or split
 * has removed an hfile after the corruption checker learned about it.
 */
@Test(timeout=180000)
public void testQuarantineMissingHFile() throws Exception {
  TableName table = TableName.valueOf(name.getMethodName());
  ExecutorService exec = new ScheduledThreadPoolExecutor(10);
  // inject a fault in the hfcc created.
  final FileSystem fs = FileSystem.get(conf);
  HBaseFsck hbck = new HBaseFsck(conf, exec) {
    @Override
    public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
        throws IOException {
      return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
        // Atomic flag so exactly one hfile is deleted even if the checker visits
        // files from multiple executor threads (a plain boolean could race).
        final AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false);
        @Override
        protected void checkHFile(Path p) throws IOException {
          if (attemptedFirstHFile.compareAndSet(false, true)) {
            assertTrue(fs.delete(p, true)); // make sure delete happened.
          }
          super.checkHFile(p);
        }
      };
    }
  };
  doQuarantineTest(table, hbck, 4, 0, 0, 0, 1); // 4 attempted, but 1 missing.
}
开发者ID:tenggyut,项目名称:HIndex,代码行数:29,代码来源:TestHBaseFsck.java
示例9: testQuarantineMissingRegionDir
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * This creates a table and simulates the race situation where a concurrent compaction or split
 * has removed a region dir before the corruption checker got to it.
 */
@Test(timeout=180000)
public void testQuarantineMissingRegionDir() throws Exception {
  TableName table = TableName.valueOf(name.getMethodName());
  ExecutorService exec = new ScheduledThreadPoolExecutor(10);
  // inject a fault in the hfcc created.
  final FileSystem fs = FileSystem.get(conf);
  HBaseFsck hbck = new HBaseFsck(conf, exec) {
    @Override
    public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
        throws IOException {
      return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
        // Atomic flag so exactly one region dir is deleted even under a
        // multi-threaded checker (a plain boolean could race).
        final AtomicBoolean attemptedFirstRegionDir = new AtomicBoolean(false);
        @Override
        protected void checkRegionDir(Path p) throws IOException {
          if (attemptedFirstRegionDir.compareAndSet(false, true)) {
            assertTrue(fs.delete(p, true)); // make sure delete happened.
          }
          super.checkRegionDir(p);
        }
      };
    }
  };
  doQuarantineTest(table, hbck, 3, 0, 0, 0, 1);
}
开发者ID:tenggyut,项目名称:HIndex,代码行数:29,代码来源:TestHBaseFsck.java
示例10: testQuarantineMissingHFile
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * This creates a table and simulates the race situation where a concurrent compaction or split
 * has removed an hfile after the corruption checker learned about it.
 */
@Test(timeout=120000)
public void testQuarantineMissingHFile() throws Exception {
  String table = name.getMethodName();
  ExecutorService exec = new ScheduledThreadPoolExecutor(10);
  // inject a fault in the hfcc created.
  final FileSystem fs = FileSystem.get(conf);
  HBaseFsck hbck = new HBaseFsck(conf, exec) {
    @Override
    public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
        throws IOException {
      return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
        // Atomic flag so exactly one hfile is deleted even if the checker visits
        // files from multiple executor threads (a plain boolean could race).
        final AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false);
        @Override
        protected void checkHFile(Path p) throws IOException {
          if (attemptedFirstHFile.compareAndSet(false, true)) {
            assertTrue(fs.delete(p, true)); // make sure delete happened.
          }
          super.checkHFile(p);
        }
      };
    }
  };
  doQuarantineTest(table, hbck, 4, 0, 0, 0, 1); // 4 attempted, but 1 missing.
}
开发者ID:zwqjsj0404,项目名称:HBase-Research,代码行数:27,代码来源:TestHBaseFsck.java
示例11: testQuarantineMissingFamdir
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * This creates a table and simulates the race situation where a concurrent compaction or split
 * has removed an colfam dir before the corruption checker got to it.
 */
@Test(timeout=120000)
public void testQuarantineMissingFamdir() throws Exception {
  String table = name.getMethodName();
  ExecutorService exec = new ScheduledThreadPoolExecutor(10);
  // inject a fault in the hfcc created.
  final FileSystem fs = FileSystem.get(conf);
  HBaseFsck hbck = new HBaseFsck(conf, exec) {
    @Override
    public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
        throws IOException {
      return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
        // Atomic flag: delete exactly one column-family dir even under a
        // multi-threaded checker (a plain boolean could race).
        final AtomicBoolean attemptedFirstFamDir = new AtomicBoolean(false);
        @Override
        protected void checkColFamDir(Path p) throws IOException {
          if (attemptedFirstFamDir.compareAndSet(false, true)) {
            assertTrue(fs.delete(p, true)); // make sure delete happened.
          }
          super.checkColFamDir(p);
        }
      };
    }
  };
  doQuarantineTest(table, hbck, 3, 0, 0, 0, 1);
}
开发者ID:zwqjsj0404,项目名称:HBase-Research,代码行数:27,代码来源:TestHBaseFsck.java
示例12: testQuarantineMissingRegionDir
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * This creates a table and simulates the race situation where a concurrent compaction or split
 * has removed a region dir before the corruption checker got to it.
 */
@Test(timeout=120000)
public void testQuarantineMissingRegionDir() throws Exception {
  String table = name.getMethodName();
  ExecutorService exec = new ScheduledThreadPoolExecutor(10);
  // inject a fault in the hfcc created.
  final FileSystem fs = FileSystem.get(conf);
  HBaseFsck hbck = new HBaseFsck(conf, exec) {
    @Override
    public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
        throws IOException {
      return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
        // Atomic flag: delete exactly one region dir even under a multi-threaded
        // checker (a plain boolean could race).
        final AtomicBoolean attemptedFirstRegionDir = new AtomicBoolean(false);
        @Override
        protected void checkRegionDir(Path p) throws IOException {
          if (attemptedFirstRegionDir.compareAndSet(false, true)) {
            assertTrue(fs.delete(p, true)); // make sure delete happened.
          }
          super.checkRegionDir(p);
        }
      };
    }
  };
  doQuarantineTest(table, hbck, 3, 0, 0, 0, 1);
}
开发者ID:zwqjsj0404,项目名称:HBase-Research,代码行数:27,代码来源:TestHBaseFsck.java
示例13: doQuarantineTest
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * Runs hbck with the hfile-quarantine options against {@code table} and checks the resulting
 * HFileCorruptionChecker counters against the expected values, then verifies the table can be
 * re-enabled. Tests that use this should have a timeout, because this method could potentially
 * wait forever while polling for the table to become enabled.
 *
 * @param check   expected number of hfiles checked
 * @param corrupt expected number of corrupted hfiles
 * @param fail    expected number of check failures
 * @param quar    expected number of quarantined hfiles
 * @param missing expected number of hfiles that went missing mid-check
 */
private void doQuarantineTest(TableName table, HBaseFsck hbck, int check,
    int corrupt, int fail, int quar, int missing) throws Exception {
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    admin.flush(table); // flush is async.
    // Mess it up by leaving a hole in the assignment, meta, and hdfs data
    admin.disableTable(table);
    String[] args = {"-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission",
        table.getNameAsString()};
    HBaseFsck res = hbck.exec(hbfsckExecutorService, args);
    HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
    // JUnit convention: expected value first, actual second (was reversed, which
    // produces misleading "expected X but was Y" failure messages).
    assertEquals(check, hfcc.getHFilesChecked());
    assertEquals(corrupt, hfcc.getCorrupted().size());
    assertEquals(fail, hfcc.getFailures().size());
    assertEquals(quar, hfcc.getQuarantined().size());
    assertEquals(missing, hfcc.getMissing().size());
    // its been fixed, verify that we can enable
    admin.enableTableAsync(table);
    while (!admin.isTableEnabled(table)) {
      try {
        Thread.sleep(250);
      } catch (InterruptedException e) {
        // Restore the interrupt status before failing so callers see the interrupt.
        Thread.currentThread().interrupt();
        e.printStackTrace();
        fail("Interrupted when trying to enable table " + table);
      }
    }
  } finally {
    cleanupTable(table);
  }
}
开发者ID:fengchen8086,项目名称:ditb,代码行数:39,代码来源:TestHBaseFsck.java
示例14: testFsckWithEncryption
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * Populates an encrypted table, flushes it, verifies the store files on disk were written with
 * the expected encryption key, and checks that hbck's hfile quarantine does not flag the
 * encrypted files as corrupt.
 */
@Test
public void testFsckWithEncryption() throws Exception {
  // Populate the table with some data
  Table table = new HTable(conf, htd.getTableName());
  try {
    byte[] values = { 'A', 'B', 'C', 'D' };
    for (int i = 0; i < values.length; i++) {
      for (int j = 0; j < values.length; j++) {
        Put put = new Put(new byte[] { values[i], values[j] });
        put.add(Bytes.toBytes("cf"), new byte[] {}, new byte[] { values[i],
            values[j] });
        table.put(put);
      }
    }
  } finally {
    table.close();
  }
  // Flush it
  TEST_UTIL.getHBaseAdmin().flush(htd.getTableName());
  // Verify we have encrypted store files on disk
  final List<Path> paths = findStorefilePaths(htd.getTableName());
  assertTrue(paths.size() > 0);
  for (Path path: paths) {
    assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(cfKey.getEncoded(), extractHFileKey(path)));
  }
  // Insure HBck doesn't consider them corrupt
  HBaseFsck res = HbckTestingUtil.doHFileQuarantine(conf, htd.getTableName());
  // JUnit convention: expected value first, actual second (was reversed).
  assertEquals(0, res.getRetCode());
  HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
  assertEquals(0, hfcc.getCorrupted().size());
  assertEquals(0, hfcc.getFailures().size());
  assertEquals(0, hfcc.getQuarantined().size());
  assertEquals(0, hfcc.getMissing().size());
}
开发者ID:fengchen8086,项目名称:ditb,代码行数:38,代码来源:TestHBaseFsckEncryption.java
示例15: testQuarantineCorruptHFile
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * This creates a table and then corrupts an hfile. Hbck should quarantine the file.
 */
@Test(timeout=180000)
public void testQuarantineCorruptHFile() throws Exception {
  String table = name.getMethodName();
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    TEST_UTIL.getHBaseAdmin().flush(table); // flush is async.
    FileSystem fs = FileSystem.get(conf);
    Path hfile = getFlushedHFile(fs, table);
    // Mess it up by leaving a hole in the assignment, meta, and hdfs data
    TEST_UTIL.getHBaseAdmin().disableTable(table);
    // create new corrupt file called deadbeef (valid hfile name)
    Path corrupt = new Path(hfile.getParent(), "deadbeef");
    TestHFile.truncateFile(fs, hfile, corrupt);
    LOG.info("Created corrupted file " + corrupt);
    HBaseFsck.debugLsr(conf, FSUtils.getRootDir(conf));
    // we cannot enable here because enable never finished due to the corrupt region.
    HBaseFsck res = HbckTestingUtil.doHFileQuarantine(conf, table);
    // JUnit convention: expected value first, actual second (was reversed, which
    // produces misleading "expected X but was Y" failure messages).
    assertEquals(0, res.getRetCode());
    HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
    assertEquals(5, hfcc.getHFilesChecked());
    assertEquals(1, hfcc.getCorrupted().size());
    assertEquals(0, hfcc.getFailures().size());
    assertEquals(1, hfcc.getQuarantined().size());
    assertEquals(0, hfcc.getMissing().size());
    // Its been fixed, verify that we can enable.
    TEST_UTIL.getHBaseAdmin().enableTable(table);
  } finally {
    deleteTable(table);
  }
}
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:40,代码来源:TestHBaseFsck.java
示例16: doQuarantineTest
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; //导入依赖的package包/类
/**
 * Runs hbck with the hfile-quarantine options against {@code table} and checks the resulting
 * HFileCorruptionChecker counters against the expected values, then verifies the table can be
 * re-enabled. Tests that use this should have a timeout, because this method could potentially
 * wait forever while polling for the table to become enabled.
 *
 * @param check   expected number of hfiles checked
 * @param corrupt expected number of corrupted hfiles
 * @param fail    expected number of check failures
 * @param quar    expected number of quarantined hfiles
 * @param missing expected number of hfiles that went missing mid-check
 */
private void doQuarantineTest(String table, HBaseFsck hbck, int check, int corrupt, int fail,
    int quar, int missing) throws Exception {
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    TEST_UTIL.getHBaseAdmin().flush(table); // flush is async.
    // Mess it up by leaving a hole in the assignment, meta, and hdfs data
    TEST_UTIL.getHBaseAdmin().disableTable(table);
    String[] args = {"-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission", table};
    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
    HBaseFsck res = hbck.exec(exec, args);
    // hbck.exec has completed; release the pool's threads instead of leaking them.
    exec.shutdown();
    HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
    // JUnit convention: expected value first, actual second (was reversed, which
    // produces misleading "expected X but was Y" failure messages).
    assertEquals(check, hfcc.getHFilesChecked());
    assertEquals(corrupt, hfcc.getCorrupted().size());
    assertEquals(fail, hfcc.getFailures().size());
    assertEquals(quar, hfcc.getQuarantined().size());
    assertEquals(missing, hfcc.getMissing().size());
    // its been fixed, verify that we can enable
    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    admin.enableTableAsync(table);
    while (!admin.isTableEnabled(table)) {
      try {
        Thread.sleep(250);
      } catch (InterruptedException e) {
        // Restore the interrupt status before failing so callers see the interrupt.
        Thread.currentThread().interrupt();
        e.printStackTrace();
        fail("Interrupted when trying to enable table " + table);
      }
    }
  } finally {
    deleteTable(table);
  }
}
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:40,代码来源:TestHBaseFsck.java
注:本文中的org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论