This article collects typical usage examples of the Java class org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication. If you are wondering what VerifyReplication is for, or how to use it, the curated class examples below should help.
The VerifyReplication class belongs to the org.apache.hadoop.hbase.mapreduce.replication package. Eight code examples are shown below, ordered by popularity.
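Before the examples, here is a minimal sketch of submitting a verification job directly. It assumes a recent HBase release in which VerifyReplication implements the Hadoop Tool interface; the peer id "2" and the table name "myTable" are placeholders, not values taken from the examples below.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
import org.apache.hadoop.util.ToolRunner;

public class VerifyReplicationRunner {
  public static void main(String[] args) throws Exception {
    // "2" must be the id of a replication peer registered on the source
    // cluster; "myTable" is a placeholder for the replicated table name.
    int exitCode = ToolRunner.run(HBaseConfiguration.create(),
        new VerifyReplication(), new String[] { "2", "myTable" });
    System.exit(exitCode);
  }
}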
Example 1: main
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication; // import the required package/class
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
      "Count rows in HBase table.");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
      "Count cells in HBase table.");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
      "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster.");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  pgd.addClass(WALPlayer.NAME, WALPlayer.class, "Replay WAL files.");
  pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, "Export" +
      " the specific snapshot to a given FileSystem.");
  ProgramDriver.class.getMethod("driver", new Class[] { String[].class })
      .invoke(pgd, new Object[] { args });
}
Author: fengchen8086 | Project: ditb | Lines: 30 | Source: Driver.java
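The driver registers the comparison job under VerifyReplication.NAME ("verifyrep" in the Apache sources), so it can be launched by name. Below is a hedged sketch of a programmatic invocation; on the command line this corresponds to something like hbase org.apache.hadoop.hbase.mapreduce.Driver verifyrep 2 myTable, where the peer id "2" and table "myTable" are placeholders.

// Launch the registered "verifyrep" program through the driver shown above.
// Peer id "2" and table "myTable" are placeholders.
public static void runVerifyRepByName() throws Throwable {
  Driver.main(new String[] { "verifyrep", "2", "myTable" });
}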
Example 2: main
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication; // import the required package/class
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
      "Count rows in HBase table");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
      "Count cells in HBase table");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
      "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " the data from tables in two different clusters. WARNING: It" +
      " doesn't work for incrementColumnValues'd cells since the" +
      " timestamp is changed after being appended to the log.");
  ProgramDriver.class.getMethod("driver", new Class[] { String[].class })
      .invoke(pgd, new Object[] { args });
}
Author: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 25 | Source: Driver.java
Example 3: main
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication; // import the required package/class
/**
 * @param args
 * @throws Throwable
 */
public static void main(String[] args) throws Throwable {
  ProgramDriver pgd = new ProgramDriver();
  pgd.addClass(RowCounter.NAME, RowCounter.class,
      "Count rows in HBase table.");
  pgd.addClass(CellCounter.NAME, CellCounter.class,
      "Count cells in HBase table.");
  pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS.");
  pgd.addClass(Import.NAME, Import.class, "Import data written by Export.");
  pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format.");
  pgd.addClass(LoadIncrementalHFiles.NAME, LoadIncrementalHFiles.class,
      "Complete a bulk data load.");
  pgd.addClass(CopyTable.NAME, CopyTable.class,
      "Export a table from local cluster to peer cluster.");
  pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" +
      " data from tables in two different clusters. It" +
      " doesn't work for incrementColumnValues'd cells since" +
      " timestamp is changed after appending to WAL.");
  pgd.addClass(WALPlayer.NAME, WALPlayer.class, "Replay WAL files.");
  pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, "Export" +
      " the specific snapshot to a given FileSystem.");
  ProgramDriver.class.getMethod("driver", new Class[] { String[].class })
      .invoke(pgd, new Object[] { args });
}
Author: apache | Project: hbase | Lines: 30 | Source: Driver.java
Example 4: runVerifyReplication
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication; // import the required package/class
private void runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows)
    throws IOException, InterruptedException, ClassNotFoundException {
  Job job = new VerifyReplication().createSubmittableJob(new Configuration(conf1), args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(expectedGoodRows,
      job.getCounters().findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(expectedBadRows,
      job.getCounters().findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
Author: apache | Project: hbase | Lines: 15 | Source: TestVerifyReplication.java
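A typical call site for this helper, sketched under the assumption that the enclosing test class provides a tableName field and an NB_ROWS_IN_BATCH constant (as the small-test examples below do):

// Right after replication has caught up, every row should match.
String[] args = new String[] { "2", tableName.getNameAsString() };
runVerifyReplication(args, NB_ROWS_IN_BATCH, 0);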
Example 5: testVerifyReplicationSnapshotArguments
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication; // import the required package/class
@Test
public void testVerifyReplicationSnapshotArguments() {
  String[] args =
      new String[] { "--sourceSnapshotName=snapshot1", "2", tableName.getNameAsString() };
  assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
  args = new String[] { "--sourceSnapshotTmpDir=tmp", "2", tableName.getNameAsString() };
  assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
  args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=tmp", "2",
      tableName.getNameAsString() };
  assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
  args = new String[] { "--peerSnapshotName=snapshot1", "2", tableName.getNameAsString() };
  assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
  args = new String[] { "--peerSnapshotTmpDir=/tmp/", "2", tableName.getNameAsString() };
  assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
  args = new String[] { "--peerSnapshotName=snapshot1", "--peerSnapshotTmpDir=/tmp/",
      "--peerFSAddress=tempfs", "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2",
      tableName.getNameAsString() };
  assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
  args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=/tmp/",
      "--peerSnapshotName=snapshot2", "--peerSnapshotTmpDir=/tmp/", "--peerFSAddress=tempfs",
      "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", tableName.getNameAsString() };
  assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
}
Author: apache | Project: hbase | Lines: 31 | Source: TestVerifyReplication.java
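Putting the valid combinations together, here is a sketch of a complete snapshot-based argument set as accepted by the test above, handed to createSubmittableJob as in Example 4. Snapshot names, temp dirs, and peer addresses are placeholders; conf1 stands for the source cluster's configuration.

String[] snapshotArgs = new String[] {
    "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=/tmp/source",
    "--peerSnapshotName=snapshot2", "--peerSnapshotTmpDir=/tmp/peer",
    "--peerFSAddress=hdfs://peer-nn:8020",
    "--peerHBaseRootAddress=hdfs://peer-nn:8020/hbase",
    "2", tableName.getNameAsString() };
// doCommandLine only validates the arguments; createSubmittableJob builds the MR job.
Job job = new VerifyReplication().createSubmittableJob(new Configuration(conf1), snapshotArgs);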
Example 6: testVerifyRepJob
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication; // import the required package/class
/**
 * Do a small loading into a table, make sure the data is really the same,
 * then run the VerifyReplication job to check the results. Do a second
 * comparison where all the cells are different.
 * @throws Exception
 */
@Test(timeout=300000)
public void testVerifyRepJob() throws Exception {
  // Populate the tables, at the same time it guarantees that the tables are
  // identical since it does the check
  testSmallBatch();
  String[] args = new String[] { "2", Bytes.toString(tableName) };
  Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters()
      .findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(0, job.getCounters()
      .findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
  Scan scan = new Scan();
  ResultScanner rs = htable2.getScanner(scan);
  Put put = null;
  for (Result result : rs) {
    put = new Put(result.getRow());
    KeyValue firstVal = result.raw()[0];
    put.add(firstVal.getFamily(),
        firstVal.getQualifier(), Bytes.toBytes("diff data"));
    htable2.put(put);
  }
  Delete delete = new Delete(put.getRow());
  htable2.delete(delete);
  job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(0, job.getCounters()
      .findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters()
      .findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
Author: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 50 | Source: TestReplicationSmallTests.java
Example 7: testVerifyRepJob
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication; // import the required package/class
/**
 * Do a small loading into a table, make sure the data is really the same,
 * then run the VerifyReplication job to check the results. Do a second
 * comparison where all the cells are different.
 * @throws Exception
 */
@Test(timeout=300000)
public void testVerifyRepJob() throws Exception {
  // Populate the tables, at the same time it guarantees that the tables are
  // identical since it does the check
  testSmallBatch();
  String[] args = new String[] { "2", tableName.getNameAsString() };
  Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters()
      .findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(0, job.getCounters()
      .findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
  Scan scan = new Scan();
  ResultScanner rs = htable2.getScanner(scan);
  Put put = null;
  for (Result result : rs) {
    put = new Put(result.getRow());
    Cell firstVal = result.rawCells()[0];
    put.add(CellUtil.cloneFamily(firstVal),
        CellUtil.cloneQualifier(firstVal), Bytes.toBytes("diff data"));
    htable2.put(put);
  }
  Delete delete = new Delete(put.getRow());
  htable2.delete(delete);
  job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(0, job.getCounters()
      .findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters()
      .findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
Author: grokcoder | Project: pbase | Lines: 50 | Source: TestReplicationSmallTests.java
Example 8: testVerifyRepJob
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication; // import the required package/class
/**
 * Do a small loading into a table, make sure the data is really the same,
 * then run the VerifyReplication job to check the results. Do a second
 * comparison where all the cells are different.
 * @throws Exception
 */
@Test(timeout=300000)
public void testVerifyRepJob() throws Exception {
  // Populate the tables, at the same time it guarantees that the tables are
  // identical since it does the check
  testSmallBatch();
  String[] args = new String[] { "2", Bytes.toString(tableName) };
  Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters()
      .findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(0, job.getCounters()
      .findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
  Scan scan = new Scan();
  ResultScanner rs = htable2.getScanner(scan);
  Put put = null;
  for (Result result : rs) {
    put = new Put(result.getRow());
    Cell firstVal = result.rawCells()[0];
    put.add(CellUtil.cloneFamily(firstVal),
        CellUtil.cloneQualifier(firstVal), Bytes.toBytes("diff data"));
    htable2.put(put);
  }
  Delete delete = new Delete(put.getRow());
  htable2.delete(delete);
  job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
  if (job == null) {
    fail("Job wasn't created, see the log");
  }
  if (!job.waitForCompletion(true)) {
    fail("Job failed, see the log");
  }
  assertEquals(0, job.getCounters()
      .findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
  assertEquals(NB_ROWS_IN_BATCH, job.getCounters()
      .findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}
Author: tenggyut | Project: HIndex | Lines: 50 | Source: TestReplicationSmallTests.java
Note: The org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects, and copyright remains with the original authors; consult each project's license before distributing or reusing the code. Do not repost without permission.