本文整理汇总了Java中org.apache.hadoop.examples.SleepJob.SleepInputFormat类的典型用法代码示例。如果您正苦于以下问题:Java SleepInputFormat类的具体用法?Java SleepInputFormat怎么用?Java SleepInputFormat使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
SleepInputFormat类属于org.apache.hadoop.examples.SleepJob包,在下文中一共展示了SleepInputFormat类的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: testBlacklistAcrossJobs
import org.apache.hadoop.examples.SleepJob.SleepInputFormat; //导入依赖的package包/类
/**
 * Verifies that blacklisting a tracker is idempotent across jobs: after one
 * run of a job whose mapper fails on a given host, the cluster-wide blacklist
 * count is 1, and re-running the identical job does not grow it.
 *
 * @throws Exception if the mini clusters or the job cannot be set up/run
 */
public void testBlacklistAcrossJobs() throws Exception {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  try {
    Configuration conf = new Configuration();
    // setup dfs and input
    dfs = new MiniDFSCluster(conf, 1, true, null, hosts);
    FileSystem fileSys = dfs.getFileSystem();
    if (!fileSys.mkdirs(inDir)) {
      throw new IOException("Mkdirs failed to create " + inDir.toString());
    }
    UtilsForTests.writeFile(dfs.getNameNode(), conf,
                            new Path(inDir + "/file"), (short) 1);
    // start mr cluster; a single per-job strike blacklists a tracker globally
    JobConf jtConf = new JobConf();
    jtConf.setInt("mapred.max.tracker.blacklists", 1);
    mr = new MiniMRCluster(3, fileSys.getUri().toString(),
                           1, null, hosts, jtConf);
    // job configuration: one task failure on a tracker blacklists it for the job
    JobConf mrConf = mr.createJobConf();
    JobConf job = new JobConf(mrConf);
    job.setInt("mapred.max.tracker.failures", 1);
    job.setNumMapTasks(30);
    job.setNumReduceTasks(0);
    job.setMapperClass(SleepJobFailOnHost.class); // mapper fails on a specific host
    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    job.setOutputFormat(NullOutputFormat.class);
    job.setInputFormat(SleepInputFormat.class);
    FileInputFormat.setInputPaths(job, inDir);
    FileOutputFormat.setOutputPath(job, outDir);
    // first run: job succeeds, the faulty host is blacklisted exactly once
    JobClient jc = new JobClient(mrConf);
    RunningJob running = JobClient.runJob(job);
    assertEquals("Job failed", JobStatus.SUCCEEDED, running.getJobState());
    assertEquals("Didn't blacklist the host", 1,
        jc.getClusterStatus().getBlacklistedTrackers());
    assertEquals("Fault count should be 1", 1, mr.getFaultCount(hosts[0]));
    // run the same job once again
    // there should be no change in blacklist count
    running = JobClient.runJob(job);
    assertEquals("Job failed", JobStatus.SUCCEEDED, running.getJobState());
    assertEquals("Didn't blacklist the host", 1,
        jc.getClusterStatus().getBlacklistedTrackers());
    assertEquals("Fault count should be 1", 1, mr.getFaultCount(hosts[0]));
  } finally {
    // tear down mini clusters so threads/ports don't leak into later tests
    if (mr != null) {
      mr.shutdown();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:50,代码来源:TestTrackerBlacklistAcrossJobs.java
示例2: testBlacklistAcrossJobs
import org.apache.hadoop.examples.SleepJob.SleepInputFormat; //导入依赖的package包/类
/**
 * Verifies that blacklisting a tracker is idempotent across jobs: one run of a
 * host-failing job blacklists the tracker once, and a second run of the same
 * job leaves the blacklist count unchanged.
 *
 * @throws IOException if the mini clusters or the job cannot be set up/run
 */
public void testBlacklistAcrossJobs() throws IOException {
  // some environments launch this test without a log dir; provide one so the
  // task logs have somewhere to go
  if (System.getProperty("hadoop.log.dir") == null) {
    String base = new File(".").getAbsolutePath();
    System.setProperty("hadoop.log.dir", new Path(base).toString() + "/logs");
  }
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  try {
    Configuration conf = new Configuration();
    // setup dfs and input
    dfs = new MiniDFSCluster(conf, 1, true, null, hosts);
    FileSystem fileSys = dfs.getFileSystem();
    if (!fileSys.mkdirs(inDir)) {
      throw new IOException("Mkdirs failed to create " + inDir.toString());
    }
    UtilsForTests.writeFile(dfs.getNameNode(), conf,
                            new Path(inDir + "/file"), (short) 1);
    // start mr cluster; a single per-job strike blacklists a tracker globally
    JobConf jtConf = new JobConf();
    jtConf.setInt("mapred.max.tracker.blacklists", 1);
    mr = new MiniMRCluster(3, fileSys.getUri().toString(),
                           1, null, hosts, jtConf);
    // job configuration: one task failure on a tracker blacklists it for the job
    JobConf mrConf = mr.createJobConf();
    JobConf job = new JobConf(mrConf);
    job.setInt("mapred.max.tracker.failures", 1);
    job.setNumMapTasks(30);
    job.setNumReduceTasks(0);
    job.setMapperClass(SleepJobFailOnHost.class); // mapper fails on a specific host
    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    job.setOutputFormat(NullOutputFormat.class);
    job.setInputFormat(SleepInputFormat.class);
    FileInputFormat.setInputPaths(job, inDir);
    FileOutputFormat.setOutputPath(job, outDir);
    // first run: job succeeds, the faulty host is blacklisted exactly once
    JobClient jc = new JobClient(mrConf);
    RunningJob running = JobClient.runJob(job);
    assertEquals("Job failed", JobStatus.SUCCEEDED, running.getJobState());
    assertEquals("Didn't blacklist the host", 1,
        jc.getClusterStatus().getBlacklistedTrackers());
    assertEquals("Fault count should be 1", 1, mr.getFaultCount(hosts[0]));
    // run the same job once again
    // there should be no change in blacklist count
    running = JobClient.runJob(job);
    assertEquals("Job failed", JobStatus.SUCCEEDED, running.getJobState());
    assertEquals("Didn't blacklist the host", 1,
        jc.getClusterStatus().getBlacklistedTrackers());
    assertEquals("Fault count should be 1", 1, mr.getFaultCount(hosts[0]));
  } finally {
    // tear down mini clusters so threads/ports don't leak into later tests
    if (mr != null) {
      mr.shutdown();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:54,代码来源:TestTrackerBlacklistAcrossJobs.java
示例3: testBlacklistAcrossJobs
import org.apache.hadoop.examples.SleepJob.SleepInputFormat; //导入依赖的package包/类
/**
 * Verifies graylisting semantics across jobs on forks where heuristic
 * blacklisting became graylisting: after one run the host is graylisted (not
 * blacklisted), a second run keeps both counts unchanged, while the per-host
 * fault count grows by one per run.
 *
 * @throws IOException if the mini clusters or the job cannot be set up/run
 */
public void testBlacklistAcrossJobs() throws IOException {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  try {
    Configuration conf = new Configuration();
    // set up dfs and input
    dfs = new MiniDFSCluster(conf, 1, true, null, hosts);
    FileSystem fileSys = dfs.getFileSystem();
    if (!fileSys.mkdirs(inDir)) {
      throw new IOException("Mkdirs failed to create " + inDir.toString());
    }
    UtilsForTests.writeFile(dfs.getNameNode(), conf,
                            new Path(inDir + "/file"), (short) 1);
    // start mr cluster; a single per-job strike graylists a tracker
    JobConf jtConf = new JobConf();
    jtConf.setInt("mapred.max.tracker.blacklists", 1);
    mr = new MiniMRCluster(3, fileSys.getUri().toString(),
                           1, null, hosts, jtConf);
    // set up job configuration: one task failure blacklists the tracker per-job
    JobConf mrConf = mr.createJobConf();
    JobConf job = new JobConf(mrConf);
    job.setInt("mapred.max.tracker.failures", 1);
    job.setNumMapTasks(30);
    job.setNumReduceTasks(0);
    job.setMapperClass(SleepJobFailOnHost.class); // mapper fails on a specific host
    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    job.setOutputFormat(NullOutputFormat.class);
    job.setInputFormat(SleepInputFormat.class);
    FileInputFormat.setInputPaths(job, inDir);
    FileOutputFormat.setOutputPath(job, outDir);
    // first run: job succeeds, host is graylisted (not blacklisted)
    JobClient jc = new JobClient(mrConf);
    RunningJob running = JobClient.runJob(job);
    assertEquals("Job failed", JobStatus.SUCCEEDED, running.getJobState());
    // heuristic blacklisting is graylisting as of 0.20.Fred
    assertEquals("Blacklisted the host", 0,
        jc.getClusterStatus().getBlacklistedTrackers());
    assertEquals("Didn't graylist the host", 1,
        jc.getClusterStatus().getGraylistedTrackers());
    assertEquals("Fault count should be 1", 1, mr.getFaultCount(hosts[0]));
    // run the same job once again
    // there should be no change in blacklist or graylist count, but fault
    // count (per-job blacklistings) should go up by one
    running = JobClient.runJob(job);
    assertEquals("Job failed", JobStatus.SUCCEEDED, running.getJobState());
    assertEquals("Blacklisted the host", 0,
        jc.getClusterStatus().getBlacklistedTrackers());
    assertEquals("Didn't graylist the host", 1,
        jc.getClusterStatus().getGraylistedTrackers());
    // previously this asserted 1, but makes no sense: each per-job
    // blacklisting counts as a new fault, so 2 runs => 2 faults:
    assertEquals("Fault count should be 2", 2, mr.getFaultCount(hosts[0]));
  } finally {
    // tear down mini clusters so threads/ports don't leak into later tests
    if (mr != null) {
      mr.shutdown();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:58,代码来源:TestTrackerBlacklistAcrossJobs.java
示例4: testBlacklistAcrossJobs
import org.apache.hadoop.examples.SleepJob.SleepInputFormat; //导入依赖的package包/类
/**
 * Verifies that blacklisting a tracker is idempotent across jobs: the first
 * run of a host-failing job blacklists the tracker once, and re-running the
 * identical job leaves the blacklist and fault counts unchanged.
 *
 * @throws IOException if the mini clusters or the job cannot be set up/run
 */
public void testBlacklistAcrossJobs() throws IOException {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  try {
    Configuration conf = new Configuration();
    // setup dfs and input
    dfs = new MiniDFSCluster(conf, 1, true, null, hosts);
    FileSystem fileSys = dfs.getFileSystem();
    if (!fileSys.mkdirs(inDir)) {
      throw new IOException("Mkdirs failed to create " + inDir.toString());
    }
    UtilsForTests.writeFile(dfs.getNameNode(), conf,
                            new Path(inDir + "/file"), (short) 1);
    // start mr cluster; a single per-job strike blacklists a tracker globally
    JobConf jtConf = new JobConf();
    jtConf.setInt("mapred.max.tracker.blacklists", 1);
    mr = new MiniMRCluster(3, fileSys.getUri().toString(),
                           1, null, hosts, jtConf);
    // job configuration: one task failure on a tracker blacklists it for the job
    JobConf mrConf = mr.createJobConf();
    JobConf job = new JobConf(mrConf);
    job.setInt("mapred.max.tracker.failures", 1);
    job.setNumMapTasks(30);
    job.setNumReduceTasks(0);
    job.setMapperClass(SleepJobFailOnHost.class); // mapper fails on a specific host
    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    job.setOutputFormat(NullOutputFormat.class);
    job.setInputFormat(SleepInputFormat.class);
    FileInputFormat.setInputPaths(job, inDir);
    FileOutputFormat.setOutputPath(job, outDir);
    // first run: job succeeds, the faulty host is blacklisted exactly once
    JobClient jc = new JobClient(mrConf);
    RunningJob running = JobClient.runJob(job);
    assertEquals("Job failed", JobStatus.SUCCEEDED, running.getJobState());
    assertEquals("Didn't blacklist the host", 1,
        jc.getClusterStatus().getBlacklistedTrackers());
    assertEquals("Fault count should be 1", 1, mr.getFaultCount(hosts[0]));
    // run the same job once again
    // there should be no change in blacklist count
    running = JobClient.runJob(job);
    assertEquals("Job failed", JobStatus.SUCCEEDED, running.getJobState());
    assertEquals("Didn't blacklist the host", 1,
        jc.getClusterStatus().getBlacklistedTrackers());
    assertEquals("Fault count should be 1", 1, mr.getFaultCount(hosts[0]));
  } finally {
    // tear down mini clusters so threads/ports don't leak into later tests
    if (mr != null) {
      mr.shutdown();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
开发者ID:thisisvoa,项目名称:hadoop-0.20,代码行数:50,代码来源:TestTrackerBlacklistAcrossJobs.java
注:本文中的org.apache.hadoop.examples.SleepJob.SleepInputFormat类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论