本文整理汇总了Java中org.apache.hadoop.examples.SleepJob类的典型用法代码示例。如果您正苦于以下问题:Java SleepJob类的具体用法?Java SleepJob怎么用?Java SleepJob使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
SleepJob类属于org.apache.hadoop.examples包,在下文中一共展示了SleepJob类的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: submitJobAsUser
import org.apache.hadoop.examples.SleepJob; //导入依赖的package包/类
/**
 * Submits a sleep job with 1 map task that sleeps for a long time as the
 * given user. (NOTE(review): the original comment said "2000 sec"; the
 * sleep arguments below are passed straight to
 * {@code SleepJob.setupJobConf} — confirm the unit against that API.)
 * @param clusterConf cluster configuration used to build the JobClient
 * @param user the jobOwner the job is submitted as
 * @return RunningJob that is started
 * @throws IOException if job submission fails
 * @throws InterruptedException if the doAs call is interrupted
 */
private RunningJob submitJobAsUser(final JobConf clusterConf, String user)
    throws IOException, InterruptedException {
  UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting(user, new String[] {});
  // Type the action as RunningJob so no unchecked cast of the doAs result
  // is needed.
  return ugi.doAs(new PrivilegedExceptionAction<RunningJob>() {
    @Override
    public RunningJob run() throws Exception {
      JobClient jobClient = new JobClient(clusterConf);
      SleepJob sleepJob = new SleepJob();
      sleepJob.setConf(clusterConf);
      // 1 map, 0 reduces; sleep parameters keep the map task alive long
      // enough for the caller to exercise a live job.
      JobConf jobConf = sleepJob.setupJobConf(1, 0, 2000, 1000, 1000, 1000);
      return jobClient.submitJob(jobConf);
    }
  });
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:26,代码来源:TestJobACLs.java
示例2: doAnAction
import org.apache.hadoop.examples.SleepJob; //导入依赖的package包/类
@Override
public void doAnAction() throws Exception {
  // Announce the submission attempt.
  System.out.println("==============================\n"
      + "Submitting job\n"
      + "==================================");
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(conf);
  JobConf submitConf = sleepJob.setupJobConf(1, 0, 1, 1, 1, 1);
  JobClient client = new JobClient(submitConf);
  final RunningJob running;
  try {
    running = client.submitJob(submitConf);
  } catch (IOException e) {
    // Job submission is not idempotent, so ignore a failure
    System.out.println("==============================\n"
        + "Job submission failed. Ignore.\n"
        + "==================================");
    return;
  }
  System.out.println("==============================\n"
      + "Successfully submitted job " + running.getJobID() + "\n"
      + "==================================");
  // Monitoring returns false when the job did not succeed.
  if (!client.monitorAndPrintJob(submitConf, running)) {
    throw new IOException("Job failed! " + running.getFailureInfo());
  }
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:27,代码来源:TestHAStress.java
示例3: testJobWithInvalidDiskReqs
import org.apache.hadoop.examples.SleepJob; //导入依赖的package包/类
/**
 * Checks that a job whose serialized jobconf exceeds the configured
 * maximum size is rejected at submission time.
 */
public void testJobWithInvalidDiskReqs()
    throws Exception {
  JobConf jtConf = new JobConf();
  // Cap the serialized jobconf size at 1KB so this submission is rejected.
  jtConf.setLong(JobTracker.MAX_USER_JOBCONF_SIZE_KEY, 1 * 1024L);
  mrCluster = new MiniMRCluster(0, "file:///", 0, null, null, jtConf);
  try {
    JobConf clusterConf = mrCluster.createJobConf();
    // No map-memory configuration
    JobConf jobConf = new JobConf(clusterConf);
    String[] args = { "-m", "0", "-r", "0", "-mt", "0", "-rt", "0" };
    try {
      ToolRunner.run(jobConf, new SleepJob(), args);
      // Reaching this line means the oversized jobconf was accepted.
      fail("Job submission should have been rejected for oversized jobconf");
    } catch (RemoteException re) {
      System.out.println("Exception " + StringUtils.stringifyException(re));
    }
  } finally {
    // Tear the mini cluster down even when an assertion above fails,
    // so later tests do not inherit a running cluster.
    mrCluster.shutdown();
    mrCluster = null;
  }
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:26,代码来源:TestSubmitJob.java
示例4: runJobAndVerifyFailure
import org.apache.hadoop.examples.SleepJob; //导入依赖的package包/类
/**
 * Runs a zero-task sleep job with the given conf and asserts that it is
 * rejected with a RemoteException whose message mentions the supplied
 * memory limits and expected text.
 */
private void runJobAndVerifyFailure(JobConf jobConf, long memForMapTasks,
    long memForReduceTasks, String expectedMsg)
    throws Exception,
    IOException {
  String[] sleepArgs = { "-m", "0", "-r", "0", "-mt", "0", "-rt", "0" };
  boolean sawRemoteException = false;
  String observedMsg = null;
  try {
    ToolRunner.run(jobConf, new SleepJob(), sleepArgs);
  } catch (RemoteException re) {
    sawRemoteException = true;
    observedMsg = re.unwrapRemoteException().getMessage();
  }
  assertTrue(sawRemoteException);
  assertNotNull(observedMsg);
  String overallExpectedMsg = "(" + memForMapTasks + " memForMapTasks "
      + memForReduceTasks + " memForReduceTasks): " + expectedMsg;
  assertTrue("Observed message - " + observedMsg
      + " - doesn't contain expected message - " + overallExpectedMsg,
      observedMsg.contains(overallExpectedMsg));
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:24,代码来源:TestSubmitJob.java
示例5: testJobSummaryInfoOfKilledJob
import org.apache.hadoop.examples.SleepJob; //导入依赖的package包/类
/**
 * Verifying the job summary information for killed job.
 */
@Test
public void testJobSummaryInfoOfKilledJob() throws IOException,
    InterruptedException {
  SleepJob job = new SleepJob();
  conf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens", false);
  job.setConf(conf);
  conf = job.setupJobConf(2, 1, 4000, 4000, 100, 100);
  JobConf jobConf = new JobConf(conf);
  RunningJob runJob = jobClient.submitJob(jobConf);
  JobID jobId = runJob.getID();
  Assert.assertTrue("Job has not been started for 1 min.",
      jtClient.isJobStarted(jobId));
  jobClient.killJob(jobId);
  LOG.info("Waiting till the job is completed...");
  Assert.assertTrue("Job has not been completed for 1 min.",
      jtClient.isJobStopped(jobId));
  JobInfo jInfo = remoteJTClient.getJobInfo(jobId);
  // assertEquals takes (message, expected, actual); the job was just
  // killed, so the expected run state is KILLED — the original had the
  // arguments swapped and a message claiming the job should have succeeded.
  Assert.assertEquals("Job has not been killed",
      JobStatus.KILLED, jInfo.getStatus().getRunState());
  verifyJobSummaryInfo(jInfo, jobId);
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:25,代码来源:TestJobSummary.java
示例6: runSleepJob
import org.apache.hadoop.examples.SleepJob; //导入依赖的package包/类
// Submits a 5-map/1-reduce sleep job, waits for it to start, and
// (optionally) signals all its tasks and waits for it to stop.
// Returns the id of the submitted job.
public JobID runSleepJob(boolean signalJob) throws Exception{
SleepJob job = new SleepJob();
job.setConf(conf);
conf = job.setupJobConf(5, 1, 100, 5, 100, 5);
JobConf jconf = new JobConf(conf);
// Controls the job till all verification is done.
// NOTE(review): this configures `conf` AFTER `jconf` was copied from it,
// so the control action may not be present in the submitted `jconf` —
// confirm whether it should act on `jconf` (or run before the copy).
FinishTaskControlAction.configureControlActionForJob(conf);
// Submitting the job through the JobTracker client.
RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
JobID jobId = rJob.getID();
JobInfo jInfo = remoteJTClient.getJobInfo(jobId);
LOG.info("jInfo is :" + jInfo);
// isJobStarted polls with a timeout (message suggests about a minute).
boolean jobStarted = cluster.getJTClient().isJobStarted(jobId);
Assert.assertTrue("Job has not started even after a minute",
jobStarted );
if(signalJob) {
// Release all tasks, then wait until the job reaches a stopped state.
cluster.signalAllTasks(jobId);
Assert.assertTrue("Job has not stopped yet",
cluster.getJTClient().isJobStopped(jobId));
}
return jobId;
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:24,代码来源:TestCMExceptionDuringRunJob.java
示例7: setupJobAndRun
import org.apache.hadoop.examples.SleepJob; //导入依赖的package包/类
/**
 * Submits a 3-map/1-reduce sleep job and blocks until the job is RUNNING
 * with at least 40% map progress.
 *
 * @throws IOException if job submission or status lookup fails
 */
private void setupJobAndRun() throws IOException {
  SleepJob job = new SleepJob();
  job.setConf(conf);
  conf = job.setupJobConf(3, 1, 60000, 100, 60000, 100);
  JobConf jobConf = new JobConf(conf);
  cleanup(outputDir, conf);
  jtClient = cluster.getJTClient();
  JobClient client = jtClient.getClient();
  wovenClient = cluster.getJTClient().getProxy();
  RunningJob runJob = client.submitJob(jobConf);
  jID = runJob.getID();
  jInfo = wovenClient.getJobInfo(jID);
  Assert.assertNotNull("Job information is null", jInfo);
  Assert.assertTrue("Job has not been started for 1 min.",
      jtClient.isJobStarted(jID));
  JobStatus jobStatus = jInfo.getStatus();
  // Make sure that job should run and completes 40%.
  // The original condition joined the two checks with &&, which exits the
  // loop as soon as EITHER is satisfied (e.g. RUNNING at 0% progress);
  // `||` keeps waiting until the state is RUNNING AND map progress >= 40%.
  while (jobStatus.getRunState() != JobStatus.RUNNING ||
      jobStatus.mapProgress() < 0.4f) {
    UtilsForTests.waitFor(100);
    jobStatus = wovenClient.getJobInfo(jID).getStatus();
  }
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:24,代码来源:TestLostTaskTracker.java
示例8: runHighRamJob
import org.apache.hadoop.examples.SleepJob; //导入依赖的package包/类
/**
 * The method runs the high ram job
 * @param conf configuration for running the job
 * @param jobClient instance
 * @param remoteJTClient instance
 * @param assertMessage message used if the final job-state assertion fails
 * @return the job id of the high ram job
 * @throws Exception is thrown when the method fails to run the high ram job
 */
public JobID runHighRamJob(Configuration conf, JobClient jobClient,
    JTProtocol remoteJTClient, String assertMessage) throws Exception {
  SleepJob job = new SleepJob();
  // Raise the cluster memory ceilings so the 2048MB per-task requests
  // configured below are accepted.
  String[] jobArgs = {"-D", "mapred.cluster.max.map.memory.mb=2048",
      "-D", "mapred.cluster.max.reduce.memory.mb=2048",
      "-D", "mapred.cluster.map.memory.mb=1024",
      "-D", "mapreduce.job.complete.cancel.delegation.tokens=false",
      "-D", "mapred.cluster.reduce.memory.mb=1024",
      "-m", "6",
      "-r", "2",
      "-mt", "2000",
      "-rt", "2000",
      "-recordt", "100"};
  JobConf jobConf = new JobConf(conf);
  jobConf.setMemoryForMapTask(2048);
  jobConf.setMemoryForReduceTask(2048);
  int exitCode = ToolRunner.run(jobConf, job, jobArgs);
  Assert.assertEquals("Exit Code:", 0, exitCode);
  UtilsForTests.waitFor(1000);
  // Assumes the just-finished job is first in getAllJobs() — TODO confirm.
  JobID jobId = jobClient.getAllJobs()[0].getJobID();
  JobInfo jInfo = remoteJTClient.getJobInfo(jobId);
  // assertEquals takes (message, expected, actual); the original had the
  // expected and actual arguments swapped.
  Assert.assertEquals(assertMessage,
      JobStatus.SUCCEEDED, jInfo.getStatus().getRunState());
  return jobId;
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:34,代码来源:HighRamJobHelper.java
示例9: testLastTaskSpeculation
import org.apache.hadoop.examples.SleepJob; //导入依赖的package包/类
/**
 * Runs a speculative 1-map/1-reduce sleep job on a two-tracker mini
 * Corona cluster and verifies that speculative attempts are launched.
 */
public void testLastTaskSpeculation() throws Exception {
  corona = new MiniCoronaCluster.Builder().numTaskTrackers(2).build();
  JobConf conf = corona.createJobConf();
  conf.setSpeculativeExecution(true);
  // Minimal lags/intervals so speculation triggers quickly in the test.
  conf.setMapSpeculativeLag(1L);
  conf.setReduceSpeculativeLag(1L);
  conf.setLong(JobInProgress.REFRESH_TIMEOUT, 100L);
  conf.setLong(CoronaTaskTracker.HEART_BEAT_INTERVAL_KEY, 100L);
  conf.setLong(CoronaJobTracker.HEART_BEAT_INTERVAL_KEY, 100L);
  long start = System.currentTimeMillis();
  SleepJob sleepJob = new SleepJob();
  ToolRunner.run(conf, sleepJob,
      new String[]{ "-m", "1", "-r", "1",
          "-mt", "5000", "-rt", "5000",
          "-speculation"});
  long end = System.currentTimeMillis();
  // Expect 2 launched attempts each (original + speculative) for the map
  // and the reduce.
  verifyLaunchedTasks(sleepJob, 2, 2);
  new ClusterManagerMetricsVerifier(corona.getClusterManager(),
      2, 2, 2, 2, 2, 2, 0, 0).verifyAll();
  // Fixed: the original log line named a different test
  // ("testOneTaskWithOneTaskTracker"), a copy-paste error.
  LOG.info("Time spent for testLastTaskSpeculation:" +
      (end - start));
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:23,代码来源:TestMiniCoronaSpeculativeTask.java
注:本文中的org.apache.hadoop.examples.SleepJob类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论