Java ResourceCalculatorPlugin Class Code Examples


This article collects typical usage examples of the Java class org.apache.hadoop.util.ResourceCalculatorPlugin. If you are wondering what the ResourceCalculatorPlugin class is for, or how it is used in practice, the selected code examples below should help.



The ResourceCalculatorPlugin class belongs to the org.apache.hadoop.util package. A total of 13 code examples of the class are shown below, sorted by popularity by default.
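Before the examples, here is a minimal sketch of how a ResourceCalculatorPlugin instance is typically obtained and queried. It assumes the MR1-era org.apache.hadoop.util.ResourceCalculatorPlugin API used by the examples below: the factory call ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, conf) and the getProcResourceValues().getCumulativeCpuTime() accessor both appear in Examples 5 and 6; the demo class name and the standalone main() wrapper are only for illustration.

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.util.ResourceCalculatorPlugin;

public class ResourceCalculatorPluginDemo {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // Passing null as the plugin class lets the factory pick a
    // platform-appropriate implementation (it may return null when none
    // is available), mirroring Task.initialize() in Examples 5 and 6.
    ResourceCalculatorPlugin plugin =
        ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
    if (plugin != null) {
      // Per-process resource values, read the same way as in the Task examples.
      long cpuTime =
          plugin.getProcResourceValues().getCumulativeCpuTime();
      System.out.println("Cumulative CPU time of this process: " + cpuTime);
    } else {
      System.out.println("No ResourceCalculatorPlugin is available on this platform.");
    }
  }
}

In the TaskTracker-based examples that follow, the plugin class is instead supplied through configuration (via TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN or MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY, depending on the Hadoop branch), which is how the tests swap in DummyResourceCalculatorPlugin.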

Example 1: testDefaultResourceValues

import org.apache.hadoop.util.ResourceCalculatorPlugin; // import the required package/class
/**
 * Test that verifies default values are configured and reported correctly.
 * 
 * @throws Exception
 */
public void testDefaultResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  try {
    // Memory values are disabled by default.
    conf.setClass(
        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,       
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setClass(
        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 25, Source: TestTTResourceReporting.java


Example 2: testDefaultResourceValues

import org.apache.hadoop.util.ResourceCalculatorPlugin; // import the required package/class
/**
 * Test that verifies default values are configured and reported correctly.
 * 
 * @throws Exception
 */
@Test(timeout=60000)
public void testDefaultResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  try {
    // Memory values are disabled by default.
    conf.setClass(
        org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setClass(
        org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 26, Source: TestTTResourceReporting.java


Example 3: getMaxActualSlots

import org.apache.hadoop.util.ResourceCalculatorPlugin; // import the required package/class
@Override
int getMaxActualSlots(JobConf conf, int numCpuOnTT, TaskType type) {
  Map<Integer, Map<ResourceType, Integer>> cpuToResourcePartitioning =
    CoronaConf.getUncachedCpuToResourcePartitioning(conf);
  if (numCpuOnTT == ResourceCalculatorPlugin.UNAVAILABLE) {
    numCpuOnTT = 1;
  }
  Map<ResourceType, Integer> resourceTypeToCountMap =
    ClusterNode.getResourceTypeToCountMap(numCpuOnTT,
                                          cpuToResourcePartitioning);
  switch (type) {
  case MAP:
    return resourceTypeToCountMap.get(ResourceType.MAP);
  case REDUCE:
    return resourceTypeToCountMap.get(ResourceType.REDUCE);
  default:
    throw new RuntimeException("getMaxActualSlots: Illegal type " + type);
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 20, Source: CoronaTaskTracker.java


Example 4: testDefaultResourceValues

import org.apache.hadoop.util.ResourceCalculatorPlugin; // import the required package/class
/**
 * Test that verifies default values are configured and reported correctly.
 * 
 * @throws Exception
 */
public void testDefaultResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  try {
    // Memory values are disabled by default.
    conf.setClass(
        org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setClass(
        org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
 
Developer: iVCE, Project: RDFS, Lines: 25, Source: TestTTResourceReporting.java


Example 5: initialize

import org.apache.hadoop.util.ResourceCalculatorPlugin; // import the required package/class
public void initialize(JobConf job, JobID id, 
                       Reporter reporter,
                       boolean useNewApi) throws IOException, 
                                                 ClassNotFoundException,
                                                 InterruptedException {
  jobContext = new JobContext(job, id, reporter);
  taskContext = new TaskAttemptContext(job, taskId, reporter);
  if (getState() == TaskStatus.State.UNASSIGNED) {
    setState(TaskStatus.State.RUNNING);
  }
  if (useNewApi) {
    LOG.debug("using new api for output committer");
    outputFormat =
      ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = conf.getOutputCommitter();
  }
  Path outputPath = FileOutputFormat.getOutputPath(conf);
  if (outputPath != null) {
    if ((committer instanceof FileOutputCommitter)) {
      FileOutputFormat.setWorkOutputPath(conf, 
        ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext));
    } else {
      FileOutputFormat.setWorkOutputPath(conf, outputPath);
    }
  }
  committer.setupTask(taskContext);
  Class<? extends ResourceCalculatorPlugin> clazz = conf.getClass(
      TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
          null, ResourceCalculatorPlugin.class);
  resourceCalculator = ResourceCalculatorPlugin
          .getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator);
  if (resourceCalculator != null) {
    initCpuCumulativeTime =
      resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
  }
}
 
Developer: iVCE, Project: RDFS, Lines: 40, Source: Task.java


Example 6: initialize

import org.apache.hadoop.util.ResourceCalculatorPlugin; // import the required package/class
public void initialize(JobConf job, JobID id, 
                       Reporter reporter,
                       boolean useNewApi) throws IOException, 
                                                 ClassNotFoundException,
                                                 InterruptedException {
  jobContext = new JobContextImpl(job, id, reporter);
  taskContext = new TaskAttemptContextImpl(job, taskId, reporter);
  if (getState() == TaskStatus.State.UNASSIGNED) {
    setState(TaskStatus.State.RUNNING);
  }
  if (useNewApi) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("using new api for output committer");
    }
    outputFormat =
      ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = conf.getOutputCommitter();
  }
  Path outputPath = FileOutputFormat.getOutputPath(conf);
  if (outputPath != null) {
    if ((committer instanceof FileOutputCommitter)) {
      FileOutputFormat.setWorkOutputPath(conf, 
        ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext));
    } else {
      FileOutputFormat.setWorkOutputPath(conf, outputPath);
    }
  }
  committer.setupTask(taskContext);
  Class<? extends ResourceCalculatorPlugin> clazz =
      conf.getClass(TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
          null, ResourceCalculatorPlugin.class);
  resourceCalculator = ResourceCalculatorPlugin
          .getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator);
  if (resourceCalculator != null) {
    initCpuCumulativeTime =
      resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 42, Source: Task.java


Example 7: testConfiguredResourceValues

import org.apache.hadoop.util.ResourceCalculatorPlugin; // import the required package/class
/**
 * Test that verifies that configured values are reported correctly.
 * 
 * @throws Exception
 */
public void testConfiguredResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  conf.setLong("totalVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("totalPmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("mapSlotMemorySize", 1 * 512L);
  conf.setLong("reduceSlotMemorySize", 1 * 1024L);
  conf.setLong("availableVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("availablePmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("cumulativeCpuTime", 10000L);
  conf.setLong("cpuFrequency", 2000000L);
  conf.setInt("numProcessors", 8);
  conf.setFloat("cpuUsage", 15.5F);
  conf.setLong("procCumulativeCpuTime", 1000L);
  conf.setLong("procVirtualMemorySize", 2 * 1024 * 1024 * 1024L);
  conf.setLong("procPhysicalMemorySize", 1024 * 1024 * 1024L);

  conf.setClass(
      org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,       
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
  conf.setLong(DummyResourceCalculatorPlugin.MAXVMEM_TESTING_PROPERTY,
      4 * 1024 * 1024 * 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
      2 * 1024 * 1024 * 1024L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, 512L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.CUMULATIVE_CPU_TIME, 10000L);
  conf.setLong(DummyResourceCalculatorPlugin.CPU_FREQUENCY, 2000000L);
  conf.setInt(DummyResourceCalculatorPlugin.NUM_PROCESSORS, 8);
  conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 15.5F);
  try {
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setMemoryForMapTask(1 * 1024L);
    jobConf.setMemoryForReduceTask(2 * 1024L);
    jobConf.setClass(
      org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_CUMULATIVE_CPU_TIME, 1000L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_VMEM_TESTING_PROPERTY,
                    2 * 1024 * 1024 * 1024L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_PMEM_TESTING_PROPERTY,
                    1024 * 1024 * 1024L);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 55, Source: TestTTResourceReporting.java


Example 8: initialize

import org.apache.hadoop.util.ResourceCalculatorPlugin; // import the required package/class
public void initialize(JobConf job, JobID id, 
                       Reporter reporter,
                       boolean useNewApi) throws IOException, 
                                                 ClassNotFoundException,
                                                 InterruptedException {
  jobContext = new JobContext(job, id, reporter);
  taskContext = new TaskAttemptContext(job, taskId, reporter);
  if (getState() == TaskStatus.State.UNASSIGNED) {
    setState(TaskStatus.State.RUNNING);
  }
  if (useNewApi) {
    LOG.debug("using new api for output committer");
    outputFormat =
      ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = conf.getOutputCommitter();
  }
  Path outputPath = FileOutputFormat.getOutputPath(conf);
  if (outputPath != null) {
    if ((committer instanceof FileOutputCommitter)) {
      FileOutputFormat.setWorkOutputPath(conf, 
        ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext));
    } else {
      FileOutputFormat.setWorkOutputPath(conf, outputPath);
    }
  }
  committer.setupTask(taskContext);
  Class<? extends ResourceCalculatorPlugin> clazz = conf.getClass(
      TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
          null, ResourceCalculatorPlugin.class);
  resourceCalculator = ResourceCalculatorPlugin
          .getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator);
  if (resourceCalculator != null) {
    initCpuCumulativeTime =
      resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
  }
  
  jmxThreadInfoTracker = new JMXThreadBasedMetrics();
  jmxThreadInfoTracker.registerThreadToTask(
      "MAIN_TASK", Thread.currentThread().getId());
  this.initJvmCpuCumulativeTime = 
      jmxThreadInfoTracker.getCumulativeCPUTime();
  
  cgResourceTracker = new CGroupResourceTracker (
                        job, CGroupResourceTracker.RESOURCE_TRAKCER_TYPE.TASK, 
                        taskId.toString(), resourceCalculator);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 50, Source: Task.java


Example 9: CGroupResourceTracker

import org.apache.hadoop.util.ResourceCalculatorPlugin; // import the required package/class
public CGroupResourceTracker(
         JobConf conf, RESOURCE_TRAKCER_TYPE trackerType, 
         String target,
         ResourceCalculatorPlugin plugin) {
  this.resourceCalculaotr = plugin;
  
  boolean taskMemoryControlGroupEnabled = conf.getBoolean(
      TaskTracker.MAPRED_TASKTRACKER_CGROUP_MEM_ENABLE_PROPERTY,
      TaskTracker.DEFAULT_MAPRED_TASKTRACKER_CGROUP_MEM_ENABLE_PROPERTY);
  
  if (taskMemoryControlGroupEnabled) {
    if (MemoryControlGroup.isAvailable()) {
      switch (trackerType) {
        case JOB_TRACKER:
          String jtRootpath = conf.get(
              TaskTrackerMemoryControlGroup.CGROUP_MEM_JT_ROOT, 
              TaskTrackerMemoryControlGroup.DEFAULT_JT_ROOT);
          memControlGroup= new MemoryControlGroup(jtRootpath);
          
          if (isMemTrackerAvailable()) {
            LOG.info("A CGroupResourceTracker for JOB_TRACKER created.");
          }
          break;
        case TASK_TRACKER:
          String ttRootpath = conf.get(
              TaskTrackerMemoryControlGroup.CGROUP_MEM_TT_ROOT, 
              TaskTrackerMemoryControlGroup.DEFAULT_TT_ROOT);
            memControlGroup = new MemoryControlGroup(ttRootpath);
            
          if (isMemTrackerAvailable()) {
            LOG.info("A CGroupResourceTracker for TASK_TRACKER created.");
          }
          break;
        case TASK:
          String rootpath = conf.get(
            TaskTrackerMemoryControlGroup.CGROUP_MEM_ROOT_PROPERTY, 
            TaskTrackerMemoryControlGroup.DEFAULT_CGROUP_MEM_ROOT);
          MemoryControlGroup container = new MemoryControlGroup(rootpath);
          memControlGroup = 
            container.getSubGroup(target);
          
          if (isMemTrackerAvailable()) {
            LOG.info("A CGroupResourceTracker for TASK:" + 
              target + " created.");
          }
          break;
      }
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 51, Source: CGroupResourceTracker.java


Example 10: TestCpuToMapTasksConfig

import org.apache.hadoop.util.ResourceCalculatorPlugin; // import the required package/class
@Test
public void TestCpuToMapTasksConfig() throws Exception {
  JobConf conf = new JobConf();
  conf.set("mapred.tasktracker.map.tasks.maximum", "3");
  conf.set("mapred.tasktracker.reduce.tasks.maximum", "1");
  // Test with the original settings
  try {
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(3, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(1, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }

  // Test with the # CPU -> mappers settings
  conf.setClass(org.apache.hadoop.mapred.TaskTracker.
      MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
  conf.set("mapred.tasktracker.cpus.to.maptasks", "4:6, 8:9, 16:15");
  conf.set("mapred.tasktracker.cpus.to.reducetasks", "4:3, 8:7, 16:12");

  // 4 CPU -> 6 mappers, 3 reducers
  try {
    conf.set(DummyResourceCalculatorPlugin.NUM_PROCESSORS, "4");
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(6, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(3, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }

  // 8 CPU -> 9 mappers, 7 reduces
  try {
    conf.set(DummyResourceCalculatorPlugin.NUM_PROCESSORS, "8");
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(9, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(7, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }

  // 16 CPU -> 15 mappers, 12 reduces
  try {
    conf.set(DummyResourceCalculatorPlugin.NUM_PROCESSORS, "16");
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(15, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(12, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }

  // 11 CPU -> 3 mappers, 1 reduce (back to default)
  try {
    conf.set(DummyResourceCalculatorPlugin.NUM_PROCESSORS, "11");
    miniMRCluster =
        new MiniMRCluster(1, "file:///", 3, null, null, conf);
    taskTracker = miniMRCluster.getTaskTrackerRunner(0).getTaskTracker();
    Assert.assertEquals(3, taskTracker.getMaxCurrentMapTasks());
    Assert.assertEquals(1, taskTracker.getMaxCurrentReduceTasks());
  } finally {
    if (miniMRCluster != null) {
      miniMRCluster.shutdown();
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 82, Source: TestTTCpuToTaskSlots.java


Example 11: testConfiguredResourceValues

import org.apache.hadoop.util.ResourceCalculatorPlugin; // import the required package/class
/**
 * Test that verifies that configured values are reported correctly.
 * 
 * @throws Exception
 */
@Test(timeout=60000)
public void testConfiguredResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  conf.setLong("totalVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("totalPmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("mapSlotMemorySize", 1 * 512L);
  conf.setLong("reduceSlotMemorySize", 1 * 1024L);
  conf.setLong("availableVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("availablePmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("cumulativeCpuTime", 10000L);
  conf.setLong("cpuFrequency", 2000000L);
  conf.setInt("numProcessors", 8);
  conf.setFloat("cpuUsage", 15.5F);
  conf.setLong("procCumulativeCpuTime", 1000L);
  conf.setLong("procVirtualMemorySize", 1 * 1024 * 1024 * 1024L);
  conf.setLong("procPhysicalMemorySize", 1024 * 1024 * 1024L);

  conf.setClass(
      org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
  conf.setLong(DummyResourceCalculatorPlugin.MAXVMEM_TESTING_PROPERTY,
      4 * 1024 * 1024 * 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
      2 * 1024 * 1024 * 1024L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, 512L);
  conf.setLong(
      JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.CUMULATIVE_CPU_TIME, 10000L);
  conf.setLong(DummyResourceCalculatorPlugin.CPU_FREQUENCY, 2000000L);
  conf.setInt(DummyResourceCalculatorPlugin.NUM_PROCESSORS, 8);
  conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 15.5F);
  try {
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setMemoryForMapTask(4 * 1024L);
    jobConf.setMemoryForReduceTask(4 * 1024L);
    jobConf.setClass(
      org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_CUMULATIVE_CPU_TIME, 1000L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_VMEM_TESTING_PROPERTY,
                    1 * 1024 * 1024 * 1024L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_PMEM_TESTING_PROPERTY,
                    1024 * 1024 * 1024L);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 57, Source: TestTTResourceReporting.java


Example 12: initialize

import org.apache.hadoop.util.ResourceCalculatorPlugin; // import the required package/class
public void initialize(JobConf job, JobID id, 
                       Reporter reporter,
                       boolean useNewApi) throws IOException, 
                                                 ClassNotFoundException,
                                                 InterruptedException {
  jobContext = new JobContext(job, id, reporter);
  taskContext = new TaskAttemptContext(job, taskId, reporter);
  if (getState() == TaskStatus.State.UNASSIGNED) {
    setState(TaskStatus.State.RUNNING);
  }
  if (useNewApi) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("using new api for output committer");
    }
    outputFormat =
      ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = conf.getOutputCommitter();
  }
  Path outputPath = FileOutputFormat.getOutputPath(conf);
  if (outputPath != null) {
    if ((committer instanceof FileOutputCommitter)) {
      FileOutputFormat.setWorkOutputPath(conf, 
        ((FileOutputCommitter)committer).getTempTaskOutputPath(taskContext));
    } else {
      FileOutputFormat.setWorkOutputPath(conf, outputPath);
    }
  }
  committer.setupTask(taskContext);
  Class<? extends ResourceCalculatorPlugin> clazz =
      conf.getClass(TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
          null, ResourceCalculatorPlugin.class);
  resourceCalculator = ResourceCalculatorPlugin
          .getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculator);
  if (resourceCalculator != null) {
    initCpuCumulativeTime =
      resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 42, Source: Task.java


Example 13: testConfiguredResourceValues

import org.apache.hadoop.util.ResourceCalculatorPlugin; // import the required package/class
/**
 * Test that verifies that configured values are reported correctly.
 * 
 * @throws Exception
 */
public void testConfiguredResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  conf.setLong("totalVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("totalPmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("mapSlotMemorySize", 1 * 512L);
  conf.setLong("reduceSlotMemorySize", 1 * 1024L);
  conf.setLong("availableVmemOnTT", 4 * 1024 * 1024 * 1024L);
  conf.setLong("availablePmemOnTT", 2 * 1024 * 1024 * 1024L);
  conf.setLong("cumulativeCpuTime", 10000L);
  conf.setLong("cpuFrequency", 2000000L);
  conf.setInt("numProcessors", 8);
  conf.setFloat("cpuUsage", 15.5F);
  conf.setLong("procCumulativeCpuTime", 1000L);
  conf.setLong("procVirtualMemorySize", 2 * 1024 * 1024 * 1024L);
  conf.setLong("procPhysicalMemorySize", 1024 * 1024 * 1024L);

  conf.setClass(
      org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
  conf.setLong(DummyResourceCalculatorPlugin.MAXVMEM_TESTING_PROPERTY,
      4 * 1024 * 1024 * 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
      2 * 1024 * 1024 * 1024L);
  conf.setLong(JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY, 512L);
  conf.setLong(
      JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY, 1024L);
  conf.setLong(DummyResourceCalculatorPlugin.CUMULATIVE_CPU_TIME, 10000L);
  conf.setLong(DummyResourceCalculatorPlugin.CPU_FREQUENCY, 2000000L);
  conf.setInt(DummyResourceCalculatorPlugin.NUM_PROCESSORS, 8);
  conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 15.5F);
  try {
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setMemoryForMapTask(1 * 1024L);
    jobConf.setMemoryForReduceTask(2 * 1024L);
    jobConf.setClass(
      org.apache.hadoop.mapred.TaskTracker.MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
      DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_CUMULATIVE_CPU_TIME, 1000L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_VMEM_TESTING_PROPERTY,
                    2 * 1024 * 1024 * 1024L);
    jobConf.setLong(DummyResourceCalculatorPlugin.PROC_PMEM_TESTING_PROPERTY,
                    1024 * 1024 * 1024L);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
 
Developer: iVCE, Project: RDFS, Lines: 56, Source: TestTTResourceReporting.java



Note: The org.apache.hadoop.util.ResourceCalculatorPlugin examples in this article are collected from source code and documentation hosting platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects, and copyright remains with the original authors; distribution and use should follow the license of the corresponding project. Do not repost without permission.

