This article collects and summarizes typical usage examples of the Java class org.apache.hadoop.mapred.Task.Counter. If you have been wondering what the Counter class does, how to use it, or what working examples look like, the hand-picked code samples below should help.
The Counter class belongs to the org.apache.hadoop.mapred.Task package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
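A quick orientation before the examples: Task.Counter is the enum of framework-internal task counters (SPILLED_RECORDS, MAP_OUTPUT_BYTES, REDUCE_SKIPPED_RECORDS, and so on), and the snippets below update it through the task's reporter. Application code reaches the same counter machinery through the old-API Reporter; the following is a minimal, illustrative sketch (MyCounters is a hypothetical user enum, not part of Hadoop):

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class CounterSketchMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, LongWritable> {

  // Hypothetical application-level counter group.
  enum MyCounters { EMPTY_LINES }

  @Override
  public void map(LongWritable key, Text value,
                  OutputCollector<Text, LongWritable> out, Reporter reporter)
      throws IOException {
    if (value.getLength() == 0) {
      // Same mechanism the framework uses for its Task.Counter updates.
      reporter.incrCounter(MyCounters.EMPTY_LINES, 1);
    }
    out.collect(value, new LongWritable(1));
  }
}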
Example 1: getResourceUsageMap

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

public Map<ResourceType, List<Long>> getResourceUsageMap() {
  if (this.job == null) {
    return getStdResourceUsageMap();
  }
  Counters counters = job.getCounters();
  Map<ResourceType, List<Long>> resourceUsageMap =
      new HashMap<ResourceType, List<Long>>();
  // Peak memory figures recorded for the map tasks.
  List<Long> mapperUsages = new ArrayList<Long>();
  mapperUsages.add(counters.getCounter(JobInProgress.Counter.MAX_MAP_MEM_BYTES));
  mapperUsages.add(counters.getCounter(JobInProgress.Counter.MAX_MAP_INST_MEM_BYTES));
  mapperUsages.add(counters.getCounter(JobInProgress.Counter.MAX_MAP_RSS_MEM_BYTES));
  // Peak memory figures recorded for the reduce tasks.
  List<Long> reducerUsages = new ArrayList<Long>();
  reducerUsages.add(counters.getCounter(JobInProgress.Counter.MAX_REDUCE_MEM_BYTES));
  reducerUsages.add(counters.getCounter(JobInProgress.Counter.MAX_REDUCE_INST_MEM_BYTES));
  reducerUsages.add(counters.getCounter(JobInProgress.Counter.MAX_REDUCE_RSS_MEM_BYTES));
  resourceUsageMap.put(ResourceType.MAP, mapperUsages);
  resourceUsageMap.put(ResourceType.REDUCE, reducerUsages);
  resourceUsageMap.put(ResourceType.JOBTRACKER, new ArrayList<Long>());
  return resourceUsageMap;
}

Developer ID: rhli, Project: hadoop-EAR, Lines of code: 25, Source: CoronaJobTracker.java
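As a hedged usage sketch for the map returned above (illustrative fragment only: tracker stands in for a CoronaJobTracker reference, and each list holds the MAX_*_MEM_BYTES, MAX_*_INST_MEM_BYTES, and MAX_*_RSS_MEM_BYTES values in the order they were added):

// Illustrative only: dump the peak-memory figures per resource type.
Map<ResourceType, List<Long>> usage = tracker.getResourceUsageMap();
for (Map.Entry<ResourceType, List<Long>> entry : usage.entrySet()) {
  System.out.println(entry.getKey() + " peak memory (bytes): " + entry.getValue());
}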
Example 2: SkippingReduceValuesIterator

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

public SkippingReduceValuesIterator(RawKeyValueIterator in,
    RawComparator<KEY> comparator, Class<KEY> keyClass,
    Class<VALUE> valClass, Configuration conf, TaskReporter reporter,
    TaskUmbilicalProtocol umbilical) throws IOException {
  super(in, comparator, keyClass, valClass, conf, reporter);
  this.umbilical = umbilical;
  // Framework counters tracking how many groups and records were skipped.
  this.skipGroupCounter =
      reporter.getCounter(Counter.REDUCE_SKIPPED_GROUPS);
  this.skipRecCounter =
      reporter.getCounter(Counter.REDUCE_SKIPPED_RECORDS);
  this.toWriteSkipRecs = toWriteSkipRecs() &&
      SkipBadRecords.getSkipOutputPath(conf) != null;
  this.keyClass = keyClass;
  this.valClass = valClass;
  this.reporter = reporter;
  skipIt = getSkipRanges().skipRangeIterator();
  mayBeSkip();
}

Developer ID: iVCE, Project: RDFS, Lines of code: 19, Source: ReduceTask.java
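The iterator above only engages when skip mode is active. Below is a minimal sketch of the client-side switches that turn it on, using the public SkipBadRecords API (the thresholds and output path are illustrative values, not recommendations):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SkipBadRecords;

public class SkipModeSetup {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // Begin skipping after two failed attempts of the same task.
    SkipBadRecords.setAttemptsToStartSkipping(conf, 2);
    // Tolerate up to one bad record per map task and ten bad key groups per reduce task.
    SkipBadRecords.setMapperMaxSkipRecords(conf, 1);
    SkipBadRecords.setReducerMaxSkipGroups(conf, 10);
    // A non-null skip output path is what makes toWriteSkipRecs true in the iterator above.
    SkipBadRecords.setSkipOutputPath(conf, new Path("/tmp/skipped"));
  }
}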
Example 3: MapSpiller

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

MapSpiller(JobConf job, TaskAttemptID tid, TaskReporter rep)
    throws ClassNotFoundException {
  reporter = rep;
  conf = job;
  this.taskId = tid;
  mapOutputFile.setConf(conf);
  mapOutputByteCounter = reporter.getCounter(MAP_OUTPUT_BYTES);
  Counters.Counter combineInputCounter =
      reporter.getCounter(COMBINE_INPUT_RECORDS);
  combineOutputCounter = reporter.getCounter(COMBINE_OUTPUT_RECORDS);
  fileOutputByteCounter = reporter.getCounter(MAP_OUTPUT_MATERIALIZED_BYTES);
  // combiner
  combinerRunner = CombinerRunner.create(conf, taskId,
                                         combineInputCounter,
                                         reporter, null);
  if (combinerRunner != null) {
    combineCollector =
        new CombineOutputCollector(combineOutputCounter, reporter, conf);
  } else {
    combineCollector = null;
  }
  indexCacheList = new ArrayList<SpillRecord>();
  spilledRecordsCounter = reporter.getCounter(Counter.SPILLED_RECORDS);
}

Developer ID: mammothcm, Project: mammoth, Lines of code: 23, Source: DefaultJvmMemoryManager.java
Example 4: finalCounterUpdate

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

public void finalCounterUpdate() {
  // Flush the accumulated spill, sort, and merge statistics into the task counters.
  setCounterValue(Counter.MAP_SPILL_NUMBER, numSpillsVal);
  setCounterValue(Counter.MAP_SPILL_CPU, mapSpillCPUVal);
  setCounterValue(Counter.MAP_SPILL_WALLCLOCK, mapSpillWallClockVal);
  setCounterValue(Counter.MAP_SPILL_BYTES, mapSpillBytesVal);
  setCounterValue(Counter.MAP_MEM_SORT_CPU, mapMemSortCPUVal);
  setCounterValue(Counter.MAP_MEM_SORT_WALLCLOCK, mapMemSortWallClockVal);
  setCounterValue(Counter.MAP_MERGE_CPU, mapMergeCPUVal);
  setCounterValue(Counter.MAP_MERGE_WALLCLOCK, mapMergeWallClockVal);
  setCounterValue(Counter.MAP_SPILL_SINGLERECORD_NUM, mapSpillSingleRecordNum);
  setCounterValue(Counter.MAP_SPILL_CPU_JVM, mapSpillJVMCPUVal);
  setCounterValue(Counter.MAP_MEM_SORT_CPU_JVM, mapMemSortJVMCPUVal);
  setCounterValue(Counter.MAP_MERGE_CPU_JVM, mapMergeJVMCPUVal);
}

Developer ID: rhli, Project: hadoop-EAR, Lines of code: 16, Source: MapSpillSortCounters.java
Example 5: SkippingRecordReader

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

SkippingRecordReader(RecordReader<K,V> raw, TaskUmbilicalProtocol umbilical,
                     TaskReporter reporter) throws IOException {
  super(raw, reporter);
  this.umbilical = umbilical;
  this.skipRecCounter = reporter.getCounter(Counter.MAP_SKIPPED_RECORDS);
  this.toWriteSkipRecs = toWriteSkipRecs() &&
      SkipBadRecords.getSkipOutputPath(conf) != null;
  skipIt = getSkipRanges().skipRangeIterator();
}

Developer ID: rhli, Project: hadoop-EAR, Lines of code: 10, Source: MapTask.java
Example 6: run

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

@Override
public void run(final JobConf job, final TaskUmbilicalProtocol umbilical)
    throws IOException, ClassNotFoundException, InterruptedException {
  this.umbilical = umbilical;
  taskStartTime = System.currentTimeMillis();

  // start thread that will handle communication with parent
  TaskReporter reporter = new TaskReporter(getProgress(), umbilical);
  reporter.startCommunicationThread();
  boolean useNewApi = job.getUseNewMapper();
  initialize(job, getJobID(), reporter, useNewApi);

  // check if it is a cleanupJobTask
  if (jobCleanup) {
    runJobCleanupTask(umbilical, reporter);
    return;
  }
  if (jobSetup) {
    runJobSetupTask(umbilical, reporter);
    return;
  }
  if (taskCleanup) {
    runTaskCleanupTask(umbilical, reporter);
    return;
  }

  if (useNewApi) {
    runNewMapper(job, split, umbilical, reporter);
  } else {
    runOldMapper(job, split, umbilical, reporter);
  }

  // Record the end-to-end wall-clock time of the map task.
  taskEndTime = System.currentTimeMillis();
  Counters.Counter taskWallClock =
      reporter.getCounter(Counter.MAP_TASK_WALLCLOCK);
  taskWallClock.setValue(taskEndTime - taskStartTime);
  done(umbilical, reporter);
}

Developer ID: rhli, Project: hadoop-EAR, Lines of code: 37, Source: MapTask.java
Example 7: finalCounterUpdate

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

public void finalCounterUpdate() {
  setCounterValue(Counter.MAP_SPILL_NUMBER, numSpillsVal);
  setCounterValue(Counter.MAP_SPILL_CPU, mapSpillCPUVal);
  setCounterValue(Counter.MAP_SPILL_WALLCLOCK, mapSpillWallClockVal);
  setCounterValue(Counter.MAP_SPILL_BYTES, mapSpillBytesVal);
  setCounterValue(Counter.MAP_MEM_SORT_CPU, mapMemSortCPUVal);
  setCounterValue(Counter.MAP_MEM_SORT_WALLCLOCK, mapMemSortWallClockVal);
  setCounterValue(Counter.MAP_MERGE_CPU, mapMergeCPUVal);
  setCounterValue(Counter.MAP_MERGE_WALLCLOCK, mapMergeWallClockVal);
  setCounterValue(Counter.MAP_SPILL_CPU_JVM, mapSpillJVMCPUVal);
  setCounterValue(Counter.MAP_MEM_SORT_CPU_JVM, mapMemSortJVMCPUVal);
  setCounterValue(Counter.MAP_MERGE_CPU_JVM, mapMergeJVMCPUVal);
}

Developer ID: rhli, Project: hadoop-EAR, Lines of code: 15, Source: MapTask.java
Example 8: incCMClientRetryCounter

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

public void incCMClientRetryCounter() {
  if (iface instanceof CoronaJobTracker) {
    Counters jobCounters = ((CoronaJobTracker) iface).getJobCounters();
    if (jobCounters != null) {
      LOG.info("inc retry session counter");
      jobCounters.incrCounter(
          JobInProgress.Counter.NUM_SESSION_DRIVER_CM_CLIENT_RETRY, 1);
    }
  }
}

Developer ID: rhli, Project: hadoop-EAR, Lines of code: 10, Source: SessionDriver.java
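For reference, the guarded incrCounter call above also works on a standalone Counters object; here is a small self-contained sketch with a hypothetical user enum:

import org.apache.hadoop.mapred.Counters;

public class CountersDemo {
  // Hypothetical counter group, standing in for JobInProgress.Counter above.
  enum MyCounters { RETRIES }

  public static void main(String[] args) {
    Counters counters = new Counters();
    counters.incrCounter(MyCounters.RETRIES, 1);                 // creates the counter on first use
    System.out.println(counters.getCounter(MyCounters.RETRIES)); // prints 1
  }
}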
Example 9: updateRJTFailoverCounters

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

void updateRJTFailoverCounters() {
  if (job == null ||
      stateFetcher.jtFailoverMetrics.restartNum == 0) {
    return;
  }
  job.jobCounters.findCounter(JobInProgress.Counter.NUM_RJT_FAILOVER)
      .setValue(stateFetcher.jtFailoverMetrics.restartNum);
  job.jobCounters.findCounter(JobInProgress.Counter.STATE_FETCH_COST_MILLIS)
      .setValue(stateFetcher.jtFailoverMetrics.fetchStateCost);
  if (stateFetcher.jtFailoverMetrics.savedMappers > 0) {
    job.jobCounters.findCounter(JobInProgress.Counter.NUM_SAVED_MAPPERS)
        .setValue(stateFetcher.jtFailoverMetrics.savedMappers);
    job.jobCounters.findCounter(JobInProgress.Counter.SAVED_MAP_CPU_MILLIS)
        .setValue(stateFetcher.jtFailoverMetrics.savedMapCPU);
    job.jobCounters.findCounter(JobInProgress.Counter.SAVED_MAP_WALLCLOCK_MILLIS)
        .setValue(stateFetcher.jtFailoverMetrics.savedMapWallclock);
  }
  if (stateFetcher.jtFailoverMetrics.savedReducers > 0) {
    job.jobCounters.findCounter(JobInProgress.Counter.NUM_SAVED_REDUCERS)
        .setValue(stateFetcher.jtFailoverMetrics.savedReducers);
    job.jobCounters.findCounter(JobInProgress.Counter.SAVED_REDUCE_CPU_MILLIS)
        .setValue(stateFetcher.jtFailoverMetrics.savedReduceCPU);
    job.jobCounters.findCounter(JobInProgress.Counter.SAVED_REDUCE_WALLCLOCK_MILLIS)
        .setValue(stateFetcher.jtFailoverMetrics.savedReduceWallclock);
  }
}

Developer ID: rhli, Project: hadoop-EAR, Lines of code: 29, Source: CoronaJobTracker.java
Example 10: finalCounterUpdate

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

public void finalCounterUpdate() {
  setCounterValue(Counter.MAP_SPILL_NUMBER, numSpillsVal);
  setCounterValue(Counter.MAP_SPILL_CPU, mapSpillCPUVal);
  setCounterValue(Counter.MAP_SPILL_WALLCLOCK, mapSpillWallClockVal);
  setCounterValue(Counter.MAP_SPILL_BYTES, mapSpillBytesVal);
  setCounterValue(Counter.MAP_MEM_SORT_CPU, mapMemSortCPUVal);
  setCounterValue(Counter.MAP_MEM_SORT_WALLCLOCK, mapMemSortWallClockVal);
  setCounterValue(Counter.MAP_MERGE_CPU, mapMergeCPUVal);
  setCounterValue(Counter.MAP_MERGE_WALLCLOCK, mapMergeWallClockVal);
  setCounterValue(Counter.MAP_SPILL_SINGLERECORD_NUM, mapSpillSingleRecordNum);
}

Developer ID: iVCE, Project: RDFS, Lines of code: 12, Source: MapSpillSortCounters.java
Example 11: setCPUCounter

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

private void setCPUCounter(ProcResourceValues startProcVals,
                           ProcResourceValues endProcVals,
                           org.apache.hadoop.mapred.Counters.Counter counter) {
  long cpuUsed = 0;
  if (startProcVals != null && endProcVals != null) {
    long cpuStartVal = startProcVals.getCumulativeCpuTime();
    long cpuEndVal = endProcVals.getCumulativeCpuTime();
    // Guard against the cumulative CPU clock appearing to move backwards.
    if (cpuEndVal > cpuStartVal) {
      cpuUsed = cpuEndVal - cpuStartVal;
    }
  }
  counter.setValue(cpuUsed);
}

Developer ID: iVCE, Project: RDFS, Lines of code: 14, Source: ReduceTask.java
Example 12: setCounterValue

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

private void setCounterValue(Counter counter, long value) {
  Counters.Counter counterObj = reporter.getCounter(counter);
  if (counterObj != null) {
    counterObj.setValue(value);
  }
}

Developer ID: rhli, Project: hadoop-EAR, Lines of code: 7, Source: MapSpillSortCounters.java
Example 13: setWallClockCounter

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

private void setWallClockCounter(long wallClock,
    org.apache.hadoop.mapred.Counters.Counter counter) {
  counter.setValue(wallClock);
}

Developer ID: iVCE, Project: RDFS, Lines of code: 5, Source: ReduceTask.java
Example 14: NewTrackingRecordWriter

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

NewTrackingRecordWriter(org.apache.hadoop.mapreduce.RecordWriter<K,V> real,
    org.apache.hadoop.mapreduce.Counter recordCounter) {
  this.real = real;
  this.outputRecordCounter = recordCounter;
}

Developer ID: iVCE, Project: RDFS, Lines of code: 6, Source: ReduceTask.java
Example 15: ReduceCopier

import org.apache.hadoop.mapred.Task.Counter; // import the required package/class

public ReduceCopier(TaskUmbilicalProtocol umbilical, JobConf conf,
                    TaskReporter reporter)
    throws ClassNotFoundException, IOException {
  configureClasspath(conf);
  this.reporter = reporter;
  this.shuffleClientMetrics = new ShuffleClientMetrics(conf);
  this.umbilical = umbilical;
  this.reduceTask = ReduceTask.this;

  this.scheduledCopies = new ArrayList<MapOutputLocation>(100);
  this.copyResults = new ArrayList<CopyResult>(100);
  this.numCopiers = conf.getInt("mapred.reduce.parallel.copies", 5);
  this.maxInFlight = 4 * numCopiers;
  Counters.Counter combineInputCounter =
      reporter.getCounter(Task.Counter.COMBINE_INPUT_RECORDS);
  this.combinerRunner = CombinerRunner.create(conf, getTaskID(),
                                              combineInputCounter,
                                              reporter, null);
  if (combinerRunner != null) {
    combineCollector =
        new CombineOutputCollector(reduceCombineOutputCounter);
  }
  this.ioSortFactor = conf.getInt("io.sort.factor", 10);
  this.abortFailureLimit = Math.max(30, numMaps / 10);
  this.maxFetchFailuresBeforeReporting = conf.getInt(
      "mapreduce.reduce.shuffle.maxfetchfailures", REPORT_FAILURE_LIMIT);
  this.maxFailedUniqueFetches = Math.min(numMaps,
      this.maxFailedUniqueFetches);
  this.maxInMemOutputs = conf.getInt("mapred.inmem.merge.threshold", 1000);
  this.maxInMemCopyPer =
      conf.getFloat("mapred.job.shuffle.merge.percent", 0.66f);
  final float maxRedPer =
      conf.getFloat("mapred.job.reduce.input.buffer.percent", 0f);
  if (maxRedPer > 1.0 || maxRedPer < 0.0) {
    throw new IOException("mapred.job.reduce.input.buffer.percent" +
                          maxRedPer);
  }
  this.maxInMemReduce = (int) Math.min(
      Runtime.getRuntime().maxMemory() * maxRedPer, Integer.MAX_VALUE);

  // Setup the RamManager
  ramManager = new ShuffleRamManager(conf);
  localFileSys = FileSystem.getLocal(conf);
  rfs = ((LocalFileSystem) localFileSys).getRaw();

  // hosts -> next contact time
  this.penaltyBox = new LinkedHashMap<String, Long>();
  // hostnames
  this.uniqueHosts = new HashSet<String>();

  // Seed the random number generator with a reasonably globally unique seed
  long randomSeed = System.nanoTime() +
      (long) Math.pow(this.reduceTask.getPartition(),
                      (this.reduceTask.getPartition() % 10));
  this.random = new Random(randomSeed);
  this.maxMapRuntime = 0;
  this.reportReadErrorImmediately =
      conf.getBoolean("mapreduce.reduce.shuffle.notify.readerror", true);
}

Developer ID: iVCE, Project: RDFS, Lines of code: 69, Source: ReduceTask.java
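The constructor above reads all of its shuffle tuning knobs straight from the JobConf. A hedged sketch of setting those same keys on the client side (the values are illustrative; the key names and defaults are taken from the constructor itself):

import org.apache.hadoop.mapred.JobConf;

public class ShuffleTuningSketch {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // Fetch map outputs with 10 parallel copier threads instead of the default 5.
    conf.setInt("mapred.reduce.parallel.copies", 10);
    // Merge more spill segments at once during the final merge.
    conf.setInt("io.sort.factor", 20);
    // Start the in-memory merge once the shuffle buffer is two-thirds full.
    conf.setFloat("mapred.job.shuffle.merge.percent", 0.66f);
    // Must stay within [0, 1]; the constructor above throws an IOException otherwise.
    conf.setFloat("mapred.job.reduce.input.buffer.percent", 0.0f);
  }
}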
Note: the org.apache.hadoop.mapred.Task.Counter examples in this article were compiled from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright belongs to the original authors, and any distribution or use should follow the corresponding project's license. Please do not repost without permission.