This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter. If you have been wondering what FileInputFormatCounter does or how to use it, the curated examples below should help.
The FileInputFormatCounter class belongs to the org.apache.hadoop.mapreduce.lib.input package. Seven code examples of the class are shown below, ordered by popularity by default.
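For orientation, here is a minimal, hypothetical sketch of how a driver program might read FileInputFormatCounter.BYTES_READ after a job finishes. The job configuration itself is elided, and the class and path names are illustrative, not taken from the examples below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter;

public class BytesReadDriver {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "bytes-read-demo");
    // ... set mapper/reducer/output classes here (omitted for brevity) ...
    FileInputFormat.addInputPath(job, new Path(args[0]));
    if (job.waitForCompletion(true)) {
      // Aggregated bytes read by all map tasks through the input format.
      Counter bytesRead =
          job.getCounters().findCounter(FileInputFormatCounter.BYTES_READ);
      System.out.println("BYTES_READ = " + bytesRead.getValue());
    }
  }
}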
Example 1: validateFileCounters

import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; // import the required package/class

private void validateFileCounters(Counters counter, long fileBytesRead,
    long fileBytesWritten, long mapOutputBytes,
    long mapOutputMaterializedBytes) {
  // The input-byte counter must be non-zero and match the expected input size.
  assertTrue(counter.findCounter(FileInputFormatCounter.BYTES_READ)
      .getValue() != 0);
  assertEquals(fileBytesRead,
      counter.findCounter(FileInputFormatCounter.BYTES_READ).getValue());
  assertTrue(counter.findCounter(FileOutputFormatCounter.BYTES_WRITTEN)
      .getValue() != 0);

  // Map-output assertions are optional; negative arguments skip them.
  if (mapOutputBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue() != 0);
  }
  if (mapOutputMaterializedBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES)
        .getValue() != 0);
  }
}

Developer: naver, Project: hadoop, Lines: 20, Source: TestJobCounters.java
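A hedged sketch of how such a helper might be invoked from a test, assuming a finished job handle named runningJob and a precomputed expected input size (both names are illustrative, not from the snippet above):

Counters counters = runningJob.getCounters();
// Negative values skip the optional map-output assertions.
validateFileCounters(counters, expectedInputBytes, -1, -1, -1);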
Example 2: TrackedRecordReader

import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; // import the required package/class

TrackedRecordReader(TaskReporter reporter, JobConf job)
    throws IOException {
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;

  // Locate the FileSystem statistics backing this task's input split, if any.
  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  // Bytes consumed while constructing the underlying record reader are
  // credited to FileInputFormatCounter.BYTES_READ.
  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
      job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}

Developer: naver, Project: hadoop, Lines: 20, Source: MapTask.java
Example 3: NewTrackingRecordReader

import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; // import the required package/class

NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List<Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  // As in the old-API reader above, bytes read while creating the real
  // record reader are charged to BYTES_READ.
  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}

Developer: naver, Project: hadoop, Lines: 24, Source: MapTask.java
Example 4: NewMultiTrackingRecordReader

import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; // import the required package/class

NewMultiTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit splits[],
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);
  this.context = taskContext;
  this.splits = splits;
  this.inputFormat = inputFormat;
  this.splitsLength = 0;
}

Developer: yncxcw, Project: FlexMap, Lines: 17, Source: MultiMapTask.java
Example 5: initDepricatedMap

import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; // import the required package/class

@SuppressWarnings({ "deprecation" })
private static void initDepricatedMap() {
  // Map the deprecated per-format counter enums (old and new API) to the
  // class names of their replacement counters.
  depricatedCounterMap.put(FileInputFormat.Counter.class.getName(),
      FileInputFormatCounter.class.getName());
  depricatedCounterMap.put(FileOutputFormat.Counter.class.getName(),
      FileOutputFormatCounter.class.getName());
  depricatedCounterMap.put(
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.Counter.class
          .getName(), FileInputFormatCounter.class.getName());
  depricatedCounterMap.put(
      org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.Counter.class
          .getName(), FileOutputFormatCounter.class.getName());
}

Developer: naver, Project: hadoop, Lines: 14, Source: Counters.java
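To make the purpose of this map concrete, a small hypothetical lookup using the entries populated above (depricatedCounterMap itself lives in the surrounding Counters class):

// A legacy group name coming from old user code ...
String legacyGroup = FileInputFormat.Counter.class.getName();
// ... resolves to the replacement counter class name.
String newGroup = depricatedCounterMap.get(legacyGroup);
// newGroup is "org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter"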
Example 6: findCounter

import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; // import the required package/class

public synchronized Counter findCounter(String group, String name) {
  // Redirect the deprecated MAP_INPUT_BYTES counter name to
  // FileInputFormatCounter.BYTES_READ.
  if (name.equals("MAP_INPUT_BYTES")) {
    LOG.warn("Counter name MAP_INPUT_BYTES is deprecated. " +
        "Use FileInputFormatCounters as group name and " +
        " BYTES_READ as counter name instead");
    return findCounter(FileInputFormatCounter.BYTES_READ);
  }
  String newGroupKey = getNewGroupKey(group);
  if (newGroupKey != null) {
    group = newGroupKey;
  }
  return getGroup(group).getCounterForName(name);
}

Developer: naver, Project: hadoop, Lines: 14, Source: Counters.java
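A brief, hypothetical illustration of what this shim means for callers still using the legacy counter name, assuming counters is an old-API org.apache.hadoop.mapred.Counters instance (the group string is arbitrary, since the snippet above ignores it for this name):

// Both lookups end up at the same underlying counter:
Counters.Counter viaLegacyName = counters.findCounter("SomeGroup", "MAP_INPUT_BYTES");
Counters.Counter viaEnum = counters.findCounter(FileInputFormatCounter.BYTES_READ);
// viaLegacyName.getValue() == viaEnum.getValue()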
Example 7: initUsedCounters

import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; // import the required package/class

protected void initUsedCounters() {
  // Pre-register the counters this task will report on.
  reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
  reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  reporter.getCounter(TaskCounter.MAP_OUTPUT_BYTES);
  reporter.getCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES);
  reporter.getCounter(TaskCounter.COMBINE_INPUT_RECORDS);
  reporter.getCounter(TaskCounter.COMBINE_OUTPUT_RECORDS);
  reporter.getCounter(TaskCounter.SPILLED_RECORDS);
}

Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 11, Source: StatusReportChecker.java
Note: the org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter examples in this article were collected from open-source projects and documentation platforms such as GitHub and MSDocs. Copyright in each snippet remains with its original authors; consult the corresponding project's license before redistributing or reusing the code, and do not republish without permission.