本文整理汇总了Java中org.apache.hadoop.mapred.TaskLog.LogFileDetail类的典型用法代码示例。如果您正苦于以下问题:Java LogFileDetail类的具体用法?Java LogFileDetail怎么用?Java LogFileDetail使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
LogFileDetail类属于org.apache.hadoop.mapred.TaskLog包,在下文中一共展示了LogFileDetail类的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: copyOriginalIndexFileInfo
import org.apache.hadoop.mapred.TaskLog.LogFileDetail; //导入依赖的package包/类
/**
* @param lInfo
* @param taskLogFileDetails
* @param updatedTaskLogFileDetails
* @param logName
*/
/**
 * Preserve the original index-file details for a log that is tracked by
 * index files, so the index can later be restored/reverted unchanged.
 *
 * @param lInfo info about the JVM and all attempts that ran in it
 * @param taskLogFileDetails original per-task log-file details
 * @param updatedTaskLogFileDetails map populated in place with the
 *        original details for {@code logName}
 * @param logName the log whose details should be carried over
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  // Only logs tracked by index files need their details carried over.
  if (!TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    return;
  }
  for (Task task : lInfo.getAllAttempts()) {
    Map<LogName, LogFileDetail> detailsForTask =
        updatedTaskLogFileDetails.get(task);
    if (detailsForTask == null) {
      detailsForTask = new HashMap<LogName, LogFileDetail>();
      updatedTaskLogFileDetails.put(task, detailsForTask);
    }
    detailsForTask.put(logName, taskLogFileDetails.get(task).get(logName));
  }
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:22,代码来源:TaskLogsTruncater.java
示例2: isTruncationNeeded
import org.apache.hadoop.mapred.TaskLog.LogFileDetail; //导入依赖的package包/类
/**
* Check if truncation of logs is needed for the given jvmInfo. If all the
* tasks that ran in a JVM are within the log-limits, then truncation is not
* needed. Otherwise it is needed.
*
* @param lInfo
* @param taskLogFileDetails
* @param logName
* @return true if truncation is needed, false otherwise
*/
/**
 * Check if truncation of logs is needed for the given jvmInfo. If all the
 * tasks that ran in a JVM are within the log-limits, then truncation is not
 * needed. Otherwise it is needed.
 *
 * @param lInfo info about the JVM and all attempts that ran in it
 * @param taskLogFileDetails per-task log-file details read from index files
 * @param logName the log being checked against the retain-size limit
 * @return true if truncation is needed, false otherwise
 */
private boolean isTruncationNeeded(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    LogName logName) {
  for (Task task : lInfo.getAllAttempts()) {
    // Maps and reduces can be configured with different retain sizes.
    long taskRetainSize =
        (task.isMapTask() ? mapRetainSize : reduceRetainSize);
    // Narrowed the detail variable to loop scope (it was previously
    // declared outside the loop and leaked past its last use) and
    // replaced the flag-and-break with a direct early return.
    LogFileDetail logFileDetail =
        taskLogFileDetails.get(task).get(logName);
    if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
        && logFileDetail.length > taskRetainSize) {
      return true;
    }
  }
  return false;
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:30,代码来源:TaskLogsTruncater.java
示例3: revertIndexFileInfo
import org.apache.hadoop.mapred.TaskLog.LogFileDetail; //导入依赖的package包/类
/**
* @param lInfo
* @param taskLogFileDetails
* @param updatedTaskLogFileDetails
* @param logName
*/
/**
 * Restore the original index-file details for a log tracked by index
 * files, undoing any in-memory updates made for truncation.
 *
 * @param lInfo per-JVM info holding all attempts that ran in the JVM
 * @param taskLogFileDetails original per-task log-file details
 * @param updatedTaskLogFileDetails map reset in place to the original
 *        details for {@code logName}
 * @param logName the log whose details are reverted
 */
private void revertIndexFileInfo(PerJVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  // Nothing to revert for logs that are not tracked by index files.
  if (!TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    return;
  }
  for (Task task : lInfo.allAttempts) {
    Map<LogName, LogFileDetail> detailsForTask =
        updatedTaskLogFileDetails.get(task);
    if (detailsForTask == null) {
      detailsForTask = new HashMap<LogName, LogFileDetail>();
      updatedTaskLogFileDetails.put(task, detailsForTask);
    }
    detailsForTask.put(logName, taskLogFileDetails.get(task).get(logName));
  }
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:22,代码来源:TaskLogsMonitor.java
示例4: isTruncationNeeded
import org.apache.hadoop.mapred.TaskLog.LogFileDetail; //导入依赖的package包/类
/**
* Check if truncation of logs is needed for the given jvmInfo. If all the
* tasks that ran in a JVM are within the log-limits, then truncation is not
* needed. Otherwise it is needed.
*
* @param lInfo
* @param taskLogFileDetails
* @param logName
* @return true if truncation is needed, false otherwise
*/
/**
 * Check if truncation of logs is needed for the given jvmInfo. If all the
 * tasks that ran in a JVM are within the log-limits, then truncation is not
 * needed. Otherwise it is needed.
 *
 * @param lInfo per-JVM info holding all attempts that ran in the JVM
 * @param taskLogFileDetails per-task log-file details read from index files
 * @param logName the log being checked against the retain-size limit
 * @return true if truncation is needed, false otherwise
 */
private boolean isTruncationNeeded(PerJVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    LogName logName) {
  for (Task task : lInfo.allAttempts) {
    // Maps and reduces can be configured with different retain sizes.
    long taskRetainSize =
        (task.isMapTask() ? mapRetainSize : reduceRetainSize);
    // Narrowed the detail variable to loop scope (it was previously
    // declared outside the loop and leaked past its last use) and
    // replaced the flag-and-break with a direct early return.
    LogFileDetail logFileDetail =
        taskLogFileDetails.get(task).get(logName);
    if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
        && logFileDetail.length > taskRetainSize) {
      return true;
    }
  }
  return false;
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:30,代码来源:TaskLogsMonitor.java
示例5: shouldTruncateLogs
import org.apache.hadoop.mapred.TaskLog.LogFileDetail; //导入依赖的package包/类
/**
* Check the log file sizes generated by the attempts that ran in a
* particular JVM
* @param lInfo
* @return
* @throws IOException
*/
/**
 * Check the log file sizes generated by the attempts that ran in a
 * particular JVM.
 *
 * @param lInfo info about the JVM and all attempts that ran in it
 * @return true if any existing log file of any attempt exceeds its
 *         retain-size limit and therefore needs truncation
 * @throws IOException declared for interface compatibility; read failures
 *         are caught internally and treated as "no truncation"
 */
public boolean shouldTruncateLogs(JVMInfo lInfo) throws IOException {
  // Read the log-file details for all the attempts that ran in this JVM
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails;
  try {
    taskLogFileDetails = getAllLogsFileDetails(lInfo.getAllAttempts());
  } catch (IOException e) {
    LOG.warn(
        "Exception in truncateLogs while getting allLogsFileDetails()."
        + " Ignoring the truncation of logs of this process.", e);
    return false;
  }
  File attemptLogDir = lInfo.getLogLocation();
  for (LogName logName : LogName.values()) {
    File logFile = new File(attemptLogDir, logName.toString());
    // Only logs that actually exist on disk can need truncation.
    if (!logFile.exists()) {
      continue;
    }
    // Replaced the braceless one-line "else return true" with an
    // explicitly-braced early return; behavior is unchanged.
    if (isTruncationNeeded(lInfo, taskLogFileDetails, logName)) {
      return true;
    }
    LOG.debug("Truncation is not needed for "
        + logFile.getAbsolutePath());
  }
  return false;
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:35,代码来源:TaskLogsTruncater.java
示例6: getAllLogsFileDetails
import org.apache.hadoop.mapred.TaskLog.LogFileDetail; //导入依赖的package包/类
/**
* Get the logFileDetails of all the list of attempts passed.
* @param allAttempts the attempts we are interested in
*
* @return a map of task to the log-file detail
* @throws IOException
*/
/**
 * Get the logFileDetails for every attempt in the given list.
 *
 * @param allAttempts the attempts we are interested in
 * @return a map from each task to its per-log-name file details
 * @throws IOException if reading any attempt's index files fails
 */
private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails(
    final List<Task> allAttempts) throws IOException {
  Map<Task, Map<LogName, LogFileDetail>> detailsByTask =
      new HashMap<Task, Map<LogName, LogFileDetail>>();
  for (Task attempt : allAttempts) {
    detailsByTask.put(attempt,
        TaskLog.getAllLogsFileDetails(attempt.getTaskID(),
            attempt.isTaskCleanupTask()));
  }
  return detailsByTask;
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:20,代码来源:TaskLogsTruncater.java
示例7: updateIndicesAfterLogTruncation
import org.apache.hadoop.mapred.TaskLog.LogFileDetail; //导入依赖的package包/类
/**
* Truncation of logs is done. Now sync the index files to reflect the
* truncated sizes.
*
* @param firstAttempt
* @param updatedTaskLogFileDetails
*/
/**
 * Truncation of logs is done. Now sync the index files to reflect the
 * truncated sizes.
 *
 * @param location the attempt log directory whose index files are rewritten
 * @param updatedTaskLogFileDetails post-truncation log-file details per task
 */
private void updateIndicesAfterLogTruncation(String location,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails) {
  for (Entry<Task, Map<LogName, LogFileDetail>> entry
      : updatedTaskLogFileDetails.entrySet()) {
    Task task = entry.getKey();
    Map<LogName, LogFileDetail> logFileDetails = entry.getValue();
    Map<LogName, Long[]> logLengths = new HashMap<LogName, Long[]>();
    for (LogName logName : TaskLog.LOGS_TRACKED_BY_INDEX_FILES) {
      // Index entries are [previousLength, currentLength]; default both
      // to zero when no detail is available for this log.
      long previousLength = 0L;
      long currentLength = 0L;
      LogFileDetail lfd = logFileDetails.get(logName);
      if (lfd != null) {
        previousLength = lfd.start;
        currentLength = lfd.start + lfd.length;
      }
      logLengths.put(logName, new Long[] { Long.valueOf(previousLength),
          Long.valueOf(currentLength) });
    }
    try {
      TaskLog.writeToIndexFile(location, task.getTaskID(),
          task.isTaskCleanupTask(), logLengths);
    } catch (IOException ioe) {
      LOG.warn("Exception encountered while updating index file of task "
          + task.getTaskID()
          + ". Ignoring and continuing with other tasks.", ioe);
    }
  }
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:37,代码来源:TaskLogsTruncater.java
示例8: getAllLogsFileDetails
import org.apache.hadoop.mapred.TaskLog.LogFileDetail; //导入依赖的package包/类
/**
* Get the logFileDetails of all the list of attempts passed.
*
* @param lInfo
* @return a map of task to the log-file detail
* @throws IOException
*/
private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails(
final List<Task> allAttempts) throws IOException {
Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails =
new HashMap<Task, Map<LogName, LogFileDetail>>();
for (Task task : allAttempts) {
Map<LogName, LogFileDetail> allLogsFileDetails;
allLogsFileDetails =
TaskLog.getAllLogsFileDetails(task.getTaskID(),
task.isTaskCleanupTask());
taskLogFileDetails.put(task, allLogsFileDetails);
}
return taskLogFileDetails;
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:21,代码来源:TaskLogsMonitor.java
示例9: updateIndicesAfterLogTruncation
import org.apache.hadoop.mapred.TaskLog.LogFileDetail; //导入依赖的package包/类
/**
* Truncation of logs is done. Now sync the index files to reflect the
* truncated sizes.
*
* @param firstAttempt
* @param updatedTaskLogFileDetails
*/
/**
 * Truncation of logs is done. Now sync the index files to reflect the
 * truncated sizes.
 *
 * @param firstAttempt first attempt of the JVM, identifying the attempt
 *        log directory whose index files are rewritten
 * @param updatedTaskLogFileDetails post-truncation log-file details per task
 */
private void updateIndicesAfterLogTruncation(TaskAttemptID firstAttempt,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails) {
  for (Entry<Task, Map<LogName, LogFileDetail>> entry :
      updatedTaskLogFileDetails.entrySet()) {
    Task task = entry.getKey();
    Map<LogName, LogFileDetail> logFileDetails = entry.getValue();
    Map<LogName, Long[]> logLengths = new HashMap<LogName, Long[]>();
    // set current and previous lengths
    for (LogName logName : TaskLog.LOGS_TRACKED_BY_INDEX_FILES) {
      logLengths.put(logName, new Long[] { Long.valueOf(0L),
          Long.valueOf(0L) });
      LogFileDetail lfd = logFileDetails.get(logName);
      if (lfd != null) {
        // Set previous lengths
        logLengths.get(logName)[0] = Long.valueOf(lfd.start);
        // Set current lengths
        logLengths.get(logName)[1] = Long.valueOf(lfd.start + lfd.length);
      }
    }
    try {
      TaskLog.writeToIndexFile(firstAttempt, task.getTaskID(),
          task.isTaskCleanupTask(), logLengths);
    } catch (IOException ioe) {
      // Log the failure once with the exception attached as the cause,
      // instead of the previous double-warn that stringified the stack
      // trace into a separate message and dropped the Throwable.
      LOG.warn("Exception encountered while updating index file of task "
          + task.getTaskID()
          + ". Ignoring and continuing with other tasks.", ioe);
    }
  }
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:39,代码来源:TaskLogsMonitor.java
示例10: shouldTruncateLogs
import org.apache.hadoop.mapred.TaskLog.LogFileDetail; //导入依赖的package包/类
/**
* Check the log file sizes generated by the attempts that ran in a
* particular JVM
* @param lInfo
* @return is truncation required?
* @throws IOException
*/
/**
 * Check the log file sizes generated by the attempts that ran in a
 * particular JVM.
 *
 * @param lInfo info about the JVM and all attempts that ran in it
 * @return is truncation required?
 * @throws IOException declared for interface compatibility; read failures
 *         are caught internally and treated as "no truncation"
 */
public boolean shouldTruncateLogs(JVMInfo lInfo) throws IOException {
  // Read the log-file details for all the attempts that ran in this JVM.
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails;
  try {
    taskLogFileDetails = getAllLogsFileDetails(lInfo.getAllAttempts());
  } catch (IOException e) {
    LOG.warn(
        "Exception in truncateLogs while getting allLogsFileDetails()."
        + " Ignoring the truncation of logs of this process.", e);
    return false;
  }
  File attemptLogDir = lInfo.getLogLocation();
  for (LogName logName : LogName.values()) {
    File logFile = new File(attemptLogDir, logName.toString());
    // A log that does not exist on disk cannot need truncation.
    if (!logFile.exists()) {
      continue;
    }
    if (isTruncationNeeded(lInfo, taskLogFileDetails, logName)) {
      return true;
    }
    LOG.debug("Truncation is not needed for "
        + logFile.getAbsolutePath());
  }
  return false;
}
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:35,代码来源:TaskLogsTruncater.java
示例11: truncateALogFileOfAnAttempt
import org.apache.hadoop.mapred.TaskLog.LogFileDetail; //导入依赖的package包/类
/**
* Truncate the log file of this task-attempt so that only the last retainSize
* many bytes of each log file is retained and the log file is reduced in size
* saving disk space.
*
* @param taskID Task whose logs need to be truncated
* @param oldLogFileDetail contains the original log details for the attempt
* @param taskRetainSize retain-size
* @param tmpFileOutputStream New log file to write to. Already opened in append
* mode.
* @param logFileInputStream Original log file to read from.
* @return
* @throws IOException
*/
/**
 * Truncate the log file of this task-attempt so that only the last retainSize
 * many bytes of each log file is retained and the log file is reduced in size
 * saving disk space.
 *
 * @param taskID Task whose logs need to be truncated
 * @param oldLogFileDetail contains the original log details for the attempt
 * @param taskRetainSize retain-size
 * @param tmpFileOutputStream New log file to write to. Already opened in append
 *                            mode.
 * @param logFileInputStream Original log file to read from.
 * @param logName the log being truncated (used for log messages)
 * @return the detail (location/length) of the truncated log file
 * @throws IOException if skipping or copying the log data fails
 */
private LogFileDetail truncateALogFileOfAnAttempt(
    final TaskAttemptID taskID, final LogFileDetail oldLogFileDetail,
    final long taskRetainSize,
    final FileOutputStream tmpFileOutputStream,
    final FileInputStream logFileInputStream, final LogName logName)
    throws IOException {
  LogFileDetail newLogFileDetail = new LogFileDetail();
  long logSize = 0;
  // ///////////// Truncate log file ///////////////////////
  // New location of log file is same as the old
  newLogFileDetail.location = oldLogFileDetail.location;
  if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
      && oldLogFileDetail.length > taskRetainSize) {
    LOG.info("Truncating " + logName + " logs for " + taskID + " from "
        + oldLogFileDetail.length + "bytes to " + taskRetainSize
        + "bytes.");
    logSize = taskRetainSize;
    byte[] truncatedMsgBytes = TRUNCATED_MSG.getBytes();
    tmpFileOutputStream.write(truncatedMsgBytes);
    newLogFileDetail.length += truncatedMsgBytes.length;
  } else {
    LOG.debug("No truncation needed for " + logName + " logs for " + taskID
        + " length is " + oldLogFileDetail.length + " retain size "
        + taskRetainSize + "bytes.");
    logSize = oldLogFileDetail.length;
  }
  // Skip everything except the last logSize bytes of the original file.
  long bytesSkipped =
      logFileInputStream.skip(oldLogFileDetail.length - logSize);
  if (bytesSkipped != oldLogFileDetail.length - logSize) {
    throw new IOException("Erroneously skipped " + bytesSkipped
        + " instead of the expected "
        + (oldLogFileDetail.length - logSize)
        + " while truncating " + logName + " logs for " + taskID );
  }
  // Copy the retained tail into the new file. Reuse one buffer and,
  // crucially, write only the bytes actually read: the old code wrote the
  // whole buffer even after a partial read, which could append garbage
  // bytes to the truncated log.
  byte[] tmpBuf = new byte[DEFAULT_BUFFER_SIZE];
  long alreadyRead = 0;
  while (alreadyRead < logSize) {
    int toRead = (int) Math.min(tmpBuf.length, logSize - alreadyRead);
    int bytesRead = logFileInputStream.read(tmpBuf, 0, toRead);
    if (bytesRead < 0) {
      // Unexpected EOF: stop copying; the length below reflects what was
      // really written rather than the intended logSize.
      break;
    }
    tmpFileOutputStream.write(tmpBuf, 0, bytesRead);
    alreadyRead += bytesRead;
  }
  // Account for the bytes actually copied (equals logSize unless EOF hit).
  newLogFileDetail.length += alreadyRead;
  // ////// End of truncating log file ///////////////////////
  return newLogFileDetail;
}
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:73,代码来源:TaskLogsTruncater.java
示例12: truncateALogFileOfAnAttempt
import org.apache.hadoop.mapred.TaskLog.LogFileDetail; //导入依赖的package包/类
/**
* Truncate the log file of this task-attempt so that only the last retainSize
* many bytes of each log file is retained and the log file is reduced in size
* saving disk space.
*
* @param taskID Task whose logs need to be truncated
* @param oldLogFileDetail contains the original log details for the attempt
* @param taskRetainSize retain-size
* @param tmpFileWriter New log file to write to. Already opened in append
* mode.
* @param logFileReader Original log file to read from.
* @return
* @throws IOException
*/
/**
 * Truncate the log file of this task-attempt so that only the last retainSize
 * many bytes of each log file is retained and the log file is reduced in size
 * saving disk space.
 *
 * @param taskID Task whose logs need to be truncated
 * @param oldLogFileDetail contains the original log details for the attempt
 * @param taskRetainSize retain-size
 * @param tmpFileWriter New log file to write to. Already opened in append
 *                      mode.
 * @param logFileReader Original log file to read from.
 * @return the detail (location/length) of the truncated log file
 * @throws IOException if skipping or copying the log data fails
 */
private LogFileDetail truncateALogFileOfAnAttempt(
    final TaskAttemptID taskID, final LogFileDetail oldLogFileDetail,
    final long taskRetainSize, final FileWriter tmpFileWriter,
    final FileReader logFileReader) throws IOException {
  LogFileDetail newLogFileDetail = new LogFileDetail();
  // ///////////// Truncate log file ///////////////////////
  // New location of log file is same as the old
  newLogFileDetail.location = oldLogFileDetail.location;
  if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
      && oldLogFileDetail.length > taskRetainSize) {
    LOG.info("Truncating logs for " + taskID + " from "
        + oldLogFileDetail.length + "bytes to " + taskRetainSize
        + "bytes.");
    newLogFileDetail.length = taskRetainSize;
  } else {
    LOG.info("No truncation needed for " + taskID + " length is "
        + oldLogFileDetail.length + " retain size " + taskRetainSize
        + "bytes.");
    newLogFileDetail.length = oldLogFileDetail.length;
  }
  // Skip everything except the last newLogFileDetail.length chars.
  long charsSkipped =
      logFileReader.skip(oldLogFileDetail.length
          - newLogFileDetail.length);
  if (charsSkipped != oldLogFileDetail.length - newLogFileDetail.length) {
    throw new IOException("Erroneously skipped " + charsSkipped
        + " instead of the expected "
        + (oldLogFileDetail.length - newLogFileDetail.length));
  }
  // Copy the retained tail into the new file. Reuse one buffer and,
  // crucially, write only the chars actually read: the old code wrote the
  // whole buffer even after a partial read, which could append garbage
  // characters to the truncated log.
  char[] tmpBuf = new char[DEFAULT_BUFFER_SIZE];
  long toCopy = newLogFileDetail.length;
  long alreadyRead = 0;
  while (alreadyRead < toCopy) {
    int chunk = (int) Math.min(tmpBuf.length, toCopy - alreadyRead);
    int charsRead = logFileReader.read(tmpBuf, 0, chunk);
    if (charsRead < 0) {
      // Unexpected EOF: stop copying.
      break;
    }
    tmpFileWriter.write(tmpBuf, 0, charsRead);
    alreadyRead += charsRead;
  }
  // ////// End of truncating log file ///////////////////////
  return newLogFileDetail;
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:65,代码来源:TaskLogsMonitor.java
注:本文中的org.apache.hadoop.mapred.TaskLog.LogFileDetail类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论