本文整理汇总了Java中org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator类的典型用法代码示例。如果您正苦于以下问题:Java ContainerSimulator类的具体用法?Java ContainerSimulator怎么用?Java ContainerSimulator使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
ContainerSimulator类属于org.apache.hadoop.yarn.sls.scheduler包,在下文中一共展示了ContainerSimulator类的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: addNewContainer
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator; //导入依赖的package包/类
/**
 * Launch a new container on this simulated node.
 * A lifetime of -1 marks the container as an AM container, which is
 * tracked in {@code amContainerList} and never expires on its own;
 * any other lifetime schedules the container to complete after
 * {@code lifeTimeMS} milliseconds via the delay queue.
 */
public void addNewContainer(Container container, long lifeTimeMS) {
  LOG.debug(MessageFormat.format("NodeManager {0} launches a new " +
      "container ({1}).", node.getNodeID(), container.getId()));
  if (lifeTimeMS == -1) {
    // AM container: -1 means it lives until explicitly released
    synchronized (amContainerList) {
      amContainerList.add(container.getId());
    }
  } else {
    // normal container: expires lifeTimeMS from now
    ContainerSimulator simulator = new ContainerSimulator(container.getId(),
        container.getResource(), System.currentTimeMillis() + lifeTimeMS,
        lifeTimeMS);
    containerQueue.add(simulator);
    runningContainers.put(simulator.getId(), simulator);
  }
}
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:NMSimulator.java
示例2: init
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator; //导入依赖的package包/类
/**
 * Initialize the AM simulator with the job's trace window and identity.
 * Delegates the scheduling window (start, end, heartbeat) to the base
 * simulator, then records the job's user, queue, and tracking metadata.
 * Note: {@code id} and {@code containerList} are accepted for subclasses
 * (e.g. the MapReduce AM) that consume them in their overrides.
 */
public void init(int id, int heartbeatInterval,
    List<ContainerSimulator> containerList, ResourceManager rm, SLSRunner se,
    long traceStartTime, long traceFinishTime, String user, String queue,
    boolean isTracked, String oldAppId) {
  super.init(traceStartTime, traceStartTime + 1000000L * heartbeatInterval,
      heartbeatInterval);
  // fix: `this.user = user;` was redundantly assigned twice in the original
  this.user = user;
  this.rm = rm;
  this.se = se;
  this.queue = queue;
  this.oldAppId = oldAppId;
  this.isTracked = isTracked;
  this.traceStartTimeMS = traceStartTime;
  this.traceFinishTimeMS = traceFinishTime;
}
开发者ID:naver,项目名称:hadoop,代码行数:17,代码来源:AMSimulator.java
示例3: init
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator; //导入依赖的package包/类
/**
 * Initialize the MapReduce AM simulator: delegates common setup to the
 * base class, then partitions the trace's containers into pending map
 * and reduce task queues according to each task's recorded type.
 */
public void init(int id, int heartbeatInterval,
    List<ContainerSimulator> containerList, ResourceManager rm, SLSRunner se,
    long traceStartTime, long traceFinishTime, String user, String queue,
    boolean isTracked, String oldAppId) {
  super.init(id, heartbeatInterval, containerList, rm, se, traceStartTime,
      traceFinishTime, user, queue, isTracked, oldAppId);
  amtype = "mapreduce";
  // classify each trace task as a map or a reduce and tag its priority
  for (ContainerSimulator task : containerList) {
    if (task.getType().equals("map")) {
      task.setPriority(PRIORITY_MAP);
      pendingMaps.add(task);
    } else if (task.getType().equals("reduce")) {
      task.setPriority(PRIORITY_REDUCE);
      pendingReduces.add(task);
    }
  }
  // snapshot the full task sets and derive the progress counters
  allMaps.addAll(pendingMaps);
  allReduces.addAll(pendingReduces);
  mapTotal = pendingMaps.size();
  reduceTotal = pendingReduces.size();
  totalContainers = mapTotal + reduceTotal;
}
开发者ID:naver,项目名称:hadoop,代码行数:26,代码来源:MRAMSimulator.java
示例4: init
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator; //导入依赖的package包/类
/**
 * Initialize the simulated NodeManager: build the node from its rack/host
 * string and capacity, set up the container bookkeeping structures, and
 * register the node with the ResourceManager's resource tracker.
 */
public void init(String nodeIdStr, int memory, int cores,
    int dispatchTime, int heartBeatInterval, ResourceManager rm)
    throws IOException, YarnException {
  super.init(dispatchTime, dispatchTime + 1000000L * heartBeatInterval,
      heartBeatInterval);
  // resolve rack/host and create the simulated node with its capacity
  String[] rackAndHost = SLSUtils.getRackHostName(nodeIdStr);
  this.node = NodeInfo.newNodeInfo(rackAndHost[0], rackAndHost[1],
      BuilderUtils.newResource(memory, cores));
  this.rm = rm;
  // bookkeeping structures shared between heartbeat and launch paths
  this.completedContainerList =
      Collections.synchronizedList(new ArrayList<ContainerId>());
  this.releasedContainerList =
      Collections.synchronizedList(new ArrayList<ContainerId>());
  this.containerQueue = new DelayQueue<ContainerSimulator>();
  this.amContainerList =
      Collections.synchronizedList(new ArrayList<ContainerId>());
  this.runningContainers =
      new ConcurrentHashMap<ContainerId, ContainerSimulator>();
  // register this simulated NM with the RM
  RegisterNodeManagerRequest request =
      Records.newRecord(RegisterNodeManagerRequest.class);
  request.setNodeId(node.getNodeID());
  request.setResource(node.getTotalCapability());
  request.setHttpPort(80);
  RegisterNodeManagerResponse response =
      rm.getResourceTrackerService().registerNodeManager(request);
  this.masterKey = response.getNMTokenMasterKey();
}
开发者ID:naver,项目名称:hadoop,代码行数:31,代码来源:NMSimulator.java
示例5: testAMSimulator
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator; //导入依赖的package包/类
@Test
public void testAMSimulator() throws Exception {
  // Register a single application through a mock AM with no tasks
  MockAMSimulator app = new MockAMSimulator();
  List<ContainerSimulator> noContainers = new ArrayList<ContainerSimulator>();
  app.init(1, 1000, noContainers, rm, null, 0, 1000000L, "user1", "default",
      false, "app1");
  app.firstStep();
  // exactly one app should now be known to the RM, under the simulator's id
  Assert.assertEquals(1, rm.getRMContext().getRMApps().size());
  Assert.assertNotNull(rm.getRMContext().getRMApps().get(app.appId));
  // drive the app to completion
  app.lastStep();
}
开发者ID:naver,项目名称:hadoop,代码行数:15,代码来源:TestAMSimulator.java
示例6: middleStep
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator; //导入依赖的package包/类
@Override
public void middleStep() throws Exception {
// we check the lifetime for each running containers
// Drain the delay queue: poll() only returns containers whose scheduled
// end time has passed, so each one is moved from running to completed.
ContainerSimulator cs = null;
synchronized(completedContainerList) {
while ((cs = containerQueue.poll()) != null) {
runningContainers.remove(cs.getId());
completedContainerList.add(cs.getId());
LOG.debug(MessageFormat.format("Container {0} has completed",
cs.getId()));
}
}
// send heart beat
// Build and send the periodic NM heartbeat carrying the current
// container statuses and the last known NM token master key.
NodeHeartbeatRequest beatRequest =
Records.newRecord(NodeHeartbeatRequest.class);
beatRequest.setLastKnownNMTokenMasterKey(masterKey);
NodeStatus ns = Records.newRecord(NodeStatus.class);
ns.setContainersStatuses(generateContainerStatusList());
ns.setNodeId(node.getNodeID());
ns.setKeepAliveApplications(new ArrayList<ApplicationId>());
// RESPONSE_ID is a monotonically increasing heartbeat sequence number
ns.setResponseId(RESPONSE_ID ++);
ns.setNodeHealthStatus(NodeHealthStatus.newInstance(true, "", 0));
beatRequest.setNodeStatus(ns);
NodeHeartbeatResponse beatResponse =
rm.getResourceTrackerService().nodeHeartbeat(beatRequest);
// Honor any cleanup orders the RM piggybacked on the heartbeat response.
if (! beatResponse.getContainersToCleanup().isEmpty()) {
// remove from queue
synchronized(releasedContainerList) {
for (ContainerId containerId : beatResponse.getContainersToCleanup()){
if (amContainerList.contains(containerId)) {
// AM container (not killed?, only release)
// AM containers are only tracked in amContainerList, so releasing
// one is just removing it from that list.
synchronized(amContainerList) {
amContainerList.remove(containerId);
}
LOG.debug(MessageFormat.format("NodeManager {0} releases " +
"an AM ({1}).", node.getNodeID(), containerId));
} else {
// Normal container: drop it from both the running map and the
// delay queue so it cannot later surface as "completed".
cs = runningContainers.remove(containerId);
containerQueue.remove(cs);
releasedContainerList.add(containerId);
LOG.debug(MessageFormat.format("NodeManager {0} releases a " +
"container ({1}).", node.getNodeID(), containerId));
}
}
}
}
// The RM may order this node to shut down; lastStep() ends the simulator.
if (beatResponse.getNodeAction() == NodeAction.SHUTDOWN) {
lastStep();
}
}
开发者ID:naver,项目名称:hadoop,代码行数:53,代码来源:NMSimulator.java
示例7: getRunningContainers
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator; //导入依赖的package包/类
/** Exposes the live running-container map; intended for tests only. */
@VisibleForTesting
Map<ContainerId, ContainerSimulator> getRunningContainers() {
  return this.runningContainers;
}
开发者ID:naver,项目名称:hadoop,代码行数:5,代码来源:NMSimulator.java
示例8: startAMFromSLSTraces
import java.io.FileInputStream;
import java.io.InputStreamReader;
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator; //导入依赖的package包/类
/**
 * Parse workload information from SLS trace files and schedule one AM
 * simulator per job found in the traces.
 *
 * @param containerResource the resource assigned to every simulated container
 * @param heartbeatInterval AM heartbeat interval passed to each AM simulator
 * @throws IOException if a trace file cannot be opened or read
 */
@SuppressWarnings("unchecked")
private void startAMFromSLSTraces(Resource containerResource,
    int heartbeatInterval) throws IOException {
  // parse from sls traces
  JsonFactory jsonF = new JsonFactory();
  ObjectMapper mapper = new ObjectMapper();
  for (String inputTrace : inputTraces) {
    // FIX: read the trace with an explicit UTF-8 charset. The original used
    // FileReader, which decodes with the platform default encoding and can
    // corrupt non-ASCII trace content; this matches the later upstream
    // version of this method.
    Reader input =
        new InputStreamReader(new FileInputStream(inputTrace), "UTF-8");
    try {
      // the trace is a sequence of JSON job objects, streamed one at a time
      Iterator<Map> i = mapper.readValues(jsonF.createJsonParser(input),
          Map.class);
      while (i.hasNext()) {
        Map jsonJob = i.next();
        // load job information
        long jobStartTime = Long.parseLong(
            jsonJob.get("job.start.ms").toString());
        long jobFinishTime = Long.parseLong(
            jsonJob.get("job.end.ms").toString());
        String user = (String) jsonJob.get("job.user");
        if (user == null) user = "default";
        String queue = jsonJob.get("job.queue.name").toString();
        String oldAppId = jsonJob.get("job.id").toString();
        boolean isTracked = trackedApps.contains(oldAppId);
        // bump the per-queue application counter
        int queueSize = queueAppNumMap.containsKey(queue) ?
            queueAppNumMap.get(queue) : 0;
        queueSize ++;
        queueAppNumMap.put(queue, queueSize);
        // tasks: jobs without tasks are skipped entirely
        List tasks = (List) jsonJob.get("job.tasks");
        if (tasks == null || tasks.size() == 0) {
          continue;
        }
        List<ContainerSimulator> containerList =
            new ArrayList<ContainerSimulator>();
        for (Object o : tasks) {
          Map jsonTask = (Map) o;
          String hostname = jsonTask.get("container.host").toString();
          long taskStart = Long.parseLong(
              jsonTask.get("container.start.ms").toString());
          long taskFinish = Long.parseLong(
              jsonTask.get("container.end.ms").toString());
          // a task's container lifetime is its trace duration
          long lifeTime = taskFinish - taskStart;
          int priority = Integer.parseInt(
              jsonTask.get("container.priority").toString());
          String type = jsonTask.get("container.type").toString();
          containerList.add(new ContainerSimulator(containerResource,
              lifeTime, hostname, priority, type));
        }
        // create a new AM of the type named in the trace ("am.type")
        String amType = jsonJob.get("am.type").toString();
        AMSimulator amSim = (AMSimulator) ReflectionUtils.newInstance(
            amClassMap.get(amType), new Configuration());
        if (amSim != null) {
          amSim.init(AM_ID++, heartbeatInterval, containerList, rm,
              this, jobStartTime, jobFinishTime, user, queue,
              isTracked, oldAppId);
          runner.schedule(amSim);
          maxRuntime = Math.max(maxRuntime, jobFinishTime);
          numTasks += containerList.size();
          amMap.put(oldAppId, amSim);
        }
      }
    } finally {
      input.close();
    }
  }
}
开发者ID:naver,项目名称:hadoop,代码行数:75,代码来源:SLSRunner.java
示例9: startAMFromSLSTraces
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator; //导入依赖的package包/类
/**
* parse workload information from sls trace files
* For each job in each trace file, builds the list of simulated containers
* and schedules an AM simulator of the trace-specified type.
*/
@SuppressWarnings("unchecked")
private void startAMFromSLSTraces(Resource containerResource,
int heartbeatInterval) throws IOException {
// parse from sls traces
JsonFactory jsonF = new JsonFactory();
ObjectMapper mapper = new ObjectMapper();
for (String inputTrace : inputTraces) {
// read the trace with an explicit UTF-8 charset (not the platform default)
Reader input =
new InputStreamReader(new FileInputStream(inputTrace), "UTF-8");
try {
// the trace is a sequence of JSON job objects, streamed one at a time
Iterator<Map> i = mapper.readValues(jsonF.createJsonParser(input),
Map.class);
while (i.hasNext()) {
Map jsonJob = i.next();
// load job information
long jobStartTime = Long.parseLong(
jsonJob.get("job.start.ms").toString());
long jobFinishTime = Long.parseLong(
jsonJob.get("job.end.ms").toString());
String user = (String) jsonJob.get("job.user");
if (user == null) user = "default";
String queue = jsonJob.get("job.queue.name").toString();
String oldAppId = jsonJob.get("job.id").toString();
boolean isTracked = trackedApps.contains(oldAppId);
// bump the per-queue application counter
int queueSize = queueAppNumMap.containsKey(queue) ?
queueAppNumMap.get(queue) : 0;
queueSize ++;
queueAppNumMap.put(queue, queueSize);
// tasks: jobs without tasks are skipped entirely
List tasks = (List) jsonJob.get("job.tasks");
if (tasks == null || tasks.size() == 0) {
continue;
}
List<ContainerSimulator> containerList =
new ArrayList<ContainerSimulator>();
for (Object o : tasks) {
Map jsonTask = (Map) o;
String hostname = jsonTask.get("container.host").toString();
long taskStart = Long.parseLong(
jsonTask.get("container.start.ms").toString());
long taskFinish = Long.parseLong(
jsonTask.get("container.end.ms").toString());
// a task's container lifetime is its trace duration
long lifeTime = taskFinish - taskStart;
int priority = Integer.parseInt(
jsonTask.get("container.priority").toString());
String type = jsonTask.get("container.type").toString();
containerList.add(new ContainerSimulator(containerResource,
lifeTime, hostname, priority, type));
}
// create a new AM of the type named in the trace ("am.type")
String amType = jsonJob.get("am.type").toString();
AMSimulator amSim = (AMSimulator) ReflectionUtils.newInstance(
amClassMap.get(amType), new Configuration());
if (amSim != null) {
amSim.init(AM_ID++, heartbeatInterval, containerList, rm,
this, jobStartTime, jobFinishTime, user, queue,
isTracked, oldAppId);
runner.schedule(amSim);
// track the latest finish time and total task count across all jobs
maxRuntime = Math.max(maxRuntime, jobFinishTime);
numTasks += containerList.size();
amMap.put(oldAppId, amSim);
}
}
} finally {
input.close();
}
}
}
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:76,代码来源:SLSRunner.java
注:本文中的org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论