This article collects typical usage examples of the Java class org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils. If you are unsure what SchedulerUtils does or how to use it, the curated class code examples below may help.
The SchedulerUtils class belongs to the org.apache.hadoop.yarn.server.resourcemanager.scheduler package. Twenty code examples of the class are shown below, sorted by popularity by default.
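Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name and the IDs are hypothetical, built only for illustration) of the two SchedulerUtils factory methods that recur throughout this page: building a ContainerStatus for a container lost with its node, and for a preempted container.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;

public class SchedulerUtilsSketch { // hypothetical class, for illustration only
  public static void main(String[] args) {
    // Hypothetical IDs, constructed only to keep the sketch self-contained.
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId = BuilderUtils.newContainerId(attemptId, 1);

    // Status reported for a container lost with its node
    // (see the removeNode() examples below).
    ContainerStatus lost = SchedulerUtils.createAbnormalContainerStatus(
        containerId, SchedulerUtils.LOST_CONTAINER);

    // Status reported for a container killed by preemption
    // (see the killContainer()/warnOrKillContainer() examples below).
    ContainerStatus preempted = SchedulerUtils.createPreemptedContainerStatus(
        containerId, SchedulerUtils.PREEMPTED_CONTAINER);

    System.out.println(lost.getDiagnostics());
    System.out.println(preempted.getDiagnostics());
  }
}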
Example 1: normalizeAndValidateRequests
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
/**
 * Utility method to validate a list of resource requests, by ensuring that the
 * requested memory/vcore is non-negative and not greater than max
 */
public static void normalizeAndValidateRequests(List<ResourceRequest> ask,
    Resource maximumResource, String queueName, YarnScheduler scheduler,
    RMContext rmContext)
    throws InvalidResourceRequestException {
  QueueInfo queueInfo = null;
  try {
    queueInfo = scheduler.getQueueInfo(queueName, false, false);
  } catch (IOException e) {
    // Best-effort lookup: if the queue cannot be resolved, queueInfo stays null.
  }
  for (ResourceRequest resReq : ask) {
    SchedulerUtils.normalizeAndvalidateRequest(resReq, maximumResource,
        queueName, scheduler, rmContext, queueInfo);
  }
}
Developer: naver · Project: hadoop · Lines: 21 · Source file: RMServerUtils.java
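As a hedged usage sketch (the wrapper class and method name are hypothetical; the scheduler and rmContext are assumed to come from a running ResourceManager and are passed in as parameters), a caller might build an "ask" list with BuilderUtils.newResourceRequest and run it through this validation:

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.resource.Resources;

public class AskValidationSketch { // hypothetical wrapper, for illustration only
  // Validate a single 1 GB / 1 vcore request against the "default" queue.
  public static void validateOneGbAsk(YarnScheduler scheduler, RMContext rmContext)
      throws InvalidResourceRequestException {
    ResourceRequest req = BuilderUtils.newResourceRequest(
        Priority.newInstance(0), ResourceRequest.ANY,
        Resources.createResource(1024, 1), 1);
    List<ResourceRequest> ask = Collections.singletonList(req);
    RMServerUtils.normalizeAndValidateRequests(ask,
        scheduler.getMaximumResourceCapability(), "default", scheduler, rmContext);
  }
}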
Example 2: removeNode
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
private synchronized void removeNode(RMNode nodeInfo) {
  FiCaSchedulerNode node = getNode(nodeInfo.getNodeID());
  if (node == null) {
    return;
  }
  // Kill running containers
  for (RMContainer container : node.getRunningContainers()) {
    completedContainer(container,
        SchedulerUtils.createAbnormalContainerStatus(
            container.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }
  // Remove the node
  this.nodes.remove(nodeInfo.getNodeID());
  updateMaximumAllocation(node, false);
  // Update cluster metrics
  Resources.subtractFrom(clusterResource, node.getRMNode().getTotalCapability());
}
Developer: naver · Project: hadoop · Lines: 22 · Source file: FifoScheduler.java
Example 3: testAMCrashAtScheduled
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
@Test
public void testAMCrashAtScheduled() {
  // This is to test sending CONTAINER_FINISHED event at SCHEDULED state.
  // Verify the state transition is correct.
  scheduleApplicationAttempt();
  ContainerStatus cs =
      SchedulerUtils.createAbnormalContainerStatus(
          BuilderUtils.newContainerId(
              applicationAttempt.getAppAttemptId(), 1),
          SchedulerUtils.LOST_CONTAINER);
  // Send CONTAINER_FINISHED event at SCHEDULED state;
  // the state should be FINAL_SAVING with previous state SCHEDULED.
  NodeId anyNodeId = NodeId.newInstance("host", 1234);
  applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(
      applicationAttempt.getAppAttemptId(), cs, anyNodeId));
  // createApplicationAttemptState will return the previous state (SCHEDULED)
  // if the current state is FINAL_SAVING.
  assertEquals(YarnApplicationAttemptState.SCHEDULED,
      applicationAttempt.createApplicationAttemptState());
  // Send ATTEMPT_UPDATE_SAVED event and
  // verify the state is changed to FAILED.
  sendAttemptUpdateSavedEvent(applicationAttempt);
  assertEquals(RMAppAttemptState.FAILED,
      applicationAttempt.getAppAttemptState());
  verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}
Developer: naver · Project: hadoop · Lines: 27 · Source file: TestRMAppAttemptTransitions.java
Example 4: normalizeAndValidateRequests
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
/**
 * Utility method to validate a list of resource requests, by ensuring that the
 * requested memory/vcore is non-negative and not greater than max
 */
public static void normalizeAndValidateRequests(List<ResourceRequest> ask,
    Resource maximumResource, String queueName, YarnScheduler scheduler,
    RMContext rmContext)
    throws InvalidResourceRequestException {
  // Get queue from scheduler
  QueueInfo queueInfo = null;
  try {
    queueInfo = scheduler.getQueueInfo(queueName, false, false);
  } catch (IOException e) {
    // Best-effort lookup: if the queue cannot be resolved, queueInfo stays null.
  }
  for (ResourceRequest resReq : ask) {
    SchedulerUtils.normalizeAndvalidateRequest(resReq, maximumResource,
        queueName, scheduler, rmContext, queueInfo);
  }
}
Developer: aliyun-beta · Project: aliyun-oss-hadoop-fs · Lines: 21 · Source file: RMServerUtils.java
Example 5: removeNode
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
private synchronized void removeNode(RMNode nodeInfo) {
  FiCaSchedulerNode node = getNode(nodeInfo.getNodeID());
  if (node == null) {
    return;
  }
  // Kill running containers
  for (RMContainer container : node.getRunningContainers()) {
    completedContainer(container,
        SchedulerUtils.createAbnormalContainerStatus(
            container.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }
  // Remove the node
  this.nodes.remove(nodeInfo.getNodeID());
  updateMaximumAllocation(node, false);
  // Update cluster metrics
  Resources.subtractFrom(clusterResource, node.getTotalResource());
}
Developer: aliyun-beta · Project: aliyun-oss-hadoop-fs · Lines: 22 · Source file: FifoScheduler.java
Example 6: handleExcessReservedContainer
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
private void handleExcessReservedContainer(Resource clusterResource,
    CSAssignment assignment, FiCaSchedulerNode node, FiCaSchedulerApp app) {
  if (assignment.getExcessReservation() != null) {
    RMContainer excessReservedContainer = assignment.getExcessReservation();
    if (excessReservedContainer.hasIncreaseReservation()) {
      unreserveIncreasedContainer(clusterResource,
          app, node, excessReservedContainer);
    } else {
      completedContainer(clusterResource, assignment.getApplication(),
          scheduler.getNode(excessReservedContainer.getAllocatedNode()),
          excessReservedContainer,
          SchedulerUtils.createAbnormalContainerStatus(
              excessReservedContainer.getContainerId(),
              SchedulerUtils.UNRESERVED_CONTAINER),
          RMContainerEventType.RELEASED, null, false);
    }
    assignment.setExcessReservation(null);
  }
}
Developer: aliyun-beta · Project: aliyun-oss-hadoop-fs · Lines: 22 · Source file: LeafQueue.java
Example 7: removeNode
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
private synchronized void removeNode(RMNode nodeInfo) {
  FiCaSchedulerNode node = getNode(nodeInfo.getNodeID());
  if (node == null) {
    return;
  }
  // Kill running containers
  for (RMContainer container : node.getRunningContainers()) {
    containerCompleted(container,
        SchedulerUtils.createAbnormalContainerStatus(
            container.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }
  // Remove the node
  this.nodes.remove(nodeInfo.getNodeID());
  // Update cluster metrics
  Resources.subtractFrom(clusterResource, node.getRMNode().getTotalCapability());
}
Developer: ict-carch · Project: hadoop-plus · Lines: 21 · Source file: FifoScheduler.java
Example 8: removeNode
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
private synchronized void removeNode(RMNode nodeInfo) {
  FiCaSchedulerNode node = getNode(nodeInfo.getNodeID());
  if (node == null) {
    return;
  }
  // Kill running containers
  for (RMContainer container : node.getCopiedListOfRunningContainers()) {
    super.completedContainer(container,
        SchedulerUtils.createAbnormalContainerStatus(
            container.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }
  // Remove the node
  this.nodes.remove(nodeInfo.getNodeID());
  updateMaximumAllocation(node, false);
  // Update cluster metrics
  Resources.subtractFrom(clusterResource, node.getTotalResource());
}
Developer: hopshadoop · Project: hops · Lines: 22 · Source file: FifoScheduler.java
Example 9: checkQueuePartition
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
/**
 * Check whether the application can be moved to a queue with node labels
 * enabled. All labels used during the application's lifetime will be checked.
 *
 * @param app
 * @param dest
 * @throws YarnException
 */
private void checkQueuePartition(FiCaSchedulerApp app, LeafQueue dest)
    throws YarnException {
  if (!YarnConfiguration.areNodeLabelsEnabled(conf)) {
    return;
  }
  Set<String> targetqueuelabels = dest.getAccessibleNodeLabels();
  AppSchedulingInfo schedulingInfo = app.getAppSchedulingInfo();
  Set<String> appLabelexpressions = schedulingInfo.getRequestedPartitions();
  // Access to the default partition is always available; remove the empty label
  appLabelexpressions.remove(RMNodeLabelsManager.NO_LABEL);
  Set<String> nonAccessiblelabels = new HashSet<String>();
  for (String label : appLabelexpressions) {
    if (!SchedulerUtils.checkQueueLabelExpression(targetqueuelabels, label,
        null)) {
      nonAccessiblelabels.add(label);
    }
  }
  if (nonAccessiblelabels.size() > 0) {
    throw new YarnException(
        "Specified queue=" + dest.getQueueName() + " can't satisfy following "
            + "apps label expressions =" + nonAccessiblelabels
            + " accessible node labels =" + targetqueuelabels);
  }
}
Developer: hopshadoop · Project: hops · Lines: 33 · Source file: CapacityScheduler.java
Example 10: killToPreemptContainers
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
private void killToPreemptContainers(Resource clusterResource,
    FiCaSchedulerNode node,
    CSAssignment assignment) {
  if (assignment.getContainersToKill() != null) {
    StringBuilder sb = new StringBuilder("Killing containers: [");
    for (RMContainer c : assignment.getContainersToKill()) {
      FiCaSchedulerApp application = csContext.getApplicationAttempt(
          c.getApplicationAttemptId());
      LeafQueue q = application.getCSLeafQueue();
      q.completedContainer(clusterResource, application, node, c, SchedulerUtils
          .createPreemptedContainerStatus(c.getContainerId(),
              SchedulerUtils.PREEMPTED_CONTAINER), RMContainerEventType.KILL,
          null, false);
      sb.append("(container=" + c.getContainerId() + " resource=" + c
          .getAllocatedResource() + ")");
    }
    sb.append("] for container=" + assignment.getAssignmentInformation()
        .getFirstAllocatedOrReservedContainerId() + " resource=" + assignment
        .getResource());
    LOG.info(sb.toString());
  }
}
Developer: hopshadoop · Project: hops · Lines: 26 · Source file: LeafQueue.java
Example 11: validateResourceRequest
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
private void validateResourceRequest(
    ApplicationSubmissionContext submissionContext)
    throws InvalidResourceRequestException {
  // Validation of the ApplicationSubmissionContext needs to be completed
  // here. Only those fields that are dependent on RM's configuration are
  // checked here as they have to be validated whether they are part of new
  // submission or just being recovered.
  // Check whether AM resource requirements are within required limits
  if (!submissionContext.getUnmanagedAM()) {
    ResourceRequest amReq = BuilderUtils.newResourceRequest(
        RMAppAttemptImpl.AM_CONTAINER_PRIORITY, ResourceRequest.ANY,
        submissionContext.getResource(), 1);
    try {
      SchedulerUtils.validateResourceRequest(amReq,
          scheduler.getMaximumResourceCapability());
    } catch (InvalidResourceRequestException e) {
      LOG.warn("RM app submission failed in validating AM resource request"
          + " for application " + submissionContext.getApplicationId(), e);
      throw e;
    }
  }
}
Developer: Seagate · Project: hadoop-on-lustre2 · Lines: 24 · Source file: RMAppManager.java
Example 12: doneApplicationAttempt
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
private synchronized void doneApplicationAttempt(
    ApplicationAttemptId applicationAttemptId,
    RMAppAttemptState rmAppAttemptFinalState, boolean keepContainers)
    throws IOException {
  FiCaSchedulerApp attempt = getApplicationAttempt(applicationAttemptId);
  SchedulerApplication<FiCaSchedulerApp> application =
      applications.get(applicationAttemptId.getApplicationId());
  if (application == null || attempt == null) {
    throw new IOException("Unknown application " + applicationAttemptId +
        " has completed!");
  }
  // Kill all 'live' containers
  for (RMContainer container : attempt.getLiveContainers()) {
    if (keepContainers
        && container.getState().equals(RMContainerState.RUNNING)) {
      // Do not kill the running container in the case of work-preserving AM
      // restart.
      LOG.info("Skip killing " + container.getContainerId());
      continue;
    }
    completedContainer(container,
        SchedulerUtils.createAbnormalContainerStatus(
            container.getContainerId(), SchedulerUtils.COMPLETED_APPLICATION),
        RMContainerEventType.KILL);
  }
  // Clean up pending requests, metrics etc.
  attempt.stop(rmAppAttemptFinalState);
}
Developer: naver · Project: hadoop · Lines: 31 · Source file: FifoScheduler.java
Example 13: warnOrKillContainer
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
protected void warnOrKillContainer(RMContainer container) {
  ApplicationAttemptId appAttemptId = container.getApplicationAttemptId();
  FSAppAttempt app = getSchedulerApp(appAttemptId);
  FSLeafQueue queue = app.getQueue();
  LOG.info("Preempting container (prio=" + container.getContainer().getPriority() +
      "res=" + container.getContainer().getResource() +
      ") from queue " + queue.getName());
  Long time = app.getContainerPreemptionTime(container);
  if (time != null) {
    // If we asked for preemption more than maxWaitTimeBeforeKill ms ago,
    // proceed with kill
    if (time + waitTimeBeforeKill < getClock().getTime()) {
      ContainerStatus status =
          SchedulerUtils.createPreemptedContainerStatus(
              container.getContainerId(), SchedulerUtils.PREEMPTED_CONTAINER);
      recoverResourceRequestForContainer(container);
      // TODO: Not sure if this ever actually adds this to the list of cleanup
      // containers on the RMNode (see SchedulerNode.releaseContainer()).
      completedContainer(container, status, RMContainerEventType.KILL);
      LOG.info("Killing container" + container +
          " (after waiting for preemption for " +
          (getClock().getTime() - time) + "ms)");
    }
  } else {
    // Track the request in the FSAppAttempt itself
    app.addPreemption(container, getClock().getTime());
  }
}
Developer: naver · Project: hadoop · Lines: 32 · Source file: FairScheduler.java
Example 14: removeNode
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
private synchronized void removeNode(RMNode rmNode) {
  FSSchedulerNode node = getFSSchedulerNode(rmNode.getNodeID());
  // This can occur when an UNHEALTHY node reconnects
  if (node == null) {
    return;
  }
  Resources.subtractFrom(clusterResource, rmNode.getTotalCapability());
  updateRootQueueMetrics();
  // Remove running containers
  List<RMContainer> runningContainers = node.getRunningContainers();
  for (RMContainer container : runningContainers) {
    completedContainer(container,
        SchedulerUtils.createAbnormalContainerStatus(
            container.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }
  // Remove reservations, if any
  RMContainer reservedContainer = node.getReservedContainer();
  if (reservedContainer != null) {
    completedContainer(reservedContainer,
        SchedulerUtils.createAbnormalContainerStatus(
            reservedContainer.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }
  nodes.remove(rmNode.getNodeID());
  queueMgr.getRootQueue().setSteadyFairShare(clusterResource);
  queueMgr.getRootQueue().recomputeSteadyShares();
  updateMaximumAllocation(node, false);
  LOG.info("Removed node " + rmNode.getNodeAddress() +
      " cluster capacity: " + clusterResource);
}
Developer: naver · Project: hadoop · Lines: 37 · Source file: FairScheduler.java
Example 15: getAcls
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
public Map<AccessType, AccessControlList> getAcls(String queue) {
  Map<AccessType, AccessControlList> acls =
      new HashMap<AccessType, AccessControlList>();
  for (QueueACL acl : QueueACL.values()) {
    acls.put(SchedulerUtils.toAccessType(acl), getAcl(queue, acl));
  }
  return acls;
}
Developer: naver · Project: hadoop · Lines: 9 · Source file: CapacitySchedulerConfiguration.java
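A hedged follow-up sketch (the class and method names are hypothetical; isUserAllowed comes from org.apache.hadoop.security.authorize.AccessControlList) of how the returned map, keyed by AccessType via SchedulerUtils.toAccessType, might be consulted for the current user:

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.security.AccessType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;

public class QueueAclCheckSketch { // hypothetical class, for illustration only
  // Returns true if the current user may submit applications to the queue,
  // given a map produced as in the getAcls() example above.
  public static boolean canSubmit(Map<AccessType, AccessControlList> acls)
      throws IOException {
    AccessControlList submitAcl =
        acls.get(SchedulerUtils.toAccessType(QueueACL.SUBMIT_APPLICATIONS));
    return submitAcl != null
        && submitAcl.isUserAllowed(UserGroupInformation.getCurrentUser());
  }
}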
Example 16: dropContainerReservation
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
@Override
public void dropContainerReservation(RMContainer container) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("DROP_RESERVATION:" + container.toString());
  }
  completedContainer(container,
      SchedulerUtils.createAbnormalContainerStatus(
          container.getContainerId(),
          SchedulerUtils.UNRESERVED_CONTAINER),
      RMContainerEventType.KILL);
}
Developer: naver · Project: hadoop · Lines: 12 · Source file: CapacityScheduler.java
Example 17: killContainer
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
@Override
public void killContainer(RMContainer cont) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("KILL_CONTAINER: container" + cont.toString());
  }
  recoverResourceRequestForContainer(cont);
  completedContainer(cont, SchedulerUtils.createPreemptedContainerStatus(
      cont.getContainerId(), SchedulerUtils.PREEMPTED_CONTAINER),
      RMContainerEventType.KILL);
}
Developer: naver · Project: hadoop · Lines: 11 · Source file: CapacityScheduler.java
Example 18: warnOrKillContainer
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
protected void warnOrKillContainer(RMContainer container) {
  ApplicationAttemptId appAttemptId = container.getApplicationAttemptId();
  FSAppAttempt app = getSchedulerApp(appAttemptId);
  FSLeafQueue queue = app.getQueue();
  LOG.info("Preempting container (prio=" + container.getContainer().getPriority() +
      "res=" + container.getContainer().getResource() +
      ") from queue " + queue.getName());
  Long time = app.getContainerPreemptionTime(container);
  if (time != null) {
    // If we asked for preemption more than maxWaitTimeBeforeKill ms ago,
    // proceed with kill
    if (time + waitTimeBeforeKill < getClock().getTime()) {
      ContainerStatus status =
          SchedulerUtils.createPreemptedContainerStatus(
              container.getContainerId(), SchedulerUtils.PREEMPTED_CONTAINER);
      // TODO: Not sure if this ever actually adds this to the list of cleanup
      // containers on the RMNode (see SchedulerNode.releaseContainer()).
      completedContainer(container, status, RMContainerEventType.KILL);
      LOG.info("Killing container" + container +
          " (after waiting for preemption for " +
          (getClock().getTime() - time) + "ms)");
    }
  } else {
    // Track the request in the FSAppAttempt itself
    app.addPreemption(container, getClock().getTime());
  }
}
Developer: aliyun-beta · Project: aliyun-oss-hadoop-fs · Lines: 31 · Source file: FairScheduler.java
Example 19: killContainer
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
@Override
public void killContainer(RMContainer cont) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("KILL_CONTAINER: container" + cont.toString());
  }
  completedContainer(cont, SchedulerUtils.createPreemptedContainerStatus(
      cont.getContainerId(), SchedulerUtils.PREEMPTED_CONTAINER),
      RMContainerEventType.KILL);
}
Developer: aliyun-beta · Project: aliyun-oss-hadoop-fs · Lines: 10 · Source file: CapacityScheduler.java
Example 20: killContainer
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; // import the required package/class
@Override
public void killContainer(RMContainer cont) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("KILL_CONTAINER: container" + cont.toString());
  }
  recoverResourceRequestForContainer(cont);
  completedContainer(cont, SchedulerUtils.createPreemptedContainerStatus(
      cont.getContainerId(), SchedulerUtils.PREEMPTED_CONTAINER),
      RMContainerEventType.KILL);
}
Developer: yncxcw · Project: big-c · Lines: 11 · Source file: CapacityScheduler.java
Note: The org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils examples in this article were collected from GitHub, MSDocs, and similar source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and the copyright of the source code belongs to the original authors. Consult the corresponding project's license before redistributing or reusing the code. Do not reproduce this article without permission.