//.........这里部分代码省略.........
}
/* lookup the value on the global ompi_server, but error
* if that server wasn't contacted
*/
if (!mca_pubsub_orte_component.server_found) {
opal_show_help("help-ompi-pubsub-orte.txt",
"pubsub-orte:no-server",
true, (long)ORTE_PROC_MY_NAME->vpid,
"lookup from");
return NULL;
}
info_host = &mca_pubsub_orte_component.server;
} else if (NONE == lookup[i]) {
continue;
} else {
/* unknown host! */
opal_show_help("help-ompi-pubsub-orte.txt",
"pubsub-orte:unknown-order",
true, (long)ORTE_PROC_MY_NAME->vpid);
return NULL;
}
/* go look it up */
/* construct the buffer */
buf = OBJ_NEW(opal_buffer_t);
/* pack the lookup command */
if (OPAL_SUCCESS != (ret = opal_dss.pack(buf, &cmd, 1, ORTE_DATA_SERVER_CMD))) {
ORTE_ERROR_LOG(ret);
OBJ_RELEASE(buf);
goto CLEANUP;
}
/* pack the service name */
if (OPAL_SUCCESS != (ret = opal_dss.pack(buf, &service_name, 1, OPAL_STRING))) {
ORTE_ERROR_LOG(ret);
OBJ_RELEASE(buf);
goto CLEANUP;
}
/* send the cmd */
if (0 > (ret = orte_rml.send_buffer_nb(info_host, buf,
ORTE_RML_TAG_DATA_SERVER,
orte_rml_send_callback, NULL))) {
ORTE_ERROR_LOG(ret);
OBJ_RELEASE(buf);
goto CLEANUP;
}
/* get the answer */
OBJ_CONSTRUCT(&xfer, orte_rml_recv_cb_t);
xfer.active = true;
orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
ORTE_RML_TAG_DATA_CLIENT,
ORTE_RML_NON_PERSISTENT,
orte_rml_recv_callback, &xfer);
OMPI_WAIT_FOR_COMPLETION(xfer.active);
/* unpack the return code */
cnt = 1;
if (OPAL_SUCCESS != (ret = opal_dss.unpack(&xfer.data, &rc, &cnt, OPAL_INT))) {
ORTE_ERROR_LOG(ret);
goto CLEANUP;
}
OPAL_OUTPUT_VERBOSE((1, ompi_pubsub_base_framework.framework_output,
"%s pubsub:orte: lookup returned status %d",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), rc));
if (ORTE_SUCCESS == rc) {
/* the server was able to lookup the port - unpack the port name */
cnt=1;
if (OPAL_SUCCESS != (ret = opal_dss.unpack(&xfer.data, &port_name, &cnt, OPAL_STRING))) {
ORTE_ERROR_LOG(ret);
OBJ_DESTRUCT(&xfer);
goto CLEANUP;
}
OPAL_OUTPUT_VERBOSE((1, ompi_pubsub_base_framework.framework_output,
"%s pubsub:orte: lookup returned port %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(NULL == port_name) ? "NULL" : port_name));
if (NULL != port_name) {
/* got an answer - return it */
OBJ_DESTRUCT(&xfer);
return port_name;
}
}
/* if we didn't get a port_name, then continue */
OBJ_DESTRUCT(&xfer);
}
/* only get here if we tried both options and failed - since the
* buffer will already have been cleaned up, just return
*/
CLEANUP:
return NULL;
}
int orte_regex_create(char *nodelist, char **regexp)
{
char *node;
char prefix[ORTE_MAX_NODE_PREFIX];
int i, j, len, startnum, nodenum, numdigits;
bool found, fullname;
char *suffix, *sfx;
orte_regex_node_t *ndreg;
orte_regex_range_t *range;
opal_list_t nodeids;
opal_list_item_t *item, *itm2;
char **regexargs = NULL, *tmp, *tmp2;
char *cptr;
/* define the default */
*regexp = NULL;
cptr = strchr(nodelist, ',');
if (NULL == cptr) {
/* if there is only one node, don't bother */
*regexp = strdup(nodelist);
return ORTE_SUCCESS;
}
/* setup the list of results */
OBJ_CONSTRUCT(&nodeids, opal_list_t);
/* cycle thru the array of nodenames */
node = nodelist;
while (NULL != (cptr = strchr(node, ',')) || 0 < strlen(node)) {
if (NULL != cptr) {
*cptr = '\0';
}
/* determine this node's prefix by looking for first non-alpha char */
fullname = false;
len = strlen(node);
startnum = -1;
memset(prefix, 0, ORTE_MAX_NODE_PREFIX);
numdigits = 0;
for (i=0, j=0; i < len; i++) {
if (!isalpha(node[i])) {
/* found a non-alpha char */
if (!isdigit(node[i])) {
/* if it is anything but a digit, we just use
* the entire name
*/
fullname = true;
break;
}
/* count the size of the numeric field - but don't
* add the digits to the prefix
*/
numdigits++;
if (startnum < 0) {
/* okay, this defines end of the prefix */
startnum = i;
}
continue;
}
if (startnum < 0) {
prefix[j++] = node[i];
}
}
if (fullname || startnum < 0) {
/* can't compress this name - just add it to the list */
ndreg = OBJ_NEW(orte_regex_node_t);
ndreg->prefix = strdup(node);
opal_list_append(&nodeids, &ndreg->super);
/* move to the next posn */
if (NULL == cptr) {
break;
}
node = cptr + 1;
continue;
}
/* convert the digits and get any suffix */
nodenum = strtol(&node[startnum], &sfx, 10);
if (NULL != sfx) {
suffix = strdup(sfx);
} else {
suffix = NULL;
}
/* is this nodeid already on our list? */
found = false;
for (item = opal_list_get_first(&nodeids);
!found && item != opal_list_get_end(&nodeids);
item = opal_list_get_next(item)) {
ndreg = (orte_regex_node_t*)item;
if (0 < strlen(prefix) && NULL == ndreg->prefix) {
continue;
}
if (0 == strlen(prefix) && NULL != ndreg->prefix) {
continue;
}
if (0 < strlen(prefix) && NULL != ndreg->prefix
&& 0 != strcmp(prefix, ndreg->prefix)) {
continue;
}
if (NULL == suffix && NULL != ndreg->suffix) {
continue;
//.........这里部分代码省略.........
static int rank_by(orte_job_t *jdata,
orte_app_context_t *app,
opal_list_t *nodes,
hwloc_obj_type_t target,
unsigned cache_level)
{
hwloc_obj_t obj;
int num_objs, i, j, rc;
orte_vpid_t num_ranked=0;
orte_node_t *node;
orte_proc_t *proc;
orte_vpid_t vpid;
int cnt;
opal_pointer_array_t objs;
bool all_done;
opal_list_item_t *item;
hwloc_obj_t locale;
if (ORTE_RANKING_SPAN & ORTE_GET_RANKING_DIRECTIVE(jdata->map->ranking)) {
return rank_span(jdata, app, nodes, target, cache_level);
} else if (ORTE_RANKING_FILL & ORTE_GET_RANKING_DIRECTIVE(jdata->map->ranking)) {
return rank_fill(jdata, app, nodes, target, cache_level);
}
/* if ranking is not spanned or filled, then we
* default to assign ranks sequentially across
* target objects within a node until that node
* is fully ranked, and then move on to the next
* node
*
* Node 0 Node 1
* Obj 0 Obj 1 Obj 0 Obj 1
* 0 2 1 3 8 10 9 11
* 4 6 5 7 12 14 13 15
*/
/* setup the pointer array */
OBJ_CONSTRUCT(&objs, opal_pointer_array_t);
opal_pointer_array_init(&objs, 2, INT_MAX, 2);
vpid = jdata->num_procs;
cnt = 0;
for (item = opal_list_get_first(nodes);
item != opal_list_get_end(nodes);
item = opal_list_get_next(item)) {
node = (orte_node_t*)item;
/* get the number of objects - only consider those we can actually use */
num_objs = opal_hwloc_base_get_nbobjs_by_type(node->topology, target,
cache_level, OPAL_HWLOC_AVAILABLE);
opal_output_verbose(5, orte_rmaps_base_framework.framework_output,
"mca:rmaps:rank_by: found %d objects on node %s with %d procs",
num_objs, node->name, (int)node->num_procs);
if (0 == num_objs) {
return ORTE_ERR_NOT_SUPPORTED;
}
/* collect all the objects */
for (i=0; i < num_objs; i++) {
obj = opal_hwloc_base_get_obj_by_type(node->topology, target,
cache_level, i, OPAL_HWLOC_AVAILABLE);
opal_pointer_array_set_item(&objs, i, obj);
}
/* cycle across the objects, assigning a proc to each one,
* until all procs have been assigned - unfortunately, since
* more than this job may be mapped onto a node, the number
* of procs on the node can't be used to tell us when we
* are done. Instead, we have to just keep going until all
* procs are ranked - which means we have to make one extra
* pass thru the loop
*
* Perhaps someday someone will come up with a more efficient
* algorithm, but this works for now.
*/
all_done = false;
while (!all_done && cnt < app->num_procs) {
all_done = true;
/* cycle across the objects */
for (i=0; i < num_objs && cnt < app->num_procs; i++) {
obj = (hwloc_obj_t)opal_pointer_array_get_item(&objs, i);
/* find the next proc on this object */
for (j=0; j < node->procs->size && cnt < app->num_procs; j++) {
if (NULL == (proc = (orte_proc_t*)opal_pointer_array_get_item(node->procs, j))) {
continue;
}
/* ignore procs from other jobs */
if (proc->name.jobid != jdata->jobid) {
opal_output_verbose(5, orte_rmaps_base_framework.framework_output,
"mca:rmaps:rank_by skipping proc %s - from another job, num_ranked %d",
ORTE_NAME_PRINT(&proc->name), num_ranked);
continue;
}
/* ignore procs that are already ranked */
if (ORTE_VPID_INVALID != proc->name.vpid) {
continue;
}
/* ignore procs from other apps */
if (proc->app_idx != app->idx) {
continue;
}
//.........这里部分代码省略.........
int
ompi_proc_pack(ompi_proc_t **proclist, int proclistsize,
               bool full_info,
               opal_buffer_t* buf)
{
    int i, rc;

    OPAL_THREAD_LOCK(&ompi_proc_lock);

    /* cycle through the provided array, packing the OMPI level
     * data for each proc. This data may or may not be included
     * in any subsequent modex operation, so we include it here
     * to ensure completion of a connect/accept handshake. See
     * the ompi/mca/dpm framework for an example of where and how
     * this info is used.
     *
     * Eventually, we will review the procedures that call this
     * function to see if duplication of communication can be
     * reduced. For now, just go ahead and pack the info so it
     * can be sent.
     */
    for (i=0; i<proclistsize; i++) {
        rc = opal_dss.pack(buf, &(proclist[i]->proc_name), 1, OMPI_NAME);
        if (OPAL_SUCCESS != rc) {
            OMPI_ERROR_LOG(rc);
            goto unlock;
        }
        if (full_info) {
            int32_t num_entries;
            opal_value_t *kv;
            opal_list_t data;

            /* fetch all global info we know about the peer - while
             * the remote procs may already know some of it, we cannot
             * be certain they do. So we must include a full dump of
             * everything we know about this proc, excluding INTERNAL
             * data that each process computes about its peers
             */
            OBJ_CONSTRUCT(&data, opal_list_t);
            rc = opal_db.fetch_multiple((opal_identifier_t*)&proclist[i]->proc_name,
                                        OPAL_SCOPE_GLOBAL, NULL, &data);
            if (OPAL_SUCCESS != rc) {
                /* a fetch failure is non-fatal: we simply advertise
                 * zero entries for this proc and keep going */
                OMPI_ERROR_LOG(rc);
                num_entries = 0;
            } else {
                /* count the number of entries we will send */
                num_entries = opal_list_get_size(&data);
            }
            /* put the number of entries into the buffer */
            rc = opal_dss.pack(buf, &num_entries, 1, OPAL_INT32);
            if (OPAL_SUCCESS != rc) {
                OMPI_ERROR_LOG(rc);
            } else {
                /* if there are entries, store them */
                while (NULL != (kv = (opal_value_t*)opal_list_remove_first(&data))) {
                    rc = opal_dss.pack(buf, &kv, 1, OPAL_VALUE);
                    OBJ_RELEASE(kv);
                    if (OPAL_SUCCESS != rc) {
                        OMPI_ERROR_LOG(rc);
                        break;
                    }
                }
            }
            /* drain any entries left behind by an early break so the
             * list destructs cleanly and nothing leaks */
            while (NULL != (kv = (opal_value_t*)opal_list_remove_first(&data))) {
                OBJ_RELEASE(kv);
            }
            OBJ_DESTRUCT(&data);
            /* a pack failure is fatal - propagate it to the caller
             * instead of silently returning success */
            if (OPAL_SUCCESS != rc) {
                goto unlock;
            }
        } else {
            rc = opal_dss.pack(buf, &(proclist[i]->proc_arch), 1, OPAL_UINT32);
            if (OPAL_SUCCESS != rc) {
                OMPI_ERROR_LOG(rc);
                goto unlock;
            }
            rc = opal_dss.pack(buf, &(proclist[i]->proc_hostname), 1, OPAL_STRING);
            if (OPAL_SUCCESS != rc) {
                OMPI_ERROR_LOG(rc);
                goto unlock;
            }
        }
    }
    rc = OMPI_SUCCESS;

 unlock:
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
    return rc;
}
开发者ID:IanYXXL,项目名称:A1,代码行数:85,代码来源:proc.c
示例8: mca_coll_base_find_available
/*
 * Walk the list of components that were successfully opened and query
 * each one (if the user asked for a specific component, it is the only
 * one on the opened list).  Every component that wants to be considered
 * for selection is moved onto the "available" list; every component
 * that declines is closed and released.  The opened list is destroyed
 * when the scan is finished.
 *
 * The basic component is also located during this scan and stashed in a
 * global so it can be found quickly later (e.g., during scope selection).
 */
int mca_coll_base_find_available(bool enable_progress_threads,
                                 bool enable_mpi_threads)
{
    bool have_usable_component = false;
    opal_list_item_t *item;

    /* prepare the list that will hold the usable components */
    OBJ_CONSTRUCT(&mca_coll_base_components_available, opal_list_t);
    mca_coll_base_components_available_valid = true;

    /* drain the opened list (populated by mca_coll_base_open),
       querying each component as we pull it off */
    while (NULL != (item = opal_list_remove_first(&mca_coll_base_components_opened))) {
        const mca_base_component_t *component =
            ((mca_base_component_list_item_t *) item)->cli_component;
        mca_base_component_priority_list_item_t *pli;

        /* delegate the query to a helper, since the component may
           implement any of several versions of the coll MCA */
        pli = OBJ_NEW(mca_base_component_priority_list_item_t);
        pli->super.cli_component = component;
        pli->cpli_priority = 0;
        if (OMPI_SUCCESS == init_query(component, pli,
                                       enable_progress_threads,
                                       enable_mpi_threads)) {
            /* it wants to play - keep it */
            opal_list_append(&mca_coll_base_components_available,
                             (opal_list_item_t *) pli);
            have_usable_component = true;
        } else {
            /* it declined; its close() has already run, so just drop it
               from the DSO repository (if it lives there) */
            mca_base_component_repository_release(component);
            OBJ_RELEASE(pli);
        }
        /* the wrapper item from the opened list is no longer needed */
        OBJ_RELEASE(item);
    }

    /* the opened list is empty and serves no further purpose */
    OBJ_DESTRUCT(&mca_coll_base_components_opened);
    mca_coll_base_components_opened_valid = false;

    /* having zero collective components is a fatal condition */
    if (!have_usable_component) {
        OBJ_DESTRUCT(&mca_coll_base_components_available);
        mca_coll_base_components_available_valid = false;
        opal_output_verbose(10, mca_coll_base_output,
                            "coll:find_available: no coll components available!");
        orte_show_help("help-mca-base", "find-available:none-found", true,
                       "coll");
        return OMPI_ERROR;
    }

    /* All done */
    return OMPI_SUCCESS;
}
static bool native_get_attr(const char *attr, opal_value_t **kv)
{
opal_buffer_t *msg, *bptr;
opal_list_t vals;
opal_value_t *kp, *lclpeers=NULL, kvn;
pmix_cmd_t cmd = PMIX_GETATTR_CMD;
char **ranks;
int rc, ret;
int32_t cnt;
bool found=false;
opal_hwloc_locality_t locality;
pmix_cb_t *cb;
uint32_t i, myrank;
opal_process_name_t id;
char *cpuset;
opal_buffer_t buf, buf2;
opal_output_verbose(2, opal_pmix_base_framework.framework_output,
"%s pmix:native get_attr called",
OPAL_NAME_PRINT(OPAL_PROC_MY_NAME));
/* try to retrieve the requested value from the dstore */
OBJ_CONSTRUCT(&vals, opal_list_t);
if (OPAL_SUCCESS == opal_dstore.fetch(opal_dstore_internal, &OPAL_PROC_MY_NAME, attr, &vals)) {
*kv = (opal_value_t*)opal_list_remove_first(&vals);
OPAL_LIST_DESTRUCT(&vals);
return true;
}
if (NULL == mca_pmix_native_component.uri) {
/* no server available, so just return */
return false;
}
/* if the value isn't yet available, then we should try to retrieve
* all the available attributes and store them for future use */
msg = OBJ_NEW(opal_buffer_t);
/* pack the cmd */
if (OPAL_SUCCESS != (rc = opal_dss.pack(msg, &cmd, 1, PMIX_CMD_T))) {
OPAL_ERROR_LOG(rc);
OBJ_RELEASE(msg);
return false;
}
/* create a callback object as we need to pass it to the
* recv routine so we know which callback to use when
* the return message is recvd */
cb = OBJ_NEW(pmix_cb_t);
cb->active = true;
/* push the message into our event base to send to the server */
PMIX_ACTIVATE_SEND_RECV(msg, wait_cbfunc, cb);
/* wait for the data to return */
PMIX_WAIT_FOR_COMPLETION(cb->active);
/* we have received the entire data blob for this process - unpack
* and cache all values, keeping the one we requested to return
* to the caller */
cnt = 1;
if (OPAL_SUCCESS != (rc = opal_dss.unpack(&cb->data, &ret, &cnt, OPAL_INT))) {
OPAL_ERROR_LOG(rc);
OBJ_RELEASE(cb);
return false;
}
if (OPAL_SUCCESS == ret) {
/* unpack the buffer containing the values */
cnt = 1;
if (OPAL_SUCCESS != (rc = opal_dss.unpack(&cb->data, &bptr, &cnt, OPAL_BUFFER))) {
OPAL_ERROR_LOG(rc);
OBJ_RELEASE(cb);
return false;
}
cnt = 1;
while (OPAL_SUCCESS == (rc = opal_dss.unpack(bptr, &kp, &cnt, OPAL_VALUE))) {
opal_output_verbose(2, opal_pmix_base_framework.framework_output,
"%s unpacked attr %s",
OPAL_NAME_PRINT(OPAL_PROC_MY_NAME), kp->key);
/* if this is the local topology, we need to save it in a special way */
#if OPAL_HAVE_HWLOC
{
hwloc_topology_t topo;
if (0 == strcmp(PMIX_LOCAL_TOPO, kp->key)) {
opal_output_verbose(2, opal_pmix_base_framework.framework_output,
"%s saving topology",
OPAL_NAME_PRINT(OPAL_PROC_MY_NAME));
/* transfer the byte object for unpacking */
OBJ_CONSTRUCT(&buf, opal_buffer_t);
opal_dss.load(&buf, kp->data.bo.bytes, kp->data.bo.size);
kp->data.bo.bytes = NULL; // protect the data region
kp->data.bo.size = 0;
OBJ_RELEASE(kp);
/* extract the topology */
cnt=1;
if (OPAL_SUCCESS != (rc = opal_dss.unpack(&buf, &topo, &cnt, OPAL_HWLOC_TOPO))) {
OPAL_ERROR_LOG(rc);
OBJ_DESTRUCT(&buf);
continue;
}
OBJ_DESTRUCT(&buf);
//.........这里部分代码省略.........
/* place specified #procs on each node, up to the specified total
 * number of procs (if one was given).
 */
static int npernode(orte_job_t *jdata)
{
    orte_app_context_t *app;
    int i, j, rc=ORTE_SUCCESS;
    opal_list_t node_list;
    opal_list_item_t *item;
    orte_std_cntr_t num_slots;
    orte_node_t *node;
    int np, nprocs;
    int num_nodes;

    /* setup the node list */
    OBJ_CONSTRUCT(&node_list, opal_list_t);

    /* loop through the app_contexts */
    for(i=0; i < jdata->apps->size; i++) {
        if (NULL == (app = (orte_app_context_t*)opal_pointer_array_get_item(jdata->apps, i))) {
            continue;
        }
        /* use the number of procs if one was given, otherwise place
         * an unbounded number (INT_MAX acts as "no limit") */
        if (0 < app->num_procs) {
            np = app->num_procs;
        } else {
            np = INT_MAX;
        }
        /* for each app_context, we have to get the list of nodes that it can
         * use since that can now be modified with a hostfile and/or -host
         * option
         */
        if(ORTE_SUCCESS != (rc = orte_rmaps_base_get_target_nodes(&node_list, &num_slots, app,
                                                                  jdata->map->policy))) {
            ORTE_ERROR_LOG(rc);
            goto error;
        }
        /* loop through the list of nodes */
        num_nodes = opal_list_get_size(&node_list);
        nprocs = 0;
        while (NULL != (item = opal_list_remove_first(&node_list))) {
            node = (orte_node_t*)item;
            /* put the specified number of procs on each node */
            for (j=0; j < jdata->map->npernode && nprocs < np; j++) {
                if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node,
                                                                    jdata->map->cpus_per_rank, app->idx,
                                                                    &node_list, jdata->map->oversubscribe,
                                                                    false, NULL))) {
                    /** if the code is ORTE_ERR_NODE_FULLY_USED, and we still have
                     * more procs to place, then that is an error
                     */
                    if (ORTE_ERR_NODE_FULLY_USED != rc ||
                        j < jdata->map->npernode-1) {
                        ORTE_ERROR_LOG(rc);
                        OBJ_RELEASE(node);
                        goto error;
                    }
                }
                nprocs++;
            }
            OBJ_RELEASE(node);
        }
        /* update the number of procs in the job */
        jdata->num_procs += nprocs;
        /* if the user requested a specific number of procs and
         * the total number of procs we were able to assign
         * doesn't equal the number requested, then we have a
         * problem
         */
        if (0 < app->num_procs && nprocs < app->num_procs) {
            orte_show_help("help-orte-rmaps-base.txt", "rmaps:too-many-procs", true,
                           app->app, app->num_procs,
                           "number of nodes", num_nodes,
                           "npernode", jdata->map->npernode);
            /* route through the cleanup label so node_list is
             * properly destructed instead of leaking */
            rc = ORTE_ERR_SILENT;
            goto error;
        }
        /* compute vpids and add proc objects to the job - this has to be
         * done after each app_context is mapped in order to keep the
         * vpids contiguous within an app_context
         */
        if (ORTE_SUCCESS != (rc = orte_rmaps_base_compute_vpids(jdata))) {
            ORTE_ERROR_LOG(rc);
            /* route through the cleanup label so node_list is
             * properly destructed instead of leaking */
            goto error;
        }
    }

 error:
    /* drain and destruct the node list - reached on both the success
     * and every failure path, so cleanup always runs */
    while (NULL != (item = opal_list_remove_first(&node_list))) {
        OBJ_RELEASE(item);
    }
    OBJ_DESTRUCT(&node_list);

    return rc;
}
/* complete environment-specific setup for this process: run the common
 * prolog, obtain our name from slurm, then finish setup according to
 * our role (daemon, tool, or application process).  Application procs
 * additionally build their nidmap/jobmap arrays. */
static int rte_init(char flags)
{
    int rc;
    char *failed_fn = NULL;
    orte_jmap_t *jmap;

    /* run the prolog */
    if (ORTE_SUCCESS != (rc = orte_ess_base_std_prolog())) {
        failed_fn = "orte_ess_base_std_prolog";
        goto report_failure;
    }

    /* Start by getting a unique name */
    slurm_set_name();

    if (orte_process_info.daemon) {
        /* daemons complete their setup using the default procedure */
        if (ORTE_SUCCESS != (rc = orte_ess_base_orted_setup())) {
            ORTE_ERROR_LOG(rc);
            failed_fn = "orte_ess_base_orted_setup";
            goto report_failure;
        }
        return ORTE_SUCCESS;
    }

    if (orte_process_info.tool) {
        /* tool procs use the tool-specific procedure */
        if (ORTE_SUCCESS != (rc = orte_ess_base_tool_setup())) {
            ORTE_ERROR_LOG(rc);
            failed_fn = "orte_ess_base_tool_setup";
            goto report_failure;
        }
        return ORTE_SUCCESS;
    }

    /* must be an application process - use the default procedure
     * to finish the setup */
    if (ORTE_SUCCESS != (rc = orte_ess_base_app_setup())) {
        ORTE_ERROR_LOG(rc);
        failed_fn = "orte_ess_base_app_setup";
        goto report_failure;
    }

    /* setup the nidmap arrays */
    OBJ_CONSTRUCT(&nidmap, opal_pointer_array_t);
    opal_pointer_array_init(&nidmap, 8, INT32_MAX, 8);

    /* setup array of jmaps */
    OBJ_CONSTRUCT(&jobmap, opal_pointer_array_t);
    opal_pointer_array_init(&jobmap, 1, INT32_MAX, 1);
    jmap = OBJ_NEW(orte_jmap_t);
    jmap->job = ORTE_PROC_MY_NAME->jobid;
    opal_pointer_array_add(&jobmap, jmap);

    /* if one was provided, build my nidmap */
    if (ORTE_SUCCESS != (rc = orte_ess_base_build_nidmap(orte_process_info.sync_buf,
                                                         &nidmap, &jmap->pmap, &nprocs))) {
        ORTE_ERROR_LOG(rc);
        failed_fn = "orte_ess_base_build_nidmap";
        goto report_failure;
    }

    return ORTE_SUCCESS;

 report_failure:
    orte_show_help("help-orte-runtime.txt",
                   "orte_init:startup:internal-failure",
                   true, failed_fn, ORTE_ERROR_NAME(rc), rc);
    return rc;
}
请发表评论