This article collects and summarizes typical usage examples of the C++ slurmdb_setup_cluster_flags function. If you have been wondering how exactly slurmdb_setup_cluster_flags is used in C++, or have been looking for working examples of it, the hand-picked code samples below should help.
The following presents 20 code examples of slurmdb_setup_cluster_flags, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better C++ code examples.
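Every example below follows the same basic pattern: call slurmdb_setup_cluster_flags() once, keep the returned uint32_t bitmask, and test individual CLUSTER_FLAG_* bits to special-case BlueGene, front-end, or multi-slurmd clusters. Here is a minimal sketch of that pattern; it assumes the code is built inside the Slurm source tree (the function is declared in the internal header src/common/slurmdb_defs.h and debug() comes from src/common/log.h — both paths are assumptions), and report_cluster_type() is a hypothetical helper, not part of Slurm:

	#include "src/common/slurmdb_defs.h"	/* slurmdb_setup_cluster_flags(), CLUSTER_FLAG_* (assumed path) */
	#include "src/common/log.h"		/* debug() logging helper (assumed path) */

	static void report_cluster_type(void)
	{
		/* Bitmask describing the cluster this code is running on. */
		uint32_t cluster_flags = slurmdb_setup_cluster_flags();

		if (cluster_flags & CLUSTER_FLAG_BG)
			debug("BlueGene cluster");
		if (cluster_flags & CLUSTER_FLAG_FE)
			debug("jobs are launched through a front-end node");
		if (cluster_flags & CLUSTER_FLAG_MULTSD)
			debug("multiple slurmd daemons per node");
	}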
Example 1: print_select_ba_request
/**
* print a block request
*/
extern void print_select_ba_request(select_ba_request_t* ba_request)
{
int dim;
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
uint16_t cluster_dims = slurmdb_setup_cluster_dims();
if (ba_request == NULL){
error("print_ba_request Error, request is NULL");
return;
}
debug(" ba_request:");
debug(" geometry:\t");
for (dim=0; dim<cluster_dims; dim++){
debug("%d", ba_request->geometry[dim]);
}
debug(" size:\t%d", ba_request->size);
if (cluster_flags & CLUSTER_FLAG_BGQ) {
for (dim=0; dim<cluster_dims; dim++)
debug(" conn_type:\t%d", ba_request->conn_type[dim]);
} else
debug(" conn_type:\t%d", ba_request->conn_type[0]);
debug(" rotate:\t%d", ba_request->rotate);
debug(" elongate:\t%d", ba_request->elongate);
}
Author: jsollom, Project: slurm, Lines: 28, Source file: node_select.c
Example 2: _sprint_range
static void _sprint_range(char *str, uint32_t str_size,
uint32_t lower, uint32_t upper)
{
char tmp[128];
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
if (cluster_flags & CLUSTER_FLAG_BG) {
convert_num_unit((float)lower, tmp, sizeof(tmp), UNIT_NONE,
NO_VAL, CONVERT_NUM_UNIT_EXACT);
} else {
snprintf(tmp, sizeof(tmp), "%u", lower);
}
if (upper > 0) {
char tmp2[128];
if (cluster_flags & CLUSTER_FLAG_BG) {
convert_num_unit((float)upper, tmp2, sizeof(tmp2),
UNIT_NONE, NO_VAL,
CONVERT_NUM_UNIT_EXACT);
} else {
snprintf(tmp2, sizeof(tmp2), "%u", upper);
}
snprintf(str, str_size, "%s-%s", tmp, tmp2);
} else
snprintf(str, str_size, "%s", tmp);
}
Author: fafik23, Project: slurm, Lines: 26, Source file: job_info.c
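A quick illustration of how the helper above behaves, from a hypothetical caller (not part of the original file): on a non-BlueGene cluster the bounds are printed verbatim, and a zero upper bound suppresses the range; with CLUSTER_FLAG_BG set, convert_num_unit() renders the values with a unit suffix instead (e.g. 16384 may come out as "16K").

	char range[64];
	_sprint_range(range, sizeof(range), 4, 16);	/* range -> "4-16" */
	_sprint_range(range, sizeof(range), 8, 0);	/* upper == 0, so range -> "8" */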
Example 3: slurm_job_will_run
/*
* slurm_job_will_run - determine if a job would execute immediately if
* submitted now
* IN job_desc_msg - description of resource allocation request
* RET 0 on success, otherwise return -1 and set errno to indicate the error
*/
int slurm_job_will_run(job_desc_msg_t *req)
{
will_run_response_msg_t *will_run_resp = NULL;
char buf[64], local_hostname[64];
int rc;
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
char *type = "processors";
char *cluster_name = NULL;
void *ptr = NULL;
if ((req->alloc_node == NULL) &&
(gethostname_short(local_hostname, sizeof(local_hostname)) == 0)) {
req->alloc_node = local_hostname;
}
if (working_cluster_rec)
cluster_name = working_cluster_rec->name;
else
cluster_name = slurmctld_conf.cluster_name;
if (!slurm_load_federation(&ptr) &&
cluster_in_federation(ptr, cluster_name))
rc = _fed_job_will_run(req, &will_run_resp, ptr);
else
rc = slurm_job_will_run2(req, &will_run_resp);
if ((rc == 0) && will_run_resp) {
if (cluster_flags & CLUSTER_FLAG_BG)
type = "cnodes";
slurm_make_time_str(&will_run_resp->start_time,
buf, sizeof(buf));
info("Job %u to start at %s using %u %s on %s",
will_run_resp->job_id, buf,
will_run_resp->proc_cnt, type,
will_run_resp->node_list);
if (will_run_resp->preemptee_job_id) {
ListIterator itr;
uint32_t *job_id_ptr;
char *job_list = NULL, *sep = "";
itr = list_iterator_create(will_run_resp->
preemptee_job_id);
while ((job_id_ptr = list_next(itr))) {
if (job_list)
sep = ",";
xstrfmtcat(job_list, "%s%u", sep, *job_id_ptr);
}
list_iterator_destroy(itr);
info(" Preempts: %s", job_list);
xfree(job_list);
}
slurm_free_will_run_response_msg(will_run_resp);
}
if (req->alloc_node == local_hostname)
req->alloc_node = NULL;
if (ptr)
slurm_destroy_federation_rec(ptr);
return rc;
}
Author: HPCNow, Project: slurm, Lines: 66, Source file: allocate.c
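For reference, a minimal caller-side sketch (the field values are purely illustrative, and the declarations are assumed to come from the public <slurm/slurm.h> header):

	#include <slurm/slurm.h>
	#include <unistd.h>

	job_desc_msg_t req;

	slurm_init_job_desc_msg(&req);	/* fill the descriptor with defaults */
	req.min_nodes = 2;		/* would two nodes be available right now? */
	req.user_id   = getuid();
	req.group_id  = getgid();
	if (slurm_job_will_run(&req) != 0)
		slurm_perror("slurm_job_will_run");

On success the function itself reports the projected start time, node list and any preempted jobs through the info() calls shown above.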
Example 4: slurm_print_ctl_conf
/*
* slurm_print_ctl_conf - output the contents of slurm control configuration
* message as loaded using slurm_load_ctl_conf
* IN out - file to write to
* IN slurm_ctl_conf_ptr - slurm control configuration pointer
*/
void slurm_print_ctl_conf ( FILE* out,
slurm_ctl_conf_info_msg_t * slurm_ctl_conf_ptr )
{
char time_str[32], tmp_str[128];
void *ret_list = NULL;
char *select_title = "";
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
if (cluster_flags & CLUSTER_FLAG_BGL)
select_title = "\nBluegene/L configuration\n";
else if (cluster_flags & CLUSTER_FLAG_BGP)
select_title = "\nBluegene/P configuration\n";
else if (cluster_flags & CLUSTER_FLAG_BGQ)
select_title = "\nBluegene/Q configuration\n";
if ( slurm_ctl_conf_ptr == NULL )
return ;
slurm_make_time_str((time_t *)&slurm_ctl_conf_ptr->last_update,
time_str, sizeof(time_str));
snprintf(tmp_str, sizeof(tmp_str), "Configuration data as of %s\n",
time_str);
ret_list = slurm_ctl_conf_2_key_pairs(slurm_ctl_conf_ptr);
if (ret_list) {
slurm_print_key_pairs(out, ret_list, tmp_str);
list_destroy((List)ret_list);
}
slurm_print_key_pairs(out, slurm_ctl_conf_ptr->select_conf_key_pairs,
select_title);
}
Author: jsollom, Project: slurm, Lines: 39, Source file: config_info.c
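As the header comment says, the configuration message is obtained with slurm_load_ctl_conf(); a short sketch of the full load/print/free cycle (an assumed usage pattern, not taken from the original file):

	slurm_ctl_conf_info_msg_t *conf = NULL;

	if (slurm_load_ctl_conf((time_t) 0, &conf) == 0) {
		slurm_print_ctl_conf(stdout, conf);	/* dump every key/value pair */
		slurm_free_ctl_conf(conf);		/* release the loaded message */
	} else {
		slurm_perror("slurm_load_ctl_conf");
	}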
Example 5: as_mysql_register_ctld
extern int as_mysql_register_ctld(mysql_conn_t *mysql_conn,
char *cluster, uint16_t port)
{
char *query = NULL;
char *address = NULL;
char hostname[255];
time_t now = time(NULL);
uint32_t flags = slurmdb_setup_cluster_flags();
int rc = SLURM_SUCCESS;
if (slurmdbd_conf)
fatal("clusteracct_storage_g_register_ctld "
"should never be called from the slurmdbd.");
if (check_connection(mysql_conn) != SLURM_SUCCESS)
return ESLURM_DB_CONNECTION;
if (!mysql_conn->cluster_name) {
error("%s:%d no cluster name", THIS_FILE, __LINE__);
return SLURM_ERROR;
}
if (!mysql_conn->cluster_name)
mysql_conn->cluster_name = xstrdup(cluster);
info("Registering slurmctld for cluster %s at port %u in database.",
cluster, port);
gethostname(hostname, sizeof(hostname));
/* check if we are running on the backup controller */
if (slurmctld_conf.backup_controller
&& !strcmp(slurmctld_conf.backup_controller, hostname)) {
address = slurmctld_conf.backup_addr;
} else
address = slurmctld_conf.control_addr;
query = xstrdup_printf(
"update %s set deleted=0, mod_time=%ld, "
"control_host='%s', control_port=%u, last_port=%u, "
"rpc_version=%d, dimensions=%d, flags=%u, "
"plugin_id_select=%d where name='%s';",
cluster_table, now, address, port, port, SLURM_PROTOCOL_VERSION,
SYSTEM_DIMENSIONS, flags, select_get_plugin_id(), cluster);
xstrfmtcat(query,
"insert into %s "
"(timestamp, action, name, actor, info) "
"values (%ld, %d, '%s', '%s', '%s %u %u %u %u');",
txn_table,
now, DBD_MODIFY_CLUSTERS, cluster,
slurmctld_conf.slurm_user_name, address, port,
SYSTEM_DIMENSIONS, flags, select_get_plugin_id());
if (debug_flags & DEBUG_FLAG_DB_ASSOC)
DB_DEBUG(mysql_conn->conn, "query\n%s", query);
rc = mysql_db_query(mysql_conn, query);
xfree(query);
return rc;
}
Author: FredHutch, Project: slurm, Lines: 59, Source file: as_mysql_cluster.c
Example 6: xmalloc
/*
* slurm_step_layout_create - determine how many tasks of a job will be
* run on each node. Distribution is influenced
* by number of cpus on each host.
* IN tlist - hostlist corresponding to task layout
* IN cpus_per_node - cpus per node
* IN cpu_count_reps - how many nodes have same cpu count
* IN num_hosts - number of hosts we have
* IN num_tasks - number of tasks to distribute across these cpus
* IN cpus_per_task - number of cpus per task
* IN task_dist - type of distribution we are using
* IN plane_size - plane size (only needed for the plane distribution)
* RET a pointer to an slurm_step_layout_t structure
* NOTE: allocates memory that should be xfreed by caller
*/
slurm_step_layout_t *slurm_step_layout_create(
const char *tlist,
uint16_t *cpus_per_node, uint32_t *cpu_count_reps,
uint32_t num_hosts,
uint32_t num_tasks,
uint16_t cpus_per_task,
uint16_t task_dist,
uint16_t plane_size)
{
char *arbitrary_nodes = NULL;
slurm_step_layout_t *step_layout =
xmalloc(sizeof(slurm_step_layout_t));
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
step_layout->task_dist = task_dist;
if (task_dist == SLURM_DIST_ARBITRARY) {
hostlist_t hl = NULL;
char *buf = NULL;
/* set the node list for the task layout later; a user-supplied
* list could be different than the job allocation */
arbitrary_nodes = xstrdup(tlist);
hl = hostlist_create(tlist);
hostlist_uniq(hl);
buf = hostlist_ranged_string_xmalloc(hl);
num_hosts = hostlist_count(hl);
hostlist_destroy(hl);
step_layout->node_list = buf;
} else {
step_layout->node_list = xstrdup(tlist);
}
step_layout->task_cnt = num_tasks;
if (cluster_flags & CLUSTER_FLAG_FE) {
/* Limited job step support on front-end systems.
* All jobs execute through front-end on Blue Gene.
* Normally we would not permit execution of job steps,
* but can fake it by just allocating all tasks to
* one of the allocated nodes. */
if ((cluster_flags & CLUSTER_FLAG_BG)
|| (cluster_flags & CLUSTER_FLAG_CRAY_A))
step_layout->node_cnt = num_hosts;
else
step_layout->node_cnt = 1;
} else
step_layout->node_cnt = num_hosts;
if (_init_task_layout(step_layout, arbitrary_nodes,
cpus_per_node, cpu_count_reps,
cpus_per_task,
task_dist, plane_size) != SLURM_SUCCESS) {
slurm_step_layout_destroy(step_layout);
step_layout = NULL;
}
xfree(arbitrary_nodes);
return step_layout;
}
Author: BYUHPC, Project: slurm, Lines: 71, Source file: slurm_step_layout.c
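A hypothetical call for a homogeneous allocation — four hosts with eight CPUs each, sixteen tasks, block distribution (the host names and counts are made up):

	uint16_t cpus_per_node[]  = { 8 };
	uint32_t cpu_count_reps[] = { 4 };	/* all four hosts share the same CPU count */
	slurm_step_layout_t *layout =
		slurm_step_layout_create("node[1-4]", cpus_per_node, cpu_count_reps,
					 4,	/* num_hosts */
					 16,	/* num_tasks */
					 1,	/* cpus_per_task */
					 SLURM_DIST_BLOCK,
					 0);	/* plane_size, unused for block */

	if (layout) {
		/* per-host task counts were filled in by _init_task_layout() */
		slurm_step_layout_destroy(layout);
	}

Note the CLUSTER_FLAG_FE branch above: on front-end systems the recorded node count may collapse to one even though several hosts were requested.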
Example 7: slurm_job_will_run
/*
* slurm_job_will_run - determine if a job would execute immediately if
* submitted now
* IN job_desc_msg - description of resource allocation request
* RET 0 on success, otherwise return -1 and set errno to indicate the error
*/
int slurm_job_will_run (job_desc_msg_t *req)
{
will_run_response_msg_t *will_run_resp = NULL;
char buf[64];
bool host_set = false;
int rc;
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
char *type = "processors";
if ((req->alloc_node == NULL) &&
(gethostname_short(buf, sizeof(buf)) == 0)) {
req->alloc_node = buf;
host_set = true;
}
rc = slurm_job_will_run2(req, &will_run_resp);
if ((rc == 0) && will_run_resp) {
if (cluster_flags & CLUSTER_FLAG_BG)
type = "cnodes";
slurm_make_time_str(&will_run_resp->start_time,
buf, sizeof(buf));
info("Job %u to start at %s using %u %s"
" on %s",
will_run_resp->job_id, buf,
will_run_resp->proc_cnt, type,
will_run_resp->node_list);
if (will_run_resp->preemptee_job_id) {
ListIterator itr;
uint32_t *job_id_ptr;
char *job_list = NULL, *sep = "";
itr = list_iterator_create(will_run_resp->
preemptee_job_id);
while ((job_id_ptr = list_next(itr))) {
if (job_list)
sep = ",";
xstrfmtcat(job_list, "%s%u", sep, *job_id_ptr);
}
list_iterator_destroy(itr);
info(" Preempts: %s", job_list);
xfree(job_list);
}
slurm_free_will_run_response_msg(will_run_resp);
}
if (host_set)
req->alloc_node = NULL;
return rc;
}
Author: jabl, Project: slurm, Lines: 57, Source file: allocate.c
Example 8: _sprint_range
/*
* Borrowed and modified from src/api/job_info.c
*/
void _sprint_range(char *str, uint32_t str_size, uint32_t lower, uint32_t upper) {
char tmp[128];
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
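/* NOTE: cluster_flags is retained from the original job_info.c helper but is
* no longer read below, since the BG-specific convert_num_unit() branch was
* dropped from this simplified copy. */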
snprintf(tmp, sizeof(tmp), "%u", lower);
if (upper > 0) {
char tmp2[128];
snprintf(tmp2, sizeof(tmp2), "%u", upper);
snprintf(str, str_size, "%s-%s", tmp, tmp2);
} else {
snprintf(str, str_size, "%s", tmp);
}
}
Author: UoA-eResearch, Project: cer_mojo, Lines: 17, Source file: get_jobs.c
Example 9: slurm_sprint_node_table
/*
* slurm_sprint_node_table - output information about a specific Slurm node
* based upon message as loaded using slurm_load_node
* IN node_ptr - an individual node information record pointer
* IN node_scaling - number of nodes each node represents
* IN one_liner - print as a single line if true
* RET out - char * containing formatted output (must be freed after call)
* NULL is returned on failure.
*/
char *
slurm_sprint_node_table (node_info_t * node_ptr,
int node_scaling, int one_liner )
{
uint16_t my_state = node_ptr->node_state;
char *cloud_str = "", *comp_str = "", *drain_str = "", *power_str = "";
char load_str[32], tmp_line[512], time_str[32];
char *out = NULL, *reason_str = NULL, *select_reason_str = NULL;
uint16_t err_cpus = 0, alloc_cpus = 0;
int cpus_per_node = 1;
int total_used = node_ptr->cpus;
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
if (node_scaling)
cpus_per_node = node_ptr->cpus / node_scaling;
if (my_state & NODE_STATE_CLOUD) {
my_state &= (~NODE_STATE_CLOUD);
cloud_str = "+CLOUD";
}
if (my_state & NODE_STATE_COMPLETING) {
my_state &= (~NODE_STATE_COMPLETING);
comp_str = "+COMPLETING";
}
if (my_state & NODE_STATE_DRAIN) {
my_state &= (~NODE_STATE_DRAIN);
drain_str = "+DRAIN";
}
if (my_state & NODE_STATE_POWER_SAVE) {
my_state &= (~NODE_STATE_POWER_SAVE);
power_str = "+POWER";
}
slurm_get_select_nodeinfo(node_ptr->select_nodeinfo,
SELECT_NODEDATA_SUBCNT,
NODE_STATE_ALLOCATED,
&alloc_cpus);
if (cluster_flags & CLUSTER_FLAG_BG) {
if (!alloc_cpus &&
(IS_NODE_ALLOCATED(node_ptr) ||
IS_NODE_COMPLETING(node_ptr)))
alloc_cpus = node_ptr->cpus;
else
alloc_cpus *= cpus_per_node;
}
total_used -= alloc_cpus;
slurm_get_select_nodeinfo(node_ptr->select_nodeinfo,
SELECT_NODEDATA_SUBCNT,
NODE_STATE_ERROR,
&err_cpus);
if (cluster_flags & CLUSTER_FLAG_BG)
err_cpus *= cpus_per_node;
total_used -= err_cpus;
if ((alloc_cpus && err_cpus) ||
(total_used && (total_used != node_ptr->cpus))) {
my_state &= NODE_STATE_FLAGS;
my_state |= NODE_STATE_MIXED;
}
/****** Line 1 ******/
snprintf(tmp_line, sizeof(tmp_line), "NodeName=%s ", node_ptr->name);
xstrcat(out, tmp_line);
if (cluster_flags & CLUSTER_FLAG_BG) {
slurm_get_select_nodeinfo(node_ptr->select_nodeinfo,
SELECT_NODEDATA_RACK_MP,
0, &select_reason_str);
if (select_reason_str) {
xstrfmtcat(out, "RackMidplane=%s ", select_reason_str);
xfree(select_reason_str);
}
}
if (node_ptr->arch) {
snprintf(tmp_line, sizeof(tmp_line), "Arch=%s ",
node_ptr->arch);
xstrcat(out, tmp_line);
}
snprintf(tmp_line, sizeof(tmp_line), "CoresPerSocket=%u",
node_ptr->cores);
xstrcat(out, tmp_line);
if (one_liner)
xstrcat(out, " ");
else
xstrcat(out, "\n ");
/****** Line 2 ******/
if (node_ptr->cpu_load == NO_VAL)
strcpy(load_str, "N/A");
else {
snprintf(load_str, sizeof(load_str), "%.2f",
//.........portions of this code omitted.........
Author: kwangiit, Project: dist_job_launch, Lines: 101, Source file: node_info.c
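A sketch of how the formatter is normally driven, assuming code inside the Slurm tree where xfree() is available (passing 0 as the update time simply forces a fresh load):

	node_info_msg_t *node_info = NULL;

	if (slurm_load_node((time_t) 0, &node_info, SHOW_ALL) == 0) {
		char *txt = slurm_sprint_node_table(&node_info->node_array[0],
						    1 /* node_scaling */,
						    0 /* one_liner */);
		if (txt) {
			printf("%s", txt);
			xfree(txt);	/* string is xmalloc'ed by the formatter */
		}
		slurm_free_node_info_msg(node_info);
	}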
Example 10: slurmdb_setup_cluster_flags
/*
* slurm_sprint_partition_info - output information about a specific Slurm
* partition based upon message as loaded using slurm_load_partitions
* IN part_ptr - an individual partition information record pointer
* IN one_liner - print as a single line if true
* RET out - char * containing formatted output (must be freed after call)
* NULL is returned on failure.
*/
char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
int one_liner )
{
char tmp[16];
char *out = NULL;
char *allow_deny, *value;
uint16_t force, preempt_mode, val;
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
char *line_end = (one_liner) ? " " : "\n ";
/****** Line 1 ******/
xstrfmtcat(out, "PartitionName=%s", part_ptr->name);
xstrcat(out, line_end);
/****** Line 2 ******/
if ((part_ptr->allow_groups == NULL) ||
(part_ptr->allow_groups[0] == '\0'))
xstrcat(out, "AllowGroups=ALL");
else {
xstrfmtcat(out, "AllowGroups=%s", part_ptr->allow_groups);
}
if (part_ptr->allow_accounts || !part_ptr->deny_accounts) {
allow_deny = "Allow";
if ((part_ptr->allow_accounts == NULL) ||
(part_ptr->allow_accounts[0] == '\0'))
value = "ALL";
else
value = part_ptr->allow_accounts;
} else {
allow_deny = "Deny";
value = part_ptr->deny_accounts;
}
xstrfmtcat(out, " %sAccounts=%s", allow_deny, value);
if (part_ptr->allow_qos || !part_ptr->deny_qos) {
allow_deny = "Allow";
if ((part_ptr->allow_qos == NULL) ||
(part_ptr->allow_qos[0] == '\0'))
value = "ALL";
else
value = part_ptr->allow_qos;
} else {
allow_deny = "Deny";
value = part_ptr->deny_qos;
}
xstrfmtcat(out, " %sQos=%s", allow_deny, value);
xstrcat(out, line_end);
/****** Line 3 ******/
if (part_ptr->allow_alloc_nodes == NULL)
xstrcat(out, "AllocNodes=ALL");
else
xstrfmtcat(out, "AllocNodes=%s", part_ptr->allow_alloc_nodes);
if (part_ptr->alternate != NULL) {
xstrfmtcat(out, " Alternate=%s", part_ptr->alternate);
}
if (part_ptr->flags & PART_FLAG_DEFAULT)
xstrcat(out, " Default=YES");
else
xstrcat(out, " Default=NO");
if (part_ptr->qos_char)
xstrfmtcat(out, " QoS=%s", part_ptr->qos_char);
else
xstrcat(out, " QoS=N/A");
xstrcat(out, line_end);
/****** Line 4 added here for BG partitions only
****** to maintain alphabetized output ******/
if (cluster_flags & CLUSTER_FLAG_BG) {
xstrfmtcat(out, "Midplanes=%s", part_ptr->nodes);
xstrcat(out, line_end);
}
/****** Line 5 ******/
if (part_ptr->default_time == INFINITE)
xstrcat(out, "DefaultTime=UNLIMITED");
else if (part_ptr->default_time == NO_VAL)
xstrcat(out, "DefaultTime=NONE");
else {
char time_line[32];
secs2time_str(part_ptr->default_time * 60, time_line,
sizeof(time_line));
xstrfmtcat(out, "DefaultTime=%s", time_line);
//.........portions of this code omitted.........
Author: Q-Leap-Networks, Project: qlustar-slurm, Lines: 101, Source file: partition_info.c
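The partition formatter follows the same load/format/free pattern; a hypothetical loop over every partition record (again assuming the usual public loader and in-tree xfree()):

	partition_info_msg_t *part_info = NULL;
	uint32_t i;

	if (slurm_load_partitions((time_t) 0, &part_info, SHOW_ALL) == 0) {
		for (i = 0; i < part_info->record_count; i++) {
			char *txt = slurm_sprint_partition_info(
				&part_info->partition_array[i], 0);
			printf("%s", txt);
			xfree(txt);	/* returned string must be freed by the caller */
		}
		slurm_free_partition_info_msg(part_info);
	}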
Example 11: slurm_load_slurmd_status
/*
* slurm_load_slurmd_status - issue RPC to get the status of slurmd
* daemon on this machine
* IN slurmd_info_ptr - place to store slurmd status information
* RET 0 or -1 on error
* NOTE: free the response using slurm_free_slurmd_status()
*/
extern int
slurm_load_slurmd_status(slurmd_status_t **slurmd_status_ptr)
{
int rc;
slurm_msg_t req_msg;
slurm_msg_t resp_msg;
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
char *this_addr;
slurm_msg_t_init(&req_msg);
slurm_msg_t_init(&resp_msg);
if (cluster_flags & CLUSTER_FLAG_MULTSD) {
if ((this_addr = getenv("SLURMD_NODENAME"))) {
slurm_conf_get_addr(this_addr, &req_msg.address);
} else {
this_addr = "localhost";
slurm_set_addr(&req_msg.address,
(uint16_t)slurm_get_slurmd_port(),
this_addr);
}
} else {
char this_host[256];
/*
* Set request message address to slurmd on localhost
*/
gethostname_short(this_host, sizeof(this_host));
this_addr = slurm_conf_get_nodeaddr(this_host);
if (this_addr == NULL)
this_addr = xstrdup("localhost");
slurm_set_addr(&req_msg.address,
(uint16_t)slurm_get_slurmd_port(),
this_addr);
xfree(this_addr);
}
req_msg.msg_type = REQUEST_DAEMON_STATUS;
req_msg.data = NULL;
rc = slurm_send_recv_node_msg(&req_msg, &resp_msg, 0);
if ((rc != 0) || !resp_msg.auth_cred) {
error("slurm_slurmd_info: %m");
if (resp_msg.auth_cred)
g_slurm_auth_destroy(resp_msg.auth_cred);
return SLURM_ERROR;
}
if (resp_msg.auth_cred)
g_slurm_auth_destroy(resp_msg.auth_cred);
switch (resp_msg.msg_type) {
case RESPONSE_SLURMD_STATUS:
*slurmd_status_ptr = (slurmd_status_t *) resp_msg.data;
break;
case RESPONSE_SLURM_RC:
rc = ((return_code_msg_t *) resp_msg.data)->return_code;
slurm_free_return_code_msg(resp_msg.data);
if (rc)
slurm_seterrno_ret(rc);
break;
default:
slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
break;
}
return SLURM_PROTOCOL_SUCCESS;
}
Author: mrhaoji, Project: slurm, Lines: 74, Source file: config_info.c
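Caller-side usage is short; a sketch with the error handling trimmed (slurm_print_slurmd_status() is assumed to be the matching pretty-printer in the same API):

	slurmd_status_t *status = NULL;

	if (slurm_load_slurmd_status(&status) == 0) {
		slurm_print_slurmd_status(stdout, status);
		slurm_free_slurmd_status(status);
	} else {
		slurm_perror("slurm_load_slurmd_status");
	}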
Example 12: slurmdb_setup_cluster_flags
extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
{
List ret_list = NULL;
config_key_pair_t *key_pair;
char tmp_str[128];
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
if ( slurm_ctl_conf_ptr == NULL )
return NULL;
ret_list = list_create(destroy_config_key_pair);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AccountingStorageBackupHost");
key_pair->value =
xstrdup(slurm_ctl_conf_ptr->accounting_storage_backup_host);
list_append(ret_list, key_pair);
accounting_enforce_string(slurm_ctl_conf_ptr->
accounting_storage_enforce,
tmp_str, sizeof(tmp_str));
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AccountingStorageEnforce");
key_pair->value = xstrdup(tmp_str);
list_append(ret_list, key_pair);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AccountingStorageHost");
key_pair->value = xstrdup(slurm_ctl_conf_ptr->accounting_storage_host);
list_append(ret_list, key_pair);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AccountingStorageLoc");
key_pair->value = xstrdup(slurm_ctl_conf_ptr->accounting_storage_loc);
list_append(ret_list, key_pair);
snprintf(tmp_str, sizeof(tmp_str), "%u",
slurm_ctl_conf_ptr->accounting_storage_port);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AccountingStoragePort");
key_pair->value = xstrdup(tmp_str);
list_append(ret_list, key_pair);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AccountingStorageType");
key_pair->value = xstrdup(slurm_ctl_conf_ptr->accounting_storage_type);
list_append(ret_list, key_pair);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AccountingStorageUser");
key_pair->value = xstrdup(slurm_ctl_conf_ptr->accounting_storage_user);
list_append(ret_list, key_pair);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AccountingStoreJobComment");
if (slurm_ctl_conf_ptr->acctng_store_job_comment)
key_pair->value = xstrdup("YES");
else
key_pair->value = xstrdup("NO");
list_append(ret_list, key_pair);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AcctGatherEnergyType");
key_pair->value = xstrdup(slurm_ctl_conf_ptr->acct_gather_energy_type);
list_append(ret_list, key_pair);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AcctGatherFilesystemType");
key_pair->value =
xstrdup(slurm_ctl_conf_ptr->acct_gather_filesystem_type);
list_append(ret_list, key_pair);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AcctGatherInfinibandType");
key_pair->value =
xstrdup(slurm_ctl_conf_ptr->acct_gather_infiniband_type);
list_append(ret_list, key_pair);
snprintf(tmp_str, sizeof(tmp_str), "%u sec",
slurm_ctl_conf_ptr->acct_gather_node_freq);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AcctGatherNodeFreq");
key_pair->value = xstrdup(tmp_str);
list_append(ret_list, key_pair);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AcctGatherProfileType");
key_pair->value = xstrdup(slurm_ctl_conf_ptr->acct_gather_profile_type);
list_append(ret_list, key_pair);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AuthInfo");
key_pair->value = xstrdup(slurm_ctl_conf_ptr->authinfo);
list_append(ret_list, key_pair);
key_pair = xmalloc(sizeof(config_key_pair_t));
key_pair->name = xstrdup("AuthType");
key_pair->value = xstrdup(slurm_ctl_conf_ptr->authtype);
list_append(ret_list, key_pair);
//.........portions of this code omitted.........
Author: mrhaoji, Project: slurm, Lines: 101, Source file: config_info.c
Example 13: env_array_for_batch_job
/*
* Set in "dest" the environment variable strings relevant to a SLURM batch
* job allocation, overwriting any environment variables of the same name.
* If the address pointed to by "dest" is NULL, memory will automatically be
* xmalloc'ed. The array is terminated by a NULL pointer, and thus is
* suitable for use by execle() and other env_array_* functions.
*
* Sets the variables:
* SLURM_JOB_ID
* SLURM_JOB_NUM_NODES
* SLURM_JOB_NODELIST
* SLURM_JOB_CPUS_PER_NODE
* ENVIRONMENT=BATCH
* HOSTNAME
* LOADLBATCH (AIX only)
*
* Sets OBSOLETE variables (needed for MPI, do not remove):
* SLURM_JOBID
* SLURM_NNODES
* SLURM_NODELIST
* SLURM_NTASKS
* SLURM_TASKS_PER_NODE
*/
extern int
env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
const char *node_name)
{
char *tmp = NULL;
uint32_t num_nodes = 0;
uint32_t num_cpus = 0;
int i;
slurm_step_layout_t *step_layout = NULL;
uint32_t num_tasks = batch->ntasks;
uint16_t cpus_per_task;
uint16_t task_dist;
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
_setup_particulars(cluster_flags, dest, batch->select_jobinfo);
/* There is no explicit node count in the batch structure,
* so we need to calculate the node count. */
for (i = 0; i < batch->num_cpu_groups; i++) {
num_nodes += batch->cpu_count_reps[i];
num_cpus += batch->cpu_count_reps[i] * batch->cpus_per_node[i];
}
env_array_overwrite_fmt(dest, "SLURM_JOB_ID", "%u", batch->job_id);
env_array_overwrite_fmt(dest, "SLURM_JOB_NUM_NODES", "%u", num_nodes);
if(cluster_flags & CLUSTER_FLAG_BG)
env_array_overwrite_fmt(dest, "SLURM_BG_NUM_NODES",
"%u", num_nodes);
env_array_overwrite_fmt(dest, "SLURM_JOB_NODELIST", "%s", batch->nodes);
tmp = uint32_compressed_to_str(batch->num_cpu_groups,
batch->cpus_per_node,
batch->cpu_count_reps);
env_array_overwrite_fmt(dest, "SLURM_JOB_CPUS_PER_NODE", "%s", tmp);
xfree(tmp);
env_array_overwrite_fmt(dest, "ENVIRONMENT", "BATCH");
if (node_name)
env_array_overwrite_fmt(dest, "HOSTNAME", "%s", node_name);
/* OBSOLETE, but needed by MPI, do not remove */
env_array_overwrite_fmt(dest, "SLURM_JOBID", "%u", batch->job_id);
env_array_overwrite_fmt(dest, "SLURM_NNODES", "%u", num_nodes);
env_array_overwrite_fmt(dest, "SLURM_NODELIST", "%s", batch->nodes);
if((batch->cpus_per_task != 0) &&
(batch->cpus_per_task != (uint16_t) NO_VAL))
cpus_per_task = batch->cpus_per_task;
else
cpus_per_task = 1; /* default value */
if (cpus_per_task > 1) {
env_array_overwrite_fmt(dest, "SLURM_CPUS_PER_TASK", "%u",
cpus_per_task);
}
if(num_tasks) {
env_array_overwrite_fmt(dest, "SLURM_NTASKS", "%u",
num_tasks);
/* keep around for old scripts */
env_array_overwrite_fmt(dest, "SLURM_NPROCS", "%u",
num_tasks);
} else {
num_tasks = num_cpus / cpus_per_task;
}
if((tmp = getenvp(*dest, "SLURM_ARBITRARY_NODELIST"))) {
task_dist = SLURM_DIST_ARBITRARY;
} else {
tmp = batch->nodes;
task_dist = SLURM_DIST_BLOCK;
}
if(!(step_layout = slurm_step_layout_create(tmp,
batch->cpus_per_node,
batch->cpu_count_reps,
num_nodes,
//.........portions of this code omitted.........
Author: donaghy1, Project: slurm, Lines: 101, Source file: env.c
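This function is normally invoked from the batch-launch path with the RPC message received by slurmd. A hedged sketch of such a call site — launch_msg stands for a valid batch_job_launch_msg_t, node_name for the local node's configured name, and env_array_create()/env_array_free() are the companion helpers declared alongside this function (all of these are assumptions about the caller):

	char **job_env = env_array_create();	/* empty, NULL-terminated array */

	env_array_for_batch_job(&job_env, launch_msg, node_name);
	/* job_env can now be passed to execle() or merged into the task environment */
	env_array_free(job_env);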
Example 14: slurm_pack_job_will_run
/*
* slurm_pack_job_will_run - determine if a heterogeneous job would execute
* immediately if submitted now
* IN job_req_list - List of job_desc_msg_t structures describing the resource
* allocation request
* RET 0 on success, otherwise return -1 and set errno to indicate the error
*/
extern int slurm_pack_job_will_run(List job_req_list)
{
job_desc_msg_t *req;
will_run_response_msg_t *will_run_resp;
char buf[64], local_hostname[64] = "", *sep = "";
int rc = SLURM_SUCCESS;
char *type = "processors";
ListIterator iter, itr;
time_t first_start = (time_t) 0;
uint32_t first_job_id = 0, tot_proc_count = 0, *job_id_ptr;
hostset_t hs = NULL;
char *job_list = NULL;
if (!job_req_list || (list_count(job_req_list) == 0)) {
error("No job descriptors input");
return SLURM_ERROR;
}
(void) gethostname_short(local_hostname, sizeof(local_hostname));
iter = list_iterator_create(job_req_list);
while ((req = (job_desc_msg_t *) list_next(iter))) {
if ((req->alloc_node == NULL) && local_hostname[0])
req->alloc_node = local_hostname;
will_run_resp = NULL;
rc = slurm_job_will_run2(req, &will_run_resp);
if ((rc == SLURM_SUCCESS) && will_run_resp) {
if (first_job_id == 0)
first_job_id = will_run_resp->job_id;
if ((first_start == 0) ||
(first_start < will_run_resp->start_time))
first_start = will_run_resp->start_time;
tot_proc_count += will_run_resp->proc_cnt;
if (hs)
hostset_insert(hs, will_run_resp->node_list);
else
hs = hostset_create(will_run_resp->node_list);
if (will_run_resp->preemptee_job_id) {
itr = list_iterator_create(will_run_resp->
preemptee_job_id);
while ((job_id_ptr = list_next(itr))) {
if (job_list)
sep = ",";
xstrfmtcat(job_list, "%s%u", sep,
*job_id_ptr);
}
list_iterator_destroy(itr);
}
slurm_free_will_run_response_msg(will_run_resp);
}
if (req->alloc_node == local_hostname)
req->alloc_node = NULL;
if (rc != SLURM_SUCCESS)
break;
}
list_iterator_destroy(iter);
if (rc == SLURM_SUCCESS) {
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
char node_list[1028] = "";
if (cluster_flags & CLUSTER_FLAG_BG)
type = "cnodes";
if (hs)
hostset_ranged_string(hs, sizeof(node_list), node_list);
slurm_make_time_str(&first_start, buf, sizeof(buf));
info("Job %u to start at %s using %u %s on %s",
first_job_id, buf, tot_proc_count, type, node_list);
if (job_list)
info(" Preempts: %s", job_list);
}
if (hs)
hostset_destroy(hs);
xfree(job_list);
return rc;
}
Author: HPCNow, Project: slurm, Lines: 88, Source file: allocate.c
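A hedged sketch of assembling the job list for a two-component heterogeneous ("pack") job inside the Slurm tree. The List API (list_create(), list_append()) and FREE_NULL_LIST() are Slurm-internal helpers also seen elsewhere on this page; the descriptors are stack-owned here, so no destructor is passed to list_create():

	job_desc_msg_t req_a, req_b;
	List job_req_list;

	slurm_init_job_desc_msg(&req_a);
	req_a.min_nodes = 1;
	slurm_init_job_desc_msg(&req_b);
	req_b.min_nodes = 4;

	job_req_list = list_create(NULL);
	list_append(job_req_list, &req_a);
	list_append(job_req_list, &req_b);

	if (slurm_pack_job_will_run(job_req_list) != SLURM_SUCCESS)
		slurm_perror("slurm_pack_job_will_run");
	FREE_NULL_LIST(job_req_list);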
Example 15: setup_env
int setup_env(env_t *env, bool preserve_env)
{
int rc = SLURM_SUCCESS;
char *dist = NULL, *lllp_dist = NULL;
char addrbuf[INET_ADDRSTRLEN];
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
if (env == NULL)
return SLURM_ERROR;
if (env->task_pid
&& setenvf(&env->env, "SLURM_TASK_PID", "%d", (int)env->task_pid)) {
error("Unable to set SLURM_TASK_PID environment variable");
rc = SLURM_FAILURE;
}
if (!preserve_env && env->ntasks) {
if(setenvf(&env->env, "SLURM_NTASKS", "%d", env->ntasks)) {
error("Unable to set SLURM_NTASKS "
"environment variable");
rc = SLURM_FAILURE;
}
if(setenvf(&env->env, "SLURM_NPROCS", "%d", env->ntasks)) {
error("Unable to set SLURM_NPROCS "
"environment variable");
rc = SLURM_FAILURE;
}
}
if (env->cpus_per_task
&& setenvf(&env->env, "SLURM_CPUS_PER_TASK", "%d",
env->cpus_per_task) ) {
error("Unable to set SLURM_CPUS_PER_TASK");
rc = SLURM_FAILURE;
}
if (env->ntasks_per_node
&& setenvf(&env->env, "SLURM_NTASKS_PER_NODE", "%d",
env->ntasks_per_node) ) {
error("Unable to set SLURM_NTASKS_PER_NODE");
rc = SLURM_FAILURE;
}
if (env->ntasks_per_socket
&& setenvf(&env->env, "SLURM_NTASKS_PER_SOCKET", "%d",
env->ntasks_per_socket) ) {
error("Unable to set SLURM_NTASKS_PER_SOCKET");
rc = SLURM_FAILURE;
}
if (env->ntasks_per_core
&& setenvf(&env->env, "SLURM_NTASKS_PER_CORE", "%d",
env->ntasks_per_core) ) {
error("Unable to set SLURM_NTASKS_PER_CORE");
rc = SLURM_FAILURE;
}
if (env->cpus_on_node
&& setenvf(&env->env, "SLURM_CPUS_ON_NODE", "%d",
env->cpus_on_node) ) {
error("Unable to set SLURM_CPUS_ON_NODE");
rc = SLURM_FAILURE;
}
_set_distribution(env->distribution, &dist, &lllp_dist);
if(dist)
if (setenvf(&env->env, "SLURM_DISTRIBUTION", "%s", dist)) {
error("Can't set SLURM_DISTRIBUTION env variable");
rc = SLURM_FAILURE;
}
if(env->distribution == SLURM_DIST_PLANE)
if (setenvf(&env->env, "SLURM_DIST_PLANESIZE", "%u",
env->plane_size)) {
error("Can't set SLURM_DIST_PLANESIZE "
"env variable");
rc = SLURM_FAILURE;
}
if(lllp_dist)
if (setenvf(&env->env, "SLURM_DIST_LLLP", "%s", lllp_dist)) {
error("Can't set SLURM_DIST_LLLP env variable");
rc = SLURM_FAILURE;
}
if (env->cpu_bind_type) {
char *str_verbose, *str_bind_type, *str_bind_list;
char *str_bind;
int len;
if (env->batch_flag) {
unsetenvp(env->env, "SBATCH_CPU_BIND_VERBOSE");
unsetenvp(env->env, "SBATCH_CPU_BIND_TYPE");
unsetenvp(env->env, "SBATCH_CPU_BIND_LIST");
unsetenvp(env->env, "SBATCH_CPU_BIND");
} else {
unsetenvp(env->env, "SLURM_CPU_BIND_VERBOSE");
unsetenvp(env->env, "SLURM_CPU_BIND_TYPE");
unsetenvp(env->env, "SLURM_CPU_BIND_LIST");
//.........portions of this code omitted.........
Author: donaghy1, Project: slurm, Lines: 101, Source file: env.c
Example 16: parse_command_line
/*
* parse_command_line, fill in params data structure with data
*/
extern void parse_command_line(int argc, char **argv)
{
char *env_val = NULL;
int opt_char;
int option_index;
hostlist_t host_list;
bool long_form = false;
bool opt_a_set = false, opt_p_set = false;
bool env_a_set = false, env_p_set = false;
static struct option long_options[] = {
{"all", no_argument, 0, 'a'},
{"bg", no_argument, 0, 'b'},
{"dead", no_argument, 0, 'd'},
{"exact", no_argument, 0, 'e'},
{"federation",no_argument, 0, OPT_LONG_FEDR},
{"help", no_argument, 0, OPT_LONG_HELP},
{"hide", no_argument, 0, OPT_LONG_HIDE},
{"iterate", required_argument, 0, 'i'},
{"local", no_argument, 0, OPT_LONG_LOCAL},
{"long", no_argument, 0, 'l'},
{"cluster", required_argument, 0, 'M'},
{"clusters", required_argument, 0, 'M'},
{"nodes", required_argument, 0, 'n'},
{"noconvert", no_argument, 0, OPT_LONG_NOCONVERT},
{"noheader", no_argument, 0, 'h'},
{"Node", no_argument, 0, 'N'},
{"format", required_argument, 0, 'o'},
{"Format", required_argument, 0, 'O'},
{"partition", required_argument, 0, 'p'},
{"responding",no_argument, 0, 'r'},
{"list-reasons", no_argument, 0, 'R'},
{"summarize", no_argument, 0, 's'},
{"sort", required_argument, 0, 'S'},
{"states", required_argument, 0, 't'},
{"reservation",no_argument, 0, 'T'},
{"usage", no_argument, 0, OPT_LONG_USAGE},
{"verbose", no_argument, 0, 'v'},
{"version", no_argument, 0, 'V'},
{NULL, 0, 0, 0}
};
params.convert_flags = CONVERT_NUM_UNIT_EXACT;
if (slurmctld_conf.fed_params &&
strstr(slurmctld_conf.fed_params, "fed_display"))
params.federation_flag = true;
if (getenv("SINFO_ALL")) {
env_a_set = true;
params.all_flag = true;
}
if (getenv("SINFO_FEDERATION"))
params.federation_flag = true;
if (getenv("SINFO_LOCAL"))
params.local = true;
if ( ( env_val = getenv("SINFO_PARTITION") ) ) {
env_p_set = true;
params.partition = xstrdup(env_val);
params.part_list = _build_part_list(env_val);
params.all_flag = true;
}
if (env_a_set && env_p_set) {
error("Conflicting options, SINFO_ALL and SINFO_PARTITION, specified. "
"Please choose one or the other.");
exit(1);
}
if ( ( env_val = getenv("SINFO_SORT") ) )
params.sort = xstrdup(env_val);
if ( ( env_val = getenv("SLURM_CLUSTERS") ) ) {
if (!(params.clusters = slurmdb_get_info_cluster(env_val))) {
print_db_notok(env_val, 1);
exit(1);
}
working_cluster_rec = list_peek(params.clusters);
params.local = true;
}
while ((opt_char = getopt_long(argc, argv,
"abdehi:lM:n:No:O:p:rRsS:t:TvV",
long_options, &option_index)) != -1) {
switch (opt_char) {
case (int)'?':
fprintf(stderr,
"Try \"sinfo --help\" for more information\n");
exit(1);
break;
case (int)'a':
opt_a_set = true;
xfree(params.partition);
FREE_NULL_LIST(params.part_list);
params.all_flag = true;
break;
case (int)'b':
params.cluster_flags = slurmdb_setup_cluster_flags();
if (params.cluster_flags & CLUSTER_FLAG_BG)
params.bg_flag = true;
else {
//.........portions of this code omitted.........
Author: HPCNow, Project: slurm, Lines: 101, Source file: opts.c
Example 17: env_array_for_job
/*
* Set in "dest" the environment variables relevant to a SLURM job
* allocation, overwriting any environment variables of the same name.
* If the address pointed to by "dest" is NULL, memory will automatically be
* xmalloc'ed. The array is terminated by a NULL pointer, and thus is
* suitable for use by execle() and other env_array_* functions.
*
* Sets the variables:
* SLURM_JOB_ID
* SLURM_JOB_NUM_NODES
* SLURM_JOB_NODELIST
* SLURM_JOB_CPUS_PER_NODE
* LOADLBATCH (AIX only)
* SLURM_BG_NUM_NODES, MPIRUN_PARTITION, MPIRUN_NOFREE, and
* MPIRUN_NOALLOCATE (BG only)
*
* Sets OBSOLETE variables (needed for MPI, do not remove):
* SLURM_JOBID
* SLURM_NNODES
* SLURM_NODELIST
* SLURM_TASKS_PER_NODE
*/
int
env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc,
const job_desc_msg_t *desc)
{
char *tmp = NULL;
char *dist = NULL, *lllp_dist = NULL;
slurm_step_layout_t *step_layout = NULL;
uint32_t num_tasks = desc->num_tasks;
int rc = SLURM_SUCCESS;
uint32_t node_cnt = alloc->node_cnt;
uint32_t cluster_flags = slurmdb_setup_cluster_flags();
_setup_particulars(cluster_flags, dest, alloc->select_jobinfo);
if (cluster_flags & CLUSTER_FLAG_BG) {
select_g_select_jobinfo_get(alloc->select_jobinfo,
SELECT_JOBDATA_NODE_CNT,
&node_cnt);
if (!node_cnt)
node_cnt = alloc->node_cnt;
env_array_overwrite_fmt(dest, "SLURM_BG_NUM_NODES",
"%u", node_cnt);
}
env_array_overwrite_fmt(dest, "SLURM_JOB_ID", "%u", alloc->job_id);
env_array_overwrite_fmt(dest, "SLURM_JOB_NUM_NODES", "%u", node_cnt);
env_array_overwrite_fmt(dest, "SLURM_JOB_NODELIST", "%s",
alloc->node_list);
_set_distribution(desc->task_dist, &dist, &lllp_dist);
if(dist)
env_array_overwrite_fmt(dest, "SLURM_DISTRIBUTION", "%s",
dist);
if(desc->task_dist == SLURM_DIST_PLANE)
env_array_overwrite_fmt(dest, "SLURM_DIST_PLANESIZE",
"%u", desc->plane_size);
if(lllp_dist)
env_array_overwrite_fmt(dest, "SLURM_DIST_LLLP", "%s",
lllp_dist);
tmp = uint32_compressed_to_str(alloc->num_cpu_groups,
alloc->cpus_per_node,
alloc->cpu_count_reps);
env_array_overwrite_fmt(dest, "SLURM_JOB_CPUS_PER_NODE", "%s", tmp);
xfree(tmp);
/* OBSOLETE, but needed by MPI, do not remove */
env_array_overwrite_fmt(dest, "SLURM_JOBID", "%u", alloc->job_id);
env_array_overwrite_fmt(dest, "SLURM_NNODES", "%u", node_cnt);
env_array_overwrite_fmt(dest, "SLURM_NODELIST", "%s", alloc->node_list);
if(num_tasks == NO_VAL) {
/* If we know how many tasks we are going to do then
//.........portions of this code omitted.........