本文整理汇总了C++中MPIDI_FUNC_ENTER函数的典型用法代码示例。如果您正苦于以下问题:C++ MPIDI_FUNC_ENTER函数的具体用法?C++ MPIDI_FUNC_ENTER怎么用?C++ MPIDI_FUNC_ENTER使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了MPIDI_FUNC_ENTER函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: MPID_nem_lmt_dma_initiate_lmt
/* Sender-side initiation of a knem DMA large-message transfer (LMT).
 *
 * Allocates a knem cookie for the send request, hands the send data to
 * the driver via send_sreq_data(), and ships the RTS packet to the
 * receiver with the cookie attached.
 *
 * Returns MPI_SUCCESS or an MPI error code.  On success the cookie
 * allocation is committed and owned by sreq->ch.s_cookie; on failure
 * MPIU_CHKPMEM_REAP releases it.
 */
int MPID_nem_lmt_dma_initiate_lmt(MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt, MPID_Request *sreq)
{
int mpi_errno = MPI_SUCCESS;
MPID_nem_pkt_lmt_rts_t * const rts_pkt = (MPID_nem_pkt_lmt_rts_t *)pkt;
MPIU_CHKPMEM_DECL(1);
MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_LMT_DMA_INITIATE_LMT);
MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_LMT_DMA_INITIATE_LMT);
/* cookie travels with the RTS so the receiver can pull the data;
   CHKPMEM_MALLOC jumps to fn_fail on allocation failure */
MPIU_CHKPMEM_MALLOC(sreq->ch.s_cookie, knem_cookie_t *, sizeof(knem_cookie_t), mpi_errno, "s_cookie");
/* register the send buffer with knem (presumably fills *s_cookie --
   confirm against send_sreq_data's definition) */
mpi_errno = send_sreq_data(vc, sreq, sreq->ch.s_cookie);
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
MPID_nem_lmt_send_RTS(vc, rts_pkt, sreq->ch.s_cookie, sizeof(knem_cookie_t));
fn_exit:
MPIU_CHKPMEM_COMMIT();
MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_LMT_DMA_INITIATE_LMT);
return mpi_errno;
fn_fail:
/* free the cookie allocation, then take the common exit path */
MPIU_CHKPMEM_REAP();
goto fn_exit;
}
开发者ID:adevress,项目名称:MPICH-BlueGene,代码行数:23,代码来源:mpid_nem_lmt_dma.c
示例2: MPIDI_CH3U_Comm_register_destroy_hook
/* Register a communicator-destroy callback.
 *
 * hook_fn(comm, param) will be invoked when a communicator is
 * destroyed; param is passed through verbatim.  The new element is
 * prepended to the destroy_hooks_head/tail list.
 *
 * Returns MPI_SUCCESS or an MPI error code; the element is freed
 * (CHKPMEM_REAP) on the failure path.
 */
int MPIDI_CH3U_Comm_register_destroy_hook(int (*hook_fn)(struct MPID_Comm *, void *), void *param)
{
int mpi_errno = MPI_SUCCESS;
hook_elt *elt;
MPIU_CHKPMEM_DECL(1);
MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_COMM_REGISTER_DESTROY_HOOK);
MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_COMM_REGISTER_DESTROY_HOOK);
/* jumps to fn_fail on allocation failure */
MPIU_CHKPMEM_MALLOC(elt, hook_elt *, sizeof(hook_elt), mpi_errno, "hook_elt");
elt->hook_fn = hook_fn;
elt->param = param;
/* prepend: hooks registered later sit at the head of the list */
MPL_LL_PREPEND(destroy_hooks_head, destroy_hooks_tail, elt);
fn_exit:
MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3U_COMM_REGISTER_DESTROY_HOOK);
return mpi_errno;
fn_fail:
MPIU_CHKPMEM_REAP();
goto fn_exit;
}
开发者ID:mpifl,项目名称:mpich3newforfile,代码行数:23,代码来源:ch3u_comm.c
示例3: MPID_nem_mxm_poll
/* Progress-engine poll for the MXM netmod: first drain every queued
 * send request, then poll the MXM library for progress.
 *
 * Returns MPI_SUCCESS or the error code produced by _mxm_poll().
 */
int MPID_nem_mxm_poll(int in_blocking_progress)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Request *queued_sreq = NULL;
    MPIDI_STATE_DECL(MPID_STATE_MXM_POLL);
    MPIDI_FUNC_ENTER(MPID_STATE_MXM_POLL);

    /* Handle every pending send request before touching the network. */
    for (;;) {
        if (MPID_nem_mxm_queue_empty(mxm_obj->sreq_queue))
            break;
        MPID_nem_mxm_queue_dequeue(&mxm_obj->sreq_queue, &queued_sreq);
        _mxm_handle_sreq(queued_sreq);
    }

    mpi_errno = _mxm_poll();
    if (mpi_errno != MPI_SUCCESS)
        MPIR_ERR_POP(mpi_errno);

fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MXM_POLL);
    return mpi_errno;
fn_fail:
    goto fn_exit;
}
开发者ID:tjhei,项目名称:fgmpi,代码行数:23,代码来源:mxm_poll.c
示例4: MPIDI_CH3_Rendezvous_push
/* Make progress on an in-flight rendezvous transfer for sreq on vc,
 * dispatching on the rendezvous protocol recorded in the request.
 *
 * Intra-node peers (SMP) take the shared-memory path and must be using
 * the R3 protocol.  Remote peers dispatch on sreq->mrail.protocol:
 * RPUT, RGET, UD zero-copy (when enabled), or R3 as the fallback.
 *
 * Always returns MPI_SUCCESS.
 */
int MPIDI_CH3_Rendezvous_push(MPIDI_VC_t * vc, MPID_Request * sreq)
{
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_RNDV_PUSH);
    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_RNDV_PUSH);

    /* Intra-node peer: use the shared-memory rendezvous path. */
    if (SMP_INIT
        && vc->smp.local_nodes >= 0
        && vc->smp.local_nodes != g_smpi.my_local_id)
    {
        MPIU_Assert(sreq->mrail.protocol == VAPI_PROTOCOL_R3);
        MPIDI_CH3_SMP_Rendezvous_push(vc, sreq);
        /* FIX: the original early return skipped MPIDI_FUNC_EXIT,
           unbalancing the FUNC_ENTER/FUNC_EXIT state-tracing pair. */
        MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3I_RNDV_PUSH);
        return MPI_SUCCESS;
    }

    switch (sreq->mrail.protocol)
    {
    case VAPI_PROTOCOL_RPUT:
        MPIDI_CH3I_MRAILI_Rendezvous_rput_push(vc, sreq);
        break;
    case VAPI_PROTOCOL_RGET:
        MPIDI_CH3I_MRAILI_Rendezvous_rget_push(vc, sreq);
        break;
#ifdef _ENABLE_UD_
    case VAPI_PROTOCOL_UD_ZCOPY:
        MPIDI_CH3I_MRAILI_Rendezvous_zcopy_push(vc, sreq,
                &(MPIDI_CH3I_RDMA_Process.zcopy_info));
        break;
#endif
    default:
        /* R3 (copy-through) is the fallback protocol */
        MPIDI_CH3_Rendezvous_r3_push(vc, sreq);
        break;
    }

    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3I_RNDV_PUSH);
    return MPI_SUCCESS;
}
开发者ID:hpc,项目名称:mvapich2-cce,代码行数:37,代码来源:ch3_rndvtransfer.c
示例5: append_overflow
/* Post overflow match-list entry number i to the Portals overflow list.
 *
 * The entry matches PUTs from any source (all match bits ignored), is
 * locally managed, and never truncates.  A failed PtlMEAppend is
 * converted to an MPI error code and returned.
 */
static int append_overflow(int i)
{
    int mpi_errno = MPI_SUCCESS;
    int ret;
    ptl_me_t match_entry;
    ptl_process_t any_process;
    MPIDI_STATE_DECL(MPID_STATE_APPEND_OVERFLOW);
    MPIDI_FUNC_ENTER(MPID_STATE_APPEND_OVERFLOW);

    MPIU_Assert(i >= 0 && i < NUM_OVERFLOW_ME);

    /* wildcard source: accept from any nid/pid */
    any_process.phys.pid = PTL_PID_ANY;
    any_process.phys.nid = PTL_NID_ANY;

    /* describe the i-th overflow buffer */
    match_entry.start       = overflow_buf[i];
    match_entry.length      = OVERFLOW_LENGTH;
    match_entry.ct_handle   = PTL_CT_NONE;
    match_entry.uid         = PTL_UID_ANY;
    match_entry.options     = ( PTL_ME_OP_PUT | PTL_ME_MANAGE_LOCAL | PTL_ME_NO_TRUNCATE | PTL_ME_MAY_ALIGN |
                                PTL_ME_IS_ACCESSIBLE | PTL_ME_EVENT_LINK_DISABLE );
    match_entry.match_id    = any_process;
    match_entry.match_bits  = 0;
    match_entry.ignore_bits = ~((ptl_match_bits_t)0); /* ignore all bits: match anything */
    match_entry.min_free    = PTL_LARGE_THRESHOLD;

    /* if there is no space to append the entry, process outstanding events and try again */
    ret = PtlMEAppend(MPIDI_nem_ptl_ni, MPIDI_nem_ptl_pt, &match_entry, PTL_OVERFLOW_LIST,
                      (void *)(size_t)i, &overflow_me_handle[i]);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlmeappend", "**ptlmeappend %s", MPID_nem_ptl_strerror(ret));

fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_APPEND_OVERFLOW);
    return mpi_errno;
fn_fail:
    goto fn_exit;
}
开发者ID:zhanglt,项目名称:mpich,代码行数:37,代码来源:ptl_poll.c
示例6: MPIDI_CH3_Init
/* CH3 channel initialization for the nemesis device.
 *
 * Order matters: nemesis core first (MPID_nem_init), then the progress
 * engine, then a per-process VC init for every member of the process
 * group.  A progress-engine failure is fatal.
 *
 * Returns MPI_SUCCESS or an MPI error code.
 */
int MPIDI_CH3_Init(int has_parent, MPIDI_PG_t *pg_p, int pg_rank)
{
int mpi_errno = MPI_SUCCESS;
int i;
MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_INIT);
MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3_INIT);
/* Override split_type */
MPID_Comm_fns = &comm_fns;
mpi_errno = MPID_nem_init (pg_rank, pg_p, has_parent);
if (mpi_errno) MPIR_ERR_POP (mpi_errno);
nemesis_initialized = 1;
MPIDI_CH3I_my_rank = pg_rank;
MPIDI_CH3I_my_pg = pg_p;
/*
* Initialize Progress Engine
*/
mpi_errno = MPIDI_CH3I_Progress_init();
if (mpi_errno) MPIR_ERR_SETFATALANDJUMP (mpi_errno, MPI_ERR_OTHER, "**init_progress");
/* initialize a virtual connection for every process in the PG */
for (i = 0; i < pg_p->size; i++)
{
mpi_errno = MPIDI_CH3_VC_Init(&pg_p->vct[i]);
if (mpi_errno) MPIR_ERR_POP(mpi_errno);
}
fn_exit:
MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3_INIT);
return mpi_errno;
fn_fail:
goto fn_exit;
}
示例7: MPID_GPID_GetAllInComm
/* Collect the global process id (pgid, pg_rank pair) of every member of
 * comm_ptr into local_gpids, and set *singlePG to 1 iff all members
 * belong to the same process group.
 *
 * NOTE(review): local_gpids is written through an int* alias, two ints
 * per entry -- this assumes sizeof(MPID_Gpid) is (at least) two ints;
 * confirm against the MPID_Gpid definition.
 */
int MPID_GPID_GetAllInComm( MPID_Comm *comm_ptr, int local_size,
MPID_Gpid local_gpids[], int *singlePG )
{
int mpi_errno = MPI_SUCCESS;
int i;
int *gpid = (int*)&local_gpids[0];
int lastPGID = -1, pgid;
MPIDI_VCR vc;
MPIDI_STATE_DECL(MPID_STATE_MPID_GPID_GETALLINCOMM);
MPIDI_FUNC_ENTER(MPID_STATE_MPID_GPID_GETALLINCOMM);
MPIU_Assert(comm_ptr->local_size == local_size);
*singlePG = 1;
for (i=0; i<comm_ptr->local_size; i++) {
vc = comm_ptr->dev.vcrt->vcr_table[i];
/* Get the process group id as an int */
MPIDI_PG_IdToNum( vc->pg, &pgid );
*gpid++ = pgid;
/* any change of pgid across members means multiple PGs */
if (lastPGID != pgid) {
if (lastPGID != -1)
*singlePG = 0;
lastPGID = pgid;
}
*gpid++ = vc->pg_rank;
MPIU_DBG_MSG_FMT(COMM,VERBOSE, (MPIU_DBG_FDEST,
"pgid=%d vc->pg_rank=%d",
pgid, vc->pg_rank));
}
MPIDI_FUNC_EXIT(MPID_STATE_MPID_GPID_GETALLINCOMM);
return mpi_errno;
}
开发者ID:Niharikareddy,项目名称:mpich,代码行数:37,代码来源:mpid_vc.c
示例8: pkt_ckpt_marker_handler
/* Packet handler for a checkpoint-marker packet.
 *
 * Starts a local checkpoint if one is not already in progress, checks
 * that the marker belongs to the current checkpoint wave, and counts
 * down the expected markers.  When the last marker arrives the finish
 * flag is set and the progress engine is signalled.  Consumes the whole
 * packet (*buflen) and produces no request (*req = NULL).
 */
static int pkt_ckpt_marker_handler(MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt, MPIDI_msg_sz_t *buflen, MPID_Request **req)
{
int mpi_errno = MPI_SUCCESS;
MPID_nem_pkt_ckpt_marker_t * const ckpt_pkt = (MPID_nem_pkt_ckpt_marker_t *)pkt;
MPIDI_STATE_DECL(MPID_STATE_PKT_CKPT_MARKER_HANDLER);
MPIDI_FUNC_ENTER(MPID_STATE_PKT_CKPT_MARKER_HANDLER);
/* a marker can arrive before we started checkpointing locally */
if (!checkpointing) {
mpi_errno = MPIDI_nem_ckpt_start();
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
}
MPIU_Assert(current_wave == ckpt_pkt->wave);
--marker_count;
/* We're checkpointing the shared memory region, so we don't need
to flush the channels between local processes, only remote
processes */
if (marker_count == 0) {
MPIDI_nem_ckpt_finish_checkpoint = TRUE;
/* make sure we break out of receive loop into progress loop */
MPIDI_CH3_Progress_signal_completion();
}
*buflen = sizeof(MPIDI_CH3_Pkt_t);
*req = NULL;
fn_exit:
MPIDI_FUNC_EXIT(MPID_STATE_PKT_CKPT_MARKER_HANDLER);
return mpi_errno;
fn_fail:
goto fn_exit;
}
示例9: MPID_nem_lmt_dma_handle_cookie
/* Handle a COOKIE message for an in-progress knem DMA LMT.
 *
 * Two cases, distinguished by the cookie iov:
 *  - empty (len 0, buf NULL): req is a *send* request.  The receiver
 *    consumed the previous chunk; reload the send iov (check_req_complete
 *    invokes OnDataAvail), start another knem DMA, and send the new
 *    cookie back to the receiver.
 *  - non-empty: req is a *receive* request; continue receiving with the
 *    region id carried in the cookie.
 *
 * Returns MPI_SUCCESS or an MPI error code.
 */
int MPID_nem_lmt_dma_handle_cookie(MPIDI_VC_t *vc, MPID_Request *req, MPID_IOV cookie)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_LMT_DMA_HANDLE_COOKIE);
    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_LMT_DMA_HANDLE_COOKIE);

    if (cookie.MPID_IOV_LEN == 0 && cookie.MPID_IOV_BUF == NULL) {
        /* req is a send request, we need to initiate another knem request and
           send a COOKIE message back to the receiver indicating the lid
           returned from the ioctl. */
        int complete;
        knem_cookie_t s_cookie;

        /* This function will invoke the OnDataAvail function to load more data. */
        mpi_errno = check_req_complete(vc, req, &complete);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);

        /* If we were complete we should have received a DONE message instead
           of a COOKIE message. */
        MPIU_Assert(!complete);

        mpi_errno = do_dma_send(vc, req, req->dev.iov_count, &req->dev.iov[0], &s_cookie);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
        MPID_nem_lmt_send_COOKIE(vc, req, &s_cookie, sizeof(knem_cookie_t));
    }
    else {
        /* req is a receive request and we need to continue receiving using the
           lid provided in the cookie iov. */
        mpi_errno = MPID_nem_lmt_dma_start_recv(vc, req, cookie);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }

 fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_LMT_DMA_HANDLE_COOKIE);
    /* BUG FIX: the original returned the constant MPI_SUCCESS here,
       silently discarding any error raised via MPIU_ERR_POP above. */
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
开发者ID:adevress,项目名称:MPICH-BlueGene,代码行数:37,代码来源:mpid_nem_lmt_dma.c
示例10: get_hca_type
/**
* Get the type of a device.
*
* @param dev the device.
* @param ctx the device context.
* @param hca_type the type (output).
*
* @return MPI_SUCCESS if succeded, MPI_ERR_OTHER if failed
*
* \see HCA_Type
*/
static inline int get_hca_type (struct ibv_device* dev, struct ibv_context* ctx, HCA_Type* hca_type)
{
    /* FIX: all declarations moved above MPIDI_FUNC_ENTER (a macro that
       expands to statements).  The original interleaved declarations
       with statements, which is invalid C89 and inconsistent with every
       other function in this file. */
    int ret;
    int mpi_errno = MPI_SUCCESS;
    struct ibv_device_attr dev_attr;
    char* dev_name;
    MPIDI_STATE_DECL(MPID_STATE_GET_HCA_TYPE);
    MPIDI_FUNC_ENTER(MPID_STATE_GET_HCA_TYPE);

    memset(&dev_attr, 0, sizeof(struct ibv_device_attr));

    dev_name = (char*) ibv_get_device_name(dev);
    if (!dev_name)
    {
        MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**ibv_get_device_name");
    }

    ret = ibv_query_device(ctx, &dev_attr);
    if (ret)
    {
        MPIU_ERR_SETANDJUMP1(
            mpi_errno,
            MPI_ERR_OTHER,
            "**ibv_query_device",
            "**ibv_query_device %s",
            dev_name
        );
    }

    /* map the verbs device name to the HCA_Type enum */
    if ((mpi_errno = hcaNameToType(dev_name, hca_type)) != MPI_SUCCESS)
    {
        MPIU_ERR_POP(mpi_errno);
    }

fn_fail:
    /* single exit label: reached on both success and failure paths */
    MPIDI_FUNC_EXIT(MPID_STATE_GET_HCA_TYPE);
    return mpi_errno;
}
开发者ID:hpc,项目名称:mvapich2-cce,代码行数:48,代码来源:ib_hca.c
示例11: MPIDI_Open_port
/* Build a port name for MPI_Open_port.
 *
 * The port name is this process's business card with a unique
 * port-name tag prepended; the tag lets multiple open ports share one
 * business card.  port_name must be at least MPI_MAX_PORT_NAME bytes.
 *
 * Returns MPI_SUCCESS or an MPI error code.  info_ptr is currently
 * unused (see FIXME below).
 */
static int MPIDI_Open_port(MPID_Info *info_ptr, char *port_name)
{
int mpi_errno = MPI_SUCCESS;
int str_errno = MPIU_STR_SUCCESS;
int len;
int port_name_tag = 0; /* this tag is added to the business card,
which is then returned as the port name */
int myRank = MPIR_Process.comm_world->rank;
MPIDI_STATE_DECL(MPID_STATE_MPIDI_OPEN_PORT);
MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_OPEN_PORT);
mpi_errno = get_port_name_tag(&port_name_tag);
MPIR_ERR_CHKANDJUMP(mpi_errno,mpi_errno,MPI_ERR_OTHER,"**argstr_port_name_tag");
len = MPI_MAX_PORT_NAME;
/* write "tag" key/value into port_name; advances port_name/len */
str_errno = MPIU_Str_add_int_arg(&port_name, &len,
MPIDI_CH3I_PORT_NAME_TAG_KEY, port_name_tag);
MPIR_ERR_CHKANDJUMP(str_errno, mpi_errno, MPI_ERR_OTHER, "**argstr_port_name_tag");
/* This works because Get_business_card uses the same MPIU_Str_xxx
functions as above to add the business card to the input string */
/* FIXME: We should instead ask the mpid_pg routines to give us
a connection string. There may need to be a separate step to
restrict us to a connection information that is only valid for
connections between processes that are started separately (e.g.,
may not use shared memory). We may need a channel-specific
function to create an exportable connection string. */
mpi_errno = MPIDI_CH3_Get_business_card(myRank, port_name, len);
MPIU_DBG_MSG_FMT(CH3, VERBOSE, (MPIU_DBG_FDEST, "port_name = %s", port_name));
fn_exit:
MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_OPEN_PORT);
return mpi_errno;
fn_fail:
goto fn_exit;
}
开发者ID:Niharikareddy,项目名称:mpich,代码行数:37,代码来源:mpid_port.c
示例12: MPIDI_CH3U_Handle_send_req
/* Run the completion action for a send request whose data has gone out.
 *
 * If the request carries an OnDataAvail continuation, invoke it (it may
 * reload the iov and report completion through *complete); otherwise
 * the request is finished: complete it and set *complete to 1.
 *
 * Returns MPI_SUCCESS or the continuation's error code.
 */
int MPIDI_CH3U_Handle_send_req(MPIDI_VC_t * vc, MPID_Request * sreq,
                               int *complete)
{
    int mpi_errno = MPI_SUCCESS;
    int (*on_data_avail)(MPIDI_VC_t *, MPID_Request *, int *);
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_HANDLE_SEND_REQ);
    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_HANDLE_SEND_REQ);

    /* The request carries its own continuation; no dispatch on the old
       "ca" field is needed, and callers may invoke it directly. */
    on_data_avail = sreq->dev.OnDataAvail;
    if (on_data_avail != NULL) {
        mpi_errno = on_data_avail(vc, sreq, complete);
    }
    else {
        /* No continuation: the request is done.  GET_RESP requests are
           expected to always carry one. */
        MPIU_Assert(MPIDI_Request_get_type(sreq) != MPIDI_REQUEST_TYPE_GET_RESP);
        MPIDI_CH3U_Request_complete(sreq);
        *complete = 1;
    }

    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3U_HANDLE_SEND_REQ);
    return mpi_errno;
}
示例13: MPID_nem_tcp_get_vc_from_conninfo
/* Look up the VC for (pg_id, pg_rank) and mark it active.
 *
 * Finds the process group by id, validates the rank, and returns the
 * corresponding virtual connection through *vc.
 *
 * Returns MPI_SUCCESS or an MPI error code.
 */
int MPID_nem_tcp_get_vc_from_conninfo (char *pg_id, int pg_rank, struct MPIDI_VC **vc)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_PG_t *pg;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_TCP_GET_VC_FROM_CONNINFO);
    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_TCP_GET_VC_FROM_CONNINFO);

    MPIU_DBG_MSG_FMT(NEM_SOCK_DET, VERBOSE, (MPIU_DBG_FDEST, "pg_id=%s pg_rank=%d", pg_id, pg_rank));

    mpi_errno = MPIDI_PG_Find (pg_id, &pg);
    if (mpi_errno) MPIU_ERR_POP (mpi_errno);
    MPIU_ERR_CHKINTERNAL(pg == NULL, mpi_errno, "invalid PG");
    /* BUG FIX: valid ranks are 0..size-1.  The original check used
       '> size', which let pg_rank == size slip through (off-by-one). */
    MPIU_ERR_CHKINTERNAL(pg_rank < 0 || pg_rank >= MPIDI_PG_Get_size (pg), mpi_errno, "invalid pg_rank");

    MPIDI_PG_Get_vc_set_active (pg, pg_rank, vc);

 fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_TCP_GET_VC_FROM_CONNINFO);
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
开发者ID:OngOngoing,项目名称:219351_homework,代码行数:24,代码来源:tcp_utility.c
示例14: MPIDI_VCR_Dup
/* Duplicate a virtual-connection reference: bump the VC's reference
 * count and hand the same VC back through *new_vcr.
 *
 * Always returns MPI_SUCCESS.
 */
int MPIDI_VCR_Dup(MPIDI_VCR orig_vcr, MPIDI_VCR * new_vcr)
{
MPIDI_STATE_DECL(MPID_STATE_MPID_VCR_DUP);
MPIDI_FUNC_ENTER(MPID_STATE_MPID_VCR_DUP);
/* We are allowed to create a vc that belongs to no process group
as part of the initial connect/accept action, so in that case,
ignore the pg ref count update */
/* XXX DJG FIXME-MT should we be checking this? */
/* we probably need a test-and-incr operation or equivalent to avoid races */
if (MPIU_Object_get_ref(orig_vcr) == 0 && orig_vcr->pg) {
/* NOTE(review): add_ref is deliberately called twice when the count
was 0 -- presumably one reference for the new VCR and one held on
behalf of the PG membership; confirm against MPIDI_VC_Destroy. */
MPIDI_VC_add_ref( orig_vcr );
MPIDI_VC_add_ref( orig_vcr );
MPIDI_PG_add_ref( orig_vcr->pg );
}
else {
MPIDI_VC_add_ref(orig_vcr);
}
MPIU_DBG_MSG_FMT(REFCOUNT,TYPICAL,(MPIU_DBG_FDEST,"Incr VCR %p ref count",orig_vcr));
*new_vcr = orig_vcr;
MPIDI_FUNC_EXIT(MPID_STATE_MPID_VCR_DUP);
return MPI_SUCCESS;
}
示例15: vbuf_init_rma_put
/* Prepare vbuf v for an RDMA write (RMA put): len bytes at local
 * l_addr/lkey will be written to remote r_addr/rkey on the given rail.
 * Only fills in the ibv send work request embedded in the vbuf;
 * nothing is posted here.
 */
void vbuf_init_rma_put(vbuf *v, void *l_addr, uint32_t lkey,
                       void *r_addr, uint32_t rkey, int len, int rail)
{
    MPIDI_STATE_DECL(MPID_STATE_VBUF_INIT_RMA_PUT);
    MPIDI_FUNC_ENTER(MPID_STATE_VBUF_INIT_RMA_PUT);

    /* single scatter/gather entry describing the local buffer */
    v->desc.sg_entry.addr   = (uintptr_t)(l_addr);
    v->desc.sg_entry.length = len;
    v->desc.sg_entry.lkey   = lkey;

    /* signaled RDMA_WRITE work request; wr_id points back at the vbuf */
    v->desc.u.sr.opcode     = IBV_WR_RDMA_WRITE;
    v->desc.u.sr.send_flags = IBV_SEND_SIGNALED;
    v->desc.u.sr.wr_id      = (uintptr_t) v;
    v->desc.u.sr.next       = NULL;
    v->desc.u.sr.num_sge    = 1;
    v->desc.u.sr.sg_list    = &(v->desc.sg_entry);

    /* remote side of the put */
    v->desc.u.sr.wr.rdma.remote_addr = (uintptr_t)(r_addr);
    v->desc.u.sr.wr.rdma.rkey        = rkey;

    v->padding = RDMA_ONE_SIDED;
    v->rail    = rail;

    MPIDI_FUNC_EXIT(MPID_STATE_VBUF_INIT_RMA_PUT);
}
开发者ID:hpc,项目名称:mvapich2-cce,代码行数:24,代码来源:vbuf.c
示例16: MPID_Comm_failure_ack
/* Acknowledge process failures on a communicator (MPI_Comm_failure_ack).
 *
 * Refreshes the locally-known failed-process list, then records the
 * last known failed process as acknowledged on this communicator.
 *
 * Returns MPI_SUCCESS (mpi_errno is never set on this path).
 */
int MPID_Comm_failure_ack(MPID_Comm *comm_ptr)
{
int mpi_errno = MPI_SUCCESS;
MPIDI_STATE_DECL(MPID_STATE_MPID_COMM_FAILURE_ACK);
MPIDI_FUNC_ENTER(MPID_STATE_MPID_COMM_FAILURE_ACK);
/* Update the list of failed processes that we know about locally.
* This part could technically be turned off and be a correct
* implementation, but it would be slower about propagating failure
* information. Also, this is the failure case so speed isn't as
* important. */
MPIDI_CH3U_Check_for_failed_procs();
/* Update the marker for the last known failed process in this
* communicator. */
comm_ptr->dev.last_ack_rank = MPIDI_last_known_failed;
fn_exit:
MPIDI_FUNC_EXIT(MPID_STATE_MPID_COMM_FAILURE_ACK);
return mpi_errno;
fn_fail:
goto fn_exit;
}
开发者ID:syftalent,项目名称:dist-sys-exercises-1,代码行数:24,代码来源:mpid_comm_failure_ack.c
示例17: MPIDI_CH3U_Request_unpack_srbuf
/* Unpack data accumulated in a receive request's temporary
 * (send/receive) buffer into the user buffer via the segment code.
 *
 * Advances rreq->dev.segment_first past the consumed bytes and shifts
 * any unconsumed tail bytes to the front of the temporary buffer
 * (tracked in tmpbuf_off).  Datatype-mismatch conditions are recorded
 * in rreq->status.MPI_ERROR rather than returned, so the remainder of a
 * truncated message can still be drained off the wire.
 *
 * Returns MPI_SUCCESS (errors are reported through the request status).
 */
int MPIDI_CH3U_Request_unpack_srbuf(MPID_Request * rreq)
{
MPI_Aint last;
int tmpbuf_last;
int mpi_errno = MPI_SUCCESS;
MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_REQUEST_UNPACK_SRBUF);
MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_REQUEST_UNPACK_SRBUF);
/* last stream byte available in the tmp buffer, clipped to the
   total message size */
tmpbuf_last = (int)(rreq->dev.segment_first + rreq->dev.tmpbuf_sz);
if (rreq->dev.segment_size < tmpbuf_last)
{
tmpbuf_last = (int)rreq->dev.segment_size;
}
last = tmpbuf_last;
/* on return, last is the stream offset actually consumed */
MPID_Segment_unpack(rreq->dev.segment_ptr, rreq->dev.segment_first,
&last, rreq->dev.tmpbuf);
if (last == 0 || last == rreq->dev.segment_first)
{
/* --BEGIN ERROR HANDLING-- */
/* If no data can be unpacked, then we have a datatype processing
problem. Adjust the segment info so that the remaining
data is received and thrown away. */
MPIR_STATUS_SET_COUNT(rreq->status, rreq->dev.segment_first);
rreq->dev.segment_size = rreq->dev.segment_first;
rreq->dev.segment_first += tmpbuf_last;
rreq->status.MPI_ERROR = MPIR_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_TYPE,
"**dtypemismatch", 0);
/* --END ERROR HANDLING-- */
}
else if (tmpbuf_last == rreq->dev.segment_size)
{
/* --BEGIN ERROR HANDLING-- */
if (last != tmpbuf_last)
{
/* received data was not entirely consumed by unpack() because too
few bytes remained to fill the next basic datatype.
Note: the segment_first field is set to segment_last so that if
this is a truncated message, extra data will be read
off the pipe. */
MPIR_STATUS_SET_COUNT(rreq->status, last);
rreq->dev.segment_size = last;
rreq->dev.segment_first = tmpbuf_last;
rreq->status.MPI_ERROR = MPIR_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_TYPE,
"**dtypemismatch", 0);
}
/* --END ERROR HANDLING-- */
}
else
{
/* normal partial-consume case: keep the unconsumed tail for the
   next unpack round */
rreq->dev.tmpbuf_off = (int)(tmpbuf_last - last);
if (rreq->dev.tmpbuf_off > 0)
{
/* move any remaining data to the beginning of the buffer.
Note: memmove() is used since the data regions could
overlap. */
memmove(rreq->dev.tmpbuf, (char *) rreq->dev.tmpbuf +
(last - rreq->dev.segment_first), rreq->dev.tmpbuf_off);
}
rreq->dev.segment_first = last;
}
MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3U_REQUEST_UNPACK_SRBUF);
return mpi_errno;
}
开发者ID:kleiter,项目名称:mpich,代码行数:67,代码来源:ch3u_request.c
示例18: MPID_nem_mxm_issend
/* Synchronous-mode nonblocking send (MPI_Issend) over the MXM netmod.
 *
 * Creates and initializes a send request, builds the MXM iov (directly
 * referencing the user buffer for contiguous data, or packing into a
 * temporary buffer for noncontiguous datatypes), and hands the request
 * to MXM with the ISEND_SYNC flag.  The request is returned through
 * *sreq_ptr; on the error path *sreq_ptr still receives the (possibly
 * NULL) request pointer.
 *
 * Returns MPI_SUCCESS or an MPI error code.
 */
int MPID_nem_mxm_issend(MPIDI_VC_t * vc, const void *buf, int count, MPI_Datatype datatype,
int rank, int tag, MPID_Comm * comm, int context_offset,
MPID_Request ** sreq_ptr)
{
int mpi_errno = MPI_SUCCESS;
MPID_Request *sreq = NULL;
MPID_Datatype *dt_ptr;
int dt_contig;
MPIDI_msg_sz_t data_sz;
MPI_Aint dt_true_lb;
MPID_nem_mxm_vc_area *vc_area = NULL;
MPID_nem_mxm_req_area *req_area = NULL;
MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MXM_ISSEND);
MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MXM_ISSEND);
MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
/* create a request */
MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
MPIU_Assert(sreq != NULL);
MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SEND);
MPIDI_VC_FAI_send_seqnum(vc, seqnum);
MPIDI_Request_set_seqnum(sreq, seqnum);
/* hold a reference on non-builtin datatypes for the request's lifetime */
if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) {
MPID_Datatype_get_ptr(datatype, sreq->dev.datatype_ptr);
MPID_Datatype_add_ref(sreq->dev.datatype_ptr);
}
sreq->partner_request = NULL;
sreq->dev.OnDataAvail = NULL;
sreq->dev.tmpbuf = NULL;
sreq->ch.vc = vc;
sreq->ch.noncontig = FALSE;
_dbg_mxm_output(5,
"isSend ========> Sending USER msg for req %p (context %d to %d tag %d size %d) \n",
sreq, comm->context_id + context_offset, rank, tag, data_sz);
vc_area = VC_BASE(vc);
req_area = REQ_BASE(sreq);
req_area-> ctx = sreq;
/* start with an empty iov */
req_area->iov_buf = req_area->tmp_buf;
req_area->iov_count = 0;
req_area->iov_buf[0].ptr = NULL;
req_area->iov_buf[0].length = 0;
if (data_sz) {
if (dt_contig) {
/* contiguous: point MXM directly at the user buffer */
req_area->iov_count = 1;
req_area->iov_buf[0].ptr = (char *) (buf) + dt_true_lb;
req_area->iov_buf[0].length = data_sz;
}
else {
/* noncontiguous: pack into a temporary buffer owned by the request */
MPIDI_msg_sz_t last;
MPI_Aint packsize = 0;
sreq->ch.noncontig = TRUE;
sreq->dev.segment_ptr = MPID_Segment_alloc();
MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER,
"**nomem", "**nomem %s", "MPID_Segment_alloc");
MPIR_Pack_size_impl(count, datatype, &packsize);
last = data_sz;
if (packsize > 0) {
sreq->dev.tmpbuf = MPIU_Malloc((size_t) packsize);
MPIU_Assert(sreq->dev.tmpbuf);
MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
MPID_Segment_pack(sreq->dev.segment_ptr, 0, &last, sreq->dev.tmpbuf);
req_area->iov_count = 1;
req_area->iov_buf[0].ptr = sreq->dev.tmpbuf;
req_area->iov_buf[0].length = last;
}
}
}
vc_area->pending_sends += 1;
/* ISEND_SYNC: MXM completes the request only after the receiver
   has matched it (MPI synchronous-send semantics) */
mpi_errno = _mxm_isend(vc_area->mxm_ep, req_area, MXM_MPICH_ISEND_SYNC,
(mxm_mq_h) comm->dev.ch.netmod_priv, comm->rank, tag, _mxm_tag_mpi2mxm(tag,
comm->context_id
+
context_offset),
0);
if (mpi_errno)
MPIU_ERR_POP(mpi_errno);
_dbg_mxm_out_req(sreq);
fn_exit:
*sreq_ptr = sreq;
MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MXM_ISSEND);
return mpi_errno;
fn_fail:
goto fn_exit;
}
开发者ID:mpifl,项目名称:mpich3newforfile,代码行数:97,代码来源:mxm_send.c
示例19: MPIDI_CH3_iStartMsg
int MPIDI_CH3_iStartMsg(MPIDI_VC_t * vc, void * hdr, MPIDI_msg_sz_t hdr_sz,
MPID_Request ** sreq_ptr)
{
MPID_Request * sreq = NULL;
MPIDI_CH3I_VC *vcch = &vc->ch;
int mpi_errno = MPI_SUCCESS;
MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_ISTARTMSG);
MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3_ISTARTMSG);
MPIU_Assert( hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
/* The SOCK channel uses a fixed length header, the size of which is the
maximum of all possible packet headers */
hdr_sz = sizeof(MPIDI_CH3_Pkt_t);
MPIU_DBG_STMT(CH3_CHANNEL,VERBOSE,
MPIDI_DBG_Print_packet((MPIDI_CH3_Pkt_t*)hdr));
if (vcch->state == MPIDI_CH3I_VC_STATE_CONNECTED) /* MT */
{
/* Connection already formed. If send queue is empty attempt to send
data, queuing any unsent data. */
if (MPIDI_CH3I_SendQ_empty(vcch)) /* MT */
{
MPIU_Size_t nb;
int rc;
MPIU_DBG_MSG(CH3_CHANNEL,VERBOSE,
"send queue empty, attempting to write");
MPIU_DBG_PKT(vcch->conn,hdr,"istartmsg");
/* MT: need some signalling to lock down our right to use the
channel, thus insuring that the progress engine does
not also try to write */
rc = MPIDU_Sock_write(vcch->sock, hdr, hdr_sz, &nb);
if (rc == MPI_SUCCESS)
{
MPIU_DBG_MSG_D(CH3_CHANNEL,VERBOSE,
"wrote %ld bytes", (unsigned long) nb);
if (nb == hdr_sz)
{
MPIU_DBG_MSG_D(CH3_CHANNEL,VERBOSE,
"entire write complete, " MPIDI_MSG_SZ_FMT " bytes", nb);
/* done. get us out of here as quickly as possible. */
}
else
{
MPIU_DBG_MSG_D(CH3_CHANNEL,VERBOSE,
"partial write of " MPIDI_MSG_SZ_FMT " bytes, request enqueued at head", nb);
sreq = create_request(hdr, hdr_sz, nb);
if (!sreq) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**nomem");
}
MPIDI_CH3I_SendQ_enqueue_head(vcch, sreq);
MPIU_DBG_MSG_FMT(CH3_CHANNEL,VERBOSE,
(MPIU_DBG_FDEST,"posting write, vc=0x%p, sreq=0x%08x", vc, sreq->handle));
vcch->conn->send_active = sreq;
mpi_errno = MPIDU_Sock_post_write(vcch->conn->sock, sreq->dev.iov[0].MPID_IOV_BUF,
sreq->dev.iov[0].MPID_IOV_LEN, sreq->dev.iov[0].MPID_IOV_LEN, NULL);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno != MPI_SUCCESS)
{
mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER,
"**ch3|sock|postwrite", "ch3|sock|postwrite %p %p %p",
sreq, vcch->conn, vc);
goto fn_fail;
}
/* --END ERROR HANDLING-- */
}
}
/* --BEGIN ERROR HANDLING-- */
else
{
MPIU_DBG_MSG_D(CH3_CHANNEL,TYPICAL,
"ERROR - MPIDU_Sock_write failed, rc=%d", rc);
sreq = MPID_Request_create();
if (!sreq) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**nomem");
}
sreq->kind = MPID_REQUEST_SEND;
MPID_cc_set(&(sreq->cc), 0);
sreq->status.MPI_ERROR = MPIR_Err_create_code( rc,
MPIR_ERR_RECOVERABLE, FCNAME, __LINE__,
MPI_ERR_INTERN, "**ch3|sock|writefailed",
"**ch3|sock|writefailed %d", rc );
/* Make sure that the caller sees this error */
mpi_errno = sreq->status.MPI_ERROR;
}
/* --END ERROR HANDLING-- */
}
else
{
MPIU_DBG_MSG(CH3_CHANNEL,VERBOSE,
"send in progress, request enqueued");
sreq = create_request(hdr, hdr_sz, 0);
if (!sreq) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**nomem");
}
//.........这里部分代码省略.........
开发者ID:adevress,项目名称:MPICH-BlueGene,代码行数:101,代码来源:ch3_istartmsg.c
示例20: MPIDI_Comm_spawn_multiple
int MPIDI_Comm_spawn_multiple(int count, char **commands,
char ***argvs, const int *maxprocs,
MPID_Info **info_ptrs, int root,
MPID_Comm *comm_ptr, MPID_Comm
**intercomm, int *errcodes)
{
char port_name[MPI_MAX_PORT_NAME];
int *info_keyval_sizes=0, i, mpi_errno=MPI_SUCCESS;
PMI_keyval_t **info_keyval_vectors=0, preput_keyval_vector;
int *pmi_errcodes = 0, pmi_errno;
int total_num_processes, should_accept = 1;
MPIDI_STATE_DECL(MPID_STATE_MPIDI_COMM_SPAWN_MULTIPLE);
MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_COMM_SPAWN_MULTIPLE);
if (comm_ptr->rank == root) {
/* create an array for the pmi error codes */
total_num_processes = 0;
for (i=0; i<count; i++) {
total_num_processes += maxprocs[i];
}
pmi_errcodes = (int*)MPIU_Malloc(sizeof(int) * total_num_processes);
if (pmi_errcodes == NULL) {
MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**nomem");
}
/* initialize them to 0 */
for (i=0; i<total_num_processes; i++)
pmi_errcodes[i] = 0;
/* Open a port for the spawned processes to connect to */
/* FIXME: info may be needed for port name */
mpi_errno = MPID_Open_port(NULL, port_name);
/* --BEGIN ERROR HANDLING-- */
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
/* --END ERROR HANDLING-- */
/* Spawn the processes */
#ifdef USE_PMI2_API
MPIU_Assert(count > 0);
{
int *argcs = MPIU_Malloc(count*sizeof(int));
struct MPID_Info preput;
struct MPID_Info *preput_p[1] = { &preput };
MPIU_Assert(argcs);
/*
info_keyval_sizes = MPIU_Malloc(count * sizeof(int));
*/
/* FIXME cheating on constness */
preput.key = (char *)PARENT_PORT_KVSKEY;
preput.value = port_name;
preput.next = NULL;
/* compute argcs array */
for (i = 0; i < count; ++i) {
argcs[i] = 0;
if (argvs != NULL && argvs[i] != NULL) {
while (argvs[i][argcs[i]]) {
++argcs[i];
}
}
/* a fib for now */
/*
info_keyval_sizes[i] = 0;
*/
}
/* XXX DJG don't need this, PMI API is thread-safe? */
/*MPIU_THREAD_CS_ENTER(PMI,);*/
/* release the global CS for spawn PMI calls */
MPIU_THREAD_CS_EXIT(ALLFUNC,);
pmi_errno = PMI2_Job_Spawn(count, (const char **)commands,
argcs, (const char ***)argvs,
maxprocs,
info_keyval_sizes, (const MPID_Info **)info_ptrs,
1, (const struct MPID_Info **)preput_p,
NULL, 0,
/*jobId, jobIdSize,*/ /* XXX DJG job stuff? */
pmi_errcodes);
MPIU_THREAD_CS_ENTER(ALLFUNC,);
/*MPIU_THREAD_CS_EXIT(PMI,);*/
MPIU_Free(argcs);
if (pmi_errno != PMI2_SUCCESS) {
MPIU_ERR_SETANDJUMP1(mpi_errno, MPI_ERR_OTHER,
"**pmi_spawn_multiple", "**pmi_spawn_multiple %d", pmi_errno);
}
}
#else
/* FIXME: This is *really* awkward. We should either
Fix on MPI-style info data structures for PMI (avoid unnecessary
duplication) or add an MPIU_Info_getall(...) that creates
the necessary arrays of key/value pairs */
/* convert the infos into PMI keyvals */
info_keyval_sizes = (int *) MPIU_Malloc(count * sizeof(int));
info_keyval_vectors =
(PMI_keyval_t**) MPIU_Malloc(count * sizeof(PMI_keyval_t*));
//.........这里部分代码省略.........
开发者ID:adevress,项目名称:MPICH-BlueGene,代码行数:101,代码来源:ch3u_comm_spawn_multiple.c
注:本文中的MPIDI_FUNC_ENTER函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论