
C++ MPI_Wait Function Code Examples


This article collects typical usage examples of the MPI_Wait function in C++. If you have been struggling with questions such as what MPI_Wait actually does, how to call it, and what real-world MPI_Wait code looks like, the curated examples below should help.



A total of 20 MPI_Wait code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
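Before diving into the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic pattern every example relies on: post a nonblocking MPI_Isend or MPI_Irecv, keep the MPI_Request it returns, and call MPI_Wait on that request to block until the operation completes and its buffer is safe to reuse.

/* Minimal MPI_Wait sketch: rank 0 sends one int to rank 1 with a
 * nonblocking send; rank 1 posts a nonblocking receive.  Both sides
 * block in MPI_Wait until their own request completes. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int rank, size, value = 0;
    MPI_Request request;
    MPI_Status  status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (size >= 2) {
        if (rank == 0) {
            value = 42;
            MPI_Isend(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &request);
            MPI_Wait(&request, &status);   /* send buffer reusable after this */
        } else if (rank == 1) {
            MPI_Irecv(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &request);
            MPI_Wait(&request, &status);   /* data guaranteed to be in 'value' */
            printf("rank 1 received %d\n", value);
        }
    }

    MPI_Finalize();
    return 0;
}

Compile with an MPI wrapper compiler (e.g. mpicc) and run with at least two ranks, for example: mpirun -np 2 ./a.out.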

Example 1: scr_swap_files_copy

static int scr_swap_files_copy(
  int have_outgoing, const char* file_send, scr_meta* meta_send, int rank_send, uLong* crc32_send,
  int have_incoming, const char* file_recv, scr_meta* meta_recv, int rank_recv, uLong* crc32_recv,
  MPI_Comm comm)
{
  int rc = SCR_SUCCESS;
  MPI_Request request[2];
  MPI_Status  status[2];

  /* allocate MPI send buffer */
  char *buf_send = NULL;
  if (have_outgoing) {
    buf_send = (char*) scr_align_malloc(scr_mpi_buf_size, scr_page_size);
    if (buf_send == NULL) {
      scr_abort(-1, "Allocating memory: malloc(%ld) errno=%d %s @ %s:%d",
              scr_mpi_buf_size, errno, strerror(errno), __FILE__, __LINE__
      );
      return SCR_FAILURE;
    }
  }

  /* allocate MPI recv buffer */
  char *buf_recv = NULL;
  if (have_incoming) {
    buf_recv = (char*) scr_align_malloc(scr_mpi_buf_size, scr_page_size);
    if (buf_recv == NULL) {
      scr_abort(-1, "Allocating memory: malloc(%ld) errno=%d %s @ %s:%d",
              scr_mpi_buf_size, errno, strerror(errno), __FILE__, __LINE__
      );
      return SCR_FAILURE;
    }
  }

  /* open the file to send: read-only mode */
  int fd_send = -1;
  if (have_outgoing) {
    fd_send = scr_open(file_send, O_RDONLY);
    if (fd_send < 0) {
      scr_abort(-1, "Opening file for send: scr_open(%s, O_RDONLY) errno=%d %s @ %s:%d",
              file_send, errno, strerror(errno), __FILE__, __LINE__
      );
    }
  }

  /* open the file to recv: truncate, write-only mode */
  int fd_recv = -1;
  if (have_incoming) {
    mode_t mode_file = scr_getmode(1, 1, 0);
    fd_recv = scr_open(file_recv, O_WRONLY | O_CREAT | O_TRUNC, mode_file);
    if (fd_recv < 0) {
      scr_abort(-1, "Opening file for recv: scr_open(%s, O_WRONLY | O_CREAT | O_TRUNC, ...) errno=%d %s @ %s:%d",
              file_recv, errno, strerror(errno), __FILE__, __LINE__
      );
    }
  }

  /* exchange file chunks */
  int nread, nwrite;
  int sending = 0;
  if (have_outgoing) {
    sending = 1;
  }
  int receiving = 0;
  if (have_incoming) {
    receiving = 1;
  }
  while (sending || receiving) {
    /* if we are still receiving a file, post a receive */
    if (receiving) {
      MPI_Irecv(buf_recv, scr_mpi_buf_size, MPI_BYTE, rank_recv, 0, comm, &request[0]);
    }

    /* if we are still sending a file, read a chunk, send it, and wait */
    if (sending) {
      nread = scr_read(file_send, fd_send, buf_send, scr_mpi_buf_size);
      if (scr_crc_on_copy && nread > 0) {
        *crc32_send = crc32(*crc32_send, (const Bytef*) buf_send, (uInt) nread);
      }
      if (nread < 0) {
        nread = 0;
      }
      MPI_Isend(buf_send, nread, MPI_BYTE, rank_send, 0, comm, &request[1]);
      MPI_Wait(&request[1], &status[1]);
      if (nread < scr_mpi_buf_size) {
        sending = 0;
      }
    }

    /* if we are still receiving a file,
     * wait on our receive to complete and write the data */
    if (receiving) {
      MPI_Wait(&request[0], &status[0]);
      MPI_Get_count(&status[0], MPI_BYTE, &nwrite);
      if (scr_crc_on_copy && nwrite > 0) {
        *crc32_recv = crc32(*crc32_recv, (const Bytef*) buf_recv, (uInt) nwrite);
      }
      scr_write(file_recv, fd_recv, buf_recv, nwrite);
      if (nwrite < scr_mpi_buf_size) {
        receiving = 0;
      }
//......... (part of the code omitted here) .........
Developer: c-a-h, Project: scr, Lines: 101, Source: scr_cache_rebuild.c


Example 2: defined


//......... (part of the code omitted here) .........
      log2N++;

      // XXX bail out if we don't have a power-of-two node count, 
      //     at least until we implement 3-2 reduction phases
      if ((N & 1) && (N > 1)) {
        Tcl_SetResult(interp, (char *) "parallel allreduce only allowed for even power-of-two node count", TCL_STATIC);
        return TCL_ERROR;
      }
    }
    N = app->par_size();

    // copy incoming data into initial "result" object
    Tcl_Obj *resultobj = Tcl_NewStringObj((const char *) argv[3], strlen(argv[3])+1);

    // An all-reduce tree with hypercube connectivity with 
    // log2(N) communication/reduction phases.  At each phase, we compute
    // the peer/destination node we will communicate with using an XOR of
    // our node ID with the current hypercube dimension.  If we have an
    // incomplete hypercube topology (e.g. non-power-of-two node count), 
    // we have to do special 3-2 communication rounds (not implemented yet).
    // The current implementation requires that all existing nodes 
    // participate, and that they contribute a valid data item.
    // If we wish to support reductions where a node may not contribute,
    // we would need to handle that similarly to a peer node that doesn't 
    // exist, but we would likely determine this during the parameter length
    // exchange step.
    int src=app->par_rank(); // src node is this node
    for (i=0; i<log2N; i++) {
      int mask = 1 << i;     // generate bitmask to use in the XOR
      int dest = src ^ mask; // XOR src node with bitmask to find dest node
      Tcl_Obj *oldresultobj = resultobj; // track old result 

      // Check to make sure dest node exists for non-power-of-two 
      // node counts (an incomplete hypercube).  If not, skip to the next
      // communication/reduction phase.
      if (dest < N) {
        char *sendbuf = Tcl_GetString(oldresultobj);
        int sendsz = strlen(sendbuf)+1;
        int recvsz = 0;
        MPI_Request handle;
        MPI_Status status;

        //
        // Exchange required receive buffer size for data exchange with peer
        //

        // Post non-blocking receive for data size
        MPI_Irecv(&recvsz, 1, MPI_INT, dest,
                  VMD_MPI_TAG_ALLREDUCE_ARGLENGTH, MPI_COMM_WORLD, &handle);

        // Post blocking send for data size
        MPI_Send(&sendsz, 1, MPI_INT, dest,
                 VMD_MPI_TAG_ALLREDUCE_ARGLENGTH, MPI_COMM_WORLD);

        // Wait for non-blocking receive of data size to complete
        MPI_Wait(&handle, &status); 

// printf("src[%d], dest[%d], value '%s', recvsz: %d\n", src, dest, sendbuf, recvsz);

        // Allocate or resize receive buffer 
        char * recvbuf = (char *) malloc(recvsz);

        //
        // Exchange the data payload
        // 

        // Post non-blocking receive for data
        MPI_Irecv(recvbuf, recvsz, MPI_BYTE, dest,
                  VMD_MPI_TAG_ALLREDUCE_PAYLOAD, MPI_COMM_WORLD, &handle);
        
        // Post blocking send for data
        MPI_Send(sendbuf, sendsz, MPI_BYTE, dest,
                 VMD_MPI_TAG_ALLREDUCE_PAYLOAD, MPI_COMM_WORLD);

        // Wait for receive of data
        MPI_Wait(&handle, &status); 

        // Perform the reduction operation on our existing and incoming data.
        // We build a Tcl command string with the user-defined proc, this 
        // node's previous result and the incoming data, and evaluate it.
        if (Tcl_VarEval(interp, argv[2], " ", 
                        sendbuf, " ", recvbuf, NULL) != TCL_OK) {
          printf("Error occured during reduction!\n");    
        }

        // Free the receive buffer
        free(recvbuf);

        // Prep for next reduction step.  Set result object to result of
        // the latest communication/reduction phase.
        resultobj = Tcl_GetObjResult(interp);
      }
    }

    // Set the final Tcl result if necessary
    Tcl_SetObjResult(interp, resultobj);

    return TCL_OK;
#endif
  }
Developer: tmd-gpat, Project: MOLding, Lines: 101, Source: cmd_parallel.C


Example 3: IMB_ialltoall_pure

void IMB_ialltoall_pure(struct comm_info* c_info,
                        int size,
                        struct iter_schedule* ITERATIONS,
                        MODES RUN_MODE,
                        double* time)
/*


                      MPI-NBC benchmark kernel
                      Benchmarks MPI_Ialltoall.



Input variables:

-c_info               (type struct comm_info*)
                      Collection of all base data for MPI;
                      see [1] for more information


-size                 (type int)
                      Basic message size in bytes

-ITERATIONS           (type struct iter_schedule *)
                      Repetition scheduling

-RUN_MODE             (type MODES)
                      (only MPI-2 case: see [1])


Output variables:

-time                 (type double*)
                      Timing result per sample


*/
{
    int         i = 0;
    Type_Size   s_size,
                r_size;
    int         s_num = 0,
                r_num;
    MPI_Request request;
    MPI_Status  status;
    double      t_pure = 0.;

#ifdef CHECK
    defect=0.;
#endif
    ierr = 0;

    /* GET SIZE OF DATA TYPE */
    MPI_Type_size(c_info->s_data_type, &s_size);
    MPI_Type_size(c_info->r_data_type, &r_size);
    if ((s_size != 0) && (r_size != 0)) {
        s_num = size / s_size;
        r_num = size / r_size;
    }

    if(c_info->rank != -1) {
        for (i = 0; i < N_BARR; i++) {
            MPI_Barrier(c_info->communicator);
        }

        t_pure = MPI_Wtime();
        for(i = 0; i < ITERATIONS->n_sample; i++)
        {
            ierr = MPI_Ialltoall((char*)c_info->s_buffer + i % ITERATIONS->s_cache_iter * ITERATIONS->s_offs,
                                 s_num,
                                 c_info->s_data_type,
                                 (char*)c_info->r_buffer + i % ITERATIONS->r_cache_iter * ITERATIONS->r_offs,
                                 r_num,
                                 c_info->r_data_type,
                                 c_info->communicator,
                                 &request);
            MPI_ERRHAND(ierr);
            MPI_Wait(&request, &status);
            CHK_DIFF("Ialltoall_pure", c_info,
                     (char*)c_info->r_buffer + i % ITERATIONS->r_cache_iter * ITERATIONS->r_offs,
                     ((size_t)c_info->rank * (size_t) size), 0, ((size_t)c_info->num_procs * (size_t)size),
                     1, put, 0, ITERATIONS->n_sample, i, -2, &defect);
        }
        t_pure = (MPI_Wtime() - t_pure) / ITERATIONS->n_sample;
    }

    time[0] = t_pure;
}
Developer: Cai900205, Project: test, Lines: 88, Source: IMB_alltoall.c


Example 4: main

int main( int argc, char *argv[] )
{

    double       sbuf[20000];
#ifdef FOO
    double rbuf[20000];
#endif
    int          rank;
    int          n, flag, size;
    int          err = 0;
    int          verbose = 0;
    MPI_Status   status;
    MPI_Request  req;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    if (size < 2) {
	printf( "Cancel test requires at least 2 processes\n" );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    /* Short Message Test */
    n = 200;

    if (rank == 1) { /* begin if rank = 1 */
	MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req );
	MPI_Cancel(&req); 
	MPI_Wait(&req, &status);
	MPI_Test_cancelled(&status, &flag);
	if (!flag) {
	    err++;
	    printf( "Cancelling a short message failed where it should succeed.\n" );
	}
	else if (verbose)
	{
	    printf("Cancelling a short message succeeded.\n");
	}
    }  /* end if rank == 1 */

#ifdef FOO
/* Note that MPI-2 specifies that status.MPI_ERROR is only set by
   multiple completion (e.g., MPI_Waitsome) and not by test_cancelled.
*/
    MPI_Barrier(MPI_COMM_WORLD); 

    if (rank == 0) {  /* begin if rank == 0 */
	MPI_Recv( rbuf, n, MPI_DOUBLE, 1, 1, MPI_COMM_WORLD, &status);
    }  /* end if rank = 0 */
    else if (rank == 1) { /* begin if rank = 1 */
	MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req );
	MPI_Cancel(&req); 
	MPI_Wait(&req, &status);
	MPI_Test_cancelled(&status, &flag);
	if (!flag && status.MPI_ERROR != MPI_SUCCESS) {
	    err++;
	    printf( "Cancel of a send returned an error in the status field.\n" );
	}
	  /* end if status.MPI_ERROR */
    }  /* end if rank == 1 */
#endif

    MPI_Barrier(MPI_COMM_WORLD);

    /* Eager Message Test */
    n = 3000;

    if (rank == 1) { /* begin if rank = 1 */
	MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req );
	MPI_Cancel(&req);
	MPI_Wait(&req, &status);
	MPI_Test_cancelled(&status, &flag);
	if (!flag) {
	    err++;
	    printf( "Cancelling an eager message (3000 doubles) failed where it should succeed.\n" );
	}
	else if (verbose)
	{
	    printf("Cancelling an eager message (3000 doubles) succeeded.\n");
	}
    }  /* end if rank == 1 */

#ifdef FOO
    MPI_Barrier(MPI_COMM_WORLD); 

    if (rank == 0) {  /* begin if rank == 0 */
	MPI_Irecv(rbuf, n, MPI_DOUBLE, 1, 1, MPI_COMM_WORLD, &req );
	MPI_Wait( &req, &status);
    }  /* end if rank = 0 */
    else if (rank == 1) { /* begin if rank = 1 */
	MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req );
	MPI_Cancel(&req);
	MPI_Wait(&req, &status);
	MPI_Test_cancelled(&status, &flag);
	if (!flag && status.MPI_ERROR != MPI_SUCCESS) {
	    err++;
	    printf( "Cancel of a send returned an error in the status field.\n" );
	}
	/* end if status.MPI_ERROR */
//......... (part of the code omitted here) .........
Developer: Shurakai, Project: SimGrid, Lines: 101, Source: cancelmessages.c


Example 5: xattr_basic_test

static int xattr_basic_test(int ea_name_size, int ea_value_size)
{
	int ret, fd = -1;
	int sub_testno = 1;
	char dest[PATH_MAX];

	MPI_Request request;
	MPI_Status  status;

	unsigned long i, j;

	xattr_name_sz = ea_name_size;
	xattr_value_sz = ea_value_size;


	snprintf(orig_path, PATH_MAX, "%s/multi_original_xattr_refile",
		 workplace);
	snprintf(dest, PATH_MAX, "%s_target", orig_path);

	root_printf("  *SubTest %d: Prep original inode.\n", sub_testno++);

	if (!rank) {

		ret = prep_orig_file(orig_path, file_size, 1);

		should_exit(ret);
	}

	MPI_Barrier_Sync();

	root_printf("  *SubTest %d: Prep %ld xattr name list among nodes.\n",
		    sub_testno++, xattr_nums);

	for (i = 0; i < xattr_nums; i++) {

		memset(xattr_name, 0, xattr_name_sz + 1);
		memset(xattr_value, 0, xattr_value_sz);
		memset(xattr_value_get, 0, xattr_value_sz);

		if (!rank) {

			xattr_name_generator(i, USER, xattr_name_sz,
					     xattr_name_sz);
			strcpy(xattr_name_list_set[i], xattr_name);

			for (j = 1; j < size; j++) {

				ret = MPI_Isend(xattr_name, xattr_name_sz + 1,
						MPI_BYTE, j, 1, MPI_COMM_WORLD,
						&request);
				if (ret != MPI_SUCCESS)
					abort_printf("MPI_Isend failed: %d\n",
						     ret);

				MPI_Wait(&request, &status);
			}

		} else {

			ret = MPI_Irecv(xattr_name, xattr_name_sz + 1, MPI_BYTE,
					0, 1, MPI_COMM_WORLD, &request);
			if (ret != MPI_SUCCESS)
				abort_printf("MPI_Irecv failed: %d\n", ret);

			MPI_Wait(&request, &status);
			strcpy(xattr_name_list_set[i], xattr_name);

		}

	}

	MPI_Barrier_Sync();

	root_printf("  *SubTest %d: Prep original inode with %ld EAs.\n",
		    sub_testno++, xattr_nums);

	if (!rank) {

		fd = open64(orig_path, open_rw_flags);

		for (i = 0; i < xattr_nums; i++) {

			strcpy(xattr_name, xattr_name_list_set[i]);
			xattr_value_constructor(i);
			ret = add_or_update_ea(NORMAL, fd, XATTR_CREATE, "add");
			should_exit(ret);
			ret = read_ea(NORMAL, fd);
			should_exit(ret);
			ret = xattr_value_validator(i);
			should_exit(ret);

		}

		ret = do_reflinks(orig_path, orig_path, ref_counts, 0);
		should_exit(ret);
		ret = reflink(orig_path, dest, 1);
		should_exit(ret);

	}

//......... (part of the code omitted here) .........
Developer: goldwynr, Project: ocfs2-test, Lines: 101, Source: multi_reflink_test.c


Example 6: main

int main(int argc, char *argv[])
{
	int rank, nprocs;
	int i, token1, token2, tag1 = 11, tag2 = 22, rounds, flag1, flag2;
	int left, right;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	
	MPI_Status status1, status2;
	MPI_Request reqs1[2], reqs2[2];
	
	if ((rounds = atoi(argv[1])) <= 0) {
			if (rank == 0)
				fprintf(stderr, "invalid rounds: %s\n", argv[1]);
			MPI_Finalize();
			return 0;
        }
	
	token1 = 0;
	token2 = nprocs * rounds;

    right = (rank + 1) % nprocs;
    left = (nprocs + rank - 1) % nprocs;
	
    /* Process with rank 0 sends the first token */
    if (rank == 0)
	{
		MPI_Isend(&token1, 1, MPI_INT, right, tag1, MPI_COMM_WORLD, &reqs1[1]);MPI_Wait(&reqs1[1],&status1);
		MPI_Isend(&token2, 1, MPI_INT, left, tag2, MPI_COMM_WORLD, &reqs1[0]);MPI_Wait(&reqs1[0],&status2);
	}

	for (i= 0; i < rounds; i++)
	{
		MPI_Irecv(&token2, 1, MPI_INT, right, tag2, MPI_COMM_WORLD, &reqs2[0]);
		MPI_Irecv(&token1, 1, MPI_INT, left, tag1, MPI_COMM_WORLD, &reqs2[1]);

		/* poll until the counter-clockwise token (token2) has arrived */
		MPI_Test(&reqs2[0], &flag2, &status2);
		while (flag2 == 0) {
			MPI_Test(&reqs2[0], &flag2, &status2);
		}
		
		printf("Rank %3d: received token1 %2d from rank %3d. (counter clock-wise)\n", rank, token2, right);
		token2 = token2 - 1;
		
		//sleep(1); /* To see the output printed one by one simulating message passing */
		
		if (((rank != 0) || (i < (rounds - 1))) )
		{
			/* Only send it on if we are not rank 0, or there are more rounds to go */
			MPI_Isend(&token2, 1, MPI_INT, left, tag2, MPI_COMM_WORLD, &reqs1[0]);MPI_Wait(&reqs1[0],&status2);
		}
		
		
		
		MPI_Test(&reqs2[1], &flag1, &status1);
		while (flag1 == 0) {
			MPI_Test(&reqs2[1], &flag1, &status1);
		}
		
		printf("Rank %3d: received token1 %2d from rank %3d. (clock-wise)\n", rank, token1, left);
		token1 = token1 + 1;
		
		//sleep(1); /* To see the output printed one by one simulating message passing */
		
		if (((rank != 0) || (i < (rounds - 1))) )
		{
			/* Only send it on if we are not rank 0, or there are more rounds to go */
			MPI_Isend(&token1, 1, MPI_INT, right, tag1, MPI_COMM_WORLD, &reqs1[1]);MPI_Wait(&reqs1[1],&status1);
		}
	}
	
	MPI_Finalize();
	return 0;
}
Developer: oplol, Project: univ-code, Lines: 77, Source: aosproj2_1.c


Example 7: Output_Charge_Density


//......... (part of the code omitted here) .........
	  }
        }

        MPI_Isend(&tmp_array0[0], Num_Snd_Grid1[IDS]*Ngrid2*Ngrid3,
                  MPI_DOUBLE, IDS, tag, mpi_comm_level1, &request);
      }

      /* Recv */
      if (Num_Rcv_Grid1[IDR]!=0){

        tmp_array1 = (double*)malloc(sizeof(double)*Num_Rcv_Grid1[IDR]*Ngrid2*Ngrid3); 

        MPI_Recv(&tmp_array1[0], Num_Rcv_Grid1[IDR]*Ngrid2*Ngrid3,
                MPI_DOUBLE, IDR, tag, mpi_comm_level1, &stat);

        for (i=0; i<Num_Rcv_Grid1[IDR]; i++){ 
	  n1 = Rcv_Grid1[IDR][i];
          nn1 = My_Cell0[n1];
          nn0 = n1 - Start_Grid1[myid];
          MN0 = i*Ngrid2*Ngrid3;
          for (n2=0; n2<Ngrid2; n2++){
            MN2 = n2*Ngrid3;
	    for (n3=0; n3<Ngrid3; n3++){
	      MN = MN0 + MN2 + n3;
	      V[k][nn0][MN2+n3] = tmp_array1[MN];
	    }
	  }
        }

        free(tmp_array1);
      }

      if (Num_Snd_Grid1[IDS]!=0){
        MPI_Wait(&request,&stat);
        free(tmp_array0);
      }

    }

    /* use own densities */
    for (n1=Start_Grid1[myid]; n1<=End_Grid1[myid]; n1++){
      nn1 = My_Cell0[n1];
      nn0 = n1 - Start_Grid1[myid]; 
      if (nn1!=-1){
        MN1 = nn1*Ngrid2*Ngrid3;
        for (n2=0; n2<Ngrid2; n2++){
          MN2 = n2*Ngrid3;

          if (k<=1){
	    for (n3=0; n3<Ngrid3; n3++){
	      MN = MN1 + MN2 + n3;
	      V[k][nn0][MN2+n3] = Density_Grid[k][MN] - 0.5*ADensity_Grid[MN];
	    }    
	  }
          else {
	    for (n3=0; n3<Ngrid3; n3++){
	      MN = MN1 + MN2 + n3;
	      V[k][nn0][MN2+n3] = Density_Grid[k][MN];
	    }    
	  }
        }    
      }
    }

  } /* mul */
Developer: certik, Project: openmx, Lines: 66, Source: RestartFileDFT.c


Example 8: pzgstrs


//......... (part of the code omitted here) .........
			       iam, x[ii-XK_H], pi);
#endif
		    }
		}
		/*
		 * Perform local block modifications: lsum[i] -= L_i,k * X[k]
		 */
		nb = lsub[0] - 1;
		lptr = BC_HEADER + LB_DESCRIPTOR + knsupc;
		luptr = knsupc; /* Skip diagonal block L(k,k). */
		
		zlsum_fmod(lsum, x, &x[ii], rtemp, nrhs, knsupc, k,
			   fmod, nb, lptr, luptr, xsup, grid, Llu, 
			   send_req, stat);
	    }
	} /* if diagonal process ... */
    } /* for k ... */

    /* -----------------------------------------------------------
       Compute the internal nodes asynchronously by all processes.
       ----------------------------------------------------------- */
#if ( DEBUGlevel>=2 )
    printf("(%2d) nfrecvx %4d,  nfrecvmod %4d,  nleaf %4d\n",
	   iam, nfrecvx, nfrecvmod, nleaf);
#endif

    while ( nfrecvx || nfrecvmod ) { /* While not finished. */

	/* Receive a message. */
#ifdef ISEND_IRECV
	/* -MPI- FATAL: Remote protocol queue full */
	MPI_Irecv( recvbuf, maxrecvsz, SuperLU_MPI_DOUBLE_COMPLEX,
                 MPI_ANY_SOURCE, MPI_ANY_TAG, grid->comm, &recv_req );
	MPI_Wait( &recv_req, &status );
#else
	MPI_Recv( recvbuf, maxrecvsz, SuperLU_MPI_DOUBLE_COMPLEX,
                  MPI_ANY_SOURCE, MPI_ANY_TAG, grid->comm, &status );
#endif

        k = (*recvbuf).r;

#if ( DEBUGlevel>=2 )
	printf("(%2d) Recv'd block %d, tag %2d\n", iam, k, status.MPI_TAG);
#endif
	
	switch ( status.MPI_TAG ) {
	  case Xk:
	      --nfrecvx;
	      lk = LBj( k, grid ); /* Local block number, column-wise. */
	      lsub = Lrowind_bc_ptr[lk];
	      lusup = Lnzval_bc_ptr[lk];
	      if ( lsub ) {
		  nb   = lsub[0];
		  lptr = BC_HEADER;
		  luptr = 0;
		  knsupc = SuperSize( k );

		  /*
		   * Perform local block modifications: lsum[i] -= L_i,k * X[k]
		   */
		  zlsum_fmod(lsum, x, &recvbuf[XK_H], rtemp, nrhs, knsupc, k,
			     fmod, nb, lptr, luptr, xsup, grid, Llu, 
			     send_req, stat);
	      } /* if lsub */

	      break;
Developer: Chang-Liu-0520, Project: hypre, Lines: 67, Source: pzgstrs.c


Example 9: main


//......... (part of the code omitted here) .........
#endif

    Master = (rank == 0);	

    if(Master && verbose)
	printf("Size (bytes)\n------------\n");
    for(msglen = msglen_min; msglen <= msglen_max; msglen *= 2) {

	sendbuf = malloc(msglen);
	recvbuf = malloc(msglen);
	if(sendbuf == NULL || recvbuf == NULL) {
	    printf("Can't allocate %d bytes\n",msglen);
	    MPI_Abort( MPI_COMM_WORLD, 1 );
	}

	ival = 0;
	for (i=0; i<msglen; i++) {
	    sendbuf[i] = ival++;
	    recvbuf[i] = 0;
	}


	if(Master && verbose) 
	    printf("%d\n",msglen);
	fflush(stdout);

	MPI_Barrier(MPI_COMM_WORLD);
		
	/* Send/Recv */
	if(Master) 
	    MPI_Send(sendbuf,msglen,MPI_CHAR,1,TAG1,MPI_COMM_WORLD);
	else {
	    Resetbuf( recvbuf, msglen );
	    MPI_Recv(recvbuf,msglen,MPI_CHAR,0,TAG1,MPI_COMM_WORLD,&status);
	    Checkbuf( recvbuf, msglen, &status );
	}

	MPI_Barrier(MPI_COMM_WORLD);

	/* Ssend/Recv */
	if(Master) 
	    MPI_Ssend(sendbuf,msglen,MPI_CHAR,1,TAG2,MPI_COMM_WORLD);
	else {
	    Resetbuf( recvbuf, msglen );
	    MPI_Recv(recvbuf,msglen,MPI_CHAR,0,TAG2,MPI_COMM_WORLD,&status);
	    Checkbuf( recvbuf, msglen, &status );
	}

	MPI_Barrier(MPI_COMM_WORLD);
		
	/* Rsend/Recv */
	if (Master) {
	    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, 1, TAGSR,
			  MPI_BOTTOM, 0, MPI_INT, 1, TAGSR,
			  MPI_COMM_WORLD, &status );
	    MPI_Rsend( sendbuf,msglen,MPI_CHAR,1,TAG3,MPI_COMM_WORLD );
	}
	else {
	    Resetbuf( recvbuf, msglen );
	    MPI_Irecv( recvbuf,msglen,MPI_CHAR,0,TAG3,MPI_COMM_WORLD,&request);
	    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, 0, TAGSR,
			  MPI_BOTTOM, 0, MPI_INT, 0, TAGSR,
			  MPI_COMM_WORLD, &status );
	    MPI_Wait( &request, &status );
	    Checkbuf( recvbuf, msglen, &status );
	}
	    
	MPI_Barrier(MPI_COMM_WORLD);

	/* Isend/Recv - receive not ready */
	if(Master) {
	    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, 1, TAGSR,
			  MPI_BOTTOM, 0, MPI_INT, 1, TAGSR,
			  MPI_COMM_WORLD, &status );
	    MPI_Isend(sendbuf,msglen,MPI_CHAR,1,TAG4,MPI_COMM_WORLD, &request);
	    MPI_Wait( &request, &status );
	}
	else {
	    Resetbuf( recvbuf, msglen );
	    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, 0, TAGSR,
			  MPI_BOTTOM, 0, MPI_INT, 0, TAGSR,
			  MPI_COMM_WORLD, &status );
	    MPI_Recv(recvbuf,msglen,MPI_CHAR,0,TAG4,MPI_COMM_WORLD,&status);
	    Checkbuf( recvbuf, msglen, &status );
	}

	MPI_Barrier(MPI_COMM_WORLD);

	free(sendbuf);
	free(recvbuf);
    }

    if (rank == 0) {
	/* If we do not abort, we saw no errors */
	printf( " No Errors\n" );
    }

    MPI_Finalize();
    return 0;
}
Developer: davidheryanto, Project: sc14, Lines: 101, Source: longmsgs.c


Example 10: main


//......... (part of the code omitted here) .........
  bail_out(error);
  right_buf_in   = right_buf_out +   RADIUS*height;
  left_buf_out   = right_buf_out + 2*RADIUS*height;
  left_buf_in    = right_buf_out + 3*RADIUS*height;

  for (iter = 0; iter<=iterations; iter++){

    /* start timer after a warmup iteration */
    if (iter == 1) {
      MPI_Barrier(MPI_COMM_WORLD);
      local_stencil_time = wtime();
    }

    /* need to fetch ghost point data from neighbors in y-direction                 */
    if (my_IDy < Num_procsy-1) {
      MPI_Irecv(top_buf_in, RADIUS*width, MPI_DTYPE, top_nbr, 101,
                MPI_COMM_WORLD, &(request[1]));
      for (kk=0,j=jend-RADIUS+1; j<=jend; j++) for (i=istart; i<=iend; i++) {
          top_buf_out[kk++]= IN(i,j);
      }
      MPI_Isend(top_buf_out, RADIUS*width,MPI_DTYPE, top_nbr, 99,
                MPI_COMM_WORLD, &(request[0]));
    }
    if (my_IDy > 0) {
      MPI_Irecv(bottom_buf_in,RADIUS*width, MPI_DTYPE, bottom_nbr, 99,
                MPI_COMM_WORLD, &(request[3]));
      for (kk=0,j=jstart; j<=jstart+RADIUS-1; j++) for (i=istart; i<=iend; i++) {
          bottom_buf_out[kk++]= IN(i,j);
      }
      MPI_Isend(bottom_buf_out, RADIUS*width,MPI_DTYPE, bottom_nbr, 101,
                MPI_COMM_WORLD, &(request[2]));
    }
    if (my_IDy < Num_procsy-1) {
      MPI_Wait(&(request[0]), MPI_STATUS_IGNORE);
      MPI_Wait(&(request[1]), MPI_STATUS_IGNORE);
      for (kk=0,j=jend+1; j<=jend+RADIUS; j++) for (i=istart; i<=iend; i++) {
          IN(i,j) = top_buf_in[kk++];
      }
    }
    if (my_IDy > 0) {
      MPI_Wait(&(request[2]), MPI_STATUS_IGNORE);
      MPI_Wait(&(request[3]), MPI_STATUS_IGNORE);
      for (kk=0,j=jstart-RADIUS; j<=jstart-1; j++) for (i=istart; i<=iend; i++) {
          IN(i,j) = bottom_buf_in[kk++];
      }
    }

    /* need to fetch ghost point data from neighbors in x-direction                 */
    if (my_IDx < Num_procsx-1) {
      MPI_Irecv(right_buf_in, RADIUS*height, MPI_DTYPE, right_nbr, 1010,
                MPI_COMM_WORLD, &(request[1+4]));
      for (kk=0,j=jstart; j<=jend; j++) for (i=iend-RADIUS+1; i<=iend; i++) {
          right_buf_out[kk++]= IN(i,j);
      }
      MPI_Isend(right_buf_out, RADIUS*height, MPI_DTYPE, right_nbr, 990,
              MPI_COMM_WORLD, &(request[0+4]));
    }
    if (my_IDx > 0) {
      MPI_Irecv(left_buf_in, RADIUS*height, MPI_DTYPE, left_nbr, 990,
                MPI_COMM_WORLD, &(request[3+4]));
      for (kk=0,j=jstart; j<=jend; j++) for (i=istart; i<=istart+RADIUS-1; i++) {
          left_buf_out[kk++]= IN(i,j);
      }
      MPI_Isend(left_buf_out, RADIUS*height, MPI_DTYPE, left_nbr, 1010,
                MPI_COMM_WORLD, &(request[2+4]));
    }
Developer: elliottslaughter, Project: Kernels, Lines: 67, Source: stencil.c


Example 11: main

int main(int argc, char *argv[])
{
    int rank, nproc, i;
    int errors = 0, all_errors = 0;
    int *buf = NULL, *winbuf = NULL;
    MPI_Win window;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    if (nproc < 2) {
        if (rank == 0)
            printf("Error: must be run with two or more processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Alloc_mem(MAX_SIZE * sizeof(int), MPI_INFO_NULL, &buf);
    MPI_Alloc_mem(MAX_SIZE * sizeof(int), MPI_INFO_NULL, &winbuf);
    MPI_Win_create(winbuf, MAX_SIZE * sizeof(int), sizeof(int), MPI_INFO_NULL,
                   MPI_COMM_WORLD, &window);

    MPI_Win_lock_all(0, window);

    /* Test Raccumulate local completion with small data.
     * Small data is always copied to header packet as immediate data. */
    if (rank == 1) {
        for (i = 0; i < ITER; i++) {
            MPI_Request acc_req;
            int val = -1;

            buf[0] = rank * i;
            MPI_Raccumulate(&buf[0], 1, MPI_INT, 0, 0, 1, MPI_INT, MPI_MAX, window, &acc_req);
            MPI_Wait(&acc_req, MPI_STATUS_IGNORE);

            /* reset local buffer to check local completion */
            buf[0] = 0;
            MPI_Win_flush(0, window);

            MPI_Get(&val, 1, MPI_INT, 0, 0, 1, MPI_INT, window);
            MPI_Win_flush(0, window);

            if (val != rank * i) {
                printf("%d - Got %d in small Raccumulate test, expected %d (%d * %d)\n", rank, val,
                       rank * i, rank, i);
                errors++;
            }
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);

    /* Test Raccumulate local completion with large data .
     * Large data is not suitable for 1-copy optimization, and always sent out
     * from user buffer. */
    if (rank == 1) {
        for (i = 0; i < ITER; i++) {
            MPI_Request acc_req;
            int val0 = -1, val1 = -1, val2 = -1;
            int j;

            /* initialize data */
            for (j = 0; j < MAX_SIZE; j++) {
                buf[j] = rank + j + i;
            }

            MPI_Raccumulate(buf, MAX_SIZE, MPI_INT, 0, 0, MAX_SIZE, MPI_INT, MPI_REPLACE, window,
                            &acc_req);
            MPI_Wait(&acc_req, MPI_STATUS_IGNORE);

            /* reset local buffer to check local completion */
            buf[0] = 0;
            buf[MAX_SIZE - 1] = 0;
            buf[MAX_SIZE / 2] = 0;
            MPI_Win_flush(0, window);

            /* get remote values which are modified in local buffer after wait */
            MPI_Get(&val0, 1, MPI_INT, 0, 0, 1, MPI_INT, window);
            MPI_Get(&val1, 1, MPI_INT, 0, MAX_SIZE - 1, 1, MPI_INT, window);
            MPI_Get(&val2, 1, MPI_INT, 0, MAX_SIZE / 2, 1, MPI_INT, window);
            MPI_Win_flush(0, window);

            if (val0 != rank + i) {
                printf("%d - Got %d in large Raccumulate test, expected %d\n", rank,
                       val0, rank + i);
                errors++;
            }
            if (val1 != rank + MAX_SIZE - 1 + i) {
                printf("%d - Got %d in large Raccumulate test, expected %d\n", rank,
                       val1, rank + MAX_SIZE - 1 + i);
                errors++;
            }
            if (val2 != rank + MAX_SIZE / 2 + i) {
                printf("%d - Got %d in large Raccumulate test, expected %d\n", rank,
                       val2, rank + MAX_SIZE / 2 + i);
                errors++;
            }
        }
    }

//......... (part of the code omitted here) .........
Developer: NexMirror, Project: MPICH, Lines: 101, Source: racc_local_comp.c


Example 12: MP_quit

/* MP_quit disconnects current node from MP-System:
 * Parameters:
 *     IN isError - error number, 0 if normal exit
 * Returns: Bool: success (1) or failure (0)
 *
 * MPI Version: MPI requires that all sent messages must be received
 * before quitting. Receive or cancel all pending messages (using msg.
 * count), then quit from MPI.
 */
rtsBool MP_quit(int isError) {
    StgWord data[2];
    MPI_Request sysRequest2;

    data[0] = PP_FINISH;
    data[1] = isError;

    if (IAmMainThread) {
        int i;

        IF_PAR_DEBUG(mpcomm,
                     debugBelch("Main PE stopping MPI system (exit code: %d)\n",
                                isError));

        // bcast FINISH to other PEs
        for (i=2; i<=(int)nPEs; i++) {
            // synchronous send operation in order 2..nPEs ... might slow down.
            MPI_Isend(&pingMessage, 1, MPI_INT, i-1, PP_FINISH,
                      sysComm, &sysRequest2);
            MPI_Send(data,2*sizeof(StgWord),MPI_BYTE,i-1, PP_FINISH, MPI_COMM_WORLD);
            MPI_Wait(&sysRequest2, MPI_STATUS_IGNORE);
        }

        // receive answers from all children (just counting)
        while (finishRecvd < mpiWorldSize-1) {
            MPI_Recv(data, 2*sizeof(StgWord), MPI_BYTE, MPI_ANY_SOURCE, PP_FINISH,
                     MPI_COMM_WORLD, &status);
            ASSERT(status.MPI_TAG == PP_FINISH);
            // and receive corresponding sysComm ping:
            MPI_Recv(&pingMessage, 1, MPI_INT, status.MPI_SOURCE, PP_FINISH,
                     sysComm, MPI_STATUS_IGNORE);
            IF_PAR_DEBUG(mpcomm,
                         debugBelch("Received FINISH reply from %d\n",
                                    status.MPI_SOURCE));
            finishRecvd++;
        }

    } else {

        IF_PAR_DEBUG(mpcomm,
                     debugBelch("Non-main PE stopping MPI system (exit code: %d)\n",
                                isError));
        // send FINISH to rank 0
        MPI_Isend(&pingMessage, 1, MPI_INT, 0, PP_FINISH, sysComm, &sysRequest2);
        MPI_Send(data, 2*sizeof(StgWord), MPI_BYTE, 0, PP_FINISH, MPI_COMM_WORLD);
        // can omit: MPI_Wait(&sysRequest2, MPI_STATUS_IGNORE);

        // if non-main PE terminates first, await answer
        if (finishRecvd < 1) {
            MPI_Recv(data, 2*sizeof(StgWord), MPI_BYTE, 0, PP_FINISH,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Recv(&pingMessage, 1, MPI_INT, 0, PP_FINISH,
                     sysComm, MPI_STATUS_IGNORE);
            finishRecvd++;
        }
    }

    // TODO: receive or cancel all pending messages...
    /* ------------------------------------------------
     *q&d solution:
     * receive anything retrievable by MPI_Probe
     * then get in sync
     * then again receive remaining messages
     *
     * (since buffering is used, and buffers are detached to force
     * messages, a PE might get stuck detaching its mpiMsgBuffer, and
     * send another message as soon as buffer space is available again.
     * The other PEs will not )
     *
     * ---------------------------------------------- */
    {
        // allocate fresh buffer to avoid overflow
        void* voidbuffer;
        int voidsize;

        // we might come here because of requesting too much buffer (bug!)
        voidsize = (INT_MAX / sizeof(StgWord) < DATASPACEWORDS)?\
                   INT_MAX : DATASPACEWORDS * sizeof(StgWord);

        voidbuffer = (void*)
                     stgMallocBytes(voidsize, "voidBuffer");

        // receive whatever is out there...
        while (MP_probe()) {
            MPI_Recv(voidbuffer, voidsize, MPI_BYTE,
                     MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
            if (ISSYSCODE(status.MPI_TAG))
                MPI_Recv(voidbuffer, 1, MPI_INT,
                         MPI_ANY_SOURCE, MPI_ANY_TAG,
                         sysComm, MPI_STATUS_IGNORE);
        }
//......... (part of the code omitted here) .........
Developer: jberthold, Project: ghc, Lines: 101, Source: MPIComm.c


Example 13: halo_communication

/** Perform communication according to the parallelization scheme
 *  described by the halo communicator
 * @param hc halo communicator describing the parallelization scheme
 */
void halo_communication(HaloCommunicator *hc, void *base) {
  int n, comm_type, s_node, r_node;
  void *s_buffer, *r_buffer ;

  Fieldtype fieldtype;
  MPI_Datatype datatype;
  MPI_Request request;
  MPI_Status status;

    HALO_TRACE(fprintf(stderr, "%d: halo_comm base=%p num=%d\n", this_node, base, hc->num)) ;

    for (n = 0; n < hc->num; n++) {

	HALO_TRACE(fprintf(stderr, "%d: halo_comm round %d\n", this_node, n)) ;

	comm_type = hc->halo_info[n].type ;
	s_buffer = (char *)base + hc->halo_info[n].s_offset;
	r_buffer = (char *)base + hc->halo_info[n].r_offset;

	switch (comm_type) {

	    case HALO_LOCL:
	      fieldtype = hc->halo_info[n].fieldtype;
	      halo_dtcopy(r_buffer,s_buffer,1,fieldtype);
	      break ;

	    case HALO_SENDRECV:
	      datatype = hc->halo_info[n].datatype;
	      s_node = hc->halo_info[n].source_node ;
	      r_node = hc->halo_info[n].dest_node ;
	      
	      HALO_TRACE(fprintf(stderr,"%d: halo_comm sendrecv %d to %d (%d) (%p)\n",this_node,s_node,r_node,REQ_HALO_SPREAD,&datatype));

	      MPI_Sendrecv(s_buffer, 1, datatype, r_node, REQ_HALO_SPREAD,
			   r_buffer, 1, datatype, s_node, REQ_HALO_SPREAD,
			   MPI_COMM_WORLD, &status);
	      break ;

	    case HALO_SEND:
	      datatype = hc->halo_info[n].datatype;
	      fieldtype = hc->halo_info[n].fieldtype;
	      s_node = hc->halo_info[n].source_node ;
	      r_node = hc->halo_info[n].dest_node ;
	      
	      HALO_TRACE(fprintf(stderr,"%d: halo_comm send to %d.\n",this_node,r_node));

	      MPI_Isend(s_buffer, 1, datatype, r_node, REQ_HALO_SPREAD, MPI_COMM_WORLD, &request);
	      halo_dtset(r_buffer,0,fieldtype);
	      MPI_Wait(&request,&status);
	      break;

	    case HALO_RECV:
	      datatype = hc->halo_info[n].datatype;
	      s_node = hc->halo_info[n].source_node ;
	      r_node = hc->halo_info[n].dest_node ;

	      HALO_TRACE(fprintf(stderr,"%d: halo_comm recv from %d.\n",this_node,s_node));

	      MPI_Irecv(r_buffer, 1, datatype, s_node, REQ_HALO_SPREAD, MPI_COMM_WORLD, &request);
	      MPI_Wait(&request,&status);
	      break;

	    case HALO_OPEN:
	      fieldtype = hc->halo_info[n].fieldtype;

	      HALO_TRACE(fprintf(stderr,"%d: halo_comm open boundaries\n",this_node));

	      /* \todo this does not work for the n_i - <n_i> */
	      halo_dtset(r_buffer,0,fieldtype);
	      break;
	      
	}

    }

}
Developer: wmueller, Project: espresso, Lines: 80, Source: halo.c


Example 14: spmd_wait

SEXP spmd_wait(SEXP R_request, SEXP R_status){
	spmd_errhandler(
		MPI_Wait(&request[INTEGER(R_request)[0]],
			&status[INTEGER(R_status)[0]]));
	return(R_NilValue);
} /* End of spmd_wait(). */
Developer: RBigData, Project: pbdMPI, Lines: 6, Source: spmd_wait.c


Example 15: main


//......... (part of the code omitted here) .........

  for (iter = 0; iter<=iterations; iter++){

    /* start timer after a warmup iteration */
    if (iter == 1) {
      MPI_Barrier(MPI_COMM_WORLD);
      local_stencil_time = wtime();
    }

    /* need to fetch ghost point data from neighbors in y-direction                 */
    if (top_nbr != -1) {
      MPI_Irecv(top_buf_in, RADIUS*width_rank, MPI_DTYPE, top_nbr, 101,
                MPI_COMM_WORLD, &(request[1]));
      for (int kk=0,j=jend_rank-RADIUS+1; j<=jend_rank; j++)
      for (int i=istart_rank; i<=iend_rank; i++) {
        top_buf_out[kk++]= IN(i,j);
      }
      MPI_Isend(top_buf_out, RADIUS*width_rank,MPI_DTYPE, top_nbr, 99,
                MPI_COMM_WORLD, &(request[0]));
    }

    if (bottom_nbr != -1) {
      MPI_Irecv(bottom_buf_in,RADIUS*width_rank, MPI_DTYPE, bottom_nbr, 99,
                MPI_COMM_WORLD, &(request[3]));
      for (int kk=0,j=jstart_rank; j<=jstart_rank+RADIUS-1; j++)
      for (int i=istart_rank; i<=iend_rank; i++) {
        bottom_buf_out[kk++]= IN(i,j);
      }
      MPI_Isend(bottom_buf_out, RADIUS*width_rank,MPI_DTYPE, bottom_nbr, 101,
 	  MPI_COMM_WORLD, &(request[2]));
      }

    if (top_nbr != -1) {
      MPI_Wait(&(request[0]), MPI_STATUS_IGNORE);
      MPI_Wait(&(request[1]), MPI_STATUS_IGNORE);
      for (int kk=0,j=jend_rank+1; j<=jend_rank+RADIUS; j++)
      for (int i=istart_rank; i<=iend_rank; i++) {
        IN(i,j) = top_buf_in[kk++];
      }
    }

    if (bottom_nbr != -1) {
      MPI_Wait(&(request[2]), MPI_STATUS_IGNORE);
      MPI_Wait(&(request[3]), MPI_STATUS_IGNORE);
      for (int kk=0,j=jstart_rank-RADIUS; j<=jstart_rank-1; j++)
      for (int i=istart_rank; i<=iend_rank; i++) {
        IN(i,j) = bottom_buf_in[kk++];
      }
    }

    /* LOAD/STORE FENCE */
    MPI_Win_sync(shm_win_in);

    /* need to fetch ghost point data from neighbors in x-direction                 */
    if (right_nbr != -1) {
      MPI_Irecv(right_buf_in, RADIUS*height_rank, MPI_DTYPE, right_nbr, 1010,
                MPI_COMM_WORLD, &(request[1+4]));
      for (int kk=0,j=jstart_rank; j<=jend_rank; j++)
      for (int i=iend_rank-RADIUS+1; i<=iend_rank; i++) {
        right_buf_out[kk++]= IN(i,j);
      }
      MPI_Isend(right_buf_out, RADIUS*height_rank, MPI_DTYPE, right_nbr, 990,
                MPI_COMM_WORLD, &(request[0+4]));
    }

    if (left_nbr != -1) {
Developer: elliottslaughter, Project: Kernels, Lines: 67, Source: stencil.c


Example 16: VecLoad_Binary

PetscErrorCode VecLoad_Binary(Vec vec, PetscViewer viewer)
{
  PetscMPIInt    size,rank,tag;
  int            fd;
  PetscInt       i,rows = 0,n,*range,N,bs;
  PetscErrorCode ierr;
  PetscBool      flag;
  PetscScalar    *avec,*avecwork;
  MPI_Comm       comm;
  MPI_Request    request;
  MPI_Status     status;
#if defined(PETSC_HAVE_MPIIO)
  PetscBool      useMPIIO;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)viewer,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
  ierr = PetscViewerBinaryReadVecHeader_Private(viewer,&rows);CHKERRQ(ierr);
  /* Set Vec sizes,blocksize,and type if not already set. Block size first so that local sizes will be compatible. */
  ierr = PetscOptionsGetInt(((PetscObject)vec)->prefix, "-vecload_block_size", &bs, &flag);CHKERRQ(ierr);
  if (flag) {
    ierr = VecSetBlockSize(vec, bs);CHKERRQ(ierr);
  }
  if (vec->map->n < 0 && vec->map->N < 0) {
    ierr = VecSetSizes(vec,PETSC_DECIDE,rows);CHKERRQ(ierr);
  }

  /* If sizes and type already set,check if the vector global size is correct */
  ierr = VecGetSize(vec, &N);CHKERRQ(ierr);
  if (N != rows) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Vector in file different length (%d) then input vector (%d)", rows, N);

#if defined(PETSC_HAVE_MPIIO)
  ierr = PetscViewerBinaryGetMPIIO(viewer,&useMPIIO);CHKERRQ(ierr);
  if (useMPIIO) {
    ierr = VecLoad_Binary_MPIIO(vec, viewer);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
#endif

  ierr = VecGetLocalSize(vec,&n);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)viewer,&tag);CHKERRQ(ierr);
  ierr = VecGetArray(vec,&avec);CHKERRQ(ierr);
  if (!rank) {
    ierr = PetscBinaryRead(fd,avec,n,PETSC_SCALAR);CHKERRQ(ierr);

    if (size > 1) {
      /* read in o
//......... (part of the code omitted here) .........
