C++ MPI_Cart_shift Function Code Examples


This article collects typical usage examples of the C++ MPI_Cart_shift function. If you have been wondering how MPI_Cart_shift is used in practice, how to call it, or what real-world uses of it look like, the hand-picked code examples below may help.



Twenty MPI_Cart_shift code examples are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
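
Before the collected examples, here is a minimal self-contained sketch (written for this article, not taken from any of the projects below; the communicator and variable names are ours) showing the basic call pattern: build a Cartesian communicator with MPI_Cart_create, ask MPI_Cart_shift for the source and destination ranks along one dimension, and exchange data with those neighbours via MPI_Sendrecv. For a non-periodic dimension, a neighbour that falls off the grid comes back as MPI_PROC_NULL, which the point-to-point calls accept as a no-op.

/* Minimal sketch: each rank on a 1-D periodic ring exchanges its
 * rank number with its left and right neighbours. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int nprocs, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    int dims[1]    = { nprocs };
    int periods[1] = { 1 };        /* periodic: the ring wraps around */
    MPI_Comm ring;
    MPI_Cart_create(MPI_COMM_WORLD, 1, dims, periods, 0, &ring);
    MPI_Comm_rank(ring, &rank);

    int left, right;
    /* displacement +1 along dimension 0: 'left' is the rank we receive
     * from, 'right' is the rank we send to */
    MPI_Cart_shift(ring, 0, 1, &left, &right);

    int from_left = -1;
    MPI_Sendrecv(&rank, 1, MPI_INT, right, 0,
                 &from_left, 1, MPI_INT, left, 0, ring, MPI_STATUS_IGNORE);
    printf("rank %d: left=%d right=%d received=%d\n",
           rank, left, right, from_left);

    MPI_Comm_free(&ring);
    MPI_Finalize();
    return 0;
}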

Example 1: ParallelCalc

void ParallelCalc()
{
	double max, allmax;
	while (true)
	{
		max = 0;
		for (int i = 1; i < K + 1; i++)
			for (int j = 1; j < N + 1; j++)
			{
				double u0 = u1[i * (N + 2) + j];
				u1[i * (N + 2) + j] = 0.25 * (u1[(i - 1) * (N + 2) + j]
				+ u1[(i + 1) * (N + 2) + j] + u1[i * (N + 2) + j - 1]
				+ u1[i * (N + 2) + j + 1] - h * h * f1[(i - 1) * N + j - 1]);
				double d = fabs(u1[i * (N + 2) + j] - u0); /* fabs keeps the residual as a double */
				if (d > max)
					max = d;
			}
		MPI_Allreduce(&max, &allmax, 1, MPI_DOUBLE, MPI_MAX,
		MPI_COMM_WORLD);
		if (allmax <= eps)
			break;
		int RSrc, RDest;
		MPI_Status s;
		MPI_Cart_shift(BAND_COMM, 0, 1, &RSrc, &RDest);
		MPI_Sendrecv(&u1[K * (N + 2)], N + 2, MPI_DOUBLE, RDest, 0, u1, N + 2, MPI_DOUBLE, RSrc, 0, BAND_COMM, &s);
		MPI_Cart_shift(BAND_COMM, 0, -1, &RSrc, &RDest);
		MPI_Sendrecv(&u1[N + 2], N + 2, MPI_DOUBLE, RDest, 0, &u1[(K + 1) * (N + 2)], N + 2, MPI_DOUBLE, RSrc, 0, BAND_COMM,&s);
	}
}
Developer ID: artjomjuferov, Project: university, Lines: 29, Source: mpi.cpp


Example 2: init_mpi

static void init_mpi(void)
{
    MPI_Comm_size(MPI_COMM_WORLD, &nproc); //get the number of processes
    int dim = 2;          //number of dimensions
    int procs[2] = {0,0}; //[0]: number of divisions in x, [1]: number of divisions in y
    int period[2] = {0,0};//boundary condition; 0 means a fixed (non-periodic) boundary
    MPI_Comm grid_comm;
    int reorder = 1;   //re-distribute rank flag

    MPI_Dims_create(nproc, dim, procs); //automatically compute how many pieces to split each direction into
    MPI_Cart_create(MPI_COMM_WORLD, 2, procs, period, reorder, &grid_comm); //automatically partition the domain => procs and grid_comm are updated
    MPI_Cart_shift(grid_comm, 0, 1, &ltRank, &rtRank);
    MPI_Cart_shift(grid_comm, 1, 1, &bmRank, &tpRank);

    //find where this process sits in the process grid (which row and column)
    int coordinates[2];
    MPI_Comm_rank(grid_comm, &rank);
    MPI_Cart_coords(grid_comm, rank, 2, coordinates);

    SUB_N_X = N_PX / procs[0];
    SUB_N_Y = N_PY / procs[1];
    SUB_N_PX = SUB_N_X + 2; //2 larger for the halo (ghost cells that receive the neighbouring region's values)
    SUB_N_PY = SUB_N_Y + 2;
    SUB_N_CELL = SUB_N_PX*SUB_N_PY;
    offsetX = coordinates[0] * SUB_N_X; //offset in cells, not a rank index, so it advances in steps of SUB_N_X
    offsetY = coordinates[1] * SUB_N_Y;

    /* this fetches one element every SUB_N_PY entries (a gap of SUB_N_PY-1 between consecutive elements), repeated for SUB_N_X rows */
    MPI_Type_vector(SUB_N_X, 1, SUB_N_PY, MPI_C_DOUBLE_COMPLEX, &X_DIRECTION_DOUBLE_COMPLEX);
    MPI_Type_commit(&X_DIRECTION_DOUBLE_COMPLEX);
}
Developer ID: rennone, Project: mpiFDTD, Lines: 31, Source: mpiTM_UPML.c


Example 3: topo_cartesian

void topo_cartesian(int rank, int *dims, int *coords, int *neigh)
{
  int periods[3];
  
  MPI_Comm commcart;
  
  periods[0]=1;
  periods[1]=1;
  periods[2]=1;

  // creation of cartesian communicator
  MPI_Cart_create(MPI_COMM_WORLD,3,dims,periods,0,&commcart);

  // getting the cartesian position
  MPI_Cart_coords(commcart,rank,3,coords);
  
  // getting the neighbors
  MPI_Cart_shift(commcart,0,1,neigh+0,neigh+1); //X
  MPI_Cart_shift(commcart,1,1,neigh+2,neigh+3); //Y
  MPI_Cart_shift(commcart,2,1,neigh+4,neigh+5); //Z

  printf(" proc #%d has coordinates %d %d %d and neighbors Xm=%d Xp=%d Ym=%d Yp=%d Zm=%d Zp=%d \n",rank,coords[0],coords[1],coords[2],neigh[0],neigh[1],neigh[2],neigh[3],neigh[4],neigh[5]);
  //  printf(" dims = %d %d %d\n",dims[0],dims[1],dims[2]);

}
Developer ID: domaubert, Project: Cudaton, Lines: 25, Source: communication.c


Example 4: neighbour_table

/* Function that creates a list of the neighbours for each process */ 
inline void neighbour_table(int* neighbours, MPI_Comm grid, int proc_rank){
  int move, id;
  int coord[2];
  id = 1;
  // move from one proc to another ==> move = 1
  move = 1;
  // get Left and Right
  MPI_Cart_shift(grid, id, move, &neighbours[L], &neighbours[R]);
  id = 0;
  // get Up and Down
  MPI_Cart_shift(grid, id, move, &neighbours[U], &neighbours[D]);
  // get current proc coordinates
  MPI_Cart_coords(grid, proc_rank, 2, coord);
  coord[0]--;
  coord[1]--;
  // determine Up-Left neighbour
  MPI_Cart_rank(grid, coord, &neighbours[UL]);
  coord[1]+=2;
  // determine Up-Right neighbour
  MPI_Cart_rank(grid, coord, &neighbours[UR]);
  coord[0]+=2;
  // determine Down-Right neighbour
  MPI_Cart_rank(grid, coord, &neighbours[LR]);
  coord[1]-=2;
  // determine Down-Left neighbour
  MPI_Cart_rank(grid, coord, &neighbours[LL]);
  return;
}
Developer ID: vincent-jj, Project: tdp6, Lines: 29, Source: life_mpi.c


Example 5: initial_send

void initial_send(MPI_Comm &Comm_Cart,int rank, float **A_tmp, float **A,
					 float **B_tmp,float **B, MPI_Status &status, int size, int n){
	int mycoords[2] ={0,0};
	MPI_Cart_coords(Comm_Cart,rank,2,mycoords);

	int a_left_displ, a_right_displ, b_top_displ, b_bot_displ;

	a_left_displ=a_right_displ=b_top_displ=b_bot_displ=rank;

	// Shifts the initial value of A(i,j) by i steps to the left (in j direction)
	MPI_Cart_shift(Comm_Cart, 0, mycoords[1], &b_top_displ, &b_bot_displ);
	MPI_Cart_shift(Comm_Cart, 1, mycoords[0], &a_left_displ, &a_right_displ);


	float *sendptrA, *recvptrA,*sendptrB, *recvptrB;
	sendptrA = &(A[0][0]);
	recvptrA = &(A_tmp[0][0]);

	sendptrB = &(B[0][0]);
	recvptrB = &(B_tmp[0][0]);


	// Sends initial values of A to the left
	MPI_Sendrecv(sendptrA,n*n, MPI_FLOAT, a_left_displ,  lr_tag,
				 recvptrA,n*n, MPI_FLOAT, a_right_displ, lr_tag,
					Comm_Cart, &status);

	// Sends initial values of B to the top
	MPI_Sendrecv(sendptrB,n*n, MPI_FLOAT, b_top_displ, bt_tag,
				 recvptrB,n*n, MPI_FLOAT, b_bot_displ, bt_tag,
					Comm_Cart, &status);

}
Developer ID: HPCSEprojectBT, Project: GEMM, Lines: 33, Source: matrix_setup.hpp


Example 6: boundary

void boundary(GRID_INFO_TYPE* grid, int spin[][LENGTH], int nbr1[] , int nbr2[])
{
 int i,*U2,*D2,*R2,*L2,m,n,tag=50;
     U2=(int *)malloc(LENGTH*sizeof(int));
     D2=(int *)malloc(LENGTH*sizeof(int));
     R2=(int *)malloc(LENGTH*sizeof(int));
     L2=(int *)malloc(LENGTH*sizeof(int));
/* put  the spins at the left, right, up and down boundaries into separate arrays */
     for (i = 1 ; i < LENGTH-1 ; i++)    
     {  U2[i]=spin[i][1];
       D2[i]=spin[i][LENGTH-2];
       L2[i]=spin[1][i];
       R2[i]=spin[LENGTH-2][i];
      }
/*
send the boundary arrays to the appropriate neighbor process.
Remember: dimension 0 is up/down and dimension 1 is left/right.
*/
       MPI_Cart_shift(grid->comm,0,-1,&m,&n); /* find the neighbor process down the current one */ 
       MPI_Send(U2,LENGTH, MPI_INT,n,tag,MPI_COMM_WORLD); /* send U2 buffer with dimension LENGTH of type MPI_INT to  process "n" with "tag" */
       MPI_Cart_shift(grid->comm,0,1,&m,&n);
       MPI_Send(D2,LENGTH, MPI_INT,n,tag,MPI_COMM_WORLD);
       MPI_Cart_shift(grid->comm,1,1,&m,&n);
       MPI_Send(R2,LENGTH, MPI_INT,n,tag,MPI_COMM_WORLD);
       MPI_Cart_shift(grid->comm,1,-1,&m,&n);
       MPI_Send(L2,LENGTH, MPI_INT,n,tag,MPI_COMM_WORLD);
	free(U2);
	free(D2);
	free(L2);
	free(R2);
}
Developer ID: rahulporuri, Project: computational_physics, Lines: 31, Source: mpi_ising_model.c


Example 7: calc_node_neighbors

void calc_node_neighbors(int node)
{
  int dir;
  
  map_node_array(node,node_pos);
  for(dir=0;dir<3;dir++) {
    int buf;
    MPI_Cart_shift(comm_cart, dir, -1, &buf, node_neighbors + 2*dir);
    MPI_Cart_shift(comm_cart, dir, 1, &buf, node_neighbors + 2*dir + 1);

    /* left boundary ? */
    if (node_pos[dir] == 0) {
      boundary[2*dir] = 1;
    }
    else {
      boundary[2*dir] = 0;
    }
    /* right boundary ? */
   if (node_pos[dir] == node_grid[dir]-1) {
      boundary[2*dir+1] = -1;
    }
    else {
      boundary[2*dir+1] = 0;
    }
  }
  GRID_TRACE(printf("%d: node_grid %d %d %d, pos %d %d %d, node_neighbors ", this_node, node_grid[0], node_grid[1], node_grid[2], node_pos[0], node_pos[1], node_pos[2]));
}
Developer ID: Petr-Melenev, Project: espresso-dev, Lines: 27, Source: grid.cpp


Example 8: collectAfterPre

void collectAfterPre(Vector u, const Vector v)
{
  int source, dest;
  if (u->comm_rank == 0) {
    int len=u->len-1;
    dcopy(&len, v->data, &v->stride, u->data+1, &u->stride);
  } else if (u->comm_rank == u->comm_size-1) {
    int len=v->len-1;
    dcopy(&len, v->data+1, &v->stride, u->data+1, &u->stride);
  } else
    copyVector(u, v);

  // west
  double recv;
  MPI_Cart_shift(*u->comm, 0,   -1, &source, &dest);
  MPI_Sendrecv(v->data,        1, MPI_DOUBLE, dest,   0,
               u->data, 1, MPI_DOUBLE, source, 0, *u->comm, MPI_STATUS_IGNORE);
  if (source > -1)
    u->data[u->len-2] += u->data[0];

  // east
  MPI_Cart_shift(*u->comm,  0,   1, &source, &dest);
  MPI_Sendrecv(v->data+v->len-1, 1, MPI_DOUBLE, dest,   1,
               u->data,          1, MPI_DOUBLE, source, 1, *u->comm, MPI_STATUS_IGNORE);
  if (source > -1)
    u->data[1] += u->data[0];

  u->data[0] = u->data[u->len-1] = 0.0;
}
Developer ID: akva2, Project: tma4280, Lines: 29, Source: poisson1D-MPI.c


Example 9: main

int main(int argc, char *argv[]) {
    int SIZE = atoi(argv[1]);
    MPI_Status status;
    MPI_Comm comm_3d;
    int rank, p;
    int dims[3], periods[3], coords[3];
    dims[0] = dims[1] = dims[2] = periods[0] = periods[1] = periods[2] = 0;
    int i;
    int *b;
    int *my_b = (int *) malloc(SIZE * sizeof(int));

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);

    // Start the vector that is broadcast.
    if (rank == 0){
        b = (int *) malloc(SIZE * sizeof(int));
        for (i = 0; i < SIZE; i++) my_b[i] = b[i] = i;
    }

    // Create the mesh.
    MPI_Dims_create(p, 3, dims);
    MPI_Cart_create(MPI_COMM_WORLD, 3, dims, periods, 0, &comm_3d);

    // Load the coordinates.
    MPI_Cart_coords(comm_3d, rank, 3, coords);

    // The first column will start the broadcast along the rows.
    double start = MPI_Wtime();
    int dim_0_succ, dim_0_pred, dim_1_succ, dim_1_pred, dim_2_succ, dim_2_pred;
    dim_0_succ = dim_0_pred = dim_1_succ = dim_1_pred = dim_2_succ = dim_2_pred = 0;
    MPI_Cart_shift(comm_3d, 0, 1, &dim_0_pred, &dim_0_succ);
    MPI_Cart_shift(comm_3d, 1, 1, &dim_1_pred, &dim_1_succ);
    MPI_Cart_shift(comm_3d, 2, 1, &dim_2_pred, &dim_2_succ);

    if (coords[0] == 0 && coords[1] == 0 && coords[2] == 0) {
        MPI_Send(b, SIZE, MPI_INT, dim_0_succ, 0, MPI_COMM_WORLD);
        MPI_Send(b, SIZE, MPI_INT, dim_1_succ, 0, MPI_COMM_WORLD);
        MPI_Send(b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
    } else if (coords[1] == 0 && coords[2] == 0){
        MPI_Recv(my_b, SIZE, MPI_INT, dim_0_pred, 0, MPI_COMM_WORLD, &status);
        MPI_Send(my_b, SIZE, MPI_INT, dim_0_succ, 0, MPI_COMM_WORLD);
        MPI_Send(my_b, SIZE, MPI_INT, dim_1_succ, 0, MPI_COMM_WORLD);
        MPI_Send(my_b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
    } else if (coords[2] == 0){
        MPI_Recv(my_b, SIZE, MPI_INT, dim_1_pred, 0, MPI_COMM_WORLD, &status);
        MPI_Send(my_b, SIZE, MPI_INT, dim_1_succ, 0, MPI_COMM_WORLD);
        MPI_Send(my_b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
    } else {
        MPI_Recv(my_b, SIZE, MPI_INT, dim_2_pred, 0, MPI_COMM_WORLD, &status);
        MPI_Send(my_b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
    }

    double end = MPI_Wtime();
 
    if (rank == 0) printf("%d %f\n", SIZE, end - start);
     
    MPI_Finalize();
}
Developer ID: cenezaraujo, Project: Algorithm, Lines: 60, Source: broadcast_hyper.c


Example 10: MatrixMatrixMultiply

void MatrixMatrixMultiply(double ***a, double ***b, double ***c, int mra, int
        mca, int mrb, int mcb, int *ra, int *ca, int *rb, int *cb, MPI_Comm
        comm)
{
    /*from the teaching book */
    int i, j;
    int num_procs, dims[2], periods[2];
    int myrank, my2drank, mycoords[2];
    int uprank, downrank, leftrank, rightrank, coords[2];
    int shiftsource, shiftdest;
    MPI_Status status; 
    MPI_Comm comm_2d;

    MPI_Comm_size(comm, &num_procs);
    MPI_Comm_rank(comm, &myrank); /* rank in the incoming communicator; the 2-D rank is queried after MPI_Cart_create below */
    
    dims[0] = dims[1] = 0;
    MPI_Dims_create(num_procs, 2, dims);
    periods[0]= periods[1] = 1;

    MPI_Cart_create(comm, 2, dims, periods, 1, &comm_2d);
    MPI_Comm_rank(comm_2d, &my2drank);
    MPI_Cart_coords(comm_2d, my2drank, 2, mycoords);

    MPI_Cart_shift(comm_2d, 1, -1, &rightrank, &leftrank);
    MPI_Cart_shift(comm_2d, 0, -1, &downrank, &uprank);

    int ia = my2drank;
    int ib = my2drank;

    MPI_Cart_shift(comm_2d, 1, -mycoords[0], &shiftsource, &shiftdest);
    MPI_Sendrecv_replace((*a)[0], mra*mca, MPI_DOUBLE, shiftdest, 1,
            shiftsource, 1, comm_2d, &status);
    MPI_Sendrecv_replace(&ia, 1, MPI_INT, shiftdest, 1, shiftsource, 1,
            comm_2d, &status);

    MPI_Cart_shift(comm_2d, 0, -mycoords[1], &shiftsource, &shiftdest);
    MPI_Sendrecv_replace((*b)[0], mrb*mcb, MPI_DOUBLE, shiftdest, 1,
            shiftsource, 1, comm_2d, &status);  
    MPI_Sendrecv_replace(&ib, 1, MPI_INT, shiftdest, 1, shiftsource, 1,
            comm_2d, &status);

    for (i=0; i<dims[0]; i++){
        MatrixMultiply(ra[ia], ca[ia], rb[ib], cb[ib], *a, *b, c); /* c=c + a*b */

        MPI_Sendrecv_replace((*a)[0], mra*mca, MPI_DOUBLE, leftrank, 1,
                rightrank, 1, comm_2d, &status);
        MPI_Sendrecv_replace((*b)[0], mrb*mcb, MPI_DOUBLE, uprank, 1, downrank,
                1, comm_2d, &status);

        MPI_Sendrecv_replace(&ia, 1, MPI_INT, leftrank, 1, rightrank, 1,
                comm_2d, &status);
        MPI_Sendrecv_replace(&ib, 1, MPI_INT, uprank, 1, downrank, 1,
                comm_2d, &status);
    }

    MPI_Comm_free(&comm_2d);    
}
Developer ID: idunklo, Project: Inf3380, Lines: 58, Source: oblig2.c


Example 11: main

 int main (int argc, char** argv)
 {
    int num_tasks;

    char hostname[80];

    int dims[DIM];
    dims[0] = DIM_0;
    dims[1] = DIM_1;
    dims[2] = DIM_2;

    int periods[DIM] = {false, false, false};
    int reorder = true;
    int my_rank;

    int coords[DIM];

    MPI_Comm cartcomm, y_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &num_tasks);

    if (num_tasks != SIZE) {
        if (my_rank == 0) {
            printf("We need %d proccesses, %d given. Exiting.\n", SIZE, num_tasks);
        }
        
        MPI_Finalize();

		return 0;
        
    }         
    
    gethostname(hostname, 79);
	MPI_Cart_create(MPI_COMM_WORLD, DIM, dims, periods, reorder, &cartcomm);
	MPI_Cart_coords(cartcomm, my_rank, DIM, coords);
	printf("%-15.12s: MPI_COMM_WORLD rank %2d: (%d, %d, %d)\n", hostname, my_rank, coords[0], coords[1], coords[2]);
	
	//neighbors
	int src, dest;
	for (int i = 0; i < 3; i++) {
		MPI_Cart_shift(cartcomm, i, +1, &src, &dest);
		printf("I am %d and my right neighbor along dimension %d is %d\n", my_rank, i, dest);
		MPI_Cart_shift(cartcomm, i, -1, &src, &dest);
		printf("I am %d and my left neighbor along dimension %d is %d\n", my_rank, i, dest);
	}
	
	
	/* MPI_Cart_sub expects one flag per dimension; keep only the y dimension */
	int keep_dims[DIM] = {0, 1, 0};
	MPI_Cart_sub(cartcomm, keep_dims, &y_comm);
	printf("%d: my y rank is %d\n", my_rank, coords[1]);

    MPI_Finalize();

    return 0;
 }
Developer ID: ThomasRueckert, Project: ParalleleProgrammierung, Lines: 58, Source: a1.c


Example 12: send_to

/*
 * int direction (0 horizontal, 1 vertical)
 * int distance  
 */
void send_to(MPI_Comm *comm, int direction, float *A, int size, int row, int column, int n) {
  int prev_rank, next_rank;
  int distance = 1;
  MPI_Cart_shift(*comm, direction, distance, &prev_rank, &next_rank);
  while(next_rank >= 0) {
    MPI_Send(A + row * n + column, size, MPI_FLOAT, next_rank, 0, *comm);
    MPI_Cart_shift(*comm, direction, ++distance, &prev_rank, &next_rank);
  }
} 
Developer ID: david11, Project: MPIDeterminant, Lines: 13, Source: determinant.c


Example 13: main

int main(int argc, char *argv[])
{
	int *matrix_a;
	int *matrix_b;
	int *matrix_c;

	const char *matrix_a_filename = argv[1];
	const char *matrix_b_filename = argv[2];
	const char *matrix_c_filename = argv[3];

	MPI_Comm matrix_comm;

	MPI_Init(&argc, &argv);

	create_matrix_comm(MPI_COMM_WORLD, &matrix_comm);
	MPI_Comm_size(matrix_comm, &size);
	MPI_Comm_rank(matrix_comm, &rank);

	compute_matrixes_variables(matrix_a_filename, matrix_comm);

	alloc_submatrix_buffer(&matrix_a);
	alloc_submatrix_buffer(&matrix_b);
	alloc_submatrix_buffer(&matrix_c);

	distribute_matrix(matrix_a_filename, matrix_a, matrix_comm);
	distribute_matrix(matrix_b_filename, matrix_b, matrix_comm);

	/* The actual cannon algorithms */
	int row_source, row_dst;
	int col_source, col_dst;
	MPI_Cart_shift(matrix_comm, 0, -1, &row_source, &row_dst);
	MPI_Cart_shift(matrix_comm, 1, -1, &col_source, &col_dst);
	int i;
	for (i = 0; i < pp_dims; i++) {
		compute_matrix_mul(matrix_a, matrix_b, matrix_c, N);
		/* MPI_ANY_TAG is only valid as a receive tag, so use a real tag on the send side */
		MPI_Sendrecv_replace(matrix_a, sub_n * sub_n, MPI_INT,
				     row_source, 0, row_dst, MPI_ANY_TAG,
				     matrix_comm, MPI_STATUS_IGNORE);

		MPI_Sendrecv_replace(matrix_b, sub_n * sub_n, MPI_INT,
				     col_source, 0, col_dst, MPI_ANY_TAG,
				     matrix_comm, MPI_STATUS_IGNORE);
	}


	write_result(matrix_c_filename, matrix_c, matrix_comm);

	free(matrix_a);
	free(matrix_b);
	free(matrix_c);

	MPI_Comm_free(&matrix_comm);

	MPI_Finalize();
	return 0;
}
Developer ID: vheon, Project: dotlab, Lines: 56, Source: oddeven.c


Example 14: heatMPISetup

/******************************************************
 * Function to setup MPI data.
 *
 * (1) Initializes MPI
 * (2) Creates a cartesian communicator for border exchange
 * (3) Distributes the overall grid to the processes
 * (4) Sets up helpful data-type and MPI buffer
 *
 ******************************************************/
void heatMPISetup (int* pargc, char*** pargv, heatGrid *grid, dataMPI* configMPI)
{
    int size,
        dims[2] = {0,0},
        periods[2] = {1,1},
        coords[2];
    int buf_size;
    char *buf;

    /* ==== (1) ==== */
    /* Base init*/
    MPI_Init (pargc, pargv);
    MPI_Comm_rank (MPI_COMM_WORLD, &configMPI->rank);
    MPI_Comm_size (MPI_COMM_WORLD, &size);

    /* ==== (2) ==== */
    /* Create cartesian communicator*/
    MPI_Dims_create (size, 2, dims);
    MPI_Cart_create (MPI_COMM_WORLD, 2, dims, periods, 0, &configMPI->cart);

    /* Store neighbors in the grid */
    MPI_Cart_shift (configMPI->cart, 0, 1, &configMPI->left, &configMPI->right);
    MPI_Cart_shift (configMPI->cart, 1, 1, &configMPI->up,    &configMPI->down);

    /* ==== (3) ==== */
    /* Distribute overall grid to processes */
    MPI_Cart_coords (configMPI->cart, configMPI->rank, 2, coords); /*My coordinate*/

    configMPI->start_x = 1 + (grid->xsize/dims[0])*coords[0];
    if (coords[0]+1 != dims[0])
        /* coords 0 to N-1 get an equal distribution*/
        configMPI->num_cells_x = grid->xsize / (dims[0]);
    else
        /* last coord gets the rest */
        configMPI->num_cells_x = grid->xsize - configMPI->start_x + 1;

    configMPI->start_y = 1 + (grid->ysize/dims[1])*coords[1];
    if (coords[1]+1 != dims[1])
            /* coords 0 to N-1 get an equal distribution*/
            configMPI->num_cells_y = grid->ysize / (dims[1]);
        else
            /* last coord gets the rest */
            configMPI->num_cells_y = grid->ysize - configMPI->start_y + 1;

    /* ==== (4) ==== */
    /* Create datatype to communicate one column */
    MPI_Type_vector (
            configMPI->num_cells_y, /* #blocks */
            1, /* #elements per block */
            grid->xsize+2, /* #stride */
            MPI_DOUBLE, /* old type */
            &configMPI->columntype /* new type */ );
    MPI_Type_commit (&configMPI->columntype);
}
Developer ID: subodhvsharma, Project: benchmarks, Lines: 63, Source: heat-errors.c


Example 15: main_replaced

int main_replaced(int argc, char** argv){

    //Initialization
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    //Reading image
    if(rank == 0){
        image = read_bmp("Lenna_blur.bmp");
    }

    //Creating cartesian communicator
    MPI_Dims_create(size, 2, dims);
    MPI_Cart_create( MPI_COMM_WORLD, 2, dims, periods, 0, &cart_comm );
    MPI_Cart_coords( cart_comm, rank, 2, coords );

    MPI_Cart_shift( cart_comm, 0, 1, &north, &south );
    MPI_Cart_shift( cart_comm, 1, 1, &west, &east );

    local_image_size[0] = image_size[0]/dims[0];
    local_image_size[1] = image_size[1]/dims[1];

    //Allocating buffers
    int lsize = local_image_size[0]*local_image_size[1];
    int lsize_border = (local_image_size[0] + 2*BORDER)*(local_image_size[1] + 2*BORDER);
    local_image_orig = (unsigned char*)malloc(sizeof(unsigned char)*lsize);
    local_image[0] = (unsigned char*)calloc(lsize_border, sizeof(unsigned char));
    local_image[1] = (unsigned char*)calloc(lsize_border, sizeof(unsigned char));

    create_types();

    distribute_image();

    initialilze_guess();

    //Main loop
    for(int i = 0; i < ITERATIONS; i++){
        exchange_borders(i);
        perform_convolution(i);
    }

    gather_image();

    MPI_Finalize();

    //Write image
    if(rank==0){
        write_bmp(image, image_size[0], image_size[1]);
    }

    exit(0);
}
Developer ID: ChornHulio, Project: Uni-Workspace, Lines: 53, Source: test.c


Example 16: main

int main( int argc, char** argv ) {

    int numtasks = 0; 
 
    MPI_( MPI_Init( &argc, &argv ) );
    MPI_( MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN ) );
    MPI_( MPI_Comm_size( MPI_COMM_WORLD, &numtasks ) );
    
    const int DIM = int( std::sqrt( double( numtasks ) ) );
    std::vector< int > dims( 2, DIM );
    std::vector< int > periods( 2, 0 ); //periodic - false -> non-periodic
    const int reorder = 0; //false - no reorder
    MPI_Comm cartcomm;
    MPI_( MPI_Cart_create( MPI_COMM_WORLD, 2, &dims[ 0 ], &periods[ 0 ], reorder, &cartcomm ) ); 
    int task = -1;
    MPI_( MPI_Comm_rank( cartcomm, &task ) );
    std::vector< int > coords( 2, -1 );
    MPI_( MPI_Cart_coords( cartcomm, task, 2, &coords[ 0 ] ) );
     
    std::vector< int > neighbors( 4, -1 );
    enum { UP = 0, DOWN, LEFT, RIGHT };
    // compute the shifted source and destination ranks, given a shift direction and amount
    //MPI_Cart_shift is used to find two "nearby" neighbors of the calling process
    //along a specified direction of an N-dimensional grid
    //The direction and offset are specified as a signed integer
    //If the sign of the displacement is positive the "source" rank is lower
    //than the destination rank; if it's negative the opposite is true 
    MPI_( MPI_Cart_shift( cartcomm, 0, 1, &neighbors[ UP ],   &neighbors[ DOWN ] ) );
    MPI_( MPI_Cart_shift( cartcomm, 1, 1, &neighbors[ LEFT ], &neighbors[ RIGHT ] ) );
    int sendbuf = task;
    const int tag = 0x01;
    std::vector< int > recvbuf( 4, MPI_PROC_NULL ); 
    std::vector< MPI_Request > reqs( 2 * 4 );
    for( int i = 0; i != 4; ++i ) {
        int dest = neighbors[ i ];
        int src  = neighbors[ i ];
        MPI_( MPI_Isend( &sendbuf, 1, MPI_INT, dest, tag, MPI_COMM_WORLD, &reqs[ i ] ) );
        MPI_( MPI_Irecv( &recvbuf[ i ], 1, MPI_INT, src, tag, MPI_COMM_WORLD, &reqs[ i + 4 ] ) );
    }
    std::vector< MPI_Status  > status( 2 * 4 ); 
    MPI_( MPI_Waitall( 8, &reqs[ 0 ], &status[ 0 ] ) );

    std::ostringstream os;
    os << "rank= " << task << " coords= " << coords[ 0 ] << ',' << coords[ 1 ]
       << " neighbors= " << neighbors[ UP ] << ',' << neighbors[ DOWN ] << ','
       << neighbors[ LEFT ] << ',' << neighbors[ RIGHT ] << '\n';
    std::cout << os.str(); os.flush();
 
    MPI_( MPI_Finalize() );
    
    return 0;
}
Developer ID: lichinka, Project: cuda-mpi-scratch, Lines: 52, Source: mpi10.cpp
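
As an added note (not part of the original snippet), here is a concrete instance of the source/destination convention described in the comments of Example 16: with four tasks the grid is 2 x 2 and non-periodic, and since reorder is disabled rank 0 sits at coordinates (0,0). There, MPI_Cart_shift with displacement +1 returns UP = MPI_PROC_NULL and DOWN = 2 along dimension 0, and LEFT = MPI_PROC_NULL and RIGHT = 1 along dimension 1, because MPI_Cart_create numbers the grid in row-major order; sends to MPI_PROC_NULL then simply complete without transferring anything.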


Example 17: main

int main(int argc,char **argv){
	int wymiar=2,sasiedzi[4],koordynaty[2],rank;
	int dims[2]={2,3};
	int okresowosc[2]={1,0};
	MPI_Init(&argc,&argv);
	MPI_Comm comm_cart;
	MPI_Cart_create(MPI_COMM_WORLD,wymiar,dims,okresowosc,0,&comm_cart);
	MPI_Comm_rank(comm_cart,&rank);
	MPI_Cart_coords(comm_cart,rank,2,koordynaty);
	MPI_Cart_shift(comm_cart,0,1,&sasiedzi[0],&sasiedzi[1]);
	MPI_Cart_shift(comm_cart,1,1,&sasiedzi[2],&sasiedzi[3]);
	printf("rank = %d wspolrzedne to %d %d sasiedzi= %d %d %d %d \n",rank,koordynaty[0],koordynaty[1],sasiedzi[0],sasiedzi[1],sasiedzi[2],sasiedzi[3]);
	MPI_Finalize();	
	return 0;
}
Developer ID: kura-pl, Project: priry, Lines: 15, Source: cart.c


Example 18: findNeighbours

/*-----------------------------------------------------------*/
int findNeighbours(){
	int dims[1];
	int periods[1];
	int reorder;

	/* find a good process distribution */
	dims[0] = 0; /* zero so that dims_create tries to rearrange */
	MPI_Dims_create(numMPIprocs, 1, dims);

	/* set periods equal to TRUE for periodic boundary conditions ... */
	periods[0] = TRUE;
	/* ...and reorder = FALSE */
	reorder = FALSE;

	/* Create the cartesian topology */
	MPI_Cart_create(comm, 1, dims, periods, reorder, &commCart);

	/* Find the ranks of the left and right neighbour */
	MPI_Cart_shift(commCart, 0, 1, &leftNeighbour, &rightNeighbour);

    MPI_Isend(&myMPIRank, 1, MPI_INT, leftNeighbour, TAG, commCart, &requestArray[0]);

    MPI_Isend(&myMPIRank, 1, MPI_INT, rightNeighbour, TAG, commCart, &requestArray[1]);

    MPI_Irecv(&myGASPIrightNeighbour, 1, MPI_INT, leftNeighbour, TAG, commCart, &requestArray[2]);

    MPI_Irecv(&myGASPIleftNeighbour, 1, MPI_INT, rightNeighbour, TAG, commCart, &requestArray[3]);

    MPI_Waitall(4, requestArray, statusArray);

	return 0;
}
Developer ID: jbreitbart, Project: OpenMP-GASPI-MicroBenchmark-Suite, Lines: 33, Source: parallelEnvironment.c


Example 19: SubDomain::init

void SubDomain::init(int mpi_rank, int mpi_size, Discretization& discretization)
{
    // determine the number of subdomains in the x and y dimensions
    int dims[2] = { 0, 0 };
    MPI_Dims_create(mpi_size, 2, dims);
    ndomy = dims[0];
    ndomx = dims[1];

    // create a 2D non-periodic cartesian topology
    int periods[2] = { 0, 0 };
    MPI_Comm comm_cart;
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &comm_cart);
    domain.comm_cart = comm_cart;

    // retrieve coordinates of the rank in the topology
    int coords[2];
    MPI_Cart_coords(comm_cart, mpi_rank, 2, coords);
    domy = coords[0]+1;
    domx = coords[1]+1;

    // set neighbours for all directions
    MPI_Cart_shift(comm_cart, 0, 1, &neighbour_south, &neighbour_north);
    MPI_Cart_shift(comm_cart, 1, 1, &neighbour_west, &neighbour_east);

    // get bounding box
    nx = discretization.nx / ndomx;
    ny = discretization.ny / ndomy;
    startx = (domx-1)*nx+1;
    starty = (domy-1)*ny+1;

    // adjust for grid dimensions that do not divide evenly between the
    // sub-domains
    if( domx == ndomx )
        nx = discretization.nx - startx + 1;
    if( domy == ndomy )
        ny = discretization.ny - starty + 1;

    endx = startx + nx -1;
    endy = starty + ny -1;

    // get total number of grid points in this sub-domain
    N = nx*ny;

    rank = mpi_rank;
    size = mpi_size;
}
Developer ID: eth-cscs, Project: SummerSchool2016, Lines: 46, Source: data.cpp


Example 20: main

int main(int argc,char **argv)
{
    int rank,neighbors[4],coords[2];
    int periods[2]= {1, 0};
    int ndims=2;
    int dims[2]={1,0};
    MPI_Comm comm_cart;
    MPI_Init(&argc,&argv);
    MPI_Cart_create(MPI_COMM_WORLD,ndims,dims,periods,0,&comm_cart);
    MPI_Comm_rank(comm_cart,&rank);
    MPI_Cart_coords(comm_cart,rank,2,coords);
    MPI_Cart_shift(comm_cart,0,1,&neighbors[0],&neighbors[1]);
    MPI_Cart_shift(comm_cart,1,1,&neighbors[2],&neighbors[3]);
    printf("rank= %d wspolrzedne = %d %d sasiedzi= %d %d %d %d\n",rank,coords[0],coords[1],neighbors[0],neighbors[1],neighbors[2],neighbors[3]);
    MPI_Finalize();
    return 0;
}
Developer ID: kura-pl, Project: priry, Lines: 17, Source: main2.c



Note: The MPI_Cart_shift examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Refer to each project's license before redistributing or using the code; do not reproduce this article without permission.

