This article collects typical usage examples of the C++ MPI_Scatter function. If you have been wondering exactly what MPI_Scatter does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
Twenty code examples of MPI_Scatter are shown below, sorted by popularity by default. You can upvote the ones you like or find useful; your ratings help the system recommend better C++ code examples.
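Before the examples, for reference, here is the prototype of MPI_Scatter together with a minimal, self-contained call. This is only a sketch: the communicator, the root rank 0, the 64-element send buffer and the squared fill values are arbitrary illustrative choices and do not come from any of the projects below.

#include <mpi.h>
#include <stdio.h>

/* Prototype (MPI-3): int MPI_Scatter(const void *sendbuf, int sendcount,
 *     MPI_Datatype sendtype, void *recvbuf, int recvcount,
 *     MPI_Datatype recvtype, int root, MPI_Comm comm);
 */
int main(int argc, char *argv[]) {
    int rank, size, i;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int sendbuf[64] = {0};   /* only the root's contents matter; assumes at most 64 ranks */
    int recvbuf = 0;
    if (rank == 0)
        for (i = 0; i < size && i < 64; i++)
            sendbuf[i] = i * i;

    /* Root sends 1 int to every rank (itself included); each rank receives 1 int. */
    MPI_Scatter(sendbuf, 1, MPI_INT, &recvbuf, 1, MPI_INT, 0, MPI_COMM_WORLD);
    printf("rank %d received %d\n", rank, recvbuf);

    MPI_Finalize();
    return 0;
}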
Example 1: slave
void slave(MPI_Comm ring_comm)
{
    int numbers[2];
    int n;
    /* Receive n from root node */
    MPI_Bcast(&n /* Receive n from root */, 1 /* rx 1 number */, MPI_INT, 0, ring_comm);
    for(int i=0; i<floorf((float)n/(2*p)); i++){
        /* Receive 2 numbers from root */
        MPI_Scatter(NULL, 2 /* send args are ignored on non-root ranks */, MPI_INT,
                    numbers, 2 /* receive 2 ints from root */, MPI_INT, 0 /* id of root node */,
                    ring_comm);
        int min_no;
        work_t work;
        work.no1 = numbers[0];
        work.no2 = numbers[1];
        work_result_t work_result;
        do_work(work, &work_result);
        min_no = work_result.min_no;
        /* Slaves send the minimum of the two numbers */
        MPI_Reduce(&min_no /* everyone sends 1 number to root */, NULL, 1 /* 1 number */, MPI_INT,
                   MPI_MIN, 0 /* id of root node */,
                   ring_comm);
    } /* for */
    return;
} /* slave */
Author: MetalGeek7, Project: UIC-Masters, Lines: 30, Source: hw1-ring-method1.c
Example 2: main
int main(int argc, char *argv[]) {
    int send[DATA_SIZE], recv[DATA_SIZE];
    int rank, size, count, root, res;
    MPI_Status status;
    MPI_Init(&argc, &argv);                // initialize MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);  // get own rank/ID
    MPI_Comm_size(MPI_COMM_WORLD, &size);  // get total number of processes
    if(rank == 0) { // If root: generate the data to be distributed.
    }
    // Send data to all nodes, here an integer array of length "count".
    count = (DATA_SIZE / size); // each receiver gets a chunk of the same size
    // scatter: if rank==0, send data (and keep own share); otherwise receive data
    MPI_Scatter(send, count, MPI_INT, recv, count, MPI_INT, 0, MPI_COMM_WORLD);
    // Each node processes its share of the data and sends the result (here: int "res") to root.
    MPI_Gather(&res, 1, MPI_INT, recv, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if(rank == 0) { // If root: process the received data.
    }
    MPI_Finalize(); // shut down MPI
    return 0;
}
Author: chenguohui, Project: distributedComputingExamples, Lines: 26, Source: gatherScatterBareBones.c
Example 3: main
int main(int argc,char** argv){
    int rank,size,epp;
    int* dataSend=NULL;
    int i;
    MPI_Init(NULL,NULL);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    MPI_Comm_size(MPI_COMM_WORLD,&size);
    epp=2;
    if(rank==0){
        printf("Master creating data...\n");
        dataSend=(int*)malloc(sizeof(int)*size*epp);
        for(i=0;i<epp*size;i++)
            dataSend[i]=i;
    }
    int* dataRecv1=(int*)malloc(sizeof(int)*epp);
    MPI_Scatter(dataSend,epp,MPI_INT,dataRecv1,epp,MPI_INT,0,MPI_COMM_WORLD);
    float subavg=0.0f;
    for(i=0;i<epp;i++)
        subavg+=dataRecv1[i];
    subavg/=epp;
    printf("%d calculates subavg as %f\n",rank,subavg);
    float finalAvg;
    MPI_Reduce(&subavg,&finalAvg,1,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
    if(rank==0){
        printf("The average is : %f\n",finalAvg/size);
    }
    MPI_Finalize();
}
Author: Farheen2302, Project: MPI_programs2_, Lines: 28, Source: average_reduce.c
Example 4: testAllCollective
void testAllCollective(){
    int count = 1000;
    int root=2;
    int *in, *out;
    MPI_Comm comm=MPI_COMM_WORLD;
    /* buf0, buf1 and rank are defined elsewhere in the test harness. */
    if(rank==0){
        MPI_Scatter( buf0, 100, MPI_INT, buf1, 100, MPI_INT, root, comm);
        MPI_Allreduce( in, out, count, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
    }
    else{
        MPI_Scatter( buf0, 100, MPI_INT, buf1, 100, MPI_INT, root, comm);
        MPI_Allreduce( in, out, count, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
    }
}
Author: sessionc, Project: mpi-pabble-extractor, Lines: 16, Source: combi2.c
Example 5: main
int main(int argc,char* argv[]){
    int rank,size;
    int i,n;
    int* A=NULL;
    int D[2],sum;
    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    MPI_Comm_size(MPI_COMM_WORLD,&size);
    int ctr=size*2;
    A=(int*)malloc(sizeof(int)*size*2);
    if(rank==0){
        printf("Enter %d Elements :\n",size*2);
        for(i=0;i<size*2;i++)
            scanf("%d",&A[i]);
    }
    while(ctr!=1){
        MPI_Scatter(A,2,MPI_INT,D,2,MPI_INT,0,MPI_COMM_WORLD);
        sum=D[0]+D[1];
        MPI_Gather(&sum,1,MPI_INT,A,1,MPI_INT,0,MPI_COMM_WORLD);
        ctr/=2;
    }
    if(rank==0)
        printf("Total Sum : %d\n",A[0]);
    MPI_Finalize();
}
Author: farhan0581, Project: Linux_Stuff, Lines: 26, Source: lognSum.c
Example 6: parallelsumArray
double parallelsumArray(double * arr, int num, int id, int numProc){
    int root = 0;
    double sum, totalSum, end, start, scatter, s;
    if(id == 0){ //master reads array and scatters value
        MPI_Bcast(&num, 1, MPI_INT, root, MPI_COMM_WORLD);
        printf("%i\t%g\n", numProc, *arr);
    }
    else{ //workers sum their part of the array
        MPI_Bcast(&num, 1, MPI_INT, root, MPI_COMM_WORLD);
    }
    int numElements = num/numProc;
    double* localA = malloc(sizeof(double) * numElements);
    start = MPI_Wtime();
    MPI_Scatter(arr, numElements, MPI_DOUBLE, localA, numElements, MPI_DOUBLE, root, MPI_COMM_WORLD);
    end = MPI_Wtime();
    scatter = end - start;
    start = MPI_Wtime();
    sum = sumArray(localA, numElements);
    MPI_Reduce(&sum, &totalSum, 1 /* each rank contributes a single double */, MPI_DOUBLE, MPI_SUM, root, MPI_COMM_WORLD);
    end = MPI_Wtime();
    s = end - start;
    if(id == 0){
        printf("scatter: %f, sum: %f\n", scatter, s);
    }
    //free(localA);
    return totalSum;
}
Author: tjdevries, Project: cs_374_project, Lines: 30, Source: mpiArraySum.c
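The driver that calls this function is not shown. A hypothetical caller — assuming <mpi.h>, <stdio.h> and <stdlib.h>, and that sumArray from the same file simply sums a double array — could look like this; the array size and the fill value of 1.0 are arbitrary.

int main(int argc, char *argv[]) {
    int id, numProc;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &id);
    MPI_Comm_size(MPI_COMM_WORLD, &numProc);

    int num = 1000000;              /* illustrative size; broadcast again inside parallelsumArray */
    double *arr = NULL;
    if (id == 0) {                  /* only the root needs the full array */
        arr = malloc(sizeof(double) * num);
        for (int i = 0; i < num; i++) arr[i] = 1.0;
    }
    double total = parallelsumArray(arr, num, id, numProc);
    if (id == 0)
        /* roughly num * 1.0; remainder elements are dropped when num is not divisible by numProc */
        printf("total = %f\n", total);

    free(arr);
    MPI_Finalize();
    return 0;
}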
Example 7: type_map
static PyObject *scatter_array(PyObject *self, PyObject *args) {
    PyArrayObject *x;
    PyArrayObject *d;
    int source, error, count, numprocs;
    MPI_Datatype mpi_type;
    /* process the parameters */
    if (!PyArg_ParseTuple(args, "OOi", &x, &d, &source))
        return NULL;
    /* Input check and determination of MPI type */
    mpi_type = type_map(x, &count);
    if (!mpi_type) return NULL;
    error = MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
    count = count/numprocs;
    /* call the MPI routine */
    error = MPI_Scatter(x->data, count, mpi_type, d->data, count,
                        mpi_type, source, MPI_COMM_WORLD);
    if (error != 0) {
        rank_raise_mpi_runtime(error, "MPI_Scatter");
        return NULL;
    }
    Py_INCREF(Py_None);
    return (Py_None);
}
Author: congma, Project: pypar, Lines: 29, Source: mpiext.c
Example 8: main
int main(int argc, char* argv[]) {
    int numtasks, rank, sendcount, recvcount, source;
    float sendbuf[SIZE][SIZE] = {
        {1.0, 2.0, 3.0, 4.0},
        {5.0, 6.0, 7.0, 8.0},
        {9.0, 10.0, 11.0, 12.0},
        {13.0, 14.0, 15.0, 16.0} };
    float recvbuf[SIZE];
    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    if (numtasks == SIZE) {
        source = 1;
        sendcount = SIZE;
        recvcount = SIZE;
        MPI_Scatter(sendbuf,sendcount,MPI_FLOAT,recvbuf,recvcount,
                    MPI_FLOAT,source,MPI_COMM_WORLD);
        printf("rank= %d Results: %f %f %f %f\n",rank,recvbuf[0],
               recvbuf[1],recvbuf[2],recvbuf[3]);
    }
    else
        printf("Must specify %d processors. Terminating.\n",SIZE);
    MPI_Finalize();
}
Author: amigniox, Project: mpi-mycode, Lines: 30, Source: scatter.c
Example 9: main
int main(int argc, char *argv[]){
    int numTasks, rank, sendCount, recvCount, source;
    // Array which is distributed over the processes.
    float sendBuf[SIZE][SIZE] = {{1.0,2.0,3.0,4.0},{5.0,6.0,7.0,8.0},{9.0,10.0,11.0,12.0},{13.0,14.0,15.0,16.0}};
    float recvBuf[SIZE];
    // Initialize the MPI environment.
    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    MPI_Comm_size(MPI_COMM_WORLD,&numTasks);
    // The number of processes must be exactly SIZE (4).
    if(numTasks == SIZE){
        source = 1;
        sendCount = SIZE;
        recvCount = SIZE;
        // Scatter the array among the processes.
        MPI_Scatter(sendBuf,sendCount,MPI_FLOAT,recvBuf,recvCount,MPI_FLOAT,source,MPI_COMM_WORLD);
        printf("Process#%d Results: %f %f %f %f \n",rank,recvBuf[0],recvBuf[1],recvBuf[2],recvBuf[3]);
    }
    else{
        printf("Must specify %d processors. Terminating \n",SIZE);
    }
    MPI_Finalize();
}
Author: dhavallad, Project: MPI_Program, Lines: 25, Source: MPI_Scatter.c
Example 10: main
int main(int argc, char** argv) {
    if (argc != 2) {
        fprintf(stderr, "Usage: avg num_elements_per_proc\n");
        exit(1);
    }
    int num_elements_per_proc = atoi(argv[1]);
    // Seed the random number generator to get different results each time
    srand(time(NULL));
    MPI_Init(NULL, NULL);
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    // Create a random array of elements on the root process. Its total
    // size will be the number of elements per process times the number
    // of processes
    float *rand_nums = NULL;
    if (world_rank == 0) {
        rand_nums = create_rand_nums(num_elements_per_proc * world_size);
    }
    // For each process, create a buffer that will hold a subset of the entire
    // array
    float *sub_rand_nums = (float *)malloc(sizeof(float) * num_elements_per_proc);
    assert(sub_rand_nums != NULL);
    // Scatter the random numbers from the root process to all processes in
    // the MPI world
    MPI_Scatter(rand_nums, num_elements_per_proc, MPI_FLOAT, sub_rand_nums,
                num_elements_per_proc, MPI_FLOAT, 0, MPI_COMM_WORLD);
    // Compute the average of your subset
    float sub_avg = compute_avg(sub_rand_nums, num_elements_per_proc);
    // Gather all partial averages down to all the processes
    float *sub_avgs = (float *)malloc(sizeof(float) * world_size);
    assert(sub_avgs != NULL);
    MPI_Allgather(&sub_avg, 1, MPI_FLOAT, sub_avgs, 1, MPI_FLOAT, MPI_COMM_WORLD);
    // Now that we have all of the partial averages, compute the
    // total average of all numbers. Since we are assuming each process computed
    // an average across an equal amount of elements, this computation will
    // produce the correct answer.
    float avg = compute_avg(sub_avgs, world_size);
    printf("Avg of all elements from proc %d is %f\n", world_rank, avg);
    // Clean up
    if (world_rank == 0) {
        free(rand_nums);
    }
    free(sub_avgs);
    free(sub_rand_nums);
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
}
Author: ReiVerdugo, Project: parallelism-homework, Lines: 60, Source: all_avg.c
Example 11: main
int main (int argc, char* argv[]) {
    int miID, procesos, local[4], localplus[4], externo;
    srand(time(NULL));
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &miID);
    MPI_Comm_size(MPI_COMM_WORLD, &procesos);
    if (miID == 0)
    {
        local[0] = rand() % 100 + 1;
        local[1] = rand() % 100 + 1;
        local[2] = rand() % 100 + 1;
        local[3] = rand() % 100 + 1;
    }
    // MPI_Scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm)
    MPI_Scatter(&local, 1, MPI_INT, &externo, 1, MPI_INT, 0, MPI_COMM_WORLD);
    printf("Process %d holds the number %d\n", miID, externo);
    MPI_Gather(&externo, 1, MPI_INT, &localplus, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if (miID == 0)
        printf ("Data: [%d,%d,%d,%d]\n",localplus[0], localplus[1], localplus[2], localplus[3]);
    MPI_Finalize();
    return 0;
}
Author: AlexToga02, Project: MPI, Lines: 31, Source: skatter.c
Example 12: main
int main(int argc,char **argv){
    MPI_Init(&argc,&argv);
    int rank,size,r,q,*A,*B,*C,i,min=100;
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    MPI_Comm_size(MPI_COMM_WORLD,&size);
    r=N%(size);
    if(r==0) q=N/size;
    else q=(N+size-r)/size;
    B=(int*)calloc(q,sizeof(int));
    if(rank==0){
        A=(int*)calloc(q*size,sizeof(int)); /* padded to q*size so MPI_Scatter never reads past the buffer */
        C=(int*)calloc(size,sizeof(int));
        for(i=0;i<N;i++){
            A[i]=rand()%100;
            printf("%d\n",A[i]);
        }
        for(i=N;i<q*size;i++)
            A[i]=100; /* padding values larger than any generated element */
    }
    MPI_Scatter(A,q,MPI_INT,B,q,MPI_INT,0,MPI_COMM_WORLD);
    for(i=0;i<q;i++) if(min>B[i]) min=B[i];
    MPI_Gather(&min,1,MPI_INT,C,1,MPI_INT,0,MPI_COMM_WORLD);
    if(rank==0){
        for(i=1;i<size;i++){
            if(min>C[i]) min=C[i];
        }
        printf("The result is %d\n",min);
        free(A);
        free(C);
    }
    free(B);
    MPI_Finalize();
    return 0;
}
Author: kura-pl, Project: priry, Lines: 33, Source: minimum_gather.c
Example 13: mpi_scatter_
FORT_DLL_SPEC void FORT_CALL mpi_scatter_ ( void*v1, MPI_Fint *v2, MPI_Fint *v3, void*v4, MPI_Fint *v5, MPI_Fint *v6, MPI_Fint *v7, MPI_Fint *v8, MPI_Fint *ierr ){
#ifndef HAVE_MPI_F_INIT_WORKS_WITH_C
    if (MPIR_F_NeedInit){ mpirinitf_(); MPIR_F_NeedInit = 0; }
#endif
    if (v4 == MPIR_F_MPI_IN_PLACE) v4 = MPI_IN_PLACE;
    *ierr = MPI_Scatter( v1, (int)*v2, (MPI_Datatype)(*v3), v4, (int)*v5, (MPI_Datatype)(*v6), (int)*v7, (MPI_Comm)(*v8) );
}
Author: agrimaldi, Project: pmap, Lines: 8, Source: scatterf.c
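The MPIR_F_MPI_IN_PLACE branch above maps the Fortran in-place marker onto the C constant MPI_IN_PLACE. In C, the MPI standard lets the root pass MPI_IN_PLACE as its receive buffer so that the root's own segment of the send buffer is simply left where it is. A minimal sketch of that usage follows; bigbuf, chunk, rank, root and the chunk size of 4 are illustrative names and values, not taken from this project.

    int chunk[4];
    if (rank == root) {
        /* Root's segment of bigbuf stays in place; recvcount/recvtype are ignored at the root. */
        MPI_Scatter(bigbuf, 4, MPI_INT, MPI_IN_PLACE, 4, MPI_INT, root, MPI_COMM_WORLD);
    } else {
        /* Send arguments are ignored on non-root ranks. */
        MPI_Scatter(NULL, 0, MPI_INT, chunk, 4, MPI_INT, root, MPI_COMM_WORLD);
    }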
Example 14: mpi_scatter
void mpi_scatter (void *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
                  void *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype,
                  MPI_Fint *root, MPI_Fint *comm, MPI_Fint *__ierr)
{
    *__ierr = MPI_Scatter (sendbuf, *sendcount, MPI_Type_f2c (*sendtype),
                           recvbuf, *recvcount, MPI_Type_f2c (*recvtype),
                           *root, MPI_Comm_f2c (*comm));
}
Author: JeremyFyke, Project: cime, Lines: 8, Source: f_wrappers_pmpi.c
Example 15: mpi_random_seed_slave
void mpi_random_seed_slave(int pnode, int cnt) {
    int this_seed;
    MPI_Scatter(NULL, 1, MPI_INT, &this_seed, 1, MPI_INT, 0, comm_cart);
    RANDOM_TRACE(printf("%d: Received seed %d\n", this_node, this_seed));
    init_random_seed(this_seed);
}
Author: Clemson-MSE, Project: espresso, Lines: 8, Source: random.cpp
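The matching root-side call is not part of this snippet. Purely to illustrate how the NULL send buffer above pairs with a real one on the root — this is not the actual ESPResSo code — a sender over the same comm_cart communicator might look roughly like this; the function name, the n_nodes parameter and the time-based seeding scheme are all assumptions, and <stdlib.h> plus <time.h> are assumed to be included.

void mpi_random_seed_root(int n_nodes) {
    int my_seed;
    int *seeds = malloc(sizeof(int) * n_nodes);
    for (int i = 0; i < n_nodes; i++)
        seeds[i] = (int)time(NULL) + i;   /* one seed per rank; arbitrary scheme */
    /* Rank 0 sends one int to each rank and keeps its own entry. */
    MPI_Scatter(seeds, 1, MPI_INT, &my_seed, 1, MPI_INT, 0, comm_cart);
    init_random_seed(my_seed);
    free(seeds);
}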
Example 16: dd_scatter
void dd_scatter(gmx_domdec_t gmx_unused *dd, int gmx_unused nbytes, void gmx_unused *src, void gmx_unused *dest)
{
#ifdef GMX_MPI
    MPI_Scatter(src, nbytes, MPI_BYTE,
                dest, nbytes, MPI_BYTE,
                DDMASTERRANK(dd), dd->mpi_comm_all);
#endif
}
Author: ElsevierSoftwareX, Project: SOFTX-D-15-00003, Lines: 8, Source: domdec_network.c
Example 17: main
int main(int argc, char *argv[]){
    int numprocs,rank,namelen;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int size = 100000;
    int splitsize;
    //make arrays
    int *a = malloc(size * sizeof(int));
    int result = 0;
    int idx = 0;
    int sub_result = 0;
    double start,end;
    for(idx = 0;idx <size;idx++){
        a[idx]=idx;
    }
    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    MPI_Barrier(MPI_COMM_WORLD);
    if(rank==0)
        start = MPI_Wtime();
    splitsize = (int) size/numprocs;
    int *sub_a = malloc(splitsize * sizeof(int));
    int *sub_results = malloc(numprocs * sizeof(int));
    MPI_Scatter(a,splitsize,MPI_INT,sub_a,splitsize,MPI_INT,0,MPI_COMM_WORLD);
    for(idx=0;idx<splitsize;idx++)
        sub_result += sub_a[idx];
    MPI_Gather(&sub_result,1,MPI_INT,sub_results,1,MPI_INT,0,MPI_COMM_WORLD);
    for(idx=0;idx<numprocs;idx++)
        result += sub_results[idx];
    MPI_Barrier(MPI_COMM_WORLD);
    if(rank==0)
        end = MPI_Wtime();
    MPI_Finalize();
    if(rank==0){
        printf("\n results is %i, should be %i \n ",result,(size*(size-1))/2);
        printf("\n time was : %f \n ",end-start);
    }
    free(a);
    free(sub_a);
    free(sub_results);
    return 0;
}
Author: luisben, Project: compParaleloV2015, Lines: 58, Source: MPI_P7_SumaElementosVector.c
Example 18: main
int main(int argc, char *argv[])
{
    int rank,nprocs,n;
    int *arr,i,per,t,j;
    MPI_Init(&argc,&argv);
    MPI_Comm_rank (MPI_COMM_WORLD, &rank);
    MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
    if(rank==0)
        scanf("%d",&t);
    MPI_Bcast(&t,1,MPI_INT,0,MPI_COMM_WORLD);
    for(j=0;j<t;j++){
        if(rank == 0)
        {
            scanf("%d",&n);
            per = n/nprocs + 1;
            arr = (int *)malloc(sizeof(int)*(2*n+32));
            for(i=0;i<n;i++)
                scanf("%d",&arr[i*2]);
            for(i=0;i<n;i++)
                scanf("%d",&arr[2*i+1]);
        }
        MPI_Bcast(&per,1,MPI_INT,0,MPI_COMM_WORLD);
        int *client_arr = (int *)malloc(sizeof(int)*2*per);
        int *res = (int *)malloc(sizeof(int)*per);
        MPI_Scatter(arr, 2*per, MPI_INT, client_arr,
                    2*per, MPI_INT, 0, MPI_COMM_WORLD);
        for(i=0;i<per;i++)
            res[i] = client_arr[2*i+1] - client_arr[2*i];
        MPI_Gather(res, per, MPI_INT, arr, per, MPI_INT, 0,
                   MPI_COMM_WORLD);
        if(rank == 0)
        {
            for(i=0;i<n;i++)
                printf("%d ",arr[i]);
            printf("\n");
        }
    }
    /*if(rank == 0)
    {
        int i;
        for(i=1;i<nprocs;i++)
            MPI_Send(&arr[i], 1, MPI_INT, i, 0, MPI_COMM_WORLD);
    }*/
    /*if(rank!=0)
    {
        int x;
        MPI_Recv(&x,1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("%d: %d\n",rank,x);
    }
    */
    MPI_Finalize();
    return 0;
}
Author: arikj, Project: IHPC, Lines: 58, Source: planet.c
Example 19: master
void master(MPI_Comm ring_comm)
{
    int n;
    int *array = NULL;
    int *subarray = NULL;
    int numbers[2];
    int lowest_no = 0;
    int lowest_no_so_far = 0;
    read_input(&array, &n);
    /* Let the slaves know too how many numbers we have to work on. */
    MPI_Bcast(&n /* Bcast n to everyone */, 1, MPI_INT, 0, ring_comm);
    lowest_no_so_far = array[0]; /* Assume the first number is the lowest */
    subarray = array;
    for(int i=0; i<floorf((float)n/(2*p)); i++){
        /* Scatter data to all processors, including self */
        MPI_Scatter(subarray, 2 /* send 2 ints to everyone from the array */, MPI_INT,
                    numbers, 2 /* receive 2 ints from self */, MPI_INT, 0 /* id of root node */,
                    ring_comm);
        work_t work;
        work_result_t work_result;
        work.no1 = numbers[0];
        work.no2 = numbers[1];
        int min_no;
        do_work(work, &work_result);
        min_no = work_result.min_no;
        MPI_Reduce(&min_no, &lowest_no /* root receives 1 number from everyone with the reduction applied on the way */, 1 /* 1 number */, MPI_INT,
                   MPI_MIN, 0 /* id of root node */,
                   ring_comm);
        if(lowest_no < lowest_no_so_far) lowest_no_so_far = lowest_no;
        //printf("Min [ ");
        for(int j=0; j<p*2; j+=2){
            //printf("(%d, %d) ,",subarray[j], subarray[j+1]);
        }
        //printf("\b ] = %d.\n", lowest_no);
        //printf("Lowest no so far is %d\n", lowest_no_so_far);
        subarray = subarray + p*2;
    } /* for */
    printf("Lowest no is %d.\n", lowest_no_so_far);
    free(array);
    return;
} /* master */
Author: MetalGeek7, Project: UIC-Masters, Lines: 58, Source: hw1-ring-method1.c
Example 20: main
int main(int argc, char *argv[])
{
    int myrank, P, from, to, i, j, k;
    int tag = 666; /* any value will do */
    MPI_Status status;
    MPI_Init (&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank); /* who am i */
    MPI_Comm_size(MPI_COMM_WORLD, &P);      /* number of processors */
    /* Just to use the simple variants of MPI_Gather and MPI_Scatter we */
    /* impose that SIZE is divisible by P. By using the vector versions, */
    /* (MPI_Gatherv and MPI_Scatterv) it is easy to drop this restriction. */
    if (SIZE%P!=0) {
        if (myrank==0) printf("Matrix size not divisible by number of processors\n");
        MPI_Finalize();
        exit(-1);
    }
    from = myrank * SIZE/P;
    to = (myrank+1) * SIZE/P;
    /* Process 0 fills the input matrices and broadcasts them to the rest */
    /* (actually, only the relevant stripe of A is sent to each process) */
    if (myrank==0) {
        fill_matrix(A);
        fill_matrix(B);
    }
    MPI_Bcast (B, SIZE*SIZE, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Scatter (A, SIZE*SIZE/P, MPI_INT, A[from], SIZE*SIZE/P, MPI_INT, 0, MPI_COMM_WORLD);
    printf("computing slice %d (from row %d to %d)\n", myrank, from, to-1);
    for (i=from; i<to; i++)
        for (j=0; j<SIZE; j++) {
            C[i][j]=0;
            for (k=0; k<SIZE; k++)
                C[i][j] += A[i][k]*B[k][j];
        }
    MPI_Gather (C[from], SIZE*SIZE/P, MPI_INT, C, SIZE*SIZE/P, MPI_INT, 0, MPI_COMM_WORLD);
    if (myrank==0) {
        printf("\n\n");
        print_matrix(A);
        printf("\n\n\t * \n");
        print_matrix(B);
        printf("\n\n\t = \n");
        print_matrix(C);
        printf("\n\n");
    }
    MPI_Finalize();
    return 0;
}
Author: bharat1226, Project: golib, Lines: 57, Source: matrixMul.c
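As this example's own comments note, the SIZE % P == 0 restriction can be dropped with MPI_Scatterv, which takes per-rank counts and displacements. The fragment below is a rough sketch of how that could look; it reuses the example's globals A, SIZE, P and myrank (so it is not self-contained), and the row-splitting scheme (the first SIZE % P ranks take one extra row) is just one possible choice.

    int *sendcounts = malloc(P * sizeof(int));
    int *displs = malloc(P * sizeof(int));
    int base = SIZE / P, rem = SIZE % P, offset = 0;
    for (i = 0; i < P; i++) {
        int rows = base + (i < rem ? 1 : 0);  /* first `rem` ranks get one extra row */
        sendcounts[i] = rows * SIZE;          /* counts are in MPI_INT elements, not rows */
        displs[i] = offset;
        offset += rows * SIZE;
    }
    MPI_Scatterv(A, sendcounts, displs, MPI_INT,
                 A[displs[myrank] / SIZE], sendcounts[myrank], MPI_INT,
                 0, MPI_COMM_WORLD);
    free(sendcounts);
    free(displs);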
Note: the MPI_Scatter examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution or use should follow the license of the corresponding project. Do not reproduce without permission.