This page collects typical usage examples of the C++ MPI_Win_create function. If you are wondering what MPI_Win_create does, how to call it, or what real-world uses look like, the hand-picked code samples below should help.
Twenty MPI_Win_create code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
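Before the collected examples, here is a minimal sketch of the typical MPI_Win_create pattern: expose a local buffer as an RMA window, synchronize with MPI_Win_fence, access the target with MPI_Get, then free the window. The buffer length of 16 ints and the choice of rank 0 as the target are illustrative assumptions, not taken from any example below.
#include <mpi.h>
#include <stdio.h>
#define N 16
int main(int argc, char *argv[])
{
    int rank, i, local[N], remote[N];
    MPI_Win win;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (i = 0; i < N; i++) local[i] = rank * N + i;
    /* Expose local[] as an RMA window; disp_unit = sizeof(int) lets
       target displacements be given in elements rather than bytes. */
    MPI_Win_create(local, N * sizeof(int), sizeof(int),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);
    /* Active-target synchronization: every rank reads rank 0's array. */
    MPI_Win_fence(0, win);
    MPI_Get(remote, N, MPI_INT, 0, 0, N, MPI_INT, win);
    MPI_Win_fence(0, win);
    if (remote[0] != 0)
        printf("rank %d: unexpected value %d\n", rank, remote[0]);
    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}
The examples below show the same call combined with different synchronization modes (fence, lock/unlock) and info hints.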
Example 1: main
int main(int argc, char *argv[])
{
int rank, nprocs, A[SIZE2], B[SIZE2], i;
MPI_Win win;
int errs = 0;
MTest_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
if (nprocs != 2) {
printf("Run this program with 2 processes\n");
MPI_Abort(MPI_COMM_WORLD,1);
}
if (rank == 0) {
for (i=0; i<SIZE2; i++) A[i] = B[i] = i;
MPI_Win_create(NULL, 0, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &win);
for (i=0; i<SIZE1; i++) {
MPI_Win_lock(MPI_LOCK_SHARED, 1, 0, win);
MPI_Put(A+i, 1, MPI_INT, 1, i, 1, MPI_INT, win);
MPI_Win_unlock(1, win);
}
for (i=0; i<SIZE1; i++) {
MPI_Win_lock(MPI_LOCK_SHARED, 1, 0, win);
MPI_Get(B+i, 1, MPI_INT, 1, SIZE1+i, 1, MPI_INT, win);
MPI_Win_unlock(1, win);
}
MPI_Win_free(&win);
for (i=0; i<SIZE1; i++)
if (B[i] != (-4)*(i+SIZE1)) {
printf("Get Error: B[%d] is %d, should be %d\n", i, B[i], (-4)*(i+SIZE1));
errs++;
}
}
else { /* rank=1 */
for (i=0; i<SIZE2; i++) B[i] = (-4)*i;
MPI_Win_create(B, SIZE2*sizeof(int), sizeof(int), MPI_INFO_NULL,
MPI_COMM_WORLD, &win);
MPI_Win_free(&win);
for (i=0; i<SIZE1; i++) {
if (B[i] != i) {
printf("Put Error: B[%d] is %d, should be %d\n", i, B[i], i);
errs++;
}
}
}
/* if (rank==0) printf("Done\n");*/
MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
Developer: huangjun-pg, Project: mpich2-yarn, Lines: 60, Source: test4.c
Example 2: main
int main(int argc, char *argv[])
{
int n, myid, numprocs, i, ierr;
double PI25DT = 3.141592653589793238462643;
double mypi, pi, h, sum, x;
MPI_Win nwin, piwin;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
if (myid == 0) {
MPI_Win_create(&n, sizeof(int), 1, MPI_INFO_NULL,
MPI_COMM_WORLD, &nwin);
MPI_Win_create(&pi, sizeof(double), 1, MPI_INFO_NULL,
MPI_COMM_WORLD, &piwin);
}
else {
MPI_Win_create(MPI_BOTTOM, 0, 1, MPI_INFO_NULL,
MPI_COMM_WORLD, &nwin);
MPI_Win_create(MPI_BOTTOM, 0, 1, MPI_INFO_NULL,
MPI_COMM_WORLD, &piwin);
}
while (1) {
if (myid == 0) {
fprintf(stdout, "Enter the number of intervals: (0 quits) ");
fflush(stdout);
ierr=scanf("%d",&n);
pi = 0.0;
}
MPI_Win_fence(0, nwin);
if (myid != 0)
MPI_Get(&n, 1, MPI_INT, 0, 0, 1, MPI_INT, nwin);
MPI_Win_fence(0, nwin);
if (n == 0)
break;
else {
h = 1.0 / (double) n;
sum = 0.0;
for (i = myid + 1; i <= n; i += numprocs) {
x = h * ((double)i - 0.5);
sum += (4.0 / (1.0 + x*x));
}
mypi = h * sum;
MPI_Win_fence( 0, piwin);
MPI_Accumulate(&mypi, 1, MPI_DOUBLE, 0, 0, 1, MPI_DOUBLE,
MPI_SUM, piwin);
MPI_Win_fence(0, piwin);
if (myid == 0) {
fprintf(stdout, "pi is approximately %.16f, Error is %.16f\n",
pi, fabs(pi - PI25DT));
fflush(stdout);
}
}
}
MPI_Win_free(&nwin);
MPI_Win_free(&piwin);
MPI_Finalize();
return 0;
}
Developer: Julio-Anjos, Project: simgrid, Lines: 60, Source: ircpi.c
Example 3: inithash
void inithash(numb N)
{
MPI_Init(NULL,NULL);
status=MPI_Info_create(&info);
status=MPI_Info_set(info,"same_size","true");
comm=MPI_COMM_WORLD;
MPI_Comm_rank(comm,&rank);
MPI_Comm_size(comm,&size);
hashlen = 2*N+1;
nobj = 2*N/size + 1;
/* hashtab = calloc(hashlen,sizeof(obj));*/
hashtab = (numb *)calloc(nobj,sizeof(numb));
hashcount = (numb *)calloc(nobj,sizeof(numb));
if (hashtab == NULL || hashcount == NULL) exit(1);
collisions = 0;
count=0;
/* testArray = (int *) malloc(sizeof(int)*10);*/
/* Window sizes should match the calloc'd buffers: nobj elements of type numb. */
status = MPI_Win_create(hashtab,nobj*sizeof(numb),sizeof(numb),MPI_INFO_NULL,comm,&win);
status = MPI_Win_create(hashcount,nobj*sizeof(numb),sizeof(numb),MPI_INFO_NULL,comm,&win2);
}
Developer: nickaj, Project: mpi-hash, Lines: 26, Source: parallelhash.c
Example 4: SweptDiscretization2D
SweptDiscretization2D::SweptDiscretization2D(int n,int substeps,int dataPointSize,int totalConstants,int totalConservedQuantities,int remoteConstantsCount)
{
if(totalConstants<remoteConstantsCount)
{
printf("Remote Constants Cannot be more than the total constants!!\n");
exit(1);
}
this->n = n;
this->substeps = substeps;
this->dataPointSize = dataPointSize;
//this->constants = totalConstants;
this->constants = totalConstants + totalConservedQuantities;
this->remoteConstantsCount = remoteConstantsCount;
this->totalConservedQuantities = totalConservedQuantities;
this->checkForRemoteCycles = -1;
this->outputToFile = -1;
this->conserveCheckFreq = -1;
this->firstConservedCheck = true;
this->outputLength = 1;
panelSize = 2;
for(int i=n;i>2;i=i-2) panelSize += i;
panelSize *= 2;
int constantsToAdd = panelSize * this->constants;
panelSize = panelSize * substeps * dataPointSize;
communicationSize = panelSize + constantsToAdd;
foundationSize = ((n+2) * (n+2) * substeps * dataPointSize + ((n+2) * (n+2) * this->constants));
resultArray = NULL;
MPI_Alloc_mem(foundationSize * sizeof(double), MPI_INFO_NULL, &foundation);
MPI_Win_create(foundation,foundationSize * sizeof(double),1, MPI_INFO_NULL,MPI_COMM_WORLD, &foundationWindow);
if(remoteConstantsCount > 0)
{
MPI_Alloc_mem(n * n * remoteConstantsCount * sizeof(double), MPI_INFO_NULL, &remoteConstants);
MPI_Win_create(remoteConstants,n * n * remoteConstantsCount * sizeof(double),1, MPI_INFO_NULL,MPI_COMM_WORLD, &constantsWindow);
constantsArrayBytes = (unsigned char*) malloc(n * n * remoteConstantsCount * sizeof(double) * pg.mpiSize);
}
if(totalConservedQuantities > 0)
{
convervedVariables = new double[totalConservedQuantities];
memset(convervedVariables,0,sizeof(double) * totalConservedQuantities); // zero-fill; the character '0' would set every byte to 0x30
}
staging = new double[foundationSize];
northPanel = new double[panelSize + constantsToAdd];
southPanel = new double[panelSize + constantsToAdd];
eastPanel = new double[panelSize + constantsToAdd];
westPanel = new double[panelSize + constantsToAdd];
//Define Swept2D components
up = new UpPyramid(n,foundation,staging,substeps,dataPointSize,this->constants,totalConservedQuantities);
dp = new DownPyramid(n,foundation,staging,substeps,dataPointSize,this->constants,totalConservedQuantities);
hb = new HorizontalBridge(n,foundation,staging,substeps,dataPointSize,this->constants,totalConservedQuantities);
vb = new VerticalBridge(n,foundation,staging,substeps,dataPointSize,this->constants,totalConservedQuantities);
firstRun = true;
}
Developer: hubailmm, Project: Euler2D, Lines: 55, Source: SweptDiscretization2D.cpp
Example 5: allocate_memory
void allocate_memory(int rank, char *rbuf, int size, WINDOW type, MPI_Win *win)
{
MPI_Status reqstat;
switch (type){
case WIN_DYNAMIC:
MPI_CHECK(MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, win));
MPI_CHECK(MPI_Win_attach(*win, (void *)rbuf, size));
MPI_CHECK(MPI_Get_address(rbuf, &sdisp_local));
if(rank == 0){
MPI_CHECK(MPI_Send(&sdisp_local, 1, MPI_AINT, 1, 1, MPI_COMM_WORLD));
MPI_CHECK(MPI_Recv(&sdisp_remote, 1, MPI_AINT, 1, 1, MPI_COMM_WORLD, &reqstat));
}
else{
MPI_CHECK(MPI_Recv(&sdisp_remote, 1, MPI_AINT, 0, 1, MPI_COMM_WORLD, &reqstat));
MPI_CHECK(MPI_Send(&sdisp_local, 1, MPI_AINT, 0, 1, MPI_COMM_WORLD));
}
break;
case WIN_CREATE:
MPI_CHECK(MPI_Win_create(rbuf, size, 1, MPI_INFO_NULL, MPI_COMM_WORLD, win));
break;
default:
MPI_CHECK(MPI_Win_allocate(size, 1, MPI_INFO_NULL, MPI_COMM_WORLD, rbuf, win));
break;
}
}
Developer: Cai900205, Project: test, Lines: 26, Source: osu_get_acc_latency.c
Example 6: main
int main( int argc, char *argv[] )
{
int buf[2];
MPI_Win win;
MPI_Errhandler newerr;
int i;
MTest_Init( &argc, &argv );
/* Run this test multiple times to expose storage leaks (we found a leak
of error handlers with this test) */
for (i=0;i<1000; i++) {
calls = 0;
MPI_Win_create( buf, 2*sizeof(int), sizeof(int),
MPI_INFO_NULL, MPI_COMM_WORLD, &win );
mywin = win;
MPI_Win_create_errhandler( eh, &newerr );
MPI_Win_set_errhandler( win, newerr );
MPI_Win_call_errhandler( win, MPI_ERR_OTHER );
MPI_Errhandler_free( &newerr );
if (calls != 1) {
errs++;
printf( "Error handler not called\n" );
}
MPI_Win_free( &win );
}
MTest_Finalize( errs );
MPI_Finalize();
return 0;
}
Developer: OngOngoing, Project: 219351_homework, Lines: 34, Source: wincall.c
Example 7: NumaDistStaticGrid::init
asagi::Grid::Error grid::NumaDistStaticGrid::init() {
unsigned long blockSize = getTotalBlockSize();
unsigned long masterBlockCount = getLocalBlockCount();
asagi::Grid::Error error;
// Create the local cache
error = NumaLocalCacheGrid::init();
if (error != asagi::Grid::SUCCESS)
return error;
// Distribute the blocks
error = NumaLocalStaticGrid::init();
if (error != asagi::Grid::SUCCESS)
return error;
// Create the mpi window for distributed blocks
if(pthread_equal(m_threadHandle.getMasterthreadId(), pthread_self())){
if (MPI_Win_create(m_threadHandle.getStaticPtr(m_threadHandle.getMasterthreadId(),m_id),
getType().getSize() * blockSize * masterBlockCount,
getType().getSize(),
MPI_INFO_NULL,
getMPICommunicator(),
&m_threadHandle.mpiWindow) != MPI_SUCCESS)
return asagi::Grid::MPI_ERROR;
}
return asagi::Grid::SUCCESS;
}
Developer: Manuel1605, Project: ASAGI, Lines: 27, Source: numadiststaticgrid.cpp
Example 8: ARMCIX_Create_mutexes_hdl
/** Create a mutex group. Collective.
*
* @param[in] count Number of mutexes to create on the calling process
* @return Handle to the mutex group
*/
armcix_mutex_hdl_t ARMCIX_Create_mutexes_hdl(int count, ARMCI_Group *pgroup) {
int ierr, i;
armcix_mutex_hdl_t hdl;
hdl = malloc(sizeof(struct armcix_mutex_hdl_s));
ARMCII_Assert(hdl != NULL);
MPI_Comm_dup(pgroup->comm, &hdl->comm);
if (count > 0) {
MPI_Alloc_mem(count*sizeof(long), MPI_INFO_NULL, &hdl->base);
ARMCII_Assert(hdl->base != NULL);
} else {
hdl->base = NULL;
}
hdl->count = count;
// Initialize mutexes to 0
for (i = 0; i < count; i++)
hdl->base[i] = 0;
ierr = MPI_Win_create(hdl->base, count*sizeof(long), sizeof(long) /* displacement size */,
MPI_INFO_NULL, hdl->comm, &hdl->window);
ARMCII_Assert(ierr == MPI_SUCCESS);
return hdl;
}
Developer: jeffhammond, Project: armci-mpi, Lines: 33, Source: mutex_hdl_spin.c
Example 9: serverWinCreate
static void
serverWinCreate(void)
{
int ranks[1], modelID;
MPI_Comm commCalc = commInqCommCalc ();
MPI_Group groupCalc;
int nProcsModel = commInqNProcsModel ();
MPI_Info no_locks_info;
xmpi(MPI_Info_create(&no_locks_info));
xmpi(MPI_Info_set(no_locks_info, "no_locks", "true"));
xmpi(MPI_Win_create(MPI_BOTTOM, 0, 1, no_locks_info, commCalc, &getWin));
/* target group */
ranks[0] = nProcsModel;
xmpi ( MPI_Comm_group ( commCalc, &groupCalc ));
xmpi ( MPI_Group_excl ( groupCalc, 1, ranks, &groupModel ));
rxWin = xcalloc((size_t)nProcsModel, sizeof (rxWin[0]));
size_t totalBufferSize = collDefBufferSizes();
rxWin[0].buffer = (unsigned char*) xmalloc(totalBufferSize);
size_t ofs = 0;
for ( modelID = 1; modelID < nProcsModel; modelID++ )
{
ofs += rxWin[modelID - 1].size;
rxWin[modelID].buffer = rxWin[0].buffer + ofs;
}
xmpi(MPI_Info_free(&no_locks_info));
xdebug("%s", "created mpi_win, allocated getBuffer");
}
Developer: AZed, Project: cdo, Lines: 32, Source: pio_server.c
Example 10: main
int main (int argc,char *argv[]) {
int i;
double w[NEL];
MPI_Aint win_size,warr_size;
MPI_Win *win;
win_size=sizeof(MPI_Win);
warr_size=sizeof(double)*NEL; /* bytes in w[]; sizeof(MPI_DOUBLE) would measure the datatype handle, not a double */
MPI_Init (&argc, &argv);
for(i=0;i<NTIMES;i++) {
MPI_Alloc_mem(win_size,MPI_INFO_NULL,&win);
MPI_Win_create(w,warr_size,sizeof(double),MPI_INFO_NULL,MPI_COMM_WORLD,win);
MPI_Win_free(win);
MPI_Free_mem(win);
}
MPI_Finalize();
return 0;
}
Developer: gpaulsen, Project: ompi-www, Lines: 26, Source: mleak.win.c
Example 11: main
int main(int argc, char *argv[])
{
int errs = 0, err;
int rank, size;
int *buf, bufsize;
int *result;
int *rmabuf, rsize, rcount;
MPI_Comm comm;
MPI_Win win;
MPI_Request req;
MPI_Datatype derived_dtp;
MTest_Init(&argc, &argv);
bufsize = 256 * sizeof(int);
buf = (int *) malloc(bufsize);
if (!buf) {
fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
result = (int *) malloc(bufsize);
if (!result) {
fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
rcount = 16;
rsize = rcount * sizeof(int);
rmabuf = (int *) malloc(rsize);
if (!rmabuf) {
fprintf(stderr, "Unable to allocated %d bytes\n", rsize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_Type_contiguous(2, MPI_INT, &derived_dtp);
MPI_Type_commit(&derived_dtp);
/* The following loop is used to run through a series of communicators
* that are subsets of MPI_COMM_WORLD, of size 1 or greater. */
while (MTestGetIntracommGeneral(&comm, 1, 1)) {
int count = 0;
if (comm == MPI_COMM_NULL)
continue;
/* Determine the sender and receiver */
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
MPI_Win_create(buf, bufsize, 2 * sizeof(int), MPI_INFO_NULL, comm, &win);
/* To improve reporting of problems about operations, we
* change the error handler to errors return */
MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);
/** TEST OPERATIONS USING ACTIVE TARGET (FENCE) SYNCHRONIZATION **/
MPI_Win_fence(0, win);
TEST_FENCE_OP("Put", MPI_Put(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
);
TEST_FENCE_OP("Get", MPI_Get(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
);
Developer: NexMirror, Project: MPICH, Lines: 60, Source: rmazero.c
Example 12: main
int main(int argc, char *argv[])
{
MPI_Win win;
int flag, tmp, rank;
int base[1024], errs = 0;
MPI_Request req;
MTest_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Win_create(base, 1024 * sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);
if (rank == 0) {
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 0, 0, win);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Win_unlock(0, win);
} else {
MPI_Barrier(MPI_COMM_WORLD);
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 0, 0, win);
MPI_Rput(&tmp, 1, MPI_INT, 0, 0, 1, MPI_INT, win, &req);
MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Win_unlock(0, win);
}
MPI_Win_free(&win);
MTest_Finalize(errs);
return MTestReturnValue(errs);
}
Developer: jeffhammond, Project: mpich, Lines: 31, Source: nb_test.c
Example 13: main
int main(int argc, char **argv) {
int i, rank, nproc, mpi_type_size;
int errors = 0, all_errors = 0;
TYPE_C *val_ptr, *res_ptr;
MPI_Win win;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nproc);
MPI_Type_size(TYPE_MPI, &mpi_type_size);
assert(mpi_type_size == sizeof(TYPE_C));
val_ptr = malloc(sizeof(TYPE_C)*nproc);
res_ptr = malloc(sizeof(TYPE_C)*nproc);
MPI_Win_create(val_ptr, sizeof(TYPE_C)*nproc, sizeof(TYPE_C), MPI_INFO_NULL, MPI_COMM_WORLD, &win);
/* Test self communication */
reset_vars(val_ptr, res_ptr, win);
for (i = 0; i < ITER; i++) {
TYPE_C one = 1, result = -1;
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, win);
MPI_Fetch_and_op(&one, &result, TYPE_MPI, rank, 0, MPI_SUM, win);
MPI_Win_unlock(rank, win);
}
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, win);
if ( CMP(val_ptr[0], ITER) ) {
SQUELCH( printf("%d->%d -- SELF: expected "TYPE_FMT", got "TYPE_FMT"\n", rank, rank, (TYPE_C) ITER, val_ptr[0]); );
errors++;
}
Developer: Julio-Anjos, Project: simgrid, Lines: 35, Source: fetch_and_op.c
Example 14: main
int main(int argc, char *argv[])
{
int rank, nproc;
int errors = 0, all_errors = 0;
int buf, my_buf;
MPI_Win win;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nproc);
MPI_Win_create(&buf, sizeof(int), sizeof(int),
MPI_INFO_NULL, MPI_COMM_WORLD, &win);
MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);
MPI_Win_fence(0, win);
MPI_Win_lock(MPI_LOCK_SHARED, 0, MPI_MODE_NOCHECK, win);
MPI_Get(&my_buf, 1, MPI_INT, 0, 0, 1, MPI_INT, win);
MPI_Win_unlock(0, win);
/* This should fail because the window is no longer in a fence epoch */
CHECK_ERR(MPI_Get(&my_buf, 1, MPI_INT, 0, 0, 1, MPI_INT, win));
MPI_Win_fence(0, win);
MPI_Win_free(&win);
MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0 && all_errors == 0) printf(" No Errors\n");
MPI_Finalize();
return 0;
}
Developer: abhinavvishnu, Project: matex, Lines: 35, Source: win_sync_lock_fence.c
Example 15: main
int main(int argc, char *argv[])
{
int rank;
int errors = 0, all_errors = 0;
int buf;
MPI_Win win;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Win_create(&buf, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);
MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);
/* This should fail because the window is not locked. */
CHECK_ERR(MPI_Win_unlock(0, win));
MPI_Win_free(&win);
MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0 && all_errors == 0)
printf(" No Errors\n");
MPI_Finalize();
return 0;
}
Developer: Niharikareddy, Project: mpich, Lines: 27, Source: win_sync_unlock.c
Example 16: main
int main(int argc, char **argv)
{
int rank, nproc;
int out_val, i, counter = 0;
MPI_Win win;
MTest_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nproc);
MPI_Win_create(&counter, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);
for (i = 0; i < NITER; i++) {
MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
MPI_Get_accumulate(&acc_val, 1, MPI_INT, &out_val, 1, MPI_INT,
rank, 0, 1, MPI_INT, MPI_SUM, win);
MPI_Win_unlock(rank, win);
if (out_val != acc_val * i) {
errs++;
printf("Error: got %d, expected %d at iter %d\n", out_val, acc_val * i, i);
break;
}
}
MPI_Win_free(&win);
MTest_Finalize(errs);
return MTestReturnValue(errs);
}
Developer: jeffhammond, Project: mpich, Lines: 32, Source: get_acc_local.c
Example 17: initialize
void initialize(field * temperature1, field * temperature2,
parallel_data * parallel)
{
int i, j;
// Allocate also ghost layers
temperature1->data =
malloc_2d(temperature1->nx + 2, temperature1->ny + 2);
temperature2->data =
malloc_2d(temperature2->nx + 2, temperature2->ny + 2);
// Create RMA window. In principle, only borders would be needed
// but it is simpler to expose the whole array
MPI_Win_create(&temperature1->data[0][0],
               (temperature1->nx + 2) * (temperature1->ny + 2) * sizeof(double),
               sizeof(double), MPI_INFO_NULL, parallel->comm,
               &temperature1->rma_window);
MPI_Win_create(&temperature2->data[0][0],
               (temperature2->nx + 2) * (temperature2->ny + 2) * sizeof(double),
               sizeof(double), MPI_INFO_NULL, parallel->comm,
               &temperature2->rma_window);
// Initialize to zero
memset(temperature1->data[0], 0.0,
       (temperature1->nx + 2) * (temperature1->ny + 2) * sizeof(double));
for (i = 0; i < temperature1->nx + 2; i++) {
temperature1->data[i][0] = 30.0;
temperature1->data[i][temperature1->ny + 1] = -10.0;
}
if (parallel->rank == 0) {
for (j = 0; j < temperature1->ny + 2; j++)
temperature1->data[0][j] = 15.0;
} else if (parallel->rank == parallel->size - 1) {
for (j = 0; j < temperature1->ny + 2; j++)
temperature1->data[temperature1->nx + 1][j] = -25.0;
}
copy_field(temperature1, temperature2);
}
Developer: natj, Project: csc-ss14, Lines: 46, Source: ex13_heat_one_sided.c
Example 18: main
int main(int argc, char *argv[])
{
int errs = 0, err;
int rank, size;
int *buf, bufsize;
int *result;
int *rmabuf, rsize, rcount;
MPI_Comm comm;
MPI_Win win;
MPI_Request req;
MTest_Init(&argc, &argv);
bufsize = 256 * sizeof(int);
buf = (int *) malloc(bufsize);
if (!buf) {
fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
result = (int *) malloc(bufsize);
if (!result) {
fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
rcount = 16;
rsize = rcount * sizeof(int);
rmabuf = (int *) malloc(rsize);
if (!rmabuf) {
fprintf(stderr, "Unable to allocated %d bytes\n", rsize);
MPI_Abort(MPI_COMM_WORLD, 1);
}
/* The following illustrates the use of the routines to
* run through a selection of communicators and datatypes.
* Use subsets of these for tests that do not involve combinations
* of communicators, datatypes, and counts of datatypes */
while (MTestGetIntracommGeneral(&comm, 1, 1)) {
if (comm == MPI_COMM_NULL)
continue;
/* Determine the sender and receiver */
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
MPI_Win_create(buf, bufsize, sizeof(int), MPI_INFO_NULL, comm, &win);
/* To improve reporting of problems about operations, we
* change the error handler to errors return */
MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);
/** TEST OPERATIONS USING ACTIVE TARGET (FENCE) SYNCHRONIZATION **/
MPI_Win_fence(0, win);
TEST_FENCE_OP("Put",
MPI_Put(rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0, rcount, MPI_INT, win);
);
TEST_FENCE_OP("Get",
MPI_Get(rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0, rcount, MPI_INT, win);
);
Developer: NexMirror, Project: MPICH, Lines: 58, Source: rmanull.c
Example 19: _ZMPI_Alltoall_int_proclists_put
static int _ZMPI_Alltoall_int_proclists_put(int alloc_mem, int nphases, int *sendbuf, int nsprocs, int *sprocs, int *recvbuf, int nrprocs, int *rprocs, MPI_Comm comm)
{
int i, p, size, rank, *rcounts_put;
MPI_Win win;
MPI_Comm_size(comm, &size);
MPI_Comm_rank(comm, &rank);
if (alloc_mem) MPI_Alloc_mem(size * sizeof(int), MPI_INFO_NULL, &rcounts_put);
else rcounts_put = recvbuf;
if (nrprocs >= 0)
for (i = 0; i < nrprocs; ++i) rcounts_put[rprocs[i]] = DEFAULT_INT;
else
for (i = 0; i < size; ++i) rcounts_put[i] = DEFAULT_INT;
MPI_Win_create(rcounts_put, size * sizeof(int), sizeof(int), MPI_INFO_NULL, comm, &win);
MPI_Win_fence(MPI_MODE_NOSTORE|MPI_MODE_NOPRECEDE, win);
for (p = 0; p < nphases; ++p)
{
/* printf("%d: phase = %d of %d\n", rank, p, nphases);*/
if (rank % nphases == p)
{
if (nsprocs >= 0)
{
for (i = 0; i < nsprocs; ++i)
if (sendbuf[sprocs[i]] != DEFAULT_INT) MPI_Put(&sendbuf[sprocs[i]], 1, MPI_INT, sprocs[i], rank, 1, MPI_INT, win);
} else
{
for (i = 0; i < size; ++i)
if (sendbuf[i] != DEFAULT_INT) MPI_Put(&sendbuf[i], 1, MPI_INT, i, rank, 1, MPI_INT, win);
}
}
if (p < nphases - 1) MPI_Win_fence(0, win);
}
MPI_Win_fence(MPI_MODE_NOPUT|MPI_MODE_NOSUCCEED, win);
MPI_Win_free(&win);
if (alloc_mem)
{
if (nrprocs >= 0)
for (i = 0; i < nrprocs; ++i) recvbuf[rprocs[i]] = rcounts_put[rprocs[i]];
else
for (i = 0; i < size; ++i) recvbuf[i] = rcounts_put[i];
MPI_Free_mem(rcounts_put);
}
return MPI_SUCCESS;
}
Developer: fweik, Project: scafacos, Lines: 57, Source: zmpi_tools.c
Example 20: init_scatter_constant
scatter_constant* init_scatter_constant(void* array, size_t array_count, size_t elt_size, void* constant, size_t nrequests_max, MPI_Datatype dt) {
scatter_constant* sc = (scatter_constant*)xmalloc(sizeof(scatter_constant));
sc->array = array;
sc->elt_size = elt_size;
sc->constant = constant;
sc->datatype = dt;
sc->valid = 0;
MPI_Win_create(array, array_count * elt_size, elt_size, MPI_INFO_NULL, MPI_COMM_WORLD, &sc->win);
return sc;
}
Developer: RockyMeadow, Project: SimgridServerService, Lines: 10, Source: onesided.c
Note: The MPI_Win_create examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution or reuse should follow each project's License. Do not republish without permission.