This article collects typical usage examples of the C++ function MPI_Type_contiguous. If you are wondering what MPI_Type_contiguous does, how to call it, or what real-world uses look like, the curated code examples below should help.
20 code examples of MPI_Type_contiguous are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
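Before the collected examples, here is a minimal, self-contained sketch of the usual MPI_Type_contiguous life cycle: create, commit, use in communication, free. The three-double payload, tag, and ranks are purely illustrative.

#include <mpi.h>
#include <stdio.h>

/* run with at least two ranks, e.g. mpirun -np 2 ./a.out */
int main(int argc, char *argv[])
{
    int rank;
    double triple[3] = { 0.0, 0.0, 0.0 };
    MPI_Datatype triple_t;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* one element of triple_t covers three consecutive doubles */
    MPI_Type_contiguous(3, MPI_DOUBLE, &triple_t);
    MPI_Type_commit(&triple_t);    /* required before use in communication */

    if (rank == 0) {
        triple[0] = 1.0; triple[1] = 2.0; triple[2] = 3.0;
        MPI_Send(triple, 1, triple_t, 1, 0, MPI_COMM_WORLD);
    } else if (rank == 1) {
        MPI_Recv(triple, 1, triple_t, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("received %g %g %g\n", triple[0], triple[1], triple[2]);
    }

    MPI_Type_free(&triple_t);
    MPI_Finalize();
    return 0;
}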
Example 1: _setup_mpsort_mpi
static void
_setup_mpsort_mpi(struct crmpistruct * o,
                  struct crstruct * d,
                  void * myoutbase, size_t myoutnmemb,
                  MPI_Comm comm)
{
    o->comm = comm;
    MPI_Comm_size(comm, &o->NTask);
    MPI_Comm_rank(comm, &o->ThisTask);

    o->mybase = d->base;
    o->mynmemb = d->nmemb;
    o->myoutbase = myoutbase;
    o->myoutnmemb = myoutnmemb;

    MPI_Allreduce(&o->mynmemb, &o->nmemb, 1, MPI_TYPE_PTRDIFF, MPI_SUM, comm);
    MPI_Allreduce(&o->myoutnmemb, &o->outnmemb, 1, MPI_TYPE_PTRDIFF, MPI_SUM, comm);

    if(o->outnmemb != o->nmemb) {
        fprintf(stderr, "total number of output items does not match the input: %ld != %ld\n",
                o->outnmemb, o->nmemb);
        abort();
    }

    /* each record's radix key and payload are treated as opaque byte blobs */
    MPI_Type_contiguous(d->rsize, MPI_BYTE, &o->MPI_TYPE_RADIX);
    MPI_Type_commit(&o->MPI_TYPE_RADIX);
    MPI_Type_contiguous(d->size, MPI_BYTE, &o->MPI_TYPE_DATA);
    MPI_Type_commit(&o->MPI_TYPE_DATA);
}
Author: rainwoodman; project: MP-sort; lines: 34; source file: mpsort-mpi.c
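Because each record is described to MPI as a single element of rsize or size bytes, later communication can count in records instead of bytes. A hypothetical fragment (the buffer and neighbor-rank names are assumptions for illustration, not MP-sort code) might look like:

    /* exchange nsend/nrecv whole records; counts are in records, not bytes */
    MPI_Sendrecv(sendbuf, nsend, o->MPI_TYPE_DATA, rankto,   0,
                 recvbuf, nrecv, o->MPI_TYPE_DATA, rankfrom, 0,
                 o->comm, MPI_STATUS_IGNORE);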
Example 2: create_indexed_gap_ddt
static MPI_Datatype
create_indexed_gap_ddt( void )
{
    ddt_gap dt[2];
    MPI_Datatype dt1, dt2, dt3;
    int bLength[2] = { 2, 1 };
    MPI_Datatype types[2] = { MPI_INT, MPI_FLOAT };
    MPI_Aint displ[2];

    MPI_Get_address( &(dt[0].is[0].i[0]), &(displ[0]) );
    MPI_Get_address( &(dt[0].is[0].f), &(displ[1]) );
    displ[1] -= displ[0];
    displ[0] -= displ[0];
    MPI_Type_create_struct( 2, bLength, displ, types, &dt1 );
    /*MPI_DDT_DUMP( dt1 );*/

    MPI_Type_contiguous( 3, dt1, &dt2 );
    /*MPI_DDT_DUMP( dt2 );*/

    bLength[0] = 1;
    bLength[1] = 1;
    MPI_Get_address( &(dt[0].v1), &(displ[0]) );
    MPI_Get_address( &(dt[0].is[0]), &(displ[1]) );
    displ[1] -= displ[0];
    displ[0] -= displ[0];
    types[0] = MPI_INT;
    types[1] = dt2;
    MPI_Type_create_struct( 2, bLength, displ, types, &dt3 );
    /*MPI_DDT_DUMP( dt3 );*/

    MPI_Type_free( &dt1 );
    MPI_Type_free( &dt2 );
    MPI_Type_contiguous( 10, dt3, &dt1 );
    MPI_DDT_DUMP( dt1 );
    MPI_Type_free( &dt3 );
    MPI_Type_commit( &dt1 );
    return dt1;
}
Author: Dissolubilis; project: ompi-svn-mirror; lines: 35; source file: to_self.c
Example 3: MPIOI_Type_block
/* Returns MPI_SUCCESS on success, an MPI error code on failure. Code above
 * needs to call MPIO_Err_return_xxx.
 */
int MPIOI_Type_block(int *array_of_gsizes, int dim, int ndims, int nprocs,
                     int rank, int darg, int order, MPI_Aint orig_extent,
                     MPI_Datatype type_old, MPI_Datatype *type_new,
                     MPI_Aint *st_offset)
{
    /* nprocs = no. of processes in dimension dim of grid
       rank = coordinate of this process in dimension dim */
    int blksize, global_size, mysize, i, j;
    MPI_Aint stride;

    global_size = array_of_gsizes[dim];

    if (darg == MPI_DISTRIBUTE_DFLT_DARG)
        blksize = (global_size + nprocs - 1)/nprocs;
    else {
        blksize = darg;

        /* --BEGIN ERROR HANDLING-- */
        if (blksize <= 0) {
            return MPI_ERR_ARG;
        }
        if (blksize * nprocs < global_size) {
            return MPI_ERR_ARG;
        }
        /* --END ERROR HANDLING-- */
    }

    j = global_size - blksize*rank;
    mysize = ADIOI_MIN(blksize, j);
    if (mysize < 0) mysize = 0;

    stride = orig_extent;
    if (order == MPI_ORDER_FORTRAN) {
        if (dim == 0)
            MPI_Type_contiguous(mysize, type_old, type_new);
        else {
            for (i=0; i<dim; i++) stride *= array_of_gsizes[i];
            MPI_Type_hvector(mysize, 1, stride, type_old, type_new);
        }
    }
    else {
        if (dim == ndims-1)
            MPI_Type_contiguous(mysize, type_old, type_new);
        else {
            for (i=ndims-1; i>dim; i--) stride *= array_of_gsizes[i];
            MPI_Type_hvector(mysize, 1, stride, type_old, type_new);
        }
    }

    *st_offset = blksize * rank;
    /* in terms of no. of elements of type oldtype in this dimension */
    if (mysize == 0) *st_offset = 0;

    return MPI_SUCCESS;
}
Author: hpc; project: mvapich-cce; lines: 60; source file: ad_darray.c
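As a worked example of the arithmetic above (values chosen for illustration): with global_size = 10, nprocs = 4 and darg = MPI_DISTRIBUTE_DFLT_DARG, blksize = (10 + 4 - 1)/4 = 3. Rank 2 then has j = 10 - 3*2 = 4, so mysize = min(3, 4) = 3; rank 3 has j = 10 - 9 = 1, so mysize = 1; and any rank with blksize*rank >= global_size would see a negative j, which the code clamps to mysize = 0.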
Example 4: handle
/*@
    MPI_File_get_view - Returns the file view

Input Parameters:
. fh - file handle (handle)

Output Parameters:
. disp - displacement (nonnegative integer)
. etype - elementary datatype (handle)
. filetype - filetype (handle)
. datarep - data representation (string)

.N fortran
@*/
int MPI_File_get_view(MPI_File fh, MPI_Offset * disp, MPI_Datatype * etype,
                      MPI_Datatype * filetype, char *datarep)
{
    int error_code;
    ADIO_File adio_fh;
    static char myname[] = "MPI_FILE_GET_VIEW";
    int i, j, k, combiner;
    MPI_Datatype copy_etype, copy_filetype;

    ROMIO_THREAD_CS_ENTER();

    adio_fh = MPIO_File_resolve(fh);

    /* --BEGIN ERROR HANDLING-- */
    MPIO_CHECK_FILE_HANDLE(adio_fh, myname, error_code);

    if (datarep == NULL) {
        error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                          myname, __LINE__, MPI_ERR_ARG, "**iodatarepnomem", 0);
        error_code = MPIO_Err_return_file(adio_fh, error_code);
        goto fn_exit;
    }
    /* --END ERROR HANDLING-- */

    *disp = adio_fh->disp;
    ADIOI_Strncpy(datarep,
                  (adio_fh->is_external32 ? "external32" : "native"), MPI_MAX_DATAREP_STRING);

    MPI_Type_get_envelope(adio_fh->etype, &i, &j, &k, &combiner);
    if (combiner == MPI_COMBINER_NAMED)
        *etype = adio_fh->etype;
    else {
        /* FIXME: It is wrong to use MPI_Type_contiguous; the user could choose to
         * re-implement MPI_Type_contiguous in an unexpected way. Either use
         * MPID_Barrier as in MPICH or PMPI_Type_contiguous */
        MPI_Type_contiguous(1, adio_fh->etype, &copy_etype);

        /* FIXME: Ditto for MPI_Type_commit - use NMPI or PMPI */
        MPI_Type_commit(&copy_etype);
        *etype = copy_etype;
    }
    /* FIXME: Ditto for MPI_Type_xxx - use NMPI or PMPI */
    MPI_Type_get_envelope(adio_fh->filetype, &i, &j, &k, &combiner);
    if (combiner == MPI_COMBINER_NAMED)
        *filetype = adio_fh->filetype;
    else {
        MPI_Type_contiguous(1, adio_fh->filetype, &copy_filetype);
        MPI_Type_commit(&copy_filetype);
        *filetype = copy_filetype;
    }

fn_exit:
    ROMIO_THREAD_CS_EXIT();
    return MPI_SUCCESS;
}
Author: ParaStation; project: psmpi2; lines: 71; source file: get_view.c
Example 5: MPI_Init
void CMPICommunicator::Init(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    MPI_Type_contiguous(sizeof(Cell), MPI_BYTE, &cellDatatype);
    MPI_Type_commit(&cellDatatype);

    MPI_Type_contiguous(sizeof(Status), MPI_BYTE, &statusDatatype);
    MPI_Type_commit(&statusDatatype);
}
Author: BigWEric; project: Zombie-Apocalypse-Simulation-MPI-OPENMP-; lines: 12; source file: MPICommunicator.cpp
Example 6: Construct_MPI_Datatypes
void Construct_MPI_Datatypes(int rows, int cols)
{
    // Contiguous memory vector
    MPI_Type_contiguous(cols, MPI_DOUBLE, &MPI_Vector);
    MPI_Type_commit(&MPI_Vector);

    // Contiguous memory matrix (a vector of row vectors)
    MPI_Type_contiguous(rows, MPI_Vector, &MPI_Matrix);
    MPI_Type_commit(&MPI_Matrix);

    return;
}
Author: JordanBlocher; project: openmpi; lines: 12; source file: dynamic.cpp
Example 7: Build_matrix_type
void Build_matrix_type(
        LOCAL_MATRIX_T* local_A /* in */) {
    MPI_Datatype temp_mpi_t;
    int          block_lengths[2];
    MPI_Aint     displacements[2];
    MPI_Datatype typelist[2];
    MPI_Aint     start_address;
    MPI_Aint     address;

    MPI_Type_contiguous(Order(local_A)*Order(local_A),
                        MPI_FLOAT, &temp_mpi_t);

    block_lengths[0] = block_lengths[1] = 1;

    typelist[0] = MPI_INT;
    typelist[1] = temp_mpi_t;

    MPI_Get_address(local_A, &start_address);
    MPI_Get_address(&(local_A->n_bar), &address);
    displacements[0] = address - start_address;

    MPI_Get_address(local_A->entries, &address);
    displacements[1] = address - start_address;

    MPI_Type_create_struct(2, block_lengths, displacements,
                           typelist, &local_matrix_mpi_t);
    MPI_Type_commit(&local_matrix_mpi_t);
} /* Build_matrix_type */
Author: BoyzInB; project: Assignment-1; lines: 28; source file: fox.c
Example 8: dgraphAllreduceMaxSum2
int
dgraphAllreduceMaxSum2 (
Gnum *              reduloctab,      /* Pointer to array of local Gnum data   */
Gnum *              reduglbtab,      /* Pointer to array of reduced Gnum data */
int                 redumaxsumnbr,   /* Number of max + sum Gnum operations   */
MPI_User_function * redufuncptr,     /* Pointer to operator function          */
MPI_Comm            proccomm)        /* Communicator to be used for reduction */
{
    MPI_Datatype redutypedat;  /* Data type for finding best separator              */
    MPI_Op       reduoperdat;  /* Handle of MPI operator for finding best separator */

    if ((MPI_Type_contiguous (redumaxsumnbr, GNUM_MPI, &redutypedat) != MPI_SUCCESS) ||
        (MPI_Type_commit (&redutypedat) != MPI_SUCCESS) ||
        (MPI_Op_create (redufuncptr, 1, &reduoperdat) != MPI_SUCCESS)) {
        errorPrint ("dgraphAllreduceMaxSum: communication error (1)");
        return (1);
    }

    if (MPI_Allreduce (reduloctab, reduglbtab, 1, redutypedat, reduoperdat, proccomm) != MPI_SUCCESS) {
        errorPrint ("dgraphAllreduceMaxSum: communication error (2)");
        return (1);
    }

    if ((MPI_Op_free (&reduoperdat) != MPI_SUCCESS) ||
        (MPI_Type_free (&redutypedat) != MPI_SUCCESS)) {
        errorPrint ("dgraphAllreduceMaxSum: communication error (3)");
        return (1);
    }

    return (0);
}
Author: AlaaHadji; project: specfem3d; lines: 31; source file: dgraph_allreduce.c
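For the Allreduce above to make sense, redufuncptr must treat each element of redutypedat as a whole Gnum array and combine a max prefix with a sum suffix. A minimal sketch of such an operator, assuming (purely for illustration) a 5-entry array whose first 2 entries are maximized, with a Gnum typedef matching GNUM_MPI:

/* Sketch of an MPI_User_function for dgraphAllreduceMaxSum2; the 2-max/3-sum
 * split and the Gnum typedef are illustrative assumptions, not Scotch code. */
typedef long Gnum;

static void
reduMaxSum(void *in, void *inout, int *len, MPI_Datatype *typedat)
{
    Gnum *a = (Gnum *) in;
    Gnum *b = (Gnum *) inout;
    int   i;

    /* *len is 1 here: the Allreduce sends one element of the contiguous type */
    for (i = 0; i < 2; i++)            /* max part */
        if (a[i] > b[i]) b[i] = a[i];
    for ( ; i < 5; i++)                /* sum part */
        b[i] += a[i];
}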
Example 9: gather
void gather(int rank, int size, const int gran, body *bodies){
    int i, j;
    int sendto = (rank + 1) % size;
    int recvfrom = ((rank + size) - 1) % size;

    MPI_Datatype bodytype;
    MPI_Type_contiguous(3, MPI_DOUBLE, &bodytype);
    MPI_Type_commit(&bodytype);
    MPI_Status status;

    body *outbuf = (body *) malloc(gran*sizeof(body));

    if (rank != 0) {
        //memcpy(outbuf, bodies, gran*sizeof(body));
        MPI_Send(bodies, gran, bodytype, recvfrom, 0, MPI_COMM_WORLD);
        for(i=0; i<size-rank-1; i++){
            MPI_Recv(outbuf, gran, bodytype, sendto, 0, MPI_COMM_WORLD, &status);
            MPI_Send(outbuf, gran, bodytype, recvfrom, 0, MPI_COMM_WORLD);
        }
    }
    else {
        FILE *oFile;
        oFile = fopen("peval_out.txt", "w");
        //memcpy(outbuf, bodies, gran*sizeof(body));
        for(j=0; j<gran; j++)
            fprintf(oFile, "%15.10f %15.10f %15.10f\n", bodies[j].x, bodies[j].y, bodies[j].m);
        for(i=0; i<size-rank-1; i++){
            MPI_Recv(outbuf, gran, bodytype, sendto, 0, MPI_COMM_WORLD, &status);
            for(j=0; j<gran; j++)
                fprintf(oFile, "%15.10f %15.10f %15.10f\n", outbuf[j].x, outbuf[j].y, outbuf[j].m);
        }
        fclose(oFile);
    }
    free(outbuf);
}
Author: mohamadi; project: Profiling-mpi; lines: 34; source file: vnbody.c
Example 10: mpiReduce_pickerV3
void mpiReduce_pickerV3(float  *resDataAbsMaxPaddedGlobal,
                        size_t *resDataMaxIndPaddedGlobal,
                        size_t resSize,
                        eXCorrMerge bAbs)
{
    resSizeMPI = resSize;

    MPI_Datatype mpiType;
    MPI_Type_contiguous((int) 2, MPI_FLOAT, &mpiType);
    MPI_Type_commit(&mpiType);

    float *resDataGlobalNode = NULL;
    float *resDataGlobalNodeReduce = NULL;

    array_new(resDataGlobalNode, 2*resSize);
    array_new(resDataGlobalNodeReduce, 2*resSize);

    memcpy(resDataGlobalNode,
           resDataAbsMaxPaddedGlobal,
           resSize*sizeof(float));
    mpiOp_array_typecast(resDataMaxIndPaddedGlobal,
                         resDataGlobalNode+resSize,
                         resSize);

    MPI_Op mpiOp;
    switch (bAbs) {
        case XCORR_MERGE_NEGATIVE:
            MPI_Op_create((MPI_User_function *) mpiOp_xcorrMergeResultGlobalV3Abs,
                          1,              // commutative
                          &mpiOp);
            break;
        case XCORR_MERGE_POSITIVE:
            MPI_Op_create((MPI_User_function *) mpiOp_xcorrMergeResultGlobalV3,
                          1,              // commutative
                          &mpiOp);
            break;
        default:
            ERROR("mpiReduce_pickerV3", "unsupported merging mode");
    }

    MPI_Reduce(resDataGlobalNode,
               resDataGlobalNodeReduce,
               (int) resSize,             // resSize elements of size 2*sizeof(float)
               mpiType,
               mpiOp,
               0,
               MPI_COMM_WORLD);

    MPI_Op_free(&mpiOp);

    memcpy(resDataAbsMaxPaddedGlobal,
           resDataGlobalNodeReduce,
           resSize*sizeof(float));
    mpiOp_array_typecast(resDataGlobalNodeReduce+resSize,
                         resDataMaxIndPaddedGlobal,
                         resSize);

    array_delete(resDataGlobalNode);
    array_delete(resDataGlobalNodeReduce);
    MPI_Type_free(&mpiType);
}
Author: hvthaibk; project: ccrunch; lines: 60; source file: mpi_xcorr.cpp
Example 11: create_pattern
void create_pattern(gchar* name, PatternType type, gint iter, gint elem, gint level, GroupBlock* group)
{
    Verbose("Creating pattern%d \"%s\" elem %d level %d\n", type, name, elem, level);

    Pattern* pattern = pattern_new(type, iter, elem, level);
    gint groupSize = (group? group->groupsize : size);
    gint groupRank;

    if (group)
        MPI_Comm_rank(group->mpicomm, &groupRank);
    else
        groupRank = rank;

    Verbose("GroupSize = %d, GroupRank = %d\n", groupSize, groupRank);

    MPI_Type_contiguous(elem, MPI_BYTE, &pattern->eType);
    MPI_Type_commit(&pattern->eType);
    pattern->type_size = 1;

    switch (type) {
        /* contiguous data */
        case PATTERN1: {
            int array_sizes[]    = { groupSize };
            int array_subsizes[] = { 1 };
            int array_starts[]   = { groupRank };

            MPI_Type_create_subarray(
                1,                  /* number of array dimensions */
                array_sizes,        /* number of eTypes in each dimension of the full array */
                array_subsizes,     /* number of eTypes in each dimension of the subarray */
                array_starts,       /* starting coordinates of the subarray in each dimension */
                MPI_ORDER_C,        /* array storage order flag (state) */
                pattern->eType,     /* eType (old datatype) */
                &pattern->datatype);
            MPI_Type_commit(&pattern->datatype);
            break;
        }

        /* non-contiguous data */
        case PATTERN2: {
            int array_sizes[]    = { iter, groupSize };
            int array_subsizes[] = { iter, 1 };
            int array_starts[]   = { 0, groupRank };

            MPI_Type_create_subarray(
                2,                  /* number of array dimensions */
                array_sizes,        /* number of eTypes in each dimension of the full array */
                array_subsizes,     /* number of eTypes in each dimension of the subarray */
                array_starts,       /* starting coordinates of the subarray in each dimension */
                MPI_ORDER_C,        /* array storage order flag (state) */
                pattern->eType,     /* eType (old datatype) */
                &pattern->datatype);
            MPI_Type_commit(&pattern->datatype);
            break;
        }

        default: Error("Pattern%d not yet supported!\n", type);
    }

    g_hash_table_insert(patternMap, name, pattern);
}
Author: drunz; project: parabench; lines: 60; source file: patterns.c
Example 12: parms_InitComplex
/* ----------------- Initialize complex data type and ops for MPI ----*/
void parms_InitComplex()
{
    MPI_Type_contiguous(2, MPI_DOUBLE, &MPI_CMPLX);
    MPI_Type_commit( &MPI_CMPLX );
    MPI_Op_create((MPI_User_function *)complex_sum, true, &MPI_CMPLX_SUM);
}
Author: georgeliao; project: pVBARMS; lines: 8; source file: parms_complex.c
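The complex_sum operator registered above is not shown on this page; a plausible implementation (a sketch, not necessarily the actual pVBARMS code) adds the two double components element-wise, since complex addition is component-wise:

/* Sketch of complex_sum for the two-double MPI_CMPLX type above;
 * the real pVBARMS implementation may differ. */
static void complex_sum(void *in, void *inout, int *len, MPI_Datatype *dtype)
{
    double *a = (double *) in;
    double *b = (double *) inout;
    int i;

    /* each MPI_CMPLX element is two doubles (re, im), so summing
     * 2 * (*len) doubles adds both parts of every complex value */
    for (i = 0; i < 2 * (*len); i++)
        b[i] += a[i];
}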
Example 13: main
int main(int argc, char *argv[])
{
    int rank;
    MPI_Status status;
    MPI_Datatype type;
    double buffer[10] = {
        1.11, 2.22, 3.33, 4.44, 5.55, 6.66, 7.77, 8.88, 9.99, 10.1010
    };

    MPI_Init(&argc, &argv);

    /* Note: the derived type is never passed to MPI_Type_commit, so using it
     * in MPI_Send/MPI_Recv below is erroneous; judging by the source file name
     * (uncommited.cpp), provoking this error is the point of the test. */
    MPI_Type_contiguous(5, MPI_DOUBLE, &type);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0)
    {
        MPI_Send(buffer, 2, type, 1, 123, MPI_COMM_WORLD);
    }
    else if (rank == 1)
    {
        double b[10];
        MPI_Recv(b, 2, type, 0, 123, MPI_COMM_WORLD, &status);
    }

    MPI_Finalize();
    return 0;
}
Author: msurkovsky; project: aislinn; lines: 28; source file: uncommited.cpp
Example 14: offsetof
void BBLSGraph::createDatatypes() {
    // BBLSNode struct
    int block_lengths[5];
    block_lengths[0] = 1;
    block_lengths[1] = 1;
    block_lengths[2] = 1;
    block_lengths[3] = 1;
    block_lengths[4] = 1;

    MPI_Aint displacements[5];
    displacements[0] = offsetof(BBLSNode, type);
    displacements[1] = offsetof(BBLSNode, output);
    displacements[2] = offsetof(BBLSNode, inputLeft);
    displacements[3] = offsetof(BBLSNode, inputRight);
    displacements[4] = sizeof(BBLSNode);

    MPI_Datatype types[5];
    types[0] = MPI_INT;
    types[1] = MPI_UNSIGNED;
    types[2] = MPI_UNSIGNED;
    types[3] = MPI_UNSIGNED;
    types[4] = MPI_UB;   // MPI_UB marks the struct's upper bound (deprecated since MPI-2, removed in MPI-3)

    MPI_Type_struct(5, block_lengths, displacements, types, &mpi_nodeType);
    MPI_Type_commit(&mpi_nodeType);

    // 3 BBLSNodes
    MPI_Type_contiguous(3, mpi_nodeType, &mpi_threeNodes);
    MPI_Type_commit(&mpi_threeNodes);
}
Author: Jnesselr; project: BBLS; lines: 30; source file: BBLSGraph.cpp
Example 15: escrita
void escrita()
{
    int i;

    MPI_Type_contiguous(TAMTUPLA, MPI_INT, &tupla);
    MPI_Type_commit(&tupla);

    ret = MPI_File_open( MPI_COMM_WORLD, "arquivofinal.dat",
                         MPI_MODE_WRONLY | MPI_MODE_CREATE,
                         MPI_INFO_NULL, &arquivofinal);
    if (ret == 0)
        printf("Arquivo final aberto com sucesso no processo %d \n", meu_ranque);  /* file opened successfully */
    else
    {
        printf("Arquivo final aberto com erro no processo %d \n", meu_ranque);     /* open failed */
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_File_set_view( arquivofinal, 0,
                       MPI_INT, MPI_INT,
                       "native", MPI_INFO_NULL);

    /* write the buffer one TAMTUPLA-int tuple at a time, in rank order */
    for (i = 0; i < TAMBUF; i+=TAMTUPLA)
        MPI_File_write_ordered( arquivofinal, buf_leitura + i, 1, tupla, MPI_STATUS_IGNORE);

    MPI_File_close(&arquivofinal);
}
Author: jonnyguio; project: progparela; lines: 26; source file: partB.c
Example 16: type_create_contiguous_x
static int type_create_contiguous_x(MPI_Count count,
                                    MPI_Datatype oldtype, MPI_Datatype *newtype)
{
    /* to make 'count' fit MPI-3 type processing routines (which take integer
     * counts), we construct a type consisting of N INT_MAX chunks followed by
     * a remainder. e.g for a count of 4000000000 bytes you would end up with
     * one 2147483647-byte chunk followed immediately by a 1852516353-byte
     * chunk */
    MPI_Datatype chunks, remainder;
    MPI_Aint lb, extent, disps[2];
    int blocklens[2];
    MPI_Datatype types[2];

    MPI_Count c = count/INT_MAX;
    MPI_Count r = count%INT_MAX;

    MPI_Type_vector(c, INT_MAX, INT_MAX, oldtype, &chunks);
    MPI_Type_contiguous(r, oldtype, &remainder);

    MPI_Type_get_extent(oldtype, &lb, &extent);

    blocklens[0] = 1;      blocklens[1] = 1;
    disps[0]     = 0;      disps[1]     = c*extent*INT_MAX;
    types[0]     = chunks; types[1]     = remainder;

    MPI_Type_create_struct(2, blocklens, disps, types, newtype);

    MPI_Type_free(&chunks);
    MPI_Type_free(&remainder);

    return MPI_SUCCESS;
}
Author: mpifl; project: mpich3newforfile; lines: 32; source file: utils.c
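A hypothetical caller (buffer and handle names assumed, not from MPICH) would use this helper to describe a buffer whose element count exceeds INT_MAX, then pass an ordinary count of 1 to int-count MPI routines:

    /* sketch: one datatype element now covers (INT_MAX + 12345) bytes */
    MPI_Count    big = (MPI_Count) INT_MAX + 12345;
    MPI_Datatype big_t;

    type_create_contiguous_x(big, MPI_BYTE, &big_t);
    MPI_Type_commit(&big_t);
    /* e.g. MPI_File_write(fh, buf, 1, big_t, &status); */
    MPI_Type_free(&big_t);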
Example 17: transpose_type
/* Extract an m x n submatrix within an m x N matrix and transpose it.
   Assume storage by rows; the defined datatype accesses by columns */
MPI_Datatype transpose_type(int N, int m, int n, MPI_Datatype type)
/* computes a datatype for the transpose of an mxn matrix
   with entries of type type */
{
    MPI_Datatype subrow, subrow1, submatrix;
    MPI_Aint lb, extent;

    MPI_Type_vector(m, 1, N, type, &subrow);
    MPI_Type_get_extent(type, &lb, &extent);
    MPI_Type_create_resized(subrow, 0, extent, &subrow1);
    MPI_Type_contiguous(n, subrow1, &submatrix);
    MPI_Type_commit(&submatrix);
    MPI_Type_free( &subrow );
    MPI_Type_free( &subrow1 );

    /* Add a consistency test: the size of submatrix should be
       n * m * sizeof(type) and the extent should be ((m-1)*N+n) * sizeof(type) */
    {
        int      tsize;
        MPI_Aint textent, llb;

        MPI_Type_size( type, &tsize );
        MPI_Type_get_true_extent( submatrix, &llb, &textent );

        if (textent != tsize * (N * (m-1)+n)) {
            fprintf( stderr, "Transpose Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
                     (long)textent, (long)(tsize * (N * (m-1)+n)), N, n, m );
        }
    }

    return(submatrix);
}
Author: OngOngoing; project: 219351_homework; lines: 33; source file: alltoallw1.c
Example 18: DefineMPITypes
int DefineMPITypes(void)
{
    Winspecs winspecs;
    Flags flags;
    rect rectangle;
    int len[3];
    MPI_Aint disp[3];   /* displacements must be MPI_Aint for MPI_Type_struct */
    MPI_Datatype types[3];

    NUM_type = MPI_DOUBLE;

    MPI_Type_contiguous(6, MPI_INT, &winspecs_type);
    MPI_Type_commit(&winspecs_type);

    len[0] = 10;
    len[1] = 2;
    len[2] = 6;
    disp[0] = (MPI_Aint) ((char *) (&(flags.breakout)) - (char *) (&(flags)));
    disp[1] = (MPI_Aint) ((char *) (&(flags.boundary_sq)) - (char *) (&(flags)));
    disp[2] = (MPI_Aint) ((char *) (&(flags.rmin)) - (char *) (&(flags)));
    types[0] = MPI_INT;
    types[1] = MPI_DOUBLE;
    types[2] = NUM_type;
    MPI_Type_struct(3, len, disp, types, &flags_type);
    MPI_Type_commit(&flags_type);

    len[0] = 5;
    disp[0] = (MPI_Aint) ((char *) (&(rectangle.l)) - (char *) (&(rectangle)));
    types[0] = MPI_INT;
    MPI_Type_struct(1, len, disp, types, &rect_type);
    MPI_Type_commit(&rect_type);

    return 0;
}
Author: MartinLidh; project: tddc78; lines: 34; source file: pm_genproc_cleanedup.c
Example 19: main
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size;
    int *buf, bufsize;
    int *result;
    int *rmabuf, rsize, rcount;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Request req;
    MPI_Datatype derived_dtp;

    MTest_Init(&argc, &argv);

    bufsize = 256 * sizeof(int);
    buf = (int *) malloc(bufsize);
    if (!buf) {
        fprintf(stderr, "Unable to allocate %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    result = (int *) malloc(bufsize);
    if (!result) {
        fprintf(stderr, "Unable to allocate %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    rcount = 16;
    rsize = rcount * sizeof(int);
    rmabuf = (int *) malloc(rsize);
    if (!rmabuf) {
        fprintf(stderr, "Unable to allocate %d bytes\n", rsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Type_contiguous(2, MPI_INT, &derived_dtp);
    MPI_Type_commit(&derived_dtp);

    /* The following loop is used to run through a series of communicators
     * that are subsets of MPI_COMM_WORLD, of size 1 or greater. */
    while (MTestGetIntracommGeneral(&comm, 1, 1)) {
        int count = 0;

        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        MPI_Win_create(buf, bufsize, 2 * sizeof(int), MPI_INFO_NULL, comm, &win);
        /* To improve reporting of problems about operations, we
         * change the error handler to errors return */
        MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

        /** TEST OPERATIONS USING ACTIVE TARGET (FENCE) SYNCHRONIZATION **/
        MPI_Win_fence(0, win);

        TEST_FENCE_OP("Put", MPI_Put(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
        );
        TEST_FENCE_OP("Get", MPI_Get(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
        );
Author: NexMirror; project: MPICH; lines: 60; source file: rmazero.c
Example 20: create_indexed_gap_optimized_ddt
static MPI_Datatype
create_indexed_gap_optimized_ddt( void )
{
    MPI_Datatype dt1, dt2, dt3;
    int bLength[3];
    MPI_Datatype types[3];
    MPI_Aint displ[3];

    MPI_Type_contiguous( 40, MPI_BYTE, &dt1 );
    MPI_Type_create_resized( dt1, 0, 44, &dt2 );

    bLength[0] = 4;
    bLength[1] = 9;
    bLength[2] = 36;

    types[0] = MPI_BYTE;
    types[1] = dt2;
    types[2] = MPI_BYTE;

    displ[0] = 0;
    displ[1] = 8;
    displ[2] = 44 * 9 + 8;

    MPI_Type_create_struct( 3, bLength, displ, types, &dt3 );

    MPI_Type_free( &dt1 );
    MPI_Type_free( &dt2 );
    MPI_DDT_DUMP( dt3 );
    MPI_Type_commit( &dt3 );
    return dt3;
}
Author: Dissolubilis; project: ompi-svn-mirror; lines: 31; source file: to_self.c
Note: the MPI_Type_contiguous examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's License. Do not republish without permission.