This article collects and summarizes typical usage examples of the C++ MPI_File_open function. If you have been wondering what exactly MPI_File_open does, how to call it, and what real-world uses of it look like, then the hand-picked code examples below should help.
Below, 20 code examples of MPI_File_open are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
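Before the collected examples, here is a minimal self-contained sketch of the typical call sequence around MPI_File_open: a collective open, a per-rank positioned write, and a close. The file name demo.dat and the one-int-per-rank layout are arbitrary choices for illustration, not taken from any of the examples below.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Collective open: every rank in the communicator must call it
       with the same file name and a compatible access mode. */
    MPI_File fh;
    int err = MPI_File_open(MPI_COMM_WORLD, "demo.dat",
                            MPI_MODE_CREATE | MPI_MODE_WRONLY,
                            MPI_INFO_NULL, &fh);
    if (err != MPI_SUCCESS) {
        fprintf(stderr, "rank %d: MPI_File_open failed\n", rank);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Each rank writes one int at its own byte offset. */
    MPI_Offset offset = (MPI_Offset)rank * sizeof(int);
    MPI_File_write_at(fh, offset, &rank, 1, MPI_INT, MPI_STATUS_IGNORE);

    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}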
Example 1: qpb_read_gauge
void
qpb_read_gauge(qpb_gauge_field gauge_field, size_t offset, size_t precision, char fname[])
{
MPI_Datatype mpi_dtype_link, filetype;
if(precision == 32)
{
MPI_Type_contiguous(4*2*NC*NC*ND, MPI_BYTE, &mpi_dtype_link);
}
else if(precision == 64)
{
MPI_Type_contiguous(8*2*NC*NC*ND, MPI_BYTE, &mpi_dtype_link);
}
else
{
fprintf(stderr, "%s: precision should be either 32 or 64\n", __func__);
exit(QPB_NOT_IMPLEMENTED_ERROR);
}
MPI_Type_commit(&mpi_dtype_link);
int starts[ND], l_dim[ND], g_dim[ND];
for(int i=0; i<ND; i++)
{
starts[i] = problem_params.coords[i]*problem_params.l_dim[i];
l_dim[i] = problem_params.l_dim[i];
g_dim[i] = problem_params.g_dim[i];
};
int ierr = MPI_Type_create_subarray(ND, g_dim, l_dim, starts, MPI_ORDER_C,
mpi_dtype_link, &filetype);
MPI_Type_commit(&filetype);
MPI_File fhandle;
ierr = MPI_File_open(MPI_COMM_WORLD, fname,
MPI_MODE_RDONLY, MPI_INFO_NULL, &fhandle);
if(ierr != MPI_SUCCESS)
{
if(am_master)
{
fprintf(stderr, "%s: MPI_File_open() returned in error\n", fname);
exit(QPB_FILE_ERROR);
}
}
ierr = MPI_File_set_view(fhandle, (MPI_Offset)offset, mpi_dtype_link, filetype,
"native", MPI_INFO_NULL);
if(ierr != MPI_SUCCESS)
{
if(am_master)
{
fprintf(stderr, "%s: MPI_File_set_view() returned in error\n", fname);
exit(QPB_FILE_ERROR);
}
}
void *buffer = NULL;
if(precision == 32)
{
buffer = qpb_alloc(problem_params.l_vol*ND*sizeof(qpb_link_float));
}
else if(precision == 64)
{
buffer = qpb_alloc(problem_params.l_vol*ND*sizeof(qpb_link_double));
}
MPI_Status status;
ierr = MPI_File_read_all(fhandle, buffer, problem_params.l_vol,
mpi_dtype_link, &status);
/* Check the read result before ierr is overwritten by MPI_File_close() below */
if(ierr != MPI_SUCCESS)
{
if(am_master)
{
fprintf(stderr, "%s: MPI_File_read_all() returned in error\n", fname);
exit(QPB_FILE_ERROR);
}
}
MPI_Type_free(&mpi_dtype_link);
MPI_Type_free(&filetype);
ierr = MPI_File_close(&fhandle);
if(ierr != MPI_SUCCESS)
{
if(am_master)
{
fprintf(stderr, "%s: MPI_File_close() returned in error\n", fname);
exit(QPB_FILE_ERROR);
}
}
if(!qpb_is_bigendian())
{
if(precision == 32)
qpb_byte_swap_float(buffer, problem_params.l_vol*ND*NC*NC*2);
if(precision == 64)
qpb_byte_swap_double(buffer, problem_params.l_vol*ND*NC*NC*2);
}
//......... (code omitted here) .........
Author: g-koutsou, Project: qpb, Lines of code: 101, Source: qpb_read_gauge.c
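Example 1 follows a common lattice-I/O pattern: describe one site's data as a contiguous byte type, carve this rank's sub-block out of the global array with MPI_Type_create_subarray, install it as the file view, and read collectively. A stripped-down sketch of that pattern is shown below; the 2-D double array and the 1024/256 extents are made-up illustration values, not the qpb data layout.

#include <mpi.h>
#include <stdlib.h>

/* Read this rank's block of a 2-D array of doubles stored in C order. */
void read_block(const char *fname, int rank_row, int rank_col)
{
    int gsize[2] = {1024, 1024};          /* global extents      */
    int lsize[2] = {256, 256};            /* local block extents */
    int start[2] = {rank_row * 256, rank_col * 256};

    MPI_Datatype filetype;
    MPI_Type_create_subarray(2, gsize, lsize, start, MPI_ORDER_C,
                             MPI_DOUBLE, &filetype);
    MPI_Type_commit(&filetype);

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, fname, MPI_MODE_RDONLY,
                  MPI_INFO_NULL, &fh);

    /* The view skips other ranks' data automatically. */
    MPI_File_set_view(fh, 0, MPI_DOUBLE, filetype, "native", MPI_INFO_NULL);

    double *buf = malloc((size_t)lsize[0] * lsize[1] * sizeof(double));
    MPI_File_read_all(fh, buf, lsize[0] * lsize[1], MPI_DOUBLE,
                      MPI_STATUS_IGNORE);

    MPI_File_close(&fh);
    MPI_Type_free(&filetype);
    free(buf);
}

MPI_File_read_all is collective, so every rank that opened the file must call it, even if its local block happens to be empty.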
Example 2: main
int main(int argc, char **argv)
{
MPI_Datatype newtype;
int i, ndims, array_of_gsizes[3], array_of_distribs[3];
int order, nprocs, len, flag, err;
int array_of_dargs[3], array_of_psizes[3];
int *readbuf, *writebuf, bufcount, mynod;
char filename[1024];
MPI_File fh;
MPI_Status status;
MPI_Aint size_with_aint;
MPI_Offset size_with_offset;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &mynod);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
/* process 0 takes the file name as a command-line argument and
broadcasts it to other processes */
if (!mynod) {
i = 1;
while ((i < argc) && strcmp("-fname", *argv)) {
i++;
argv++;
}
if (i >= argc) {
printf("\n*# Usage: large_array -fname filename\n\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
argv++;
len = strlen(*argv);
strcpy(filename, *argv);
MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
printf("This program creates a 4 Gbyte file. Don't run it if you don't have that much disk space!\n");
}
else {
MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
}
/* create the distributed array filetype */
ndims = 3;
order = MPI_ORDER_C;
array_of_gsizes[0] = 1024;
array_of_gsizes[1] = 1024;
array_of_gsizes[2] = 4*1024/sizeof(int);
array_of_distribs[0] = MPI_DISTRIBUTE_BLOCK;
array_of_distribs[1] = MPI_DISTRIBUTE_BLOCK;
array_of_distribs[2] = MPI_DISTRIBUTE_BLOCK;
array_of_dargs[0] = MPI_DISTRIBUTE_DFLT_DARG;
array_of_dargs[1] = MPI_DISTRIBUTE_DFLT_DARG;
array_of_dargs[2] = MPI_DISTRIBUTE_DFLT_DARG;
for (i=0; i<ndims; i++) array_of_psizes[i] = 0;
MPI_Dims_create(nprocs, ndims, array_of_psizes);
/* check if MPI_Aint is large enough for size of global array.
if not, complain. */
size_with_aint = sizeof(int);
for (i=0; i<ndims; i++) size_with_aint *= array_of_gsizes[i];
size_with_offset = sizeof(int);
for (i=0; i<ndims; i++) size_with_offset *= array_of_gsizes[i];
if (size_with_aint != size_with_offset) {
printf("Can't use an array of this size unless the MPI implementation defines a 64-bit MPI_Aint\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_Type_create_darray(nprocs, mynod, ndims, array_of_gsizes,
array_of_distribs, array_of_dargs,
array_of_psizes, order, MPI_INT, &newtype);
MPI_Type_commit(&newtype);
/* initialize writebuf */
MPI_Type_size(newtype, &bufcount);
bufcount = bufcount/sizeof(int);
writebuf = (int *) malloc(bufcount * sizeof(int));
if (!writebuf) printf("Process %d, not enough memory for writebuf\n", mynod);
for (i=0; i<bufcount; i++) writebuf[i] = mynod*1024 + i;
/* write the array to the file */
MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
MPI_INFO_NULL, &fh);
MPI_File_set_view(fh, 0, MPI_INT, newtype, "native", MPI_INFO_NULL);
MPI_File_write_all(fh, writebuf, bufcount, MPI_INT, &status);
MPI_File_close(&fh);
free(writebuf);
/* now read it back */
readbuf = (int *) calloc(bufcount, sizeof(int));
if (!readbuf) printf("Process %d, not enough memory for readbuf\n", mynod);
MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
MPI_INFO_NULL, &fh);
//......... (code omitted here) .........
Author: MartinLidh, Project: tddc78, Lines of code: 101, Source: large_array.c
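Example 2 builds its file view with MPI_Type_create_darray, which block-distributes a multidimensional global array over a process grid produced by MPI_Dims_create. A smaller self-contained sketch of the same write-with-darray-view pattern is given below; the 512x512 global size and the output name darray.dat are illustrative assumptions.

#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank, nprocs;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    int gsizes[2]   = {512, 512};
    int distribs[2] = {MPI_DISTRIBUTE_BLOCK, MPI_DISTRIBUTE_BLOCK};
    int dargs[2]    = {MPI_DISTRIBUTE_DFLT_DARG, MPI_DISTRIBUTE_DFLT_DARG};
    int psizes[2]   = {0, 0};
    MPI_Dims_create(nprocs, 2, psizes);   /* factor nprocs into a 2-D grid */

    MPI_Datatype darray;
    MPI_Type_create_darray(nprocs, rank, 2, gsizes, distribs, dargs,
                           psizes, MPI_ORDER_C, MPI_INT, &darray);
    MPI_Type_commit(&darray);

    /* The type's size equals this rank's share of the global array. */
    int count;
    MPI_Type_size(darray, &count);
    count /= (int)sizeof(int);
    int *buf = malloc((size_t)count * sizeof(int));
    for (int i = 0; i < count; i++) buf[i] = rank;

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, "darray.dat",
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_INT, darray, "native", MPI_INFO_NULL);
    MPI_File_write_all(fh, buf, count, MPI_INT, MPI_STATUS_IGNORE);
    MPI_File_close(&fh);

    free(buf);
    MPI_Type_free(&darray);
    MPI_Finalize();
    return 0;
}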
Example 3: eigenvalues_Jacobi
//......... (code omitted here) .........
eigenvectors_su3v = calloc(N2*(*nr_of_eigenvalues), sizeof(su3_vector));;
eigenvls_su3v = (double*)malloc((*nr_of_eigenvalues)*sizeof(double));
inv_eigenvls_su3v = (double*)malloc((*nr_of_eigenvalues)*sizeof(double));
}
solver_it_max = 64;
/* compute the maximal one first */
/* DEBUG
jdher_su3vect(N*sizeof(su3_vector)/sizeof(_Complex double), N2*sizeof(su3_vector)/sizeof(_Complex double),
50., 1.e-12,
1, 15, 8, max_iterations, 1, 0, 0, NULL,
CG, solver_it_max,
threshold_max, decay_max, verbosity,
&converged, (_Complex double*) max_eigenvector, (double*) &max_eigenvalue_su3v,
&returncode2, JD_MAXIMAL, 1,tslice,f);
*/
atime = gettime();
/* (re-) compute minimal eigenvalues */
converged = 0;
solver_it_max = 256;
if(maxmin)
jdher_su3vect(N*sizeof(su3_vector)/sizeof(_Complex double), N2*sizeof(su3_vector)/sizeof(_Complex double),
50., prec,
(*nr_of_eigenvalues), j_max, j_min,
max_iterations, blocksize, blockwise, v0dim, (_Complex double*) eigenvectors_su3v,
CG, solver_it_max,
threshold_max, decay_max, verbosity,
&converged, (_Complex double*) eigenvectors_su3v, eigenvls_su3v,
&returncode, JD_MAXIMAL, 1,tslice,
f);
else
jdher_su3vect(N*sizeof(su3_vector)/sizeof(_Complex double), N2*sizeof(su3_vector)/sizeof(_Complex double),
0., prec,
(*nr_of_eigenvalues), j_max, j_min,
max_iterations, blocksize, blockwise, v0dim, (_Complex double*) eigenvectors_su3v,
CG, solver_it_max,
threshold_min, decay_min, verbosity,
&converged, (_Complex double*) eigenvectors_su3v, eigenvls_su3v,
&returncode, JD_MINIMAL, 1,tslice,
f);
etime = gettime();
if(g_proc_id == 0) {
printf("Eigenvalues computed in %e sec. (gettime)\n", etime-atime);
}
/* Printout eigenvalues. */
if(g_proc_id == 0) {
sprintf(eigvl_filename,"eigenvalues.%.3d.%.4d", tslice, nstore);
efp=fopen(eigvl_filename,"w");
for(v0dim = 0; v0dim < (*nr_of_eigenvalues); v0dim++) {
fprintf(efp,"%e\n",eigenvls_su3v[v0dim]);
}
fclose(efp);
}
/* Printout eigenvectors. */
for(v0dim = 0; v0dim < (*nr_of_eigenvalues); v0dim++) {
sprintf(filename, "eigenvector.%.3d.%.3d.%.4d", v0dim, tslice, nstore);
s=(su3_vector*)&eigenvectors_su3v[v0dim*N2];
#ifdef MPI
# ifdef HAVE_LIBLEMON
// NOTE: it should print 8*2*3*SPACEVOLUME data per file, but it prints 8*2*4n*SPACEVOLUME (n=4-1 for eigenvectors 0-3)
MPI_File_open(g_cart_grid, filename, MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &fp);
writer = lemonCreateWriter(&fp, g_cart_grid);
header = lemonCreateHeader(1 /* MB */, 1 /* ME */, "lattice-su3_vector-data",SPACEVOLUME*3*sizeof(_Complex double));
lemonWriteRecordHeader(header, writer);
lemonDestroyHeader(header);
lemonWriteLatticeParallel(writer, s, siteSize, dims);
lemonWriterCloseRecord(writer);
lemonDestroyWriter(writer);
MPI_File_close(&fp);
# else
if(g_proc_id == 0) {
printf("Cannot write eigenvectors: you need LEMON for writing eigenvectors with MPI\n");
}
# endif
#else
fp=fopen(filename,"wb");
fwrite(s,siteSize,SPACEVOLUME,fp);
fclose(fp);
#endif // MPI
sqnorm=square_norm_su3vect(s,SPACEVOLUME,1);
if(g_proc_id == 0) {
printf("wrote eigenvector | |^2 = %e \n",sqnorm);
}
}
returnvalue=eigenvls_su3v[0];
free(max_eigenvector_);
#else
fprintf(stderr, "lapack not available, so JD method for EV computation not available \n");
#endif // LAPACK
return(returnvalue);
}
Author: LorenzoRiggio, Project: tmLQCD, Lines of code: 101, Source: eigenvalues_Jacobi.c
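Several of the examples on this page only compare the return code of MPI_File_open against MPI_SUCCESS. When an open fails, it is often more helpful to print the implementation's own message via MPI_Error_string. The helper below is a small hedged sketch of that idea; open_or_report is a made-up name, not part of any example above.

#include <mpi.h>
#include <stdio.h>

/* Open a file and report a readable error string if the call fails. */
static int open_or_report(MPI_Comm comm, const char *fname, int amode,
                          MPI_File *fh)
{
    int err = MPI_File_open(comm, fname, amode, MPI_INFO_NULL, fh);
    if (err != MPI_SUCCESS) {
        char msg[MPI_MAX_ERROR_STRING];
        int len = 0;
        MPI_Error_string(err, msg, &len);
        fprintf(stderr, "MPI_File_open(%s) failed: %s\n", fname, msg);
    }
    return err;
}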
Example 4: writecomputetrace_
void writecomputetrace_(int* pcompstep, double* pdtime, double* pcpu_t)
#endif
{
COMPUTE_TRACE_FLAG = 1; // only for param print
// now the control is off to the solver, i/o kernel just provides the function
// it's up to solver to decide when to use compute trace
// if(COMPUTE_TRACE_FLAG != 1)
// return;
char tracefname[kMaxPathLen];
int formatparam = trace_ioop;
int nfile = trace_nf;
int stepnum = *pcompstep;
double dtime = *pdtime;
double cpu_t = *pcpu_t;
// only write every few steps TODO: get param(13) IOCOMM and compare with it
if(stepnum%100 != 0) return;
//printf("iostep is %d, dtime = %lf\n", *pcompstep, *pdtime);
int temp_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &temp_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mysize);
MPI_Barrier(MPI_COMM_WORLD);
memset((void*)tracefname, 0, kMaxPathLen);
//sprintf(tracefname, "%s/compute-trace-%d-proc-ioop-%d-nf-%d-t%.5d.dat",
// kOutputPath, mysize, formatparam, nfile, stepnum);
// note: this might be called before going into any io func, so "path" is not set yet
sprintf(tracefname, "%s/compute-trace-%d-proc-istep-%.5d-ioop-%d-nf-%d.dat",
kOutputPath, mysize, stepnum, trace_ioop, trace_nf);
//printf("my filename %s (myrank=%d) \n", tracefname, temp_rank);
// write the actual file
if (1) {
MPI_File timefile;
int rc;
rc = MPI_File_open(MPI_COMM_WORLD, tracefname,
MPI_MODE_CREATE | MPI_MODE_WRONLY , MPI_INFO_NULL, &timefile);
if(rc) {
if(temp_rank == 0) printf("Unble to open file %s, error code:%d! \n", tracefname, rc);
}
char mytime[128];
memset(mytime, 0, 128);
sprintf(mytime, "%10d %10.3lf %10.3lf\n",
temp_rank, dtime, cpu_t);
int len = strlen(mytime);
//printf("str len = %d\n", len);
long long offsets = temp_rank * len ;
MPI_Status write_data_status;
MPI_File_write_at_all_begin(timefile,
offsets,
mytime,
len,
MPI_CHAR);
MPI_File_write_at_all_end(timefile,
mytime,
&write_data_status);
MPI_File_close( & timefile );
}
//printf("writecomputetrace() finished, myrank = %d\n", temp_rank);
}
Author: YHUCD, Project: NEKCEM, Lines of code: 70, Source: mpiio_util.c
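Example 4 relies on every rank producing a record of identical length so that rank * len is a valid file offset. A compact sketch of that fixed-width-record idea, using the simpler MPI_File_write_at_all instead of the split begin/end calls, is shown below; the 64-byte record width and the field layout are arbitrary choices.

#include <mpi.h>
#include <stdio.h>
#include <string.h>

#define REC_LEN 64   /* fixed record width so offsets are rank * REC_LEN */

void write_trace_line(const char *fname, double dtime, double cpu_t)
{
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    char rec[REC_LEN];
    memset(rec, ' ', REC_LEN);
    /* snprintf never writes more than REC_LEN bytes; padding and the
       trailing newline keep every record exactly REC_LEN bytes long. */
    int n = snprintf(rec, REC_LEN, "%10d %10.3f %10.3f", rank, dtime, cpu_t);
    if (n > 0 && n < REC_LEN - 1) rec[n] = ' ';   /* replace the NUL, keep padding */
    rec[REC_LEN - 1] = '\n';

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, fname,
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
    MPI_File_write_at_all(fh, (MPI_Offset)rank * REC_LEN, rec, REC_LEN,
                          MPI_CHAR, MPI_STATUS_IGNORE);
    MPI_File_close(&fh);
}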
Example 5: main
int main(int argc, char *argv[])
{
int width, height, maxiter, flag;
double x[2], y[2], c[2];
char *image, *stats;
int comm_sz, my_rank;
double t1, t2, delta;
// Get and parse the program parameters
getParams(argv, &flag, c, x, y, &width, &height, &maxiter, &image, &stats);
// Allocate space for the image
int *iterations = (int*)malloc( sizeof(int) * width * height );
assert(iterations != NULL);
// Start MPI
MPI_Init(NULL, NULL);
MPI_Comm_size(MPI_COMM_WORLD, &comm_sz);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
// Begin process timer
t1 = MPI_Wtime();
/* compute set */
int maxCount = parallelJulia(x, width, y, height, c, flag, maxiter, iterations, my_rank, comm_sz, MPI_COMM_WORLD);
// Stop timer and compute time elapse
t2 = MPI_Wtime();
delta = t2 - t1;
if (my_rank == 0)
{
/* save our picture for the viewer */
printf("\nMaster process %d creating image...\n", my_rank);
saveBMP(image, iterations, width, height);
printf("\nFinished image creation\n");
}
// Wait for all processes to finish Julia computations
MPI_Barrier(MPI_COMM_WORLD);
// Open stats file
MPI_File statsFile;
if (MPI_File_open(MPI_COMM_WORLD, stats, MPI_MODE_CREATE|MPI_MODE_WRONLY, MPI_INFO_NULL, &statsFile) == MPI_SUCCESS)
{
// Generate statistic string
char message[100];
sprintf(message, "process %d: max iterations reached = %d, time elapsed = %lf\n", my_rank, maxCount, delta);
MPI_File_write_ordered(statsFile, message, strlen(message), MPI_CHAR, MPI_STATUS_IGNORE);
MPI_File_close(&statsFile);
}
else printf("Problem opening file on process %d\n", my_rank);
// Close MPI environment
MPI_Finalize();
// Free reserved memory
free(iterations);
return 0;
}
Author: GenevaS, Project: Parallel_MPI, Lines of code: 66, Source: main.c
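Example 5 checks the return value of MPI_File_open directly, which works because MPI file operations default to the MPI_ERRORS_RETURN error handler (unlike communicator operations, whose default is fatal). If you prefer to make that explicit, the default file error handler can be set through MPI_FILE_NULL, as in this sketch; open_stats_file is an illustrative wrapper, not code from the example.

#include <mpi.h>
#include <stdio.h>

/* Make the "errors return" behaviour explicit for all files opened
   afterwards, then check the return code as Example 5 does. */
void open_stats_file(const char *stats, MPI_File *statsFile)
{
    MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_RETURN);

    int rc = MPI_File_open(MPI_COMM_WORLD, stats,
                           MPI_MODE_CREATE | MPI_MODE_WRONLY,
                           MPI_INFO_NULL, statsFile);
    if (rc != MPI_SUCCESS)
        fprintf(stderr, "could not open %s\n", stats);
}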
Example 6: main
int main(int argc, char *argv[])
{
int rank, size;
const int N = atoi(argv[1]);
// printf("Number of testcase = %d\n", N);
MPI_Init (&argc, &argv);
double start_time, end_time;
MPI_Comm_rank (MPI_COMM_WORLD, &rank);
MPI_Comm_size (MPI_COMM_WORLD, &size);
// printf("My rank is %d \n", rank);
//start_time = MPI_Wtime();
MPI_File fin, fout;
MPI_Status status;
int *root_arr;
int max_arr_size = size > N ? size : N;
int ret = MPI_File_open(MPI_COMM_WORLD, argv[2],
MPI_MODE_RDONLY, MPI_INFO_NULL, &fin);
if (rank == ROOT) {
root_arr = new int[max_arr_size+3];
// printf("Enter rank 0 statement ... \n");
MPI_File_read(fin, root_arr, N, MPI_INT, &status);
/*
for (int i = 0; i < N; ++i)
printf("[START] [Rank %d] root_arr[%d] = %d\n", rank, i, root_arr[i]);
printf("Out Rank 0 statement ... \n");
*/
}
MPI_File_close(&fin);
MPI_Barrier(MPI_COMM_WORLD); // Wait for rank0 to read file
int rank_num = size > N ? N : size;
const int LAST = rank_num - 1;
int num_per_node = N / rank_num;
int *local_arr;
int num_per_node_diff = N - num_per_node * rank_num;
int diff = num_per_node_diff;
bool has_remain = false;
bool has_remain_rank = rank_num % 2 ? true : false;
if (num_per_node_diff > 0) {
// Send remaining elements to size - 1
has_remain = true;
if (rank == ROOT) {
MPI_Send(root_arr + N - diff, diff, MPI_INT, LAST, 0, MPI_COMM_WORLD);
} else if (rank == LAST) {
// Handle special case
num_per_node += num_per_node_diff;
local_arr = new int[num_per_node+1];
MPI_Recv(local_arr + num_per_node - diff, diff,
MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
}
} else if(rank == rank_num - 1) {
local_arr = new int[num_per_node+1];
}
MPI_Barrier(MPI_COMM_WORLD); // Wait for rank0 to read file
if (rank != rank_num - 1)
local_arr = new int[num_per_node+1];
// MPI_Scatter (send_buf, send_count, send_type, recv_buf, recv_count, recv_type, root, comm)
if (rank < LAST)
MPI_Scatter(root_arr, num_per_node, MPI_INT, local_arr,
num_per_node, MPI_INT, ROOT, MPI_COMM_WORLD);
else
MPI_Scatter(root_arr, num_per_node-diff, MPI_INT, local_arr,
num_per_node-diff, MPI_INT, ROOT, MPI_COMM_WORLD);
// printf("[Rank %d] num_per_node_size = %d\n" ,rank, num_per_node);
MPI_Barrier(MPI_COMM_WORLD);
/*
for (int i = 0; i < num_per_node; ++i)
printf("[BEFORE] [Rank %d] local_arr[%d] = %d\n", rank, i, local_arr[i]);
*/
if (rank < rank_num) {
std::sort(local_arr, local_arr + num_per_node);
}
MPI_Barrier(MPI_COMM_WORLD);
/*
for (int i = 0; i < num_per_node; ++i)
printf("[AFTER] [Rank %d] local_arr[%d] = %d\n", rank, i, local_arr[i]);
*/
// printf("rank %d is arrived\n", rank);
MPI_Barrier(MPI_COMM_WORLD); // Wait for rank0 to read file
int *recv_buf, *send_buf;
int recv_len, send_len, success;
if (rank_num > 1 && rank < rank_num) {
if (rank == ROOT) {
send_len = num_per_node;
MPI_Send(&send_len, 1, MPI_INT, rank+1, 0, MPI_COMM_WORLD);
//......... (code omitted here) .........
Author: ChihMin, Project: Parallel_Programming, Lines of code: 101, Source: advanced.cpp
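Example 6 opens the input on MPI_COMM_WORLD even though only rank 0 reads it, and then scatters the data. An alternative sketch, in which rank 0 opens the file on MPI_COMM_SELF so the other ranks never participate in the open, is given below; read_on_root, N, and the broadcast step are assumptions for illustration rather than the original author's design.

#include <mpi.h>
#include <stdlib.h>

/* Rank 0 reads N ints on its own, then broadcasts the data so the
   ranks can go on to distribute or sort it. */
int *read_on_root(const char *fname, int N, int rank)
{
    int *arr = malloc((size_t)N * sizeof(int));

    if (rank == 0) {
        MPI_File fin;
        MPI_File_open(MPI_COMM_SELF, fname, MPI_MODE_RDONLY,
                      MPI_INFO_NULL, &fin);
        MPI_File_read(fin, arr, N, MPI_INT, MPI_STATUS_IGNORE);
        MPI_File_close(&fin);
    }
    MPI_Bcast(arr, N, MPI_INT, 0, MPI_COMM_WORLD);
    return arr;
}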
Example 7: IOR_Open_MPIIO
void *
IOR_Open_MPIIO(char * testFileName,
IOR_param_t * param)
{
int fd_mode = (int)0,
offsetFactor,
tasksPerFile,
transfersPerBlock = param->blockSize
/ param->transferSize;
struct fileTypeStruct {
int globalSizes[2],
localSizes[2],
startIndices[2];
} fileTypeStruct;
MPI_File * fd;
MPI_Comm comm;
MPI_Info mpiHints = MPI_INFO_NULL;
fd = (MPI_File *)malloc(sizeof(MPI_File));
if (fd == NULL) ERR("Unable to malloc MPI_File");
*fd = 0;
/* set IOR file flags to MPIIO flags */
/* -- file open flags -- */
if (param->openFlags & IOR_RDONLY) {fd_mode |= MPI_MODE_RDONLY;}
if (param->openFlags & IOR_WRONLY) {fd_mode |= MPI_MODE_WRONLY;}
if (param->openFlags & IOR_RDWR) {fd_mode |= MPI_MODE_RDWR;}
if (param->openFlags & IOR_APPEND) {fd_mode |= MPI_MODE_APPEND;}
if (param->openFlags & IOR_CREAT) {fd_mode |= MPI_MODE_CREATE;}
if (param->openFlags & IOR_EXCL) {fd_mode |= MPI_MODE_EXCL;}
if (param->openFlags & IOR_TRUNC) {
fprintf(stdout, "File truncation not implemented in MPIIO\n");
}
if (param->openFlags & IOR_DIRECT) {
fprintf(stdout, "O_DIRECT not implemented in MPIIO\n");
}
/*
* MPI_MODE_UNIQUE_OPEN mode optimization eliminates the overhead of file
* locking. Only open a file in this mode when the file will not be con-
* currently opened elsewhere, either inside or outside the MPI environment.
*/
fd_mode |= MPI_MODE_UNIQUE_OPEN;
if (param->filePerProc) {
comm = MPI_COMM_SELF;
} else {
comm = testComm;
}
SetHints(&mpiHints, param->hintsFileName);
/*
* note that with MP_HINTS_FILTERED=no, all key/value pairs will
* be in the info object. The info object that is attached to
* the file during MPI_File_open() will only contain those pairs
* deemed valid by the implementation.
*/
/* show hints passed to file */
if (rank == 0 && param->showHints) {
fprintf(stdout, "\nhints passed to MPI_File_open() {\n");
ShowHints(&mpiHints);
fprintf(stdout, "}\n");
}
MPI_CHECK(MPI_File_open(comm, testFileName, fd_mode, mpiHints, fd),
"cannot open file");
/* show hints actually attached to file handle */
if (rank == 0 && param->showHints) {
MPI_CHECK(MPI_File_get_info(*fd, &mpiHints),
"cannot get file info");
fprintf(stdout, "\nhints returned from opened file {\n");
ShowHints(&mpiHints);
fprintf(stdout, "}\n");
}
/* preallocate space for file */
if (param->preallocate && param->open == WRITE) {
MPI_CHECK(MPI_File_preallocate(*fd,
(MPI_Offset)(param->segmentCount*param->blockSize*param->numTasks)),
"cannot preallocate file");
}
/* create file view */
if (param->useFileView) {
/* create contiguous transfer datatype */
MPI_CHECK(MPI_Type_contiguous(param->transferSize / sizeof(IOR_size_t),
MPI_LONG_LONG_INT, &param->transferType),
"cannot create contiguous datatype");
MPI_CHECK(MPI_Type_commit(&param->transferType),
"cannot commit datatype");
if (param->filePerProc) {
offsetFactor = 0;
tasksPerFile = 1;
} else {
offsetFactor = (rank + rankOffset) % param->numTasks;
tasksPerFile = param->numTasks;
}
/*
* create file type using subarray
//......... (code omitted here) .........
Author: gcongiu, Project: E10, Lines of code: 101, Source: aiori-MPIIO.c
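IOR passes tuning hints to MPI_File_open through an MPI_Info object filled in by SetHints(). Building such an object by hand is straightforward; the sketch below uses the reserved hints cb_buffer_size and striping_factor with example values purely as an illustration — an MPI implementation is free to ignore any hint it does not support.

#include <mpi.h>

/* Illustrative hints; the values are examples, not recommendations. */
MPI_File open_with_hints(const char *fname)
{
    MPI_Info info;
    MPI_Info_create(&info);
    MPI_Info_set(info, "cb_buffer_size", "16777216");  /* 16 MiB collective buffer */
    MPI_Info_set(info, "striping_factor", "8");        /* stripe over 8 targets    */

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, fname,
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &fh);

    MPI_Info_free(&info);   /* the file handle keeps its own copy of the hints */
    return fh;
}

As the comment in Example 7 notes, the info object actually attached to the file may contain only the pairs the implementation considered valid, which is what MPI_File_get_info reports back.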
Example 8: PIDX_aggregated_io
int PIDX_aggregated_io(PIDX_file_io_id io_id, Agg_buffer agg_buf, PIDX_block_layout block_layout, int MODE)
{
int64_t data_offset = 0;
char file_name[PATH_MAX];
int i = 0, k = 0;
uint32_t *headers;
int total_header_size = 0;
#ifdef PIDX_RECORD_TIME
double t1, t2, t3, t4, t5;
#endif
#if PIDX_HAVE_MPI
int mpi_ret;
MPI_File fh;
MPI_Status status;
#else
int fh;
#endif
int total_chunk_size = (io_id->idx->chunk_size[0] * io_id->idx->chunk_size[1] * io_id->idx->chunk_size[2] * io_id->idx->chunk_size[3] * io_id->idx->chunk_size[4]);
if (enable_caching == 1 && agg_buf->var_number == io_id->init_index && agg_buf->sample_number == 0)
{
#ifdef PIDX_RECORD_TIME
t1 = PIDX_get_time();
#endif
int adjusted_file_index = 0;
int l = pow(2, ((int)log2((unsigned int) agg_buf->file_number * io_id->idx->blocks_per_file)));
adjusted_file_index = (l * (io_id->idx_d->idx_count[0] * io_id->idx_d->idx_count[1] * io_id->idx_d->idx_count[2]) + (((unsigned int) agg_buf->file_number * io_id->idx->blocks_per_file) - l) + (io_id->idx_d->color * l)) / io_id->idx->blocks_per_file;
generate_file_name(io_id->idx->blocks_per_file, io_id->idx->filename_template, (unsigned int) /*agg_buf->file_number*/adjusted_file_index, file_name, PATH_MAX);
#if !SIMULATE_IO
#if PIDX_HAVE_MPI
mpi_ret = MPI_File_open(MPI_COMM_SELF, file_name, MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
if (mpi_ret != MPI_SUCCESS)
{
fprintf(stderr, "[%s] [%d] MPI_File_open() failed filename %s.\n", __FILE__, __LINE__, file_name);
return PIDX_err_io;
}
#else
fh = open(file_name, O_WRONLY);
#endif
#endif
#ifdef PIDX_RECORD_TIME
t2 = PIDX_get_time();
#endif
data_offset = 0;
total_header_size = (10 + (10 * io_id->idx->blocks_per_file)) * sizeof (uint32_t) * io_id->idx->variable_count;
headers = (uint32_t*)malloc(total_header_size);
memset(headers, 0, total_header_size);
#if !SIMULATE_IO
if (enable_caching == 1)
memcpy (headers, cached_header_copy, total_header_size);
else
{
//TODO
}
#endif
#ifdef PIDX_RECORD_TIME
t3 = PIDX_get_time();
#endif
uint64_t header_size = (io_id->idx_d->start_fs_block * io_id->idx_d->fs_block_size);
#if !SIMULATE_IO
unsigned char* temp_buffer = (unsigned char*)realloc(agg_buf->buffer, agg_buf->buffer_size + header_size);
if (temp_buffer == NULL)
{
fprintf(stderr, "[%s] [%d] realloc() failed.\n", __FILE__, __LINE__);
return PIDX_err_io;
}
else
{
agg_buf->buffer = temp_buffer;
memmove(agg_buf->buffer + header_size, agg_buf->buffer, agg_buf->buffer_size);
memcpy(agg_buf->buffer, headers, total_header_size);
memset(agg_buf->buffer + total_header_size, 0, (header_size - total_header_size));
}
#endif
free(headers);
#if !SIMULATE_IO
#if PIDX_HAVE_MPI
mpi_ret = MPI_File_write_at(fh, 0, agg_buf->buffer, agg_buf->buffer_size + header_size, MPI_BYTE, &status);
if (mpi_ret != MPI_SUCCESS)
{
fprintf(stderr, "[%s] [%d] MPI_File_write_at() failed for filename %s.\n", __FILE__, __LINE__, file_name);
return PIDX_err_io;
}
int write_count;
MPI_Get_count(&status, MPI_BYTE, &write_count);
if (write_count != agg_buf->buffer_size + header_size)
{
//......... (code omitted here) .........
Author: spetruzza, Project: PIDX, Lines of code: 101, Source: PIDX_file_io.c
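Example 8 opens each target file with MPI_COMM_SELF, so every aggregator writes its file independently, and it prepends the block headers to the payload so that a single MPI_File_write_at covers the whole file. The essential shape of that header-then-payload write, reduced to a hedged sketch with caller-supplied sizes, is:

#include <mpi.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* One process writes a small header followed by its payload into its
   own file; the sizes are supplied by the caller. */
int write_header_and_payload(const char *fname,
                             const uint32_t *header, size_t header_bytes,
                             const unsigned char *payload, size_t payload_bytes)
{
    MPI_File fh;
    if (MPI_File_open(MPI_COMM_SELF, fname,
                      MPI_MODE_CREATE | MPI_MODE_WRONLY,
                      MPI_INFO_NULL, &fh) != MPI_SUCCESS)
        return -1;

    /* Pack header and payload into one buffer so a single
       MPI_File_write_at covers the whole file, as in Example 8. */
    size_t total = header_bytes + payload_bytes;
    unsigned char *buf = malloc(total);
    memcpy(buf, header, header_bytes);
    memcpy(buf + header_bytes, payload, payload_bytes);

    MPI_File_write_at(fh, 0, buf, (int)total, MPI_BYTE, MPI_STATUS_IGNORE);

    free(buf);
    MPI_File_close(&fh);
    return 0;
}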
Example 9: writePLY
static void writePLY(
MPI_Comm comm, std::string fname,
int nvertices, int nverticesPerObject,
int ntriangles, int ntrianglesPerObject,
int nObjects,
const std::vector<int3>& mesh,
const std::vector<float3>& vertices)
{
int rank;
MPI_Check( MPI_Comm_rank(comm, &rank) );
int totalVerts = 0;
MPI_Check( MPI_Reduce(&nvertices, &totalVerts, 1, MPI_INT, MPI_SUM, 0, comm) );
int totalTriangles = 0;
MPI_Check( MPI_Reduce(&ntriangles, &totalTriangles, 1, MPI_INT, MPI_SUM, 0, comm) );
MPI_File f;
MPI_Check( MPI_File_open(comm, fname.c_str(), MPI_MODE_CREATE|MPI_MODE_DELETE_ON_CLOSE|MPI_MODE_WRONLY, MPI_INFO_NULL, &f) );
MPI_Check( MPI_File_close(&f) );
MPI_Check( MPI_File_open(comm, fname.c_str(), MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &f) );
int headerSize = 0;
MPI_Offset fileOffset = 0;
if (rank == 0)
{
std::stringstream ss;
ss << "ply\n";
ss << "format binary_little_endian 1.0\n";
ss << "element vertex " << totalVerts << "\n";
ss << "property float x\nproperty float y\nproperty float z\n";
//ss << "property float xnormal\nproperty float ynormal\nproperty float znormal\n";
ss << "element face " << totalTriangles << "\n";
ss << "property list int int vertex_index\n";
ss << "end_header\n";
std::string content = ss.str();
headerSize = content.length();
MPI_Check( MPI_File_write_at(f, fileOffset, content.c_str(), headerSize, MPI_CHAR, MPI_STATUS_IGNORE) );
}
MPI_Check( MPI_Bcast(&headerSize, 1, MPI_INT, 0, comm) );
fileOffset += headerSize;
fileOffset += writeToMPI(vertices, f, fileOffset, comm);
int verticesOffset = 0;
MPI_Check( MPI_Exscan(&nvertices, &verticesOffset, 1, MPI_INT, MPI_SUM, comm));
std::vector<int4> connectivity;
for(int j = 0; j < nObjects; ++j)
for(int i = 0; i < ntrianglesPerObject; ++i)
{
int3 vertIds = mesh[i] + nverticesPerObject * j + verticesOffset;
connectivity.push_back({3, vertIds.x, vertIds.y, vertIds.z});
}
fileOffset += writeToMPI(connectivity, f, fileOffset, comm);
MPI_Check( MPI_File_close(&f));
}
Author: dimaleks, Project: uDeviceX, Lines of code: 64, Source: dump_mesh.cpp
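Example 9 computes each rank's global vertex offset with MPI_Exscan before writing its share of the PLY connectivity, and uses the MPI_MODE_DELETE_ON_CLOSE open/close pair as a portable way to remove any previous file before rewriting it. The prefix-sum-of-counts idiom is worth isolating; a minimal sketch (byte counts instead of vertex counts, illustrative only) is:

#include <mpi.h>

/* Each rank contributes 'nbytes' of data; compute where in the shared
   file this rank's chunk starts, after a common header of header_bytes. */
MPI_Offset my_file_offset(MPI_Comm comm, MPI_Offset header_bytes, int nbytes)
{
    long long mine = nbytes, before_me = 0;
    /* Exclusive prefix sum. The standard leaves rank 0's result
       undefined, so it is pre-set to 0 -- the same convention the
       example above uses for verticesOffset. */
    MPI_Exscan(&mine, &before_me, 1, MPI_LONG_LONG, MPI_SUM, comm);
    return header_bytes + (MPI_Offset)before_me;
}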
Example 10: ChimeraCheckCommand::execute
int ChimeraCheckCommand::execute(){
try{
if (abort == true) { if (calledHelp) { return 0; } return 2; }
for (int i = 0; i < fastaFileNames.size(); i++) {
m->mothurOut("Checking sequences from " + fastaFileNames[i] + " ..." ); m->mothurOutEndLine();
int start = time(NULL);
string thisNameFile = "";
if (nameFileNames.size() != 0) { thisNameFile = nameFileNames[i]; }
chimera = new ChimeraCheckRDP(fastaFileNames[i], templatefile, thisNameFile, svg, increment, ksize, outputDir);
if (m->control_pressed) { delete chimera; return 0; }
if (outputDir == "") { outputDir = m->hasPath(fastaFileNames[i]); }//if user entered a file with a path then preserve it
map<string, string> variables;
variables["[filename]"] = outputDir + m->getRootName(m->getSimpleName(fastaFileNames[i]));
string outputFileName = getOutputFileName("chimera", variables);
outputNames.push_back(outputFileName); outputTypes["chimera"].push_back(outputFileName);
#ifdef USE_MPI
int pid, numSeqsPerProcessor;
int tag = 2001;
vector<unsigned long long> MPIPos;
MPI_Status status;
MPI_Comm_rank(MPI_COMM_WORLD, &pid); //find out who we are
MPI_Comm_size(MPI_COMM_WORLD, &processors);
MPI_File inMPI;
MPI_File outMPI;
int outMode=MPI_MODE_CREATE|MPI_MODE_WRONLY;
int inMode=MPI_MODE_RDONLY;
char outFilename[1024];
strcpy(outFilename, outputFileName.c_str());
char inFileName[1024];
strcpy(inFileName, fastaFileNames[i].c_str());
MPI_File_open(MPI_COMM_WORLD, inFileName, inMode, MPI_INFO_NULL, &inMPI); //comm, filename, mode, info, filepointer
MPI_File_open(MPI_COMM_WORLD, outFilename, outMode, MPI_INFO_NULL, &outMPI);
if (m->control_pressed) { MPI_File_close(&inMPI); MPI_File_close(&outMPI); for (int j = 0; j < outputNames.size(); j++) { m->mothurRemove(outputNames[j]); } outputTypes.clear(); delete chimera; return 0; }
if (pid == 0) { //you are the root process
MPIPos = m->setFilePosFasta(fastaFileNames[i], numSeqs); //fills MPIPos, returns numSeqs
//send file positions to all processes
for(int j = 1; j < processors; j++) {
MPI_Send(&numSeqs, 1, MPI_INT, j, tag, MPI_COMM_WORLD);
MPI_Send(&MPIPos[0], (numSeqs+1), MPI_LONG, j, tag, MPI_COMM_WORLD);
}
//figure out how many sequences you have to align
numSeqsPerProcessor = numSeqs / processors;
int startIndex = pid * numSeqsPerProcessor;
if(pid == (processors - 1)){ numSeqsPerProcessor = numSeqs - pid * numSeqsPerProcessor; }
//align your part
driverMPI(startIndex, numSeqsPerProcessor, inMPI, outMPI, MPIPos);
if (m->control_pressed) { MPI_File_close(&inMPI); MPI_File_close(&outMPI); for (int j = 0; j < outputNames.size(); j++) { m->mothurRemove(outputNames[j]); } outputTypes.clear(); delete chimera; return 0; }
//wait on children
for(int j = 1; j < processors; j++) {
char buf[5];
MPI_Recv(buf, 5, MPI_CHAR, j, tag, MPI_COMM_WORLD, &status);
}
}else{ //you are a child process
MPI_Recv(&numSeqs, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
MPIPos.resize(numSeqs+1);
MPI_Recv(&MPIPos[0], (numSeqs+1), MPI_LONG, 0, tag, MPI_COMM_WORLD, &status);
//figure out how many sequences you have to align
numSeqsPerProcessor = numSeqs / processors;
int startIndex = pid * numSeqsPerProcessor;
if(pid == (processors - 1)){ numSeqsPerProcessor = numSeqs - pid * numSeqsPerProcessor; }
//align your part
driverMPI(startIndex, numSeqsPerProcessor, inMPI, outMPI, MPIPos);
if (m->control_pressed) { MPI_File_close(&inMPI); MPI_File_close(&outMPI); for (int j = 0; j < outputNames.size(); j++) { m->mothurRemove(outputNames[j]); } outputTypes.clear(); delete chimera; return 0; }
//tell parent you are done.
char buf[5];
strcpy(buf, "done");
MPI_Send(buf, 5, MPI_CHAR, 0, tag, MPI_COMM_WORLD);
}
//close files
MPI_File_close(&inMPI);
MPI_File_close(&outMPI);
//......... (code omitted here) .........
Author: EdwardMoseley, Project: mothur, Lines of code: 101, Source: chimeracheckcommand.cpp
Example 11: write_read_samples
static int write_read_samples(PIDX_file_io_id io_id, int variable_index, uint64_t hz_start_index, uint64_t hz_count, unsigned char* hz_buffer, int64_t buffer_offset, PIDX_block_layout layout, int MODE)
{
int samples_per_file, block_number, file_index, file_count, ret = 0, block_negative_offset = 0, file_number;
int bytes_per_sample, bytes_per_datatype;
int i = 0;
char file_name[PATH_MAX];
off_t data_offset = 0;
samples_per_file = io_id->idx_d->samples_per_block * io_id->idx->blocks_per_file;
bytes_per_datatype = (io_id->idx->variable[variable_index]->bits_per_value / 8) * (io_id->idx->chunk_size[0] * io_id->idx->chunk_size[1] * io_id->idx->chunk_size[2] * io_id->idx->chunk_size[3] * io_id->idx->chunk_size[4]) / (io_id->idx->compression_factor);
#if !SIMULATE_IO
hz_buffer = hz_buffer + buffer_offset * bytes_per_datatype * io_id->idx->variable[variable_index]->values_per_sample;
#endif
while (hz_count)
{
block_number = hz_start_index / io_id->idx_d->samples_per_block;
file_number = hz_start_index / samples_per_file;
file_index = hz_start_index % samples_per_file;
file_count = samples_per_file - file_index;
if ((int64_t)file_count > hz_count)
file_count = hz_count;
// build file name
int adjusted_file_index = 0;
int l = pow(2, ((int)log2((unsigned int) file_number * io_id->idx->blocks_per_file)));
adjusted_file_index = (l * (io_id->idx_d->idx_count[0] * io_id->idx_d->idx_count[1] * io_id->idx_d->idx_count[2]) + (((unsigned int) file_number * io_id->idx->blocks_per_file) - l) + (io_id->idx_d->color * l)) / io_id->idx->blocks_per_file;
ret = generate_file_name(io_id->idx->blocks_per_file, io_id->idx->filename_template, /*file_number*/adjusted_file_index, file_name, PATH_MAX);
if (ret == 1)
{
fprintf(stderr, "[%s] [%d] generate_file_name() failed.\n", __FILE__, __LINE__);
return PIDX_err_io;
}
data_offset = 0;
bytes_per_sample = io_id->idx->variable[variable_index]->bits_per_value / 8;
data_offset = file_index * bytes_per_sample * io_id->idx->variable[variable_index]->values_per_sample;
data_offset += io_id->idx_d->start_fs_block * io_id->idx_d->fs_block_size;
block_negative_offset = PIDX_blocks_find_negative_offset(io_id->idx->blocks_per_file, block_number, layout);
data_offset -= block_negative_offset * io_id->idx_d->samples_per_block * bytes_per_sample * io_id->idx->variable[variable_index]->values_per_sample;
for (l = 0; l < variable_index; l++)
{
bytes_per_sample = io_id->idx->variable[l]->bits_per_value / 8;
for (i = 0; i < io_id->idx->blocks_per_file; i++)
if (PIDX_blocks_is_block_present((i + (io_id->idx->blocks_per_file * file_number)), layout))
data_offset = data_offset + (io_id->idx->variable[l]->values_per_sample * bytes_per_sample * io_id->idx_d->samples_per_block);
}
if(MODE == PIDX_WRITE)
{
#if !SIMULATE_IO
#if PIDX_HAVE_MPI
#ifdef PIDX_DUMP_IO
if (io_id->idx_d->dump_io_info == 1 && io_id->idx->current_time_step == 0)
{
fprintf(io_dump_fp, "[A] Count %lld Target Disp %d (%d %d)\n", (long long)file_count * io_id->idx->variable[variable_index]->values_per_sample * (io_id->idx->variable[variable_index]->bits_per_value/8), (file_index * bytes_per_sample * io_id->idx->variable[variable_index]->values_per_sample - block_negative_offset * io_id->idx_d->samples_per_block * bytes_per_sample * io_id->idx->variable[variable_index]->values_per_sample)/8, (int)io_id->idx_d->start_fs_block, (int)io_id->idx_d->fs_block_size);
fflush(io_dump_fp);
}
#endif
if (io_id->idx_d->parallel_mode == 1)
{
int rank = 0;
MPI_Comm_rank(io_id->comm, &rank);
MPI_File fh;
MPI_Status status;
int mpi_ret;
mpi_ret = MPI_File_open(MPI_COMM_SELF, file_name, MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
if (mpi_ret != MPI_SUCCESS)
{
fprintf(stderr, "[%s] [%d] MPI_File_open() failed. (%s) [%d]\n", __FILE__, __LINE__, file_name, file_number);
return PIDX_err_io;
}
/*
printf("[%d] Data Offset %d Count %d\n", rank, data_offset, (file_count));
int x = 0;
for (x = 0; x < file_count; x++)
{
double x1;
memcpy(&x1, hz_buffer + x * sizeof(double), sizeof(double));
printf("Values %d %f\n", x, x1);
}
*/
mpi_ret = MPI_File_write_at(fh, data_offset, hz_buffer, file_count * io_id->idx->variable[variable_index]->values_per_sample * (io_id->idx->variable[variable_index]->bits_per_value/8), MPI_BYTE, &status);
if (mpi_ret != MPI_SUCCESS)
{
fprintf(stderr, "[%s] [%d] MPI_File_open() failed.\n", __FILE__, __LINE__);
return PIDX_err_io;
}
//......... (code omitted here) .........
Author: spetruzza, Project: PIDX, Lines of code: 101, Source: PIDX_file_io.c
Example 12: main
//......... (code omitted here) .........
}
input2hash160 = popt ? &bwiosalt2hash160 : &bwiopass2hash160;
} else if (strcmp(topt, "bv2") == 0) {
spok = 1;
input2hash160 = popt ? &brainv2salt2hash160 : &brainv2pass2hash160;
} else {
bail(1, "Unknown input type '%s'.\n", topt);
}
} else {
topt = "str";
input2hash160 = &pass2hash160;
}
if (spok) {
if (sopt && popt) {
bail(1, "Cannot specify both a salt and a passphrase\n");
}
if (popt) {
kdfpass = popt;
kdfpass_sz = strlen(popt);
} else {
if (sopt) {
kdfsalt = sopt;
kdfsalt_sz = strlen(kdfsalt);
} else {
kdfsalt = malloc(0);
kdfsalt_sz = 0;
}
}
} else {
if (popt) {
bail(1,
"Specifying a passphrase not supported with input type '%s'\n",
topt);
} else if (sopt) {
bail(1,
"Specifying a salt not supported with this input type '%s'\n",
topt);
}
}
if (bopt) {
if ((bloom = bloom_open(bopt)) == NULL) {
bail(1, "failed to open bloom filter.\n");
}
}
if (iopt) {
if (MPI_File_open(MPI_COMM_WORLD, iopt, MPI_MODE_RDONLY, MPI_INFO_NULL,
&ifile)) {
bail(1, "failed to open '%s' for reading: %s\n", iopt,
strerror(errno));
}
}
if (oopt) {
if ((ofile = fopen(oopt, (aopt ? "a" : "w"))) == NULL) {
bail(1, "failed to open '%s' for writing: %s\n", oopt,
strerror(errno));
}
}
/* use line buffered output */
setvbuf(ofile, NULL, _IOLBF, 0);
setvbuf(stderr, NULL, _IONBF, 0);
secp256k1_start();
const int overlap = 100;
char **lines;
int nlines;
readlines(&ifile, rank, size, overlap, &lines, &nlines);
fprintf(ofile, "----Welcome %d! %d Lines for you.----\n", rank, nlines);
int index = 0;
time_t start, end;
double length;
time(&start);
for (int i = 0; i < nlines - 1; i++) {
++index;
input2hash160(lines[i], strlen(lines[i]));
if (bloom) {
if (bloom_chk_hash160(bloom, hash160_uncmp.ul)) {
fprintresult(ofile, &hash160_uncmp, 'u', topt, lines[i]);
}
if (bloom_chk_hash160(bloom, hash160_compr.ul)) {
fprintresult(ofile, &hash160_compr, 'c', topt, lines[i]);
}
} else {
fprintresult(ofile, &hash160_uncmp, 'u', topt, lines[i]);
fprintresult(ofile, &hash160_compr, 'c', topt, lines[i]);
}
}
time(&end);
length = difftime(end, start);
double perSecond = index / length;
fprintf(ofile, "----Process: %d, Lines: %d, speed: %.0f/sec!----\n", rank, index, perSecond);
secp256k1_stop();
MPI_File_close(&ifile);
MPI_Finalize();
return 0;
}
Author: Jul1u5, Project: brainflayer-parallelized, Lines of code: 101, Source: brainflayer.c
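Example 12 has each rank read its own slice of the wordlist (readlines() splits the file by rank with some overlap before the hashing loop). A self-contained sketch of the underlying byte-range partitioning with MPI_File_get_size and MPI_File_read_at is shown below; it deliberately ignores the newline-boundary and overlap handling that the real readlines() must perform.

#include <mpi.h>
#include <stdlib.h>

/* Read this rank's contiguous byte range of a shared input file. */
char *read_my_chunk(const char *fname, MPI_Offset *chunk_len)
{
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, fname, MPI_MODE_RDONLY,
                  MPI_INFO_NULL, &fh);

    MPI_Offset fsize;
    MPI_File_get_size(fh, &fsize);

    /* Split the file into size roughly equal byte ranges. */
    MPI_Offset begin = fsize * rank / size;
    MPI_Offset end   = fsize * (rank + 1) / size;
    *chunk_len = end - begin;

    char *buf = malloc((size_t)*chunk_len + 1);
    MPI_File_read_at(fh, begin, buf, (int)*chunk_len, MPI_CHAR,
                     MPI_STATUS_IGNORE);
    buf[*chunk_len] = '\0';

    MPI_File_close(&fh);
    return buf;
}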
Example 13: main
//......... (code omitted here) .........
}
}
//mpio write pgm
else if (animation == 1 && (run_type == 1 || run_type == 2))
{
//default is no frame
create_frame = 0;
for (ii=0;ii<20;ii++)
{
for (jj=0;jj<animation_list[ii][1]+1;jj++)
{
// if (rank == 0)
// {
// printf("a,ii,j,k= %i,%i,%i,%i, Frame? = %i\n",
// animation_list[ii][0],ii,jj,k,(animation_list[ii][0]+jj-k)==0);
// }
if ((animation_list[ii][0] + jj - k) == 0)
{
create_frame = 1;
break;
}
}
}
if (create_frame == 1)
{
//dynamic filename with leading zeroes for easy conversion to gif
char buffer[128];
snprintf(buffer, sizeof(char)*128, "Animation/frame%04d.pgm", k);
/* open the file, and set the view */
MPI_File file;
MPI_File_open(MPI_COMM_WORLD, buffer,
MPI_MODE_CREATE|MPI_MODE_WRONLY,
MPI_INFO_NULL, &file);
MPI_File_set_view(file, 0, MPI_UNSIGNED_CHAR, MPI_UNSIGNED_CHAR,
"native", MPI_INFO_NULL);
//write header
MPI_File_write(file, &header1, 15, MPI_CHAR, MPI_STATUS_IGNORE);
//write matrix
MPI_File_set_view(file, 15, MPI_UNSIGNED_CHAR, submatrix,
"native", MPI_INFO_NULL);
MPI_File_write_all(file, section, rsize*csize,
MPI_UNSIGNED_CHAR, MPI_STATUS_IGNORE);
//write footer (trailing newline)
MPI_File_set_view(file, 15+rsize*ncols*csize*nrows,
MPI_UNSIGNED_CHAR, MPI_UNSIGNED_CHAR,
"native", MPI_INFO_NULL);
MPI_File_write(file, &footer, 1, MPI_CHAR, MPI_STATUS_IGNORE);
}
}
// BLOCKED COMMUNITATION //
if (run_type == 1)
{
//change bot (send top) to account for middle area
//alternate to avoid locking
send_to = rank - 1;
Author: AaronTHolt, Project: MPI, Lines of code: 67, Source: hw6.1-holtat.c
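Example 13 writes each PGM frame by pairing a plain header write with an MPI_File_set_view whose displacement skips the 15-byte header, so the distributed subarray lands immediately after it. The displacement/filetype interplay is the key point; the reduced sketch below uses square byte tiles and a computed header length, which are illustrative choices rather than the original frame layout.

#include <mpi.h>
#include <stdio.h>

/* Write a PGM text header from rank 0, then a 2-D tiled byte matrix
   whose file view starts right after the header. */
void write_frame(const char *fname, const unsigned char *tile,
                 int tile_rows, int tile_cols,
                 int grid_rows, int grid_cols, int my_row, int my_col)
{
    int gsize[2] = {grid_rows * tile_rows, grid_cols * tile_cols};
    int lsize[2] = {tile_rows, tile_cols};
    int start[2] = {my_row * tile_rows, my_col * tile_cols};

    MPI_Datatype tiletype;
    MPI_Type_create_subarray(2, gsize, lsize, start, MPI_ORDER_C,
                             MPI_UNSIGNED_CHAR, &tiletype);
    MPI_Type_commit(&tiletype);

    /* Every rank computes the same header, so hlen is consistent. */
    char header[64];
    int hlen = snprintf(header, sizeof header, "P5\n%d %d\n255\n",
                        gsize[1], gsize[0]);

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, fname,
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0)
        MPI_File_write_at(fh, 0, header, hlen, MPI_CHAR, MPI_STATUS_IGNORE);

    /* Displacement = header length: the tiled view begins after it. */
    MPI_File_set_view(fh, (MPI_Offset)hlen, MPI_UNSIGNED_CHAR, tiletype,
                      "native", MPI_INFO_NULL);
    MPI_File_write_all(fh, tile, tile_rows * tile_cols,
                       MPI_UNSIGNED_CHAR, MPI_STATUS_IGNORE);

    MPI_File_close(&fh);
    MPI_Type_free(&tiletype);
}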
Example 14: main
int main(int argc, char **argv)
{
MPI_Datatype newtype;
int i, ndims, array_of_gsizes[3], array_of_distribs[3];
int order, nprocs, j, len;
int array_of_dargs[3], array_of_psizes[3];
int *readbuf, *writebuf, mynod, *tmpbuf, array_size;
MPI_Count bufcount;
char *filename;
int errs = 0, toterrs;
MPI_File fh;
MPI_Status status;
MPI_Request request;
MPI_Info info = MPI_INFO_NULL;
int errcode;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &mynod);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
/* process 0 broadcasts the file name to other processes */
if (!mynod) {
filename = "testfile";
len = strlen(filename);
MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(filename, len + 1, MPI_CHAR, 0, MPI_COMM_WORLD);
}
else {
MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
filename = (char *)malloc(len + 1);
MPI_Bcast(filename, len + 1, MPI_CHAR, 0, MPI_COMM_WORLD);
}
/* create the distributed array filetype */
ndims = 3;
order = MPI_ORDER_C;
array_of_gsizes[0] = 32;
array_of_gsizes[1] = 32;
array_of_gsizes[2] = 32;
array_of_distribs[0] = MPI_DISTRIBUTE_BLOCK;
array_of_distribs[1] = MPI_DISTRIBUTE_BLOCK;
array_of_distribs[2] = MPI_DISTRIBUTE_BLOCK;
array_of_dargs[0] = MPI_DISTRIBUTE_DFLT_DARG;
array_of_dargs[1] = MPI_DISTRIBUTE_DFLT_DARG;
array_of_dargs[2] = MPI_DISTRIBUTE_DFLT_DARG;
for (i = 0; i < ndims; i++) array_of_psizes[i] = 0;
MPI_Dims_create(nprocs, ndims, array_of_psizes);
MPI_Ty
//......... (code omitted here) .........