C++ MPI_Abort Function Code Examples


This article collects and summarizes typical usage examples of the MPI_Abort function in C++. If you have been wondering what exactly MPI_Abort does, how to call it, or what real-world usage looks like, the hand-picked examples below should help.



Fifteen MPI_Abort code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
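Before diving into the examples, here is a minimal, self-contained sketch of the typical calling pattern (assuming only standard MPI; the file name input.dat and the error code 1 are arbitrary placeholders). MPI_Abort takes a communicator and an error code, makes a best-effort attempt to terminate every process attached to that communicator, and, where the runtime allows it, hands the error code back to the invoking environment:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Hypothetical fatal condition: a required input file is missing. */
    FILE *f = fopen("input.dat", "r");
    if (f == NULL) {
        fprintf(stderr, "[rank %d] cannot open input.dat, aborting all ranks\n", rank);
        /* Kills every process in MPI_COMM_WORLD, not just this one;
           MPI_Abort does not return to the caller. */
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    fclose(f);
    MPI_Finalize();
    return 0;
}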

Example 1: declareBindings


//......... some code omitted here .........
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op); // L153
#endif
  MPI_Op_free (&op); // L155
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size); // L162
  int rank;
  MPI_Group_rank (group, &rank); // L164
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170
  int result;
  MPI_Group_compare (group1, group2, &result); // L172
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup); // L174
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup); // L178
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup); // L181
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm); // L194
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader, tag,
			&newintercomm); // L204--205
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209
  int keyval;
#if 0
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
  MPI_Keyval_free (&keyval); // L217
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val); // L219
  MPI_Attr_get (comm, keyval, attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen); // L226
  MPI_Errhandler errhandler;
#if 0
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler); // L230
#endif
  MPI_Errhandler_set (comm, errhandler); // L232
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen); // L237
  int errorclass;
  MPI_Error_class (errorcode, &errorclass); // L239
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv); // L244
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
Developer: 8l, Project: rose, Lines: 101, Source: MPI-api.c


Example 2: MPI_Comm_rank

unsigned long CSysSolve::FGMRES_LinSolver(const CSysVector & b, CSysVector & x, CMatrixVectorProduct & mat_vec,
                               CPreconditioner & precond, su2double tol, unsigned long m, su2double *residual, bool monitoring) {

  int rank = 0;

#ifdef HAVE_MPI
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
  
  /*---  Check the subspace size ---*/
  
  if (m < 1) {
    if (rank == MASTER_NODE) cerr << "CSysSolve::FGMRES: illegal value for subspace size, m = " << m << endl;
#ifndef HAVE_MPI
    exit(EXIT_FAILURE);
#else
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Abort(MPI_COMM_WORLD,1);
    MPI_Finalize();
#endif
  }

  /*---  Check the subspace size ---*/
  
  if (m > 1000) {
    if (rank == MASTER_NODE) cerr << "CSysSolve::FGMRES: illegal value for subspace size (too high), m = " << m << endl;
#ifndef HAVE_MPI
    exit(EXIT_FAILURE);
#else
    MPI_Abort(MPI_COMM_WORLD,1);
    MPI_Finalize();
#endif
  }
  
  /*---  Define various arrays
	 Note: elements in w and z are initialized to x to avoid creating
	 a temporary CSysVector object for the copy constructor ---*/
  
  vector<CSysVector> w(m+1, x);
  vector<CSysVector> z(m+1, x);
  vector<su2double> g(m+1, 0.0);
  vector<su2double> sn(m+1, 0.0);
  vector<su2double> cs(m+1, 0.0);
  vector<su2double> y(m, 0.0);
  vector<vector<su2double> > H(m+1, vector<su2double>(m, 0.0));
  
  /*---  Calculate the norm of the rhs vector ---*/
  
  su2double norm0 = b.norm();
  
  /*---  Calculate the initial residual (actually the negative residual)
	 and compute its norm ---*/
  
  mat_vec(x, w[0]);
  w[0] -= b;
  
  su2double beta = w[0].norm();
  
  if ( (beta < tol*norm0) || (beta < eps) ) {
    
    /*---  System is already solved ---*/
    
    if (rank == MASTER_NODE) cout << "CSysSolve::FGMRES(): system solved by initial guess." << endl;
    return 0;
  }
  
  /*---  Normalize residual to get w_{0} (the negative sign is because w[0]
	 holds the negative residual, as mentioned above) ---*/
  
  w[0] /= -beta;
  
  /*---  Initialize the RHS of the reduced system ---*/
  
  g[0] = beta;
  
  /*--- Set the norm to the initial residual value ---*/
  
  norm0 = beta;

  /*---  Output header information including initial residual ---*/
  
  int i = 0;
  if ((monitoring) && (rank == MASTER_NODE)) {
    WriteHeader("FGMRES", tol, beta);
    WriteHistory(i, beta, norm0);
  }
  
  /*---  Loop over all search directions ---*/
  
  for (i = 0; i < (int)m; i++) {
    
    /*---  Check if solution has converged ---*/
    
    if (beta < tol*norm0) break;
    
    /*---  Precondition the CSysVector w[i] and store result in z[i] ---*/
    
    precond(w[i], z[i]);
    
    /*---  Add to Krylov subspace ---*/
//......... some code omitted here .........
Developer: EduardoMolina, Project: SU2, Lines: 101, Source: linear_solvers_structure.cpp
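One detail worth flagging in this example: MPI_Abort does not return, so the MPI_Finalize() calls placed after it are never reached. A small helper makes that single exit path explicit; this is only a sketch, and the name fgmres_fatal is hypothetical rather than anything from SU2:

#include <mpi.h>
#include <stdlib.h>

/* Hypothetical helper: one fatal-exit path for both MPI and serial builds.
   MPI_Abort never returns; exit() covers the serial case. */
static void fgmres_fatal(int code)
{
#ifdef HAVE_MPI
  MPI_Abort(MPI_COMM_WORLD, code);  /* terminates all ranks in MPI_COMM_WORLD */
#else
  exit(code);
#endif
}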


Example 3: PMPI_Probe

/* STUB */
int PMPI_Probe( int source, int tag, MPI_Comm comm, MPI_Status *status )
{
  fprintf(stderr,"%s:%d: NOT IMPLEMENTED\n",__FILE__,__LINE__);
  return MPI_Abort((MPI_Comm)NULL, MPI_UNDEFINED); 
}
Developer: haripandey, Project: trilinos, Lines: 6, Source: PMPI_Probe.c
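A side note on this stub: casting NULL to MPI_Comm only happens to work where MPI_Comm is a pointer type (as in Open MPI); in implementations such as MPICH the handle is a plain integer, so a NULL communicator is neither portable nor valid. A sketch of a more portable version of the same stub, assuming nothing beyond standard MPI, aborts on the world communicator instead:

/* Portable variant: abort on MPI_COMM_WORLD rather than a NULL handle. */
int PMPI_Probe( int source, int tag, MPI_Comm comm, MPI_Status *status )
{
  fprintf(stderr,"%s:%d: NOT IMPLEMENTED\n",__FILE__,__LINE__);
  return MPI_Abort(MPI_COMM_WORLD, MPI_UNDEFINED);
}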


Example 4: ADIOI_UFS_Fcntl

void ADIOI_UFS_Fcntl(ADIO_File fd, int flag, ADIO_Fcntl_t *fcntl_struct, int *error_code)
{
    int i, ntimes;
    ADIO_Offset curr_fsize, alloc_size, size, len, done;
    ADIO_Status status;
    char *buf;
#if defined(MPICH2) || !defined(PRINT_ERR_MSG)
    static char myname[] = "ADIOI_UFS_FCNTL";
#endif

    switch(flag) {
    case ADIO_FCNTL_GET_FSIZE:
	fcntl_struct->fsize = lseek(fd->fd_sys, 0, SEEK_END);
	if (fd->fp_sys_posn != -1) 
	     lseek(fd->fd_sys, fd->fp_sys_posn, SEEK_SET);
	if (fcntl_struct->fsize == -1) {
#ifdef MPICH2
	    *error_code = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, myname, __LINE__, MPI_ERR_IO, "**io",
		"**io %s", strerror(errno));
#elif defined(PRINT_ERR_MSG)
			*error_code = MPI_ERR_UNKNOWN;
#else /* MPICH-1 */
	    *error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ADIO_ERROR,
			      myname, "I/O Error", "%s", strerror(errno));
	    ADIOI_Error(fd, *error_code, myname);	    
#endif
	}
	else *error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_SET_DISKSPACE:
	/* will be called by one process only */
	/* On file systems with no preallocation function, I have to 
           explicitly write 
           to allocate space. Since there could be holes in the file, 
           I need to read up to the current file size, write it back, 
           and then write beyond that depending on how much 
           preallocation is needed.
           read/write in sizes of no more than ADIOI_PREALLOC_BUFSZ */

	curr_fsize = lseek(fd->fd_sys, 0, SEEK_END);
	alloc_size = fcntl_struct->diskspace;

	size = ADIOI_MIN(curr_fsize, alloc_size);
	
	ntimes = (size + ADIOI_PREALLOC_BUFSZ - 1)/ADIOI_PREALLOC_BUFSZ;
	buf = (char *) ADIOI_Malloc(ADIOI_PREALLOC_BUFSZ);
	done = 0;

	for (i=0; i<ntimes; i++) {
	    len = ADIOI_MIN(size-done, ADIOI_PREALLOC_BUFSZ);
	    ADIO_ReadContig(fd, buf, len, MPI_BYTE, ADIO_EXPLICIT_OFFSET, done,
			    &status, error_code);
	    if (*error_code != MPI_SUCCESS) {
#ifdef MPICH2
		*error_code = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, myname, __LINE__, MPI_ERR_IO, "**io",
		    "**io %s", strerror(errno));
#elif defined(PRINT_ERR_MSG)
		FPRINTF(stderr, "ADIOI_UFS_Fcntl: To preallocate disk space, ROMIO needs to read the file and write it back, but is unable to read the file. Please give the file read permission and open it with MPI_MODE_RDWR.\n");
		MPI_Abort(MPI_COMM_WORLD, 1);
#else /* MPICH-1 */
		*error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_PREALLOC_PERM,
			      myname, (char *) 0, (char *) 0);
		ADIOI_Error(fd, *error_code, myname);
#endif
                return;  
	    }
	    ADIO_WriteContig(fd, buf, len, MPI_BYTE, ADIO_EXPLICIT_OFFSET, 
                             done, &status, error_code);
	    if (*error_code != MPI_SUCCESS) return;
	    done += len;
	}

	if (alloc_size > curr_fsize) {
	    memset(buf, 0, ADIOI_PREALLOC_BUFSZ); 
	    size = alloc_size - curr_fsize;
	    ntimes = (size + ADIOI_PREALLOC_BUFSZ - 1)/ADIOI_PREALLOC_BUFSZ;
	    for (i=0; i<ntimes; i++) {
		len = ADIOI_MIN(alloc_size-done, ADIOI_PREALLOC_BUFSZ);
		ADIO_WriteContig(fd, buf, len, MPI_BYTE, ADIO_EXPLICIT_OFFSET, 
				 done, &status, error_code);
		if (*error_code != MPI_SUCCESS) return;
		done += len;  
	    }
	}
	ADIOI_Free(buf);
	if (fd->fp_sys_posn != -1) 
	    lseek(fd->fd_sys, fd->fp_sys_posn, SEEK_SET);
	*error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_SET_IOMODE:
        /* for implementing PFS I/O modes. will not occur in MPI-IO
           implementation.*/
	if (fd->iomode != fcntl_struct->iomode) {
	    fd->iomode = fcntl_struct->iomode;
	    MPI_Barrier(MPI_COMM_WORLD);
	}
	*error_code = MPI_SUCCESS;
	break;
//......... some code omitted here .........
Developer: hpc, Project: mvapich-cce, Lines: 101, Source: ad_ufs_fcntl.c


Example 5: main

/*
 * This test makes sure that after a failure, the correct group of failed
 * processes is returned from MPIX_Comm_failure_ack/get_acked.
 */
int main(int argc, char **argv)
{
    int rank, size, err, result, i;
    char buf[10] = " No errors";
    char error[MPI_MAX_ERROR_STRING];
    MPI_Group failed_grp, one_grp, world_grp;
    int one[] = { 1 };
    int world_ranks[] = { 0, 1, 2 };
    int failed_ranks[3];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size < 3) {
        fprintf(stderr, "Must run with at least 3 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    if (rank == 1) {
        exit(EXIT_FAILURE);
    }

    if (rank == 0) {
        err = MPI_Recv(buf, 10, MPI_CHAR, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        if (MPI_SUCCESS == err) {
            fprintf(stderr, "Expected a failure for receive from rank 1\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        err = MPIX_Comm_failure_ack(MPI_COMM_WORLD);
        if (MPI_SUCCESS != err) {
            int ec;
            MPI_Error_class(err, &ec);
            MPI_Error_string(err, error, &size);
            fprintf(stderr, "MPIX_Comm_failure_ack returned an error: %d\n%s", ec, error);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        err = MPIX_Comm_failure_get_acked(MPI_COMM_WORLD, &failed_grp);
        if (MPI_SUCCESS != err) {
            int ec;
            MPI_Error_class(err, &ec);
            MPI_Error_string(err, error, &size);
            fprintf(stderr, "MPIX_Comm_failure_get_acked returned an error: %d\n%s", ec, error);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        MPI_Comm_group(MPI_COMM_WORLD, &world_grp);
        MPI_Group_incl(world_grp, 1, one, &one_grp);
        MPI_Group_compare(one_grp, failed_grp, &result);
        if (MPI_IDENT != result) {
            fprintf(stderr, "First failed group contains incorrect processes\n");
            MPI_Group_size(failed_grp, &size);
            MPI_Group_translate_ranks(failed_grp, size, world_ranks, world_grp, failed_ranks);
            for (i = 0; i < size; i++)
                fprintf(stderr, "DEAD: %d\n", failed_ranks[i]);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        MPI_Group_free(&failed_grp);

        err = MPI_Recv(buf, 10, MPI_CHAR, 2, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        if (MPI_SUCCESS != err) {
            fprintf(stderr, "First receive failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        err = MPI_Recv(buf, 10, MPI_CHAR, 2, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        if (MPI_SUCCESS == err) {
            fprintf(stderr, "Expected a failure for receive from rank 2\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        err = MPIX_Comm_failure_get_acked(MPI_COMM_WORLD, &failed_grp);
        if (MPI_SUCCESS != err) {
            int ec;
            MPI_Error_class(err, &ec);
            MPI_Error_string(err, error, &size);
            fprintf(stderr, "MPIX_Comm_failure_get_acked returned an error: %d\n%s", ec, error);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        MPI_Group_compare(one_grp, failed_grp, &result);
        if (MPI_IDENT != result) {
            fprintf(stderr, "Second failed group contains incorrect processes\n");
            MPI_Group_size(failed_grp, &size);
            MPI_Group_translate_ranks(failed_grp, size, world_ranks, world_grp, failed_ranks);
            for (i = 0; i < size; i++)
                fprintf(stderr, "DEAD: %d\n", failed_ranks[i]);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        fprintf(stdout, " No errors\n");
    }
    else if (rank == 2) {
        MPI_Ssend(buf, 10, MPI_CHAR, 0, 0, MPI_COMM_WORLD);

//......... some code omitted here .........
Developer: NexMirror, Project: MPICH, Lines: 101, Source: failure_ack.c


Example 6: main

int main(int argc, char** argv) {

  int rank, size;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  
  int mpi_lattice_size = lround(sqrt(size));
  
  if(size < 4 || ! (mpi_lattice_size*mpi_lattice_size==size) ) {
    printf("You have to use a square number of MPI processes.");
    MPI_Abort(MPI_COMM_WORLD,1);
  }
  
  if( find_option( argc, argv, "-h" ) >= 0 )
  {
      printf( "Options:\n" );
      printf( "-h to see this help\n" );
      printf( "-n <int> to set the grid size\n" );
      printf( "-o <filename> to specify the output file name\n" );
      MPI_Abort(MPI_COMM_WORLD,1);
  }
  
  int GRIDSIZE = read_int( argc, argv, "-n", DEFAULT_GRIDSIZE );
  // Check gridsize for some basic assumptions
  if(GRIDSIZE%2 || GRIDSIZE%(mpi_lattice_size)) {
    printf("Only even Gridsize allowed and\nGridsize has to be a multiple of the number of MPI procs!\n");
    MPI_Abort(MPI_COMM_WORLD,1);
  }

  FILE *f;

  if(rank==0) {
    char *savename = read_string( argc, argv, "-o", "sample_conduct.txt" );
    f = savename ? fopen( savename, "w" ) : NULL;
    if( f == NULL )
    {
        printf( "failed to open %s\n", savename );
        MPI_Abort(MPI_COMM_WORLD,1);
    }
  }
  

  int my_mpi_i,my_mpi_j;
  int my_gridsize= GRIDSIZE/(mpi_lattice_size);
  int t,i,j;
  double *T, *Tn,*Tf;
  
  my_mpi_j=rank%mpi_lattice_size;
  my_mpi_i=rank/mpi_lattice_size;
  
  // Allocate Grid with a border on every side
  int padded_grid_size = my_gridsize+2;
  if(rank == 0) {
    Tf=(double *) malloc(GRIDSIZE*GRIDSIZE*sizeof(double));
  }
  T=(double *) malloc((padded_grid_size)*(padded_grid_size)*sizeof(double));
  Tn=(double *) malloc((padded_grid_size)*(padded_grid_size)*sizeof(double));
  

  
  // remember -- our grid has a border around it!
  if(rank==0)
    init_cells(Tf,GRIDSIZE);
  else {
    int i,j;
    for (i=0;i<padded_grid_size;i++)
      for(j=0;j<padded_grid_size;j++)
        T[i*padded_grid_size+j]=0;
  }
  
  
  if(rank==0) {
    for(i=0;i<mpi_lattice_size;i++) {
      for(j=0;j<mpi_lattice_size;j++) {
        if(i==0 && j==0) {
          int k,l,m=1,n=1;
          for(k=0;k<padded_grid_size;k++)
            for(l=0;l<padded_grid_size;l++)
              T[k*padded_grid_size+l]=0;
          
          for(k=0;k<my_gridsize;k++) {
          
            for(l=0;l<my_gridsize;l++) {
            
              T[m*padded_grid_size+n]=Tf[k*(GRIDSIZE) + l];
              n++;
            }
            m++;
            n=1;
          }
          continue;
        }
        
        //re-use Tn for temp arrays
        int k,l,m=1,n=1;
        for(k=0;k<padded_grid_size;k++)
          for(l=0;l<padded_grid_size;l++)
            Tn[k*padded_grid_size+l]=0;
//......... some code omitted here .........
Developer: mwess, Project: pcpc, Lines: 101, Source: conduct_mpi.c


Example 7: main

int main(int argc, char **argv)
{
    int buf[1024], amode, flag, mynod, len, i;
    MPI_File fh;
    MPI_Status status;
    MPI_Datatype newtype;
    MPI_Offset disp, offset;
    MPI_Group group;
    MPI_Datatype etype, filetype;
    char datarep[25], *filename;

    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mynod);

/* process 0 takes the file name as a command-line argument and 
   broadcasts it to other processes */
    if (!mynod) {
	i = 1;
	while ((i < argc) && strcmp("-fname", *argv)) {
	    i++;
	    argv++;
	}
	if (i >= argc) {
	    printf("\n*#  Usage: misc  <mpiparameter> -- -fname filename\n\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}
	argv++;
	len = strlen(*argv);
	filename = (char *) malloc(len+1);
	strcpy(filename, *argv);
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }
    else {
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	filename = (char *) malloc(len+1);
	MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }


    MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
                  MPI_INFO_NULL, &fh);

    MPI_File_write(fh, buf, 1024, MPI_INT, &status);

    MPI_File_sync(fh);

    MPI_File_get_amode(fh, &amode);
    if (!mynod) printf("testing MPI_File_get_amode\n");
    if (amode != (MPI_MODE_CREATE | MPI_MODE_RDWR))
	printf("amode is %d, should be %d\n\n", amode, MPI_MODE_CREATE |
                      MPI_MODE_RDWR);

    MPI_File_get_atomicity(fh, &flag);
    if (flag) printf("atomicity is %d, should be 0\n", flag);
    if (!mynod) printf("setting atomic mode\n");
    MPI_File_set_atomicity(fh, 1);
    MPI_File_get_atomicity(fh, &flag);
    if (!flag) printf("atomicity is %d, should be 1\n", flag);
    MPI_File_set_atomicity(fh, 0);
    if (!mynod) printf("reverting back to nonatomic mode\n");

    MPI_Type_vector(10, 10, 20, MPI_INT, &newtype);
    MPI_Type_commit(&newtype);

    MPI_File_set_view(fh, 1000, MPI_INT, newtype, "native", MPI_INFO_NULL);
    if (!mynod) printf("testing MPI_File_get_view\n");
    MPI_File_get_view(fh, &disp, &etype, &filetype, datarep);
    if ((disp != 1000) || strcmp(datarep, "native"))
	printf("disp = %I64, datarep = %s, should be 1000, native\n\n", disp, datarep);

    if (!mynod) printf("testing MPI_File_get_byte_offset\n");
    MPI_File_get_byte_offset(fh, 10, &disp);
    if (disp != (1000+20*sizeof(int))) printf("byte offset = %lld, should be %d\n\n", (long long) disp, (int) (1000+20*sizeof(int)));

    MPI_File_get_group(fh, &group);

    if (!mynod) printf("testing MPI_File_set_size\n");
    MPI_File_set_size(fh, 1000+15*sizeof(int));
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_File_sync(fh);
    MPI_File_get_size(fh, &disp);
    if (disp != 1000+15*sizeof(int)) printf("file size = %lld, should be %d\n\n", (long long) disp, (int) (1000+15*sizeof(int)));
 
    if (!mynod) printf("seeking to eof and testing MPI_File_get_position\n");
    MPI_File_seek(fh, 0, MPI_SEEK_END);
    MPI_File_get_position(fh, &disp);
    if (disp != 10) printf("file pointer posn = %lld, should be 10\n\n", (long long) disp);

    if (!mynod) printf("testing MPI_File_get_byte_offset\n");
    MPI_File_get_byte_offset(fh, disp, &offset);
    if (offset != (1000+20*sizeof(int))) printf("byte offset = %lld, should be %d\n\n", (long long) offset, (int) (1000+20*sizeof(int)));
    MPI_Barrier(MPI_COMM_WORLD);

    if (!mynod) printf("testing MPI_File_seek with MPI_SEEK_CUR\n");
    MPI_File_seek(fh, -10, MPI_SEEK_CUR);
    MPI_File_get_position(fh, &disp);
    MPI_File_get_byte_offset(fh, disp, &offset);
    if (offset != 1000)
	printf("file pointer posn in bytes = %I64, should be 1000\n\n", offset);
//......... some code omitted here .........
Developer: carsten-clauss, Project: MP-MPICH, Lines: 101, Source: misc.cpp


Example 8: data_server

void data_server(int agents_total, int world_width, int world_height)
{
	int np; 
	MPI_Comm_size(MPI_COMM_WORLD, &np);

	/* create a type for struct agent */
	const int nitems=5;
	int blocklengths[5] = {1,1,1,1,1};
	MPI_Datatype types[5] = {MPI_INT, MPI_INT, MPI_INT, MPI_FLOAT, MPI_FLOAT};
	MPI_Datatype mpi_agent_type;
	MPI_Aint offsets[5];

	offsets[0] = offsetof(agent, id);
	offsets[1] = offsetof(agent, x);
	offsets[2] = offsetof(agent, y);
	offsets[3] = offsetof(agent, z);
	offsets[4] = offsetof(agent, w);

	MPI_Type_create_struct(nitems, blocklengths, offsets, types, &mpi_agent_type);
	MPI_Type_commit(&mpi_agent_type);


	int num_comp_nodes = np -1;
	unsigned int num_bytes = agents_total * sizeof(agent);
	agent *h_agents_in, *h_agents_out;

	/* allocate input data */
	h_agents_in = (agent *)malloc(num_bytes);
	h_agents_out = (agent *)malloc(num_bytes);
	if(h_agents_in == NULL || h_agents_out == NULL)
	{
		printf("server couldn't allocate memory\n");
		MPI_Abort(MPI_COMM_WORLD, 1);
	}

	/* initialize input data */
	init_data(h_agents_in, agents_total);

#ifdef DEBUG 
	printf("Init data\n");
	display_data(h_agents_in, agents_total);
#endif

	int world_height_node = world_height / num_comp_nodes;
//	printf("world_height: %d\n", world_height_node);
	agent h_agents_node_in[num_comp_nodes][agents_total], h_agents_node_out[num_comp_nodes][agents_total];
	for(int process = 0; process < num_comp_nodes; process++)
	{	
		for(int i = 0; i < agents_total; i++)
		{
			if(  ( h_agents_in[i].y >= (process * world_height_node) ) and ( h_agents_in[i].y < ( (process + 1) * world_height_node ) )  )
				h_agents_node_in[process][i] = h_agents_in[i];
		}
	}

/***	
	printf("copy data 0\n");
	display_data(h_agents_node_in[0], agents_total);
	printf("copy data 1\n");
	display_data(h_agents_node_in[1], agents_total);
	printf("copy data 2\n");
	display_data(h_agents_node_in[2], agents_total);
***/

	/* send data to compute nodes */
	for(int process = 0; process < num_comp_nodes; process++)
		MPI_Send(h_agents_node_in[process], agents_total, mpi_agent_type, process, 0, MPI_COMM_WORLD);

	/* Wait for nodes to compute */
	MPI_Barrier(MPI_COMM_WORLD);
	
	/* Collect output data */
	MPI_Status status;

	for(int process = 0; process < num_comp_nodes; process++)
		MPI_Recv(h_agents_node_out[process], agents_total, mpi_agent_type, process, DATA_COLLECT, MPI_COMM_WORLD, &status); 

#ifdef DEBUG
        printf("Final Data\n");	
	/* display output data */
//	display_data(h_agents_out, agents_total);
#endif
	
	/* release resources */
	free(h_agents_in);
	free(h_agents_out); 
//	free(h_agents_node_in); 
//	free(h_agents_node_out); 
}
Developer: vhpvmx, Project: crowds, Lines: 89, Source: mpiStencilCudaStructs.cpp


Example 9: main

int main(int argc, char* argv[])
{
	double init_time, brick_time, lod_time;
	double start_time = 0;
	MPI_Init(&argc,&argv);
	int size,rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	if(rank==0) {
		start_time = MPI_Wtime();
	}	
	if(argc!=7 || strcmp(argv[1],"-h") == 0 || strcmp(argv[1],"--help") == 0) {
		if(rank==0)
			PrintHelp();
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);	
	}
	char *endptr = NULL;
	/* Size of the volume {x,y,z} */
	size_t *VOLUME = malloc(sizeof(size_t)*3);
	VOLUME[0]=(size_t)strtol(argv[1], &endptr, 10);
	VOLUME[1]=(size_t)strtol(argv[2], &endptr, 10);
	VOLUME[2]=(size_t)strtol(argv[3], &endptr, 10);
	/* Dimension of the brick excl. Ghostcells */
	const size_t BRICKSIZE = (size_t)strtol(argv[4], &endptr, 10);
	if(BRICKSIZE<1) {
		printf("BRICKSIZE is smaller than 1!\n");
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}
	/* Size of ghost cells for later subtraction */
	const size_t GHOSTCELLDIM = (size_t)strtol(argv[5], &endptr, 10);

	/* # bricks per dimension */
	size_t bricks_per_dimension[3];
	/* number of bricks? */
	size_t numberofbricks = NBricks(VOLUME, BRICKSIZE, bricks_per_dimension);
	if(numberofbricks == 0) {
		printf("ERROR determining number of bricks!\n");
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}

	/* INITIALIZATION */
	size_t *myoffsets = malloc(sizeof(size_t)*3);
	size_t mybricks;
	size_t bricks[size];
	size_t starting_brick[size];
	size_t mystart;
	size_t GBSIZE=BRICKSIZE+2*GHOSTCELLDIM;

	if(Init_MPI(rank, size, &mybricks, &mystart, myoffsets, bricks, starting_brick, numberofbricks, VOLUME, BRICKSIZE) != 0) {
		printf("ERROR @ Init_MPI\n");
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}
	if(rank==0) {
		init_time = MPI_Wtime();
		init_time -= start_time;
		start_time = MPI_Wtime();
	}
	/* BRICKING START */
	/* input file stream */
	MPI_File fpi;
	int err;
	err = MPI_File_open(MPI_COMM_WORLD, argv[6], MPI_MODE_RDONLY, MPI_INFO_NULL, &fpi);
	if(err) {
		printf("ERROR opening file %s\n", argv[6]);
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}

	/* output file stream */
	MPI_File fpo;
	char fn[256];
	sprintf(fn, "b_%zu_%zu_%zu_%zu^3.raw", VOLUME[0], VOLUME[1], VOLUME[2], GBSIZE);
	err = MPI_File_open(MPI_COMM_WORLD, fn, MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fpo);
	if(err) {
		printf("ERROR opening file %s\n", fn);
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}

	/* read from input file, reorganize data and write into output file */
	if(brick(fpi, fpo, mystart, mybricks, GBSIZE, GHOSTCELLDIM, BRICKSIZE, VOLUME, bricks_per_dimension) != 0) {
		printf("ERROR @ Rank %d: brick() failed \n", rank);
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}
	MPI_File_close(&fpi);
	MPI_File_close(&fpo);
	if(rank==0) {
		brick_time = MPI_Wtime();
		brick_time -= start_time;
		start_time = MPI_Wtime();
	}
	/* END OF BRICKING */

	size_t lod = 1;
	/* TODO set finished correct */
	bool finished = false;
	while(!finished) {
		/* read from */
		MPI_File fin;
		MPI_File fout;
		if(lod==1) {
			err = MPI_File_open(MPI_COMM_WORLD, fn, MPI_MODE_RDONLY, MPI_INFO_NULL, &fin);
//......... some code omitted here .........
Developer: szefo555, Project: bricker, Lines: 101, Source: main.c


Example 10: main

int main( int argc, char **argv )
{
    int      err = 0;
    int      *sendbuf, *recvbuf, *recvcounts;
    int      size, rank, i, j, idx, mycount, sumval;
    MPI_Comm comm;


    MTest_Init( &argc, &argv );
    comm = MPI_COMM_WORLD;

    MPI_Comm_size( comm, &size );
    MPI_Comm_rank( comm, &rank );
    recvcounts = (int *)malloc( size * sizeof(int) );
    if (!recvcounts) {
	fprintf( stderr, "Could not allocate %d ints for recvcounts\n", 
		 size );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }
    mycount = (1024 * 1024) / size;
    for (i=0; i<size; i++) 
	recvcounts[i] = mycount;
    sendbuf = (int *) malloc( mycount * size * sizeof(int) );
    if (!sendbuf) {
	fprintf( stderr, "Could not allocate %d ints for sendbuf\n", 
		 mycount * size );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }
    idx = 0;
    for (i=0; i<size; i++) {
	for (j=0; j<mycount; j++) {
	    sendbuf[idx++] = rank + i;
	}
    }
    recvbuf = (int *)malloc( mycount * sizeof(int) );
    if (!recvbuf) {
	fprintf( stderr, "Could not allocate %d ints for recvbuf\n", 
		 mycount );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }
    for (i=0; i<mycount; i++) {
	recvbuf[i] = -1;
    }

    MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm );

    sumval = size * rank + ((size - 1) * size)/2;
    /* recvbuf should be size * (rank + i) */
    for (i=0; i<mycount; i++) {
	if (recvbuf[i] != sumval) {
	    err++;
	    if (err < MAX_ERRORS) {
		fprintf( stdout, "Did not get expected value for reduce scatter\n" );
		fprintf( stdout, "[%d] Got recvbuf[%d] = %d expected %d\n",
			 rank, i, recvbuf[i], sumval );
	    }
	}
    }

    MPI_Reduce_scatter( MPI_IN_PLACE, sendbuf, recvcounts, MPI_INT, MPI_SUM, 
			comm );

    sumval = size * rank + ((size - 1) * size)/2;
    /* recv'ed values for my process should be size * (rank + i) */
    for (i=0; i<mycount; i++) {
	if (sendbuf[i] != sumval) {
	    err++;
	    if (err < MAX_ERRORS) {
		fprintf( stdout, "Did not get expected value for reduce scatter (in place)\n" );
		fprintf( stdout, "[%d] Got buf[%d] = %d expected %d\n", 
			 rank, i, sendbuf[rank*mycount+i], sumval );
	    }
	}
    }

    free(sendbuf);
    free(recvbuf);
    free(recvcounts);
       
    MTest_Finalize( err );

    MPI_Finalize( );

    return 0;
}
Developer: jimmycao, Project: mpi-test, Lines: 85, Source: redscat3.c


Example 11: RSL_LITE_PACK

RSL_LITE_PACK ( int * Fcomm0, char * buf , int * shw0 , 
           int * sendbegm0 , int * sendwm0 , int * sendbegp0 , int * sendwp0 ,
           int * recvbegm0 , int * recvwm0 , int * recvbegp0 , int * recvwp0 ,
           int * typesize0 , int * xy0 , int * pu0 , int * imemord , int * xstag0, /* not used */
           int *me0, int * np0 , int * np_x0 , int * np_y0 , 
           int * ids0 , int * ide0 , int * jds0 , int * jde0 , int * kds0 , int * kde0 ,
           int * ims0 , int * ime0 , int * jms0 , int * jme0 , int * kms0 , int * kme0 ,
           int * ips0 , int * ipe0 , int * jps0 , int * jpe0 , int * kps0 , int * kpe0 )
{
  int me, np, np_x, np_y ;
  int sendbegm , sendwm, sendbegp , sendwp ;
  int recvbegm , recvwm, recvbegp , recvwp ;
  int shw , typesize ;
  int ids , ide , jds , jde , kds , kde ;
  int ims , ime , jms , jme , kms , kme ;
  int ips , ipe , jps , jpe , kps , kpe ;
  int xy ;   /* y = 0 , x = 1 */
  int pu ;   /* pack = 0 , unpack = 1 */
  register int i, j, k, t ;
#ifdef crayx1
  register int i2,i3,i4,i_offset;
#endif
  char *p ;
  int da_buf ;
  int yp, ym, xp, xm ;
  int nbytes, ierr ;
  register int *pi, *qi ;

#ifndef STUBMPI
  MPI_Comm comm, *comm0, dummy_comm ;
  int js, je, ks, ke, is, ie, wcount ;

  comm0 = &dummy_comm ;
  *comm0 = MPI_Comm_f2c( *Fcomm0 ) ;

  shw = *shw0 ;          /* logical half-width of stencil */
  sendbegm = *sendbegm0 ;  /* send index of sten copy (edge = 1), lower/left */
  sendwm   = *sendwm0   ;  /* send width of sten copy counting towards edge, lower/left */
  sendbegp = *sendbegp0 ;  /* send index of sten copy (edge = 1), upper/right */
  sendwp   = *sendwp0   ;  /* send width of sten copy counting towards edge, upper/right */
  recvbegm = *recvbegm0 ;  /* recv index of sten copy (edge = 1), lower/left */
  recvwm   = *recvwm0   ;  /* recv width of sten copy counting towards edge, lower/left */
  recvbegp = *recvbegp0 ;  /* recv index of sten copy (edge = 1), upper/right */
  recvwp   = *recvwp0   ;  /* recv width of sten copy counting towards edge, upper/right */
  me = *me0 ; np = *np0 ; np_x = *np_x0 ; np_y = *np_y0 ;
  typesize = *typesize0 ;
  ids = *ids0-1 ; ide = *ide0-1 ; jds = *jds0-1 ; jde = *jde0-1 ; kds = *kds0-1 ; kde = *kde0-1 ;
  ims = *ims0-1 ; ime = *ime0-1 ; jms = *jms0-1 ; jme = *jme0-1 ; kms = *kms0-1 ; kme = *kme0-1 ;
  ips = *ips0-1 ; ipe = *ipe0-1 ; jps = *jps0-1 ; jpe = *jpe0-1 ; kps = *kps0-1 ; kpe = *kpe0-1 ;
  xy = *xy0 ;
  pu = *pu0 ;

/* need to adapt for other memory orders */

#define RANGE(S1,E1,S2,E2,S3,E3,S4,E4) (((E1)-(S1)+1)*((E2)-(S2)+1)*((E3)-(S3)+1)*((E4)-(S4)+1))
#define IMAX(A) (((A)>ids)?(A):ids)
#define IMIN(A) (((A)<ide)?(A):ide)
#define JMAX(A) (((A)>jds)?(A):jds)
#define JMIN(A) (((A)<jde)?(A):jde)

  da_buf = ( pu == 0 ) ? RSL_SENDBUF : RSL_RECVBUF ;

  if ( ips <= ipe && jps <= jpe ) {

  if ( np_y > 1 && xy == 0 ) {
    MPI_Cart_shift( *comm0 , 0, 1, &ym, &yp ) ;
    if ( yp != MPI_PROC_NULL && jpe <= jde  && jde != jpe ) {
      p = buffer_for_proc( yp , 0 , da_buf ) ;
      if ( pu == 0 ) {
        if ( sendwp > 0 ) {
          je = jpe - sendbegp + 1 ; js = je - sendwp + 1 ;
          ks = kps           ; ke = kpe ;
          is = IMAX(ips-shw) ; ie = IMIN(ipe+shw) ;
          nbytes = buffer_size_for_proc( yp, da_buf ) ;
	  if ( yp_curs + RANGE( js, je, kps, kpe, ips-shw, ipe+shw, 1, typesize ) > nbytes ) {
#ifndef MS_SUA
	    fprintf(stderr,"memory overwrite in rsl_lite_pack, Y pack up, %d > %d\n",
	        yp_curs + RANGE( js, je, kps, kpe, ips-shw, ipe+shw, 1, typesize ), nbytes ) ;
#endif
	    MPI_Abort(MPI_COMM_WORLD, 99) ;
          }
          if ( typesize == 8 ) {
            F_PACK_LINT ( buf, p+yp_curs, imemord, &js, &je, &ks, &ke, &is, &ie, 
                                                &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            yp_curs += wcount*typesize ;
          }
	  else if ( typesize == 4 ) {
            F_PACK_INT ( buf, p+yp_curs, imemord, &js, &je, &ks, &ke, &is, &ie,
                                               &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            yp_curs += wcount*typesize ;
	  }
	  else {
#ifndef MS_SUA
            fprintf(stderr,"internal error: %s %d\n",__FILE__,__LINE__) ;
#endif
          }
        }
      } else {
        if ( recvwp > 0 ) {
          js = jpe+recvbegp         ; je = js + recvwp - 1 ;
//......... some code omitted here .........
Developer: CEKrause, Project: WRFV_3.7.1, Lines: 101, Source: c_code.c


Example 12: ADIOI_PFS_IwriteContig

void ADIOI_PFS_IwriteContig(ADIO_File fd, void *buf, int count, 
                MPI_Datatype datatype, int file_ptr_type,
                ADIO_Offset offset, ADIO_Request *request, int *error_code)  
{
    long *id_sys;
    ADIO_Offset off;
    int len, typesize, err;
#ifndef PRINT_ERR_MSG
    static char myname[] = "ADIOI_PFS_IWRITECONTIG";
#endif

    *request = ADIOI_Malloc_request();
    (*request)->optype = ADIOI_WRITE;
    (*request)->fd = fd;
    (*request)->datatype = datatype;

    MPI_Type_size(datatype, &typesize);
    len = count * typesize;

    id_sys = (long *) ADIOI_Malloc(sizeof(long));
    (*request)->handle = (void *) id_sys;

    off = (file_ptr_type == ADIO_INDIVIDUAL) ? fd->fp_ind : offset;

    lseek(fd->fd_sys, off, SEEK_SET);
    *id_sys = _iwrite(fd->fd_sys, buf, len);

    if ((*id_sys == -1) && (errno == EQNOMID)) {
     /* the man pages say EMREQUEST, but in reality errno is set to EQNOMID! */

        /* exceeded the max. no. of outstanding requests. */

        /* complete all previous async. requests */
        ADIOI_Complete_async(&err);

        /* try again */
	*id_sys = _iwrite(fd->fd_sys, buf, len);

        if ((*id_sys == -1) && (errno == EQNOMID)) {
#ifdef PRINT_ERR_MSG
            FPRINTF(stderr, "Error in asynchronous I/O\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
#else
	    *error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ADIO_ERROR,
			      myname, "I/O Error", "%s", strerror(errno));
	    ADIOI_Error(fd, *error_code, myname);	    
	    return;
#endif
        }
    }
    else if (*id_sys == -1) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "Unknown errno %d in ADIOI_PFS_IwriteContig\n", errno);
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	*error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ADIO_ERROR,
			         myname, "I/O Error", "%s", strerror(errno));
	ADIOI_Error(fd, *error_code, myname);	    
	return;
#endif
    }

    if (file_ptr_type == ADIO_INDIVIDUAL) fd->fp_ind += len; 

    (*request)->queued = 1;
    (*request)->nbytes = len;
    ADIOI_Add_req_to_list(request);
    fd->async_count++;

    fd->fp_sys_posn = -1;   /* set it to null. */

#ifdef PRINT_ERR_MSG
    *error_code = (*id_sys == -1) ? MPI_ERR_UNKNOWN : MPI_SUCCESS;
#else
    if (*id_sys == -1) {
	*error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ADIO_ERROR,
			      myname, "I/O Error", "%s", strerror(errno));
	ADIOI_Error(fd, *error_code, myname);	    
    }
    else *error_code = MPI_SUCCESS;
#endif
}
Developer: davidheryanto, Project: sc14, Lines: 82, Source: ad_pfs_iwrite.c


Example 13: FORT_NAME

FORT_NAME( mpi_abort , MPI_ABORT )(int *comm, int *errorcode, int *ierror)
{
  *ierror=MPI_Abort( *comm, *errorcode);
}
Developer: haoleer, Project: FGOALS, Lines: 4, Source: mpi.c
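Note that this wrapper passes the Fortran integer handle straight to MPI_Abort, which only works where MPI_Comm is itself a plain int (as in the older MPICH-style builds this file targets). Under MPI-2 and later, the portable form converts the handle with MPI_Comm_f2c first; a sketch reusing the same FORT_NAME macro:

/* Portable variant: convert the Fortran handle to a C communicator first. */
FORT_NAME( mpi_abort , MPI_ABORT )(MPI_Fint *comm, int *errorcode, int *ierror)
{
  *ierror = MPI_Abort( MPI_Comm_f2c( *comm ), *errorcode );
}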


Example 14: POSIX_Xfer

/*
 * Write or read access to file using the POSIX interface.
 */
static IOR_offset_t POSIX_Xfer(int access, void *file, IOR_size_t * buffer,
                               IOR_offset_t length, IOR_param_t * param)
{
        int xferRetries = 0;
        long long remaining = (long long)length;
        char *ptr = (char *)buffer;
        long long rc;
        int fd;

        fd = *(int *)file;

        /* seek to offset */
        if (lseek64(fd, param->offset, SEEK_SET) == -1)
                ERR("lseek64() failed");

        while (remaining > 0) {
                /* write/read file */
                if (access == WRITE) {  /* WRITE */
                        if (verbose >= VERBOSE_4) {
                                fprintf(stdout,
                                        "task %d writing to offset %lld\n",
                                        rank,
                                        param->offset + length - remaining);
                        }
                        rc = write(fd, ptr, remaining);
                        if (rc == -1)
                                ERR("write() failed");
                        if (param->fsyncPerWrite == TRUE)
                                POSIX_Fsync(&fd, param);
                } else {        /* READ or CHECK */
                        if (verbose >= VERBOSE_4) {
                                fprintf(stdout,
                                        "task %d reading from offset %lld\n",
                                        rank,
                                        param->offset + length - remaining);
                        }
                        rc = read(fd, ptr, remaining);
                        if (rc == 0)
                                ERR("read() returned EOF prematurely");
                        if (rc == -1)
                                ERR("read() failed");
                }
                if (rc < remaining) {
                        fprintf(stdout,
                                "WARNING: Task %d, partial %s, %lld of %lld bytes at offset %lld\n",
                                rank,
                                access == WRITE ? "write()" : "read()",
                                rc, remaining,
                                param->offset + length - remaining);
                        if (param->singleXferAttempt == TRUE)
                                MPI_CHECK(MPI_Abort(MPI_COMM_WORLD, -1),
                                          "barrier error");
                        if (xferRetries > MAX_RETRY)
                                ERR("too many retries -- aborting");
                }
                assert(rc >= 0);
                assert(rc <= remaining);
                remaining -= rc;
                ptr += rc;
                xferRetries++;
        }
        return (length);
}
Developer: mkluge, Project: ior, Lines: 66, Source: aiori-POSIX.c


Example 15: process_args


//......... some code omitted here .........
    exit(1);
  }
  default_values(H);

#ifdef MPI
  MPI_Comm_size(FTI_COMM_WORLD, &H->nproc);
  MPI_Comm_rank(FTI_COMM_WORLD, &H->mype);
#else
  H->nproc = 1;
  H->mype = 0;
#endif
  if (donnees != NULL) {
    process_input(donnees, H);
  } else {
    fprintf(stderr, "Option -i is missing\n");
    exit(1);
  }
#endif

  H->globnx = H->nx;
  H->globny = H->ny;
  H->box[XMIN_BOX] = 0;
  H->box[XMAX_BOX] = H->nx;
  H->box[YMIN_BOX] = 0;
  H->box[YMAX_BOX] = H->ny;

#ifdef MPI
  if (H->nproc > 1) {
#if FTI==0
    MPI_Barrier(MPI_COMM_WORLD);
#endif
#if FTI>0
    MPI_Barrier(FTI_COMM_WORLD);
#endif
    // first pass: determine our actual sub-problem size
    CalcSubSurface(0, H->globnx, 0, H->globny, 0, H->nproc - 1, 0, H->box, H->mype, 0);
    // second pass: determine our neighbours
    CalcSubSurface(0, H->globnx, 0, H->globny, 0, H->nproc - 1, 0, H->box, H->mype, 1);

    H->nx = H->box[XMAX_BOX] - H->box[XMIN_BOX];
    H->ny = H->box[YMAX_BOX] - H->box[YMIN_BOX];
    printf("[%4d/%4d] x=%4d X=%4d y=%4d Y=%4d / u=%4d d=%4d l=%4d r=%4d \n", H->mype, H->nproc, H->box[XMIN_BOX], H->box[XMAX_BOX], H->box[YMIN_BOX], H->box[YMAX_BOX], H->box[UP_BOX], H->box[DOWN_BOX], H->box[LEFT_BOX], H->box[RIGHT_BOX]);

    if (H->nx <= 0) {
      printf("Decomposition not suited for this geometry along X: increase nx or change number of procs\n");
    }

    if (H->ny <= 0) {
      printf("Decomposition not suited for this geometry along Y: increase ny or change number of procs\n");
    }

    if (H->nx == 0 || H->ny == 0) {
#if FTI==0
      MPI_Abort(MPI_COMM_WORLD, 123);
#endif
#if FTI>0
      MPI_Abort(FTI_COMM_WORLD, 123);
#endif
    }

    // adapt the boundary conditions 
    if (H->box[LEFT_BOX] != -1) {
      H->boundary_left = 0;
    }
    if (H->box[RIGHT_BOX] != -1) {
      H->boundary_right = 0;
    }
    if (H->box[DOWN_BOX] != -1) {
      H->boundary_down = 0;
    }
    if (H->box[UP_BOX] != -1) {
      H->boundary_up = 0;
    }
  }
  fflush(stdout);
#endif

  if (H->nxystep == -1) {
    // default = full slab
    H->nxystep = (H->nx < H->ny) ? H->nx: H->ny;
  } else {
    if (H->nxystep > H->nx) H->nxystep = H->nx;
    if (H->nxystep > H->ny) H->nxystep = H->ny;
  }

  // small summary of the run conditions
  if (H->mype == 0) {
    printf("+-------------------+\n");
    printf("|GlobNx=%-7d     |\n", H->globnx);
    printf("|GlobNy=%-7d     |\n", H->globny);
    printf("|nx=%-7d         |\n", H->nx);
    printf("|ny=%-7d         |\n", H->ny);
    printf("|nxystep=%-7d    |\n", H->nxystep);
    printf("|tend=%-10.3f    |\n", H->tend);
    printf("|nstepmax=%-7d   |\n", H->nstepmax);
    printf("|noutput=%-7d    |\n", H->noutput);
    printf("|dtoutput=%-10.3f|\n", H->dtoutput);
    printf("+-------------------+\n");
  }
}
Developer: HydroBench, Project: Hydro, Lines: 101, Source: parametres.c
