int main( int argc, char *argv[] )
{
int errs = 0;
int *ranks;
int *ranksout;
MPI_Group gworld, grev, gself;
MPI_Comm comm;
MPI_Comm commrev;
int rank, size, i;
double start, end, time1, time2;
MTest_Init( &argc, &argv );
comm = MPI_COMM_WORLD;
MPI_Comm_size( comm, &size );
MPI_Comm_rank( comm, &rank );
ranks = malloc(size*sizeof(int));
ranksout = malloc(size*sizeof(int));
if (!ranks || !ranksout) {
fprintf(stderr, "out of memory\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
/* generate a comm with the rank order reversed */
MPI_Comm_split(comm, 0, (size-rank-1), &commrev);
MPI_Comm_group(commrev, &grev);
MPI_Comm_group(MPI_COMM_SELF, &gself);
MPI_Comm_group(comm, &gworld);
/* sanity check correctness first */
for (i=0; i < size; i++) {
ranks[i] = i;
ranksout[i] = -1;
}
MPI_Group_translate_ranks(grev, size, ranks, gworld, ranksout);
for (i=0; i < size; i++) {
if (ranksout[i] != (size-i-1)) {
if (rank == 0)
printf("%d: (gworld) expected ranksout[%d]=%d, got %d\n", rank, i, (size-rank-1), ranksout[i]);
++errs;
}
}
MPI_Group_translate_ranks(grev, size, ranks, gself, ranksout);
for (i=0; i < size; i++) {
int expected = (i == (size-rank-1) ? 0 : MPI_UNDEFINED);
if (ranksout[i] != expected) {
if (rank == 0)
printf("%d: (gself) expected ranksout[%d]=%d, got %d\n", rank, i, expected, ranksout[i]);
++errs;
}
}
/* now compare relative performance */
/* we need lots of procs to get a group large enough to have meaningful
 * numbers. On most testing machines this means that we're oversubscribing
 * cores in a big way, which might perturb the timing results. So we make
 * sure everyone has started up, and then everyone but rank 0 goes to sleep
 * to let rank 0 do all the timings. */
MPI_Barrier(comm);
if (rank != 0) {
sleep(10);
}
else /* rank==0 */ {
sleep(1); /* try to avoid timing while everyone else is making syscalls */
MPI_Group_translate_ranks(grev, size, ranks, gworld, ranksout); /*throwaway iter*/
start = MPI_Wtime();
for (i = 0; i < NUM_LOOPS; ++i) {
MPI_Group_translate_ranks(grev, size, ranks, gworld, ranksout);
}
end = MPI_Wtime();
time1 = end - start;
MPI_Group_translate_ranks(grev, size, ranks, gself, ranksout); /*throwaway iter*/
start = MPI_Wtime();
for (i = 0; i < NUM_LOOPS; ++i) {
MPI_Group_translate_ranks(grev, size, ranks, gself, ranksout);
}
end = MPI_Wtime();
time2 = end - start;
/* complain if the "gworld" time differs from the "gself" time by more than 2x the "gself" time */
if (fabs(time1 - time2) > (2.00 * time2)) {
printf("too much difference in MPI_Group_translate_ranks performance:\n");
printf("time1=%f time2=%f\n", time1, time2);
printf("(fabs(time1-time2)/time2)=%f\n", (fabs(time1-time2)/time2));
if (time1 < time2) {
printf("also, (time1<time2) is surprising...\n");
}
++errs;
}
}
free(ranks);
free(ranksout);
//... (remainder of this example omitted in the source listing) ...
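The listing is cut off above; in the MTest harness, the tail of such a test typically frees the groups and the communicator and reports the error count. A hedged sketch of a plausible tail (assumed from the MTest_Init call above, not the original elided code):

/* plausible tail for this test (assumed, not the original elided code) */
MPI_Group_free(&grev);
MPI_Group_free(&gself);
MPI_Group_free(&gworld);
MPI_Comm_free(&commrev);
MTest_Finalize(errs);   /* rank 0 prints "No Errors" or the error count */
MPI_Finalize();
return 0;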
int main (int argc, char *argv[])
{
int numtasks, namelen, rank, dest = 1, tag = 111, source = 0, size, i, j;
double start_time=0, elapsed_time=0, acum;
double *outmsg, *inmsg;
char hostname[256];
MPI_Status status,status2;
MPI_Request send_request,recv_request;
if (argc < 2)
{ printf("Usage: %s size [where size is the number of elements (doubles) to send]\n", argv[0]);
return 0;
}
size = atoi(argv[1]);
outmsg=(double*)malloc(sizeof(double)*size);
if(outmsg==NULL)
{
printf("Unable to allocate memory\n");
return 1; /* main returns int; a bare return does not compile here */
}
inmsg=(double*)malloc(sizeof(double)*size);
if(inmsg==NULL)
{
printf("Unable to allocate memory\n");
return 1;
}
MPI_Init (&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks); // get number of processes
MPI_Comm_rank(MPI_COMM_WORLD, &rank); // get current process id
MPI_Get_processor_name(hostname, &namelen); // get CPU name
//Initialize the msg buffer to the rank id.
for (i = 0; i < size; i++)
outmsg[i] = rank;
//Define the left neighbour as the source
if (rank == 0) source=numtasks-1;
else source=rank-1;
//Define the right neighbour as the destination
if(rank==numtasks-1) dest=0;
else dest=rank+1;
start_time = MPI_Wtime();
acum=0;
for (i=0; i<numtasks; i++) {
if (rank==0) printf("it: %2d - Rank %d (%s) sending data (%g) to rank %d\n",i,rank, hostname, inmsg[0], dest);
MPI_Isend(outmsg, size, MPI_DOUBLE, dest, tag,MPI_COMM_WORLD,&send_request);
MPI_Recv (inmsg, size, MPI_DOUBLE, source, tag, MPI_COMM_WORLD,&status);
acum = acum + inmsg[0];
if (rank==0) printf("it: %2d - Rank %d received data (%g) from rank %d (acum=%g)\n",i,rank,outmsg[0],source,acum);
MPI_Wait(&send_request, &status2);
//Copy the inmsg to outmsg for the next iteration.
for (j = 0; j < size; j++) outmsg[j] = inmsg[j];
}
MPI_Barrier(MPI_COMM_WORLD);
elapsed_time = MPI_Wtime() - start_time;
printf(" Rank %d: Elapsed time to send %6d double(s) across a ring made up by %2d (acum=%g) in %g ms\n", rank, size, numtasks, acum, elapsed_time*1e03);
MPI_Finalize ();
free(outmsg);
free(inmsg);
return 0;
}
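Note that the Isend/Recv/Wait triple above is what keeps the ring from deadlocking: a blocking MPI_Send could stall once the message no longer fits the eager buffer. An equivalent and simpler formulation of one shift step uses MPI_Sendrecv (a sketch, not part of the original):

/* one ring-shift step with MPI_Sendrecv instead of Isend/Recv/Wait */
MPI_Status status;
MPI_Sendrecv(outmsg, size, MPI_DOUBLE, dest,   tag,
             inmsg,  size, MPI_DOUBLE, source, tag,
             MPI_COMM_WORLD, &status);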
static int
test_mpio_1wMr(char *filename, int special_request)
{
char hostname[128];
int mpi_size, mpi_rank;
MPI_File fh;
char mpi_err_str[MPI_MAX_ERROR_STRING];
int mpi_err_strlen;
int mpi_err;
unsigned char writedata[DIMSIZE], readdata[DIMSIZE];
unsigned char expect_val;
int i, irank;
int nerrs = 0; /* number of errors */
int atomicity;
MPI_Offset mpi_off;
MPI_Status mpi_stat;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if (MAINPROCESS && VERBOSE_MED){
printf("Testing one process writes, all processes read.\n");
printf("Using %d processes accessing file %s\n", mpi_size, filename);
printf(" (Filename can be specified via program argument)\n");
}
/* show the hostname so that we can tell where the processes are running */
if (VERBOSE_DEF){
if (gethostname(hostname, 128) < 0){
PRINTID;
printf("gethostname failed\n");
return 1;
}
PRINTID;
printf("hostname=%s\n", hostname);
}
/* Delete any old file in order to start anew. */
/* Must delete because MPI_File_open does not have a Truncate mode. */
/* Don't care if this returns an error. */
MPI_File_delete(filename, MPI_INFO_NULL);
MPI_Barrier(MPI_COMM_WORLD); /* prevent race condition */
if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
MPI_MODE_RDWR | MPI_MODE_CREATE ,
MPI_INFO_NULL, &fh))
!= MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
printf("MPI_File_open failed (%s)\n", mpi_err_str);
return 1;
}
if (special_request & USEATOM){
/* ==================================================
 * Set atomicity to true (1). A POSIX compliant filesystem
 * should not need this.
 * ==================================================*/
if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
}
if (VERBOSE_HI)
printf("Initial atomicity = %d\n", atomicity);
if ((mpi_err = MPI_File_set_atomicity(fh, 1)) != MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
printf("MPI_File_set_atomicity failed (%s)\n", mpi_err_str);
}
if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
}
if (VERBOSE_HI)
printf("After set_atomicity atomicity = %d\n", atomicity);
}
/* This barrier is not strictly necessary, but do it anyway. */
MPI_Barrier(MPI_COMM_WORLD);
if (VERBOSE_HI){
PRINTID;
printf("between MPI_Barrier and MPI_File_write_at\n");
}
/* ==================================================
* Each process calculates what to write but
* only process irank(0) writes.
* ==================================================*/
irank=0;
for (i=0; i < DIMSIZE; i++)
writedata[i] = irank*DIMSIZE + i;
mpi_off = irank*DIMSIZE;
/* Only one process writes */
if (mpi_rank==irank){
if (VERBOSE_HI){
PRINTID; printf("wrote %d bytes at %ld\n", DIMSIZE, (long)mpi_off);
}
//... (remainder of this example omitted in the source listing) ...
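The listing is truncated above; after the single-rank write, all ranks would read the data back and verify it, as the test name says. A hedged sketch of that read-and-verify phase, reusing the declarations above (assumed, not the original elided code):

/* sketch of the elided read/verify phase: every rank reads the block
 * written by rank irank and checks the pattern stored above */
MPI_Barrier(MPI_COMM_WORLD);
if ((mpi_err = MPI_File_read_at(fh, mpi_off, readdata, DIMSIZE, MPI_BYTE,
        &mpi_stat)) != MPI_SUCCESS){
    MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
    PRINTID;
    printf("MPI_File_read_at failed (%s)\n", mpi_err_str);
    return 1;
}
for (i=0; i < DIMSIZE; i++){
    expect_val = irank*DIMSIZE + i;
    if (readdata[i] != expect_val){
        PRINTID;
        printf("read data[%d:%d] got %02x, expect %02x\n", irank, i,
               readdata[i], expect_val);
        nerrs++;
    }
}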
Example 10: test_mpio_derived_dtype
//... (beginning of this example omitted in the source listing) ...
!= MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
printf("MPI_Type_struct failed (%s)\n", mpi_err_str);
return 1;
}
if((mpi_err=MPI_Type_commit(&adv_filetype))!=MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
return 1;
}
if((mpi_err = MPI_File_set_view(fh,disp,etype,adv_filetype,"native",MPI_INFO_NULL))!= MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
printf("MPI_File_set_view failed (%s)\n", mpi_err_str);
return 1;
}
if((mpi_err = MPI_File_write(fh,buf,3,MPI_BYTE,&Status))!= MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
printf("MPI_File_write failed (%s)\n", mpi_err_str);
return 1;
}
if((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
printf("MPI_File_close failed (%s)\n", mpi_err_str);
return 1;
}
if((mpi_err = MPI_File_open(MPI_COMM_WORLD,filename,MPI_MODE_RDONLY,MPI_INFO_NULL,&fh)) != MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
printf("MPI_File_open failed (%s)\n", mpi_err_str);
return 1;
}
if((mpi_err = MPI_File_set_view(fh,0,MPI_BYTE,MPI_BYTE,"native",MPI_INFO_NULL))!= MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
printf("MPI_File_set_view failed (%s)\n", mpi_err_str);
return 1;
}
if((mpi_err = MPI_File_read(fh,outbuf,3,MPI_BYTE,&Status))!=MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
printf("MPI_File_read failed (%s)\n", mpi_err_str);
return 1;
}
if(outbuf[2]==2) {
retcode = 0;
}
else {
/* if(mpi_rank == 0) {
printf("complicated derived datatype is NOT working at this platform\n");
printf("go back to hdf5/config and find the corresponding\n");
printf("configure-specific file and change ?????\n");
}
*/
retcode = -1;
}
if((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
printf("MPI_File_close failed (%s)\n", mpi_err_str);
return 1;
}
mpi_err = MPI_Barrier(MPI_COMM_WORLD);
#ifdef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
if(retcode == -1) {
if(mpi_rank == 0) {
printf("Complicated derived datatype is NOT working at this platform\n");
printf("Go back to hdf5/config and find the corresponding\n");
printf("configure-specific file (for example, powerpc-ibm-aix5.x) and add\n");
printf("hdf5_cv_mpi_complex_derived_datatype_works=${hdf5_cv_mpi_complex_derived_datatype-works='no'}\n");
printf(" at the end of the file.\n");
printf(" Please report to [email protected] about this problem.\n");
}
retcode = 1;
}
#else
if(retcode == 0) {
if(mpi_rank == 0) {
printf(" This is NOT an error, What it really says is\n");
printf("Complicated derived datatype is WORKING at this platform\n");
printf(" Go back to hdf5/config and find the corresponding \n");
printf(" configure-specific file (for example, powerpc-ibm-aix5.x) and delete the line\n");
printf("hdf5_cv_mpi_complex_derived_datatype_works=${hdf5_cv_mpi_complex_derived_datatype-works='no'}\n");
printf(" at the end of the file.\n");
printf("Please report to [email protected] about this problem.\n");
}
retcode = 1;
}
if(retcode == -1) retcode = 0;
#endif
return retcode;
}
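MPI_Type_struct, used above, was deprecated in MPI-2 and removed in MPI-3; new code spells it MPI_Type_create_struct with MPI_Aint displacements. A minimal hedged sketch of building and committing such a type (the field layout is illustrative only, not taken from this test):

/* minimal sketch: the MPI-2+ replacement for MPI_Type_struct */
#include <mpi.h>

static int build_example_struct(MPI_Datatype *newtype)
{
    int          blocklens[2] = {1, 1};
    MPI_Aint     displs[2]    = {0, sizeof(int)};     /* illustrative offsets */
    MPI_Datatype types[2]     = {MPI_INT, MPI_BYTE};
    int mpi_err = MPI_Type_create_struct(2, blocklens, displs, types, newtype);
    if (mpi_err != MPI_SUCCESS)
        return mpi_err;
    return MPI_Type_commit(newtype);
}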
Example 11: test_mpio_overlap_writes
static int
test_mpio_overlap_writes(char *filename)
{
int mpi_size, mpi_rank;
MPI_Comm comm;
MPI_Info info = MPI_INFO_NULL;
int color, mrc;
MPI_File fh;
int i;
int vrfyerrs, nerrs;
unsigned char buf[4093]; /* use some prime number for size */
int bufsize = sizeof(buf);
MPI_Offset stride;
MPI_Offset mpi_off;
MPI_Status mpi_stat;
if (VERBOSE_MED)
printf("MPIO independent overlapping writes test on file %s\n",
filename);
nerrs = 0;
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
/* Need at least 2 processes */
if (mpi_size < 2) {
if (MAINPROCESS)
printf("Need at least 2 processes to run MPIO test.\n");
printf(" -SKIP- \n");
return 0;
}
/* splits processes 0 to n-2 into one comm. and the last one into another */
color = ((mpi_rank < (mpi_size - 1)) ? 0 : 1);
mrc = MPI_Comm_split (MPI_COMM_WORLD, color, mpi_rank, &comm);
VRFY((mrc==MPI_SUCCESS), "Comm_split succeeded");
if (color==0){
/* First n-1 processes (color==0) open a file and write it */
mrc = MPI_File_open(comm, filename, MPI_MODE_CREATE|MPI_MODE_RDWR,
info, &fh);
VRFY((mrc==MPI_SUCCESS), "");
stride = 1;
mpi_off = mpi_rank*stride;
while (mpi_off < MPIO_TEST_WRITE_SIZE){
/* make sure the write does not exceed the TEST_WRITE_SIZE */
if (mpi_off+stride > MPIO_TEST_WRITE_SIZE)
stride = MPIO_TEST_WRITE_SIZE - mpi_off;
/* set data to some trivial pattern for easy verification */
for (i=0; i<stride; i++)
buf[i] = (unsigned char)(mpi_off+i);
mrc = MPI_File_write_at(fh, mpi_off, buf, (int)stride, MPI_BYTE,
&mpi_stat);
VRFY((mrc==MPI_SUCCESS), "");
/* move the offset pointer to last byte written by all processes */
mpi_off += (mpi_size - 1 - mpi_rank) * stride;
/* Increase chunk size without exceeding buffer size. */
/* Then move the starting offset for next write. */
stride *= 2;
if (stride > bufsize)
stride = bufsize;
mpi_off += mpi_rank*stride;
}
/* close file and free the communicator */
mrc = MPI_File_close(&fh);
VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");
mrc = MPI_Comm_free(&comm);
VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
/* sync with the other waiting processes */
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc==MPI_SUCCESS), "Sync after writes");
}else{
/* last process waits till writes are done,
* then opens file to verify data.
*/
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc==MPI_SUCCESS), "Sync after writes");
mrc = MPI_File_open(comm, filename, MPI_MODE_RDONLY,
info, &fh);
VRFY((mrc==MPI_SUCCESS), "");
stride = bufsize;
for (mpi_off=0; mpi_off < MPIO_TEST_WRITE_SIZE; mpi_off += bufsize){
/* make sure it does not read beyond end of data */
if (mpi_off+stride > MPIO_TEST_WRITE_SIZE)
stride = MPIO_TEST_WRITE_SIZE - mpi_off;
mrc = MPI_File_read_at(fh, mpi_off, buf, (int)stride, MPI_BYTE,
&mpi_stat);
VRFY((mrc==MPI_SUCCESS), "");
vrfyerrs=0;
for (i=0; i<stride; i++){
//... (remainder of this example omitted in the source listing) ...
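The verification loop body is elided at the cut above; a hedged sketch of what it presumably does, matching the trivial pattern the writers stored (each byte equals the low byte of its absolute file offset; assumed, not the original code):

/* hedged sketch of the elided byte-wise verification */
for (i=0; i < stride; i++){
    unsigned char expected = (unsigned char)(mpi_off + i);
    if (buf[i] != expected){
        if (++vrfyerrs <= 10)  /* report only the first few mismatches */
            printf("proc %d: data error at [%ld], expect %d, got %d\n",
                   mpi_rank, (long)(mpi_off+i), (int)expected, (int)buf[i]);
    }
}
nerrs += vrfyerrs;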
Example 12: test_mpio_gb_file
//... (beginning of this example omitted in the source listing) ...
INFO((mpi_off>0), "4GB OFFSET assignment no overflow");
INFO((mpi_off-1)==FOUR_GB_LESS1, "4GB OFFSET assignment succeed");
/* verify correctness of increasing from below 4 GB to above 4 GB */
mpi_off = FOUR_GB_LESS1;
for (i=0; i < 3; i++){
mpi_off_old = mpi_off;
mpi_off = mpi_off + 1;
/* no overflow */
INFO((mpi_off>0), "4GB OFFSET increment no overflow");
/* correct inc. */
INFO((mpi_off-1)==mpi_off_old, "4GB OFFSET increment succeed");
}
}
}
/*
* Verify if we can write to a file of multiple GB sizes.
*/
if (VERBOSE_MED)
printf("MPIO GB file test %s\n", filename);
if (sizeof_mpi_offset <= 4){
printf("Skipped GB file range test "
"because MPI_Offset cannot support it\n");
}else{
buf = malloc(MB);
VRFY((buf!=NULL), "malloc succeed");
/* open a new file. Remove it first in case it exists. */
/* Must delete because MPI_File_open does not have a Truncate mode. */
/* Don't care if this returns an error. */
MPI_File_delete(filename, MPI_INFO_NULL);
MPI_Barrier(MPI_COMM_WORLD); /* prevent race condition */
mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE|MPI_MODE_RDWR,
info, &fh);
VRFY((mrc==MPI_SUCCESS), "MPI_FILE_OPEN");
printf("MPIO GB file write test %s\n", filename);
/* instead of writing every byte of the file, we will just write
* some data around the 2 and 4 GB boundaries. That should cover
* potential integer overflow and filesystem size limits.
*/
writerrs = 0;
for (n=2; n <= 4; n+=2){
ntimes = GB/MB*n/mpi_size + 1;
for (i=ntimes-2; i <= ntimes; i++){
mpi_off = (i*mpi_size + mpi_rank)*(MPI_Offset)MB;
if (VERBOSE_MED)
HDfprintf(stdout,"proc %d: write to mpi_off=%016llx, %lld\n",
mpi_rank, mpi_off, mpi_off);
/* set data to some trivial pattern for easy verification */
for (j=0; j<MB; j++)
*(buf+j) = i*mpi_size + mpi_rank;
if (VERBOSE_MED)
HDfprintf(stdout,"proc %d: writing %d bytes at offset %lld\n",
mpi_rank, MB, mpi_off);
mrc = MPI_File_write_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat);
INFO((mrc==MPI_SUCCESS), "GB size file write");
if (mrc!=MPI_SUCCESS)
writerrs++;
}
}
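The head of this test is elided; judging from the INFO checks shown above, it presumably probes whether MPI_Offset is wide enough before attempting offsets past 4 GB. A hedged sketch of such a probe (FOUR_GB_LESS1 assumed to be 0xFFFFFFFF; not the original elided code):

/* hedged sketch of an MPI_Offset width probe */
int sizeof_mpi_offset = (int)sizeof(MPI_Offset);
if (sizeof_mpi_offset > 4){
    /* with a 64-bit MPI_Offset, crossing the 4 GB boundary must not wrap */
    MPI_Offset mpi_off = (MPI_Offset)0xFFFFFFFFULL;  /* FOUR_GB_LESS1 */
    mpi_off = mpi_off + 1;              /* now exactly 4 GB */
    INFO((mpi_off > 0), "4GB OFFSET increment no overflow");
}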
Example 13: main
//... (beginning of this example omitted in the source listing) ...
MPI_BANNER("MPIO independent overlapping writes...");
ret_code = test_mpio_overlap_writes(filenames[0]);
ret_code = errors_sum(ret_code);
if (mpi_rank==0 && ret_code > 0){
printf("***FAILED with %d total errors\n", ret_code);
nerrors += ret_code;
}
/*=======================================
* MPIO complicated derived datatype test
*=======================================*/
/* test_mpio_derived_dtype often hangs when it fails.
 * Do not run it if it is known NOT to work, unless explicitly
 * asked to run via high verbose mode.
 */
#ifdef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
MPI_BANNER("MPIO complicated derived datatype test...");
ret_code = test_mpio_derived_dtype(filenames[0]);
#else
if (VERBOSE_HI){
MPI_BANNER("MPIO complicated derived datatype test...");
ret_code = test_mpio_derived_dtype(filenames[0]);
}else{
MPI_BANNER("MPIO complicated derived datatype test SKIPPED.");
ret_code = 0; /* fake ret_code */
}
#endif
ret_code = errors_sum(ret_code);
if (mpi_rank==0 && ret_code > 0){
printf("***FAILED with %d total errors\n", ret_code);
nerrors += ret_code;
}
/*=======================================
* MPIO special collective IO test
*=======================================*/
/* test_special_collective_io often hangs when it fails.
 * Do not run it if it is known NOT to work, unless explicitly
 * asked to run via high verbose mode.
 */
if(mpi_size !=4){
MPI_BANNER("MPIO special collective io test SKIPPED.");
if(mpi_rank == 0){
printf("Use FOUR processes to run this test\n");
printf("If you still see the <test SKIPPED>, use <-vh> option to verify the test\n");
}
ret_code = 0;
goto sc_finish;
}
#ifdef H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS
MPI_BANNER("MPIO special collective io test...");
ret_code = test_mpio_special_collective(filenames[0]);
#else
if (VERBOSE_HI){
MPI_BANNER("MPIO special collective io test...");
ret_code = test_mpio_special_collective(filenames[0]);
}else{
MPI_BANNER("MPIO special collective io test SKIPPED.");
ret_code = 0; /* fake ret_code */
}
#endif
sc_finish:
ret_code = errors_sum(ret_code);
if (mpi_rank==0 && ret_code > 0){
printf("***FAILED with %d total errors\n", ret_code);
nerrors += ret_code;
}
finish:
/* make sure all processes are finished before final report, cleanup
* and exit.
*/
MPI_Barrier(MPI_COMM_WORLD);
if (MAINPROCESS){ /* only process 0 reports */
printf("===================================\n");
if (nerrors){
printf("***MPI tests detected %d errors***\n", nerrors);
}
else{
printf("MPI tests finished with no errors\n");
}
printf("===================================\n");
}
/* turn off alarm */
ALARM_OFF;
h5_cleanup(FILENAME, fapl);
H5close();
/* MPI_Finalize must be called AFTER H5close which may use MPI calls */
MPI_Finalize();
/* cannot just return (nerrors) because the exit code is limited to 1 byte */
return(nerrors!=0);
}
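errors_sum is not shown in this listing; a common implementation (and a plausible guess at this one) reduces the per-rank error counts so that every rank sees the same global total:

/* plausible errors_sum (assumed, not taken from t_mpi.c) */
static int errors_sum(int nerrs)
{
    int temp;
    MPI_Allreduce(&nerrs, &temp, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    return temp;
}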
Example 14: Init_ForecastData
ForecastData* Init_ForecastData(char* fcst_filename,unsigned int string_size)
{
FILE* inputfile = NULL;
ForecastData* Forecaster;
int errorcode,valsread;
char end_char;
unsigned int buff_size = string_size + 20;
char* linebuffer = (char*) malloc(buff_size*sizeof(char));
MPI_Barrier(MPI_COMM_WORLD);
if(my_rank == 0)
{
//Open file
inputfile = fopen(fcst_filename,"r");
errorcode = 0;
if(!inputfile)
{
printf("[%i]: Error opening file %s.\n",my_rank,fcst_filename);
errorcode = 1;
}
}
//Check if the forecast file was opened
MPI_Bcast(&errorcode,1,MPI_INT,0,MPI_COMM_WORLD);
if(errorcode) return NULL;
//Reserve space
Forecaster = (ForecastData*) malloc(sizeof(ForecastData));
Forecaster->model_name = (char*) malloc(string_size*sizeof(char));
//Read model name
//if(my_rank == 0)
{
ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
valsread = sscanf(linebuffer,"%s",Forecaster->model_name);
if(ReadLineError(valsread,1,"forecaster model name")) return NULL;
//length = strlen(Forecaster->model_name);
}
//MPI_Bcast(&length,1,MPI_UNSIGNED,0,MPI_COMM_WORLD);
//MPI_Bcast(Forecaster->model_name,length+1,MPI_CHAR,0,MPI_COMM_WORLD);
//Read if data is displayed on ifis
//if(my_rank == 0)
{
ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
valsread = sscanf(linebuffer,"%hi",&(Forecaster->ifis_display));
if(ReadLineError(valsread,1,"flag if displaying on ifis")) return NULL;
}
//MPI_Bcast(&(Forecaster->ifis_display),1,MPI_SHORT,0,MPI_COMM_WORLD);
//Read which forcing index is used for forecasting
//if(my_rank == 0)
{
ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
valsread = sscanf(linebuffer,"%u",&(Forecaster->forecasting_forcing));
if(ReadLineError(valsread,1,"index of forecastin forcing")) return NULL;
}
//MPI_Bcast(&(Forecaster->forecasting_forcing),1,MPI_UNSIGNED,0,MPI_COMM_WORLD);
//Read number of rainfall steps to use per forecast
//if(my_rank == 0)
{
ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
valsread = sscanf(linebuffer,"%u",&(Forecaster->num_rainsteps));
if(ReadLineError(valsread,1,"number of precipitation values")) return NULL;
}
//MPI_Bcast(&(Forecaster->num_rainsteps),1,MPI_UNSIGNED,0,MPI_COMM_WORLD);
//Read forecast window
ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
valsread = sscanf(linebuffer,"%lf",&(Forecaster->forecast_window));
if(ReadLineError(valsread,1,"forecast window")) return NULL;
//Read and create a database connection for the rain maps
Forecaster->rainmaps_filename = NULL;
Forecaster->rainmaps_db = NULL;
//if(my_rank == 0)
{
Forecaster->rainmaps_filename = (char*) malloc(string_size*sizeof(char));
ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
valsread = sscanf(linebuffer,"%s",Forecaster->rainmaps_filename);
if(ReadLineError(valsread,1,"rain map filename")) return NULL;
Forecaster->rainmaps_db = ReadDBC(Forecaster->rainmaps_filename,string_size);
if(!Forecaster->rainmaps_db) return NULL;
}
//Read halt filename
Forecaster->halt_filename = (char*) malloc(string_size*sizeof(char));
//if(my_rank == 0)
{
ReadLineFromTextFile(inputfile,linebuffer,buff_size,string_size);
valsread = sscanf(linebuffer,"%s",Forecaster->halt_filename);
if(ReadLineError(valsread,1,"halt filename")) return NULL;
//length = strlen(Forecaster->halt_filename);
}
//MPI_Bcast(&length,1,MPI_UNSIGNED,0,MPI_COMM_WORLD);
//MPI_Bcast(Forecaster->halt_filename,length+1,MPI_CHAR,0,MPI_COMM_WORLD);
//Read ending mark
//... (remainder of this example omitted in the source listing) ...
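The commented-out MPI_Bcast calls above hint at the intended pattern: rank 0 parses each field from the file, then broadcasts it to the other ranks. A hedged sketch for the model-name string (assumes <string.h> is included; not the original elided code):

/* hedged sketch of the rank-0-reads-then-broadcasts pattern */
unsigned int length = 0;
if (my_rank == 0)
    length = (unsigned int) strlen(Forecaster->model_name);
MPI_Bcast(&length, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
MPI_Bcast(Forecaster->model_name, (int)(length + 1), MPI_CHAR, 0, MPI_COMM_WORLD);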