
C++ mpi::Status Class Code Examples


This article collects typical code examples of how the mpi::Status class is used in C++. If you have been struggling with questions such as: what exactly does the C++ Status class do, how is Status used, and what do real Status usage examples look like? Then the hand-picked class code examples below should help.



A total of 20 code examples of the Status class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
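Before diving into the examples, here is a minimal, self-contained sketch of the pattern that most of them share: probe an incoming message, ask the MPI::Status object for its size and source, then receive into a buffer of exactly that size. It is written against the MPI-2 C++ bindings (MPI::...) used throughout this article; it is not taken from any of the projects below, and the tag value 0 is illustrative only.

// Minimal sketch (not from any project below). Run with at least two ranks,
// e.g. "mpirun -np 2 ./a.out". Uses the MPI-2 C++ bindings, which are
// deprecated in newer MPI versions but are what the examples here rely on.
#include <mpi.h>
#include <iostream>
#include <vector>

int main(int argc, char *argv[]) {
    MPI::Init(argc, argv);
    int rank = MPI::COMM_WORLD.Get_rank();

    if (rank == 0) {
        const char msg[] = "hello from rank 0";          // includes the '\0'
        MPI::COMM_WORLD.Send(msg, sizeof(msg), MPI::CHAR, 1, /*tag*/ 0);
    } else if (rank == 1) {
        MPI::Status status;
        MPI::COMM_WORLD.Probe(0, 0, status);             // wait, but do not receive yet
        int length = status.Get_count(MPI::CHAR);        // how many chars are pending?
        std::vector<char> buffer(length);
        MPI::COMM_WORLD.Recv(buffer.data(), length, MPI::CHAR,
                             status.Get_source(), status.Get_tag());
        std::cout << "rank 1 received " << length << " chars: "
                  << buffer.data() << std::endl;
    }

    MPI::Finalize();
    return 0;
}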

Example 1: receive_from_master

void Module_DCREATE::receive_from_master() const {
    size_t length;
    MPI::Status status;
    int k;
    int blockLength;

    MPI::COMM_WORLD.Recv(&k,1,MPI::INT,0,COMMUNICATION_CHANNEL);

    MPI::COMM_WORLD.Recv(&blockLength,1,MPI::INT,0,COMMUNICATION_CHANNEL);

    MPI::COMM_WORLD.Probe(0,COMMUNICATION_CHANNEL,status);
    length = status.Get_count(MPI::CHAR);
    char input[length];
    MPI::COMM_WORLD.Recv(input,length,MPI::CHAR,0,COMMUNICATION_CHANNEL);

    MPI::COMM_WORLD.Probe(0,COMMUNICATION_CHANNEL,status);
    length = status.Get_count(MPI::CHAR);
    char output[length];
    MPI::COMM_WORLD.Recv(output,length,MPI::CHAR,0,COMMUNICATION_CHANNEL);

    DEFAULT_CHANNEL << "Informations from master received by node " << my_rank << endl;

    if (strlen(input) != 0 and strlen(output) != 0)
    	compute_hash(k,blockLength,input,output,false); //TODO handle methyl_hash

}
Developer: vezzi, Project: ERNE, Lines of code: 26, Source file: Module_DCREATE.cpp
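A note on Example 1: the buffers input and output are sized with runtime-length arrays (char input[length]), which is a compiler extension rather than standard C++, and Recv does not guarantee a terminating '\0' for the later strlen calls. A hedged alternative fragment using the same Probe/Get_count pattern with a std::vector buffer might look like this (COMMUNICATION_CHANNEL is the tag constant from the example; everything else is illustrative and not ERNE code):

// Illustrative fragment only, not ERNE code: portable buffer sizing for Example 1.
// Requires <vector> and <string>.
MPI::Status status;
MPI::COMM_WORLD.Probe(0, COMMUNICATION_CHANNEL, status);
int length = status.Get_count(MPI::CHAR);
std::vector<char> input(length + 1, '\0');   // extra byte guarantees termination
MPI::COMM_WORLD.Recv(input.data(), length, MPI::CHAR, 0, COMMUNICATION_CHANNEL);
std::string input_path(input.data());        // safe even if the sender omitted '\0'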


Example 2: recvData

    bool recvData(std::vector<double>& receivedData)
    {
        bool isDataReceived = false;
        if ( intraComm != MPI::COMM_NULL)
        {
            MPI::Status status;
            double buffer[100];
            intraComm.Recv(buffer, 100,
                           MPI::DOUBLE,
                           MPI::ANY_SOURCE,
                           /*tag*/ 100,
                           status);

            int count = status.Get_count(MPI::DOUBLE);
            receivedData = std::vector<double>(buffer, buffer+count);

            log.Info() << "RECV [ " << getRank()
                        << " <-- "
                        << status.Get_source()
                        << " ] data : "
                        << receivedData
                        << std::endl;
            isDataReceived = true;
        }else
        {
            log.Err() << "PID " << getProcessId()
                      << " failed to RECV"
                      << std::endl;
        }
        return isDataReceived;
    }
Developer: deaconu-sabin, Project: das, Lines of code: 31, Source file: Network.cpp


Example 3: lMsg

void HPC::MPICommunication::waitReception(Request::Handle ioRequest) const
{
	Beagle_StackTraceBeginM();
	Beagle_NonNullPointerAssertM(ioRequest);
	MPI::Status lStatus;
	ioRequest->mSizeRequest.Wait(lStatus);
	if(lStatus.Is_cancelled()) return;
	int lRank = lStatus.Get_source();
	int lMsgSize = ioRequest->mSize;
	std::string lStringTag = ioRequest->mTag + "_str";
	MPI::COMM_WORLD.Probe(lRank,hashTag(lStringTag),lStatus);
	Beagle_AssertM(lStatus.Get_count(MPI::CHAR) == lMsgSize);
	//constructing a string of the right size.
	std::string lMsg(lMsgSize, ' ');
	MPI::COMM_WORLD.Recv(&lMsg[0], lMsgSize, MPI::CHAR, lRank, hashTag(lStringTag));
#ifdef BEAGLE_HAVE_LIBZ
	if(mCompressionLevel->getWrappedValue() > 0){
		ioRequest->mMessage = new Beagle::String;
		decompressString(lMsg, ioRequest->mMessage->getWrappedValue());
	} else {
		ioRequest->mMessage = new Beagle::String(lMsg);
	}
#else
	ioRequest->mMessage = new Beagle::String(lMsg);
#endif
	Beagle_HPC_StackTraceEndM("void HPC::MPICommunication::waitReception(Request::Handle) const");
}
Developer: GhostGambler, Project: beagle, Lines of code: 27, Source file: MPICommunication.cpp


Example 4:

void HPC::MPICommunication::waitSending(Request::Handle ioRequest) const
{
	Beagle_StackTraceBeginM();
	Beagle_NonNullPointerAssertM(ioRequest);
	MPI::Status lStatus;
	ioRequest->mSizeRequest.Wait(lStatus);
	if(lStatus.Is_cancelled()) return;
	ioRequest->mMsgRequest.Wait();
	Beagle_HPC_StackTraceEndM("void HPC::MPICommunication::waitReception(Request::Handle) const");
}
Developer: GhostGambler, Project: beagle, Lines of code: 10, Source file: MPICommunication.cpp


Example 5:

void
ParaCommMpiWorld::probe(
   int* source,
   int* tag
   )
{
   MPI::Status mpiStatus;
   MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, mpiStatus);
   *source = mpiStatus.Get_source();
   *tag = mpiStatus.Get_tag();
   TAG_TRACE (Probe, From, *source, *tag);
}
Developer: aimanqais, Project: gerardus, Lines of code: 12, Source file: paraCommMpiWorld.cpp


Example 6: buffer

//#####################################################################
// Function Recv_Columns
//#####################################################################
template<class T_GRID> template<class T_ARRAYS_HORIZONTAL_COLUMN> void MPI_RLE_GRID<T_GRID>::
Recv_Columns(T_ARRAYS_HORIZONTAL_COLUMN& columns,const ARRAY<T_BOX_HORIZONTAL_INT>& regions,const int tag,const MPI::Status& probe_status) const
{
    ARRAY<char> buffer(probe_status.Get_count(MPI::PACKED));
    int position=0;
    comm->Recv(&buffer(1),buffer.m,MPI::PACKED,probe_status.Get_source(),tag);
    TV_HORIZONTAL_INT direction;
    MPI_UTILITIES::Unpack(direction,buffer,position,*comm);
    int neighbor=0;
    all_neighbor_directions.Find(-direction,neighbor);
    for(typename T_HORIZONTAL_GRID::CELL_ITERATOR iterator(local_grid.horizontal_grid,regions(neighbor)); iterator.Valid(); iterator.Next())
        MPI_UTILITIES::Unpack(columns(iterator.Cell_Index()),buffer,position,*comm);
}
Developer: aperfilev, Project: physbam_public, Lines of code: 16, Source file: MPI_RLE_GRID.cpp


Example 7: readPopulation

bool Neatzsche_MPI::readPopulation(Phenotypes * p, Coevolution * c, TransferFunctions * tfs)
{
  MPI::Status status;
  MPI::Datatype ndt,gdt;
  int genomes,genes,nodes,id;
  MPI::COMM_WORLD.Recv(&genomes,1,MPI::INT,0,0);//Receive the number of genome
  NeuralNodeSmall * nns;
  GeneSmall * gs;
  Genome * genome = NULL;
  int stringc=0; 
  char *strbuf;
  vector<string> * ftypes = NULL;
  for(int i=0;i<genomes;i++){
    ftypes = new vector<string>();
    MPI::COMM_WORLD.Recv(&id,1,MPI_INT,0,0);
    MPI::COMM_WORLD.Recv(&nodes,1,MPI_INT,0,0);
    MPI::COMM_WORLD.Recv(&genes,1,MPI_INT,0,0);
//     nns = (NeuralNodeSmall*)malloc(sizeof(NeuralNodeSmall)*nodes);
//     gs = (GeneSmall*)malloc(sizeof(GeneSmall)*genes);
    nns = new NeuralNodeSmall [nodes];
    gs = new GeneSmall[genes];

    nodetype = Build_neuralnode_type(&nns[0]);
    MPI::COMM_WORLD.Recv(nns,nodes,nodetype,0,0);
    for(int i=0;i<nodes;i++){//blargh, 1 int would be more usefull in this case:P
      MPI::COMM_WORLD.Probe(0, MPI_Cont, status);
      stringc = status.Get_count(MPI_CHAR);
      strbuf = (char*) malloc(sizeof(char)*stringc);
      MPI::COMM_WORLD.Recv(strbuf,stringc,MPI::CHAR,0,0);//receive the ftype of the node
      ftypes->push_back(string(strbuf).substr(0,stringc));
      free(strbuf);
    }
    genetype = Build_gene_type(&gs[0]);
    MPI::COMM_WORLD.Recv(gs,genes,genetype,0,0);

    genome = new Genome(tfs);
    genome->fromSmall(id,nodes,nns,genes,gs,ftypes);
    delete ftypes;
    p->push_back(new Phenotype(genome));
    if(nodes>0)
      delete[] nns; 
    if(genes>0)
      delete[] gs;
  }
  unsigned int cont;
  MPI::COMM_WORLD.Recv(&cont,1,MPI::INT,0,0);//continue or stop?
  return cont == MPI_Cont;

}
Developer: epichub, Project: neatzsche, Lines of code: 49, Source file: neatmpi.cpp


Example 8: hashTag

/*!
 * \brief Receive message from a specific node rank via MPI
 * \param outMessage Message receive.
 * \param inTag Tag associated to the message to be received.
 * \param inRank Node rank of the sending node.
 */
void HPC::MPICommunication::receive(std::string& outMessage, const std::string& inTag, int inRank) const
{
	Beagle_StackTraceBeginM();
	MPI::Status lStatus;

	int lSize = 0;
	MPI::COMM_WORLD.Recv(&lSize, 1, MPI::INT, inRank, hashTag(inTag+"_size"));
	MPI::COMM_WORLD.Probe(inRank,hashTag(inTag+"_str"),lStatus);
	Beagle_AssertM(lStatus.Get_count(MPI::CHAR) == lSize);
	outMessage.resize(lSize);
	MPI::COMM_WORLD.Recv(&outMessage[0], lSize, MPI::CHAR, lStatus.Get_source(), hashTag(inTag+"_str"));

#ifdef BEAGLE_HAVE_LIBZ
	if(mCompressionLevel->getWrappedValue() > 0){
		std::string lString;
		decompressString(outMessage, lString);
		outMessage = lString;
	}
#endif
	Beagle_HPC_StackTraceEndM("void HPC::MPICommunication::receive(std::string&, const std::string&, int) const");
}
Developer: GhostGambler, Project: beagle, Lines of code: 27, Source file: MPICommunication.cpp


Example 9: receive

bool
ParaCommMpiWorld::waitToken(
      int tempRank
      )
{
   pthread_mutex_lock(&tokenAccessLock);
   if( token[0] == myRank )
   {
      pthread_mutex_unlock(&tokenAccessLock);
      return true;
   }
   else
   {
      int previousRank = myRank - 1;
      if( previousRank == 0 )
      {
         if( token[0] != -1 )
         {
            previousRank = comSize - 1;
         }
      }
      int receivedTag;
      MPI::Status mpiStatus;
      MPI::COMM_WORLD.Probe(MPI::ANY_SOURCE, MPI::ANY_TAG, mpiStatus);
      receivedTag = mpiStatus.Get_tag();
      TAG_TRACE (Probe, From, mpiStatus.Get_source(), receivedTag);
      if( receivedTag == TagToken )
      {
         receive(token, 2, ParaINT, 0, TagToken);
         assert(token[0] == myRank);
         pthread_mutex_unlock(&tokenAccessLock);
         return true;
      }
      else
      {
         pthread_mutex_unlock(&tokenAccessLock);
         return false;
      }
   }
}
Developer: aimanqais, Project: gerardus, Lines of code: 40, Source file: paraCommMpiWorld.cpp


Example 10: f

void PSO::Swarm::evaluate_slave() {
  double f(log(0.0));
  int id(0);
  int flag(0);
  int tag(0);
  int dest(0);
  Point position(numParams);
  MPI::Status status;
//  fprintf(stderr,"Slave %d ready.\n",mpi_rank);
  while (1) {
//    flag = MPI::COMM_WORLD.Iprobe(0,MPI::ANY_TAG,status);
//    if (flag) {
//      tag = status.Get_tag();
    MPI::COMM_WORLD.Recv(&id,1,MPI::INT,0,MPI::ANY_TAG,status);
    if (status.Get_tag() == 0) break;
    MPI::COMM_WORLD.Recv(position.data(),numParams,MPI::DOUBLE,0,MPI::ANY_TAG,status);
    f = p->evalFunc(position,p->evalParams);
    MPI::COMM_WORLD.Send(&id,1,MPI::INT,0,2);
    MPI::COMM_WORLD.Send(&f,1,MPI::DOUBLE,0,2);
//    }
  }
//  fprintf(stderr,"Slave %d done.\n",mpi_rank);
}
Developer: gaberoo, Project: exploder, Lines of code: 23, Source file: Swarm.cpp


Example 11: main

int main ( int argc, char *argv[] )

//****************************************************************************80
//
//  Purpose:
//
//    MAIN is the main program for DAY1.
//
//  Discussion:
//
//    DAY1 is exercise 3 for first day of the MPI workshop
//
//    The instructions say:
//
//    Process 1 computes the squares of the first 200 integers.
//    It sends this data to process 3.
//
//    Process 3 should divide the integers between 20 and 119 by 53,
//    getting a real result, and passes this data back to process 1.
//
//    * I presume the first 200 integers are the numbers 0 through 199.
//
//    * The instructions literally mean that process 3 should look
//      at integers whose VALUES are between 20 and 119.  I doubt that
//      is what the instructor meant, but it's more interesting than
//      simply picking the entries with index between 20 and 119,
//      so that's what I'll do.
//
//    * It is also not completely clear whether only the selected data
//      should be sent back, or the entire array.  Again, it is more
//      interesting to send back only part of the data.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license. 
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    William Gropp, Ewing Lusk, Anthony Skjellum,
//    Using MPI: Portable Parallel Programming with the
//    Message-Passing Interface,
//    Second Edition,
//    MIT Press, 1999,
//    ISBN: 0262571323.
//
//  Modified:
//
//    26 October 2011
//
//  Author:
//
//    John Burkardt
//
{
# define I_DIM 200
# define R_DIM 200

  int count;
  int count2;
  int dest;
  int i;
  int i_buffer[I_DIM];
  int id;
  int p;
  float r_buffer[R_DIM];
  int source;
  MPI::Status status;
  int tag;
//
//  Initialize MPI.
//
  MPI::Init ( argc, argv );
//
//  Determine this process's rank.
//
  id = MPI::COMM_WORLD.Get_rank ( );
//
//  Get the number of processes.
//
  p = MPI::COMM_WORLD.Get_size ( );
//
//  Have Process 0 say hello.
//
  if ( id == 0 )
  {
    timestamp ( );
    cout << "\n";
    cout << "DAY1:\n";
    cout << "  C++ version\n";
    cout << "  An MPI example program.\n";
    cout << "\n";
    cout << "  Compiled on " << __DATE__ << " at " << __TIME__ << "\n";
    cout << "\n";
    cout << "  The number of processes available is " << p << "\n";
  }
//
//......... part of the code is omitted here .........
Developer: Vishakha6, Project: jburkardt-cpp, Lines of code: 101, Source file: day1_mpi.cpp
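The listing above is cut off by the site, so the actual exchange is not shown. Purely as a hedged illustration of what the Discussion describes (process 1 sends the squares of 0..199 to process 3, which sends back the selected quotients, with the receiver using MPI::Status to learn how many values actually arrived), the core steps might look roughly like the fragment below. It is a sketch written against the variables declared in the truncated listing (id, i, count, i_buffer, r_buffer, status, I_DIM); the tag values 1 and 2 are illustrative, not from the original code, and it assumes at least four processes so that ranks 1 and 3 exist.

// Hedged sketch, not the original day1_mpi.cpp; tags 1 and 2 are illustrative.
if ( id == 1 )
{
  for ( i = 0; i < I_DIM; i++ )
  {
    i_buffer[i] = i * i;                               // squares of 0..199
  }
  MPI::COMM_WORLD.Send ( i_buffer, I_DIM, MPI::INT, 3, 1 );
//
//  Process 1 cannot know how many reals come back, so it probes first
//  and asks the Status object for the count.
//
  MPI::COMM_WORLD.Probe ( 3, 2, status );
  count = status.Get_count ( MPI::FLOAT );
  MPI::COMM_WORLD.Recv ( r_buffer, count, MPI::FLOAT, 3, 2, status );
}
else if ( id == 3 )
{
  MPI::COMM_WORLD.Recv ( i_buffer, I_DIM, MPI::INT, 1, 1, status );
  count = 0;
  for ( i = 0; i < I_DIM; i++ )
  {
    if ( 20 <= i_buffer[i] && i_buffer[i] <= 119 )     // select by VALUE, as discussed
    {
      r_buffer[count++] = ( float ) i_buffer[i] / 53.0;
    }
  }
  MPI::COMM_WORLD.Send ( r_buffer, count, MPI::FLOAT, 1, 2 );
}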


Example 12: recv_output

Module_DMAP::Transmitting_Result Module_DMAP::recv_output(int node, int id) {
	int count;
	unsigned long int * positions;
	unsigned long int * global_positions;
	int * contigs;
	//t_alignment * types;
	int * NMs;
	int * lengths;
	int * algn;
	unsigned short int * bools;
	unsigned int * trim_info;
	char * informations;
	unsigned short int bool_temp;
	Mask * reads;

	{
		mutex::scoped_lock lock(mpi_mutex);
		//DEFAULT_CHANNEL << '[' << my_rank << ',' << id << "] Waiting info from node " << node << " to node " << my_rank << endl;
		if (finished)
			return Transmitting_Result(NULL,0);
		MPI::COMM_WORLD.Recv(&count,1,MPI::INT,my_rank-1,COMMUNICATION_CHANNEL);
		//DEFAULT_CHANNEL << '[' << my_rank << ',' << id << "] Receive " << count << " OUTPUTs from node " << node << " to node " << my_rank << endl;
		if (count == 0) {
			finished = true;
			return Transmitting_Result(NULL,0);
		}

		positions = new unsigned long int[count*2];
		global_positions = new unsigned long int[count*2];
		contigs = new int[count];
		//types = new t_alignment[count];
		NMs = new int[count*2];
		lengths = new int[count*2];
		algn = new int[count];
		bools = new unsigned short int[count];
		trim_info = new unsigned int[count*2];

		size_t sum;
		MPI::Status status;
		MPI::COMM_WORLD.Probe(node,DATA_CHANNEL,status);
		sum = status.Get_count(MPI::CHAR);
		informations = new char[sum];
		MPI::COMM_WORLD.Recv(informations,sum,MPI::CHAR,node,DATA_CHANNEL);

		MPI::COMM_WORLD.Recv(positions,count*2,MPI::UNSIGNED_LONG,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(global_positions,count*2,MPI::UNSIGNED_LONG,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(contigs,count,MPI::INT,node,DATA_CHANNEL);
		//MPI::COMM_WORLD.Recv(types,count*sizeof(t_alignment),MPI::CHAR,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(NMs,count*2,MPI::INT,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(lengths,count*2,MPI::INT,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(algn,count,MPI::INT,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(bools,count,MPI::UNSIGNED_SHORT,node,DATA_CHANNEL);
		MPI::COMM_WORLD.Recv(trim_info,count*2,MPI::UNSIGNED,node,DATA_CHANNEL);
	}
	reads = new Mask[count];
	char * h = informations;
	for (int i = 0; i < count; i++) {
		Mask & r = reads[i];
		r.id = string(h);
		h += r.id.size() + 1;
		r.sequence = string(h);
		h += r.sequence.size() + 1;
		r.quality = string(h);
		h += r.sequence.size() + 1;

		r.position = positions[i*2];
		r.position_gap = positions[i*2+1];
		r.globalPosition = global_positions[i*2];
		r.globalPosition_gap = global_positions[i*2+1];
		r.contig = contigs[i];
		r.length1_gap = lengths[i*2];
		r.length2_gap = lengths[i*2+1];
		//r.type = types[i];
		r.NM = NMs[i*2];
		r.NM_gap = NMs[i*2+1];
		r.algn = algn[i];
		r.good_region_start = trim_info[i*2];
		r.good_region_stop  = trim_info[i*2+1];

		bool_temp = bools[i];
		r.strand = bool_temp & 0x01;
		r.masked = bool_temp & 0x02;
		r.low_quality = bool_temp & 0x04;
		r.trimmed = bool_temp & 0x08;
		r.discarded = bool_temp & 0x10;
		r.low_complexity = bool_temp & 0x20;
		r.contaminated = bool_temp & 0x40;
		r.gapped = bool_temp & 0x80;
	}
	delete [] positions;
	delete [] contigs;
	//delete [] types;
	delete [] NMs;
	delete [] algn;
	delete [] bools;
	delete [] trim_info;
	delete [] lengths;
	delete [] global_positions;
	delete [] informations;

//......... part of the code is omitted here .........
Developer: vezzi, Project: ERNE, Lines of code: 101, Source file: Module_DMAP.cpp


Example 13: main

int main(int argc, char * argv[]){

	int tag, send_tag;//tag in MPI_Recv
        int to,from;//destination and source of MPI send/receive
	int st_count, st_source, st_tag;
	double start_time = 0.0;//set start and end time for MPI_Wtime()
	double end_time = 0.0;
	MPI::Status status;

	MPI::Init(argc, argv);//start MPI
	int rank = MPI::COMM_WORLD.Get_rank();//The rank label of the machines
	int size = MPI::COMM_WORLD.Get_size();//The number of tasks to be done
//	MPI_Barrier(MPI_COMM_WORLD);
	int option;

	opterr = 0;
	int N = 0;
	string directory;

	while ((option = getopt(argc, argv, "d:n:"))!= -1)//getopt parses the parameters of commands, -n is the first n words that occur most frequently in files, -d is the directory which contains the files that need to be parsed.
	{
		switch (option)
		{
			case 'n':
				N = atoi(optarg);//the first N words
			break;
			case 'd':
				directory = string(optarg);// parameter of the directory
//				cout << dir <<endl;
			break;
			case '?'://when the parameter of option n is wrong, show the error information
				if (optopt == 'n')
					cerr<< "Option -"<<char(optopt)<<" requires an argument." <<endl;
			        else if (isprint (optopt))
					cerr<< "Unknown option `-"<<char(optopt)<<"'.\n"<<endl;
				else
					cerr<<  "Unknown option character `"<<std::hex<<optopt<<"'."<<endl;
		}
	}

	vector<string> filenames;//use this vector to store file names
	char buffer[1024];
	
	if(rank == 0)//Machine 0 parses the name of directory and files in the directory.
	{	
		struct dirent *ptr;
		DIR *dir;
		dir = opendir(directory.c_str());//open the directory

		while((ptr = readdir(dir))!=NULL)//read the name of the directory
		{
			if(ptr->d_name[0]=='.')
				continue;
			strcpy(buffer,directory.c_str());
			strcat(buffer,ptr->d_name);
//			cout<<buffer<<endl;
			
			filenames.push_back(string(buffer));//put the file names of the directory in the vector filenames
		};
	}

	if(rank == 0)//machine 0 send messages and assign tasks to all the machines, including itself.
	{
		start_time = MPI_Wtime();//star time stamp
		to = 0;
		send_tag = 0;
		int round = 0;

		while(round * size < filenames.size())
		{
			for(int i = round * size; i < (round + 1) * size && i < filenames.size(); i++)
			{
				sprintf(buffer, "%s", filenames[i].c_str());
				
//				cout << rank << ":"<< "sending " << buffer << endl;
				MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, i%size, send_tag);//send filenames to the other machines and let them parse the files, including itself.
				to++;
				send_tag++;
			}

		
			tag = MPI::ANY_TAG;
			from = MPI::ANY_SOURCE;
			MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);//rank 0 receive parsing result from the rest machines, including itself
			st_count = status.Get_count(MPI::CHAR);
			st_source = status.Get_source();
			st_tag = status.Get_tag();
			
			string result("");
			result = parse(buffer, N);
			strcpy(buffer,result.c_str());

			MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, 0, st_tag);//rank 0 send message to itself
			
			for(int i = round * size; i < (round + 1) * size && i < filenames.size(); i++)
			{
				tag = MPI::ANY_TAG;
				from = MPI::ANY_SOURCE;
				MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);
				st_count = status.Get_count(MPI::CHAR);
//......... part of the code is omitted here .........
Developer: wangamanda, Project: advanced-programming, Lines of code: 101, Source file: problem2.cpp


Example 14: main

int main ( int argc, char *argv[] )

//****************************************************************************80
//
//  Purpose:
//
//    MAIN is the main program for MONTE_CARLO.
//
//  Discussion:
//
//    MONTE_CARLO illustrates the use of MPI with a Monte Carlo algorithm.
//
//    Generate N random points in the unit square.  Count M, the number
//    of points that are in the quarter circle.  Then PI is approximately
//    equal to the ratio 4 * M / N.
//
//    It's important that each processor use DIFFERENT random numbers.
//    One way to ensure this is to have a single master processor
//    generate all the random numbers, and then divide them up.
//
//    (A second way, not explored here, is simply to ensure that each
//    processor uses a different seed, either chosen by a master processor,
//    or generated from the processor ID.)
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license. 
//
//  Modified:
//
//    26 February 2007
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    William Gropp, Ewing Lusk, Anthony Skjellum,
//    Using MPI: Portable Parallel Programming with the
//    Message-Passing Interface,
//    Second Edition,
//    MIT Press, 1999,
//    ISBN: 0262571323.
//
{
  double calculatedPi;
  int dest;
  int done;
  double error;
  int i;
  int id;
  int in;
  int max;
  MPI::Status mesgStatus;
  int num_procs;
  int out;
  int point_max = 1000000;
  int randServer;
  int randNums[CHUNKSIZE];
  int ranks[1];
  int request;
  int temp;
  double tolerance;
  int totalin;
  int totalout;
  MPI::Group worker_group;
  MPI::Intracomm worker_comm;
  MPI::Group world_group;
  double x;
  double y;
//
//  Initialize MPI.
//
  MPI::Init ( argc, argv );
//
//  Get the number of processors.
//
  num_procs = MPI::COMM_WORLD.Get_size ( );
//
//  Get the rank of this processor.
//
  id = MPI::COMM_WORLD.Get_rank ( );

  if ( id == 0 ) 
  {
    timestamp ( );
    cout << "\n";
    cout << "MONTE_CARLO - Master process:\n";
    cout << "  C++ version\n";
    cout << "  Estimate pi by the Monte Carlo method, using MPI.\n";
    cout << "\n";
    cout << "  Compiled on : " << __DATE__ << " at " << __TIME__ << ".\n";
    cout << "\n";
    cout << "  The number of processes is " << num_procs << ".\n";
    cout << "\n";
    cout << "  Points in the unit square will be tested\n";
    cout << "  to see if they lie in the unit quarter circle.\n";
  }
//
//......... part of the code is omitted here .........
Developer: Vishakha6, Project: jburkardt-cpp, Lines of code: 101, Source file: monte_carlo_mpi.cpp
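This listing is also truncated. As a hedged sketch only, the random-number server loop that the Discussion describes (a master that owns the random stream and replies to whichever worker asks) could be structured roughly as below, using the Status object to find out who sent the request. The tag names REQUEST and RANDOM_NUMBERS and the zero-valued shutdown request are illustrative, not the original monte_carlo_mpi.cpp; request, i, randNums, CHUNKSIZE and mesgStatus are the declarations from the truncated listing, and rand() stands in for whatever generator the original uses.

// Hedged sketch of a random-number server, not the original code.
// REQUEST and RANDOM_NUMBERS are illustrative tag constants.
while ( true )
{
  MPI::COMM_WORLD.Recv ( &request, 1, MPI::INT, MPI::ANY_SOURCE,
    REQUEST, mesgStatus );                             // Status records who asked
  if ( request == 0 )                                  // illustrative shutdown signal
  {
    break;
  }
  for ( i = 0; i < CHUNKSIZE; i++ )
  {
    randNums[i] = rand ( );                            // the master owns the RNG stream
  }
  MPI::COMM_WORLD.Send ( randNums, CHUNKSIZE, MPI::INT,
    mesgStatus.Get_source ( ), RANDOM_NUMBERS );
}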


Example 15: start

void PPS::start(){

    //Define parameters struct for mpi
    //Refer to this as an example http://lists.mcs.anl.gov/pipermail/mpich-discuss/2009-April/004880.html
    MPI::Datatype MPIPPSTRUCT;
    int blockcounts[2];
    MPI::Aint offsets[2];
    MPI::Datatype datatypes[2];
    MPI::Aint extent,lb;

    blockcounts[0] = 9; //Number of ints
    blockcounts[1] = 13; //number of __fpv
    datatypes[0] = MPI::INT;
    datatypes[1] = MPIFPV;
    offsets[0] = 0;

    MPI::INT.Get_extent(lb, extent);

    offsets[1] = blockcounts[0] * extent;

    MPIPPSTRUCT = MPIPPSTRUCT.Create_struct(2,blockcounts,offsets, datatypes);
    MPIPPSTRUCT.Commit();

    if(PPS::pid == 0){

        struct parameters temp;
        int start,i,countdown = PPS::comm_size-1;
        bool ready = false;
        MPI::Status status;

        //Logs
        std::ofstream logsfile;
        logsfile.open("tslogs.txt",  std::fstream::out | std::fstream::trunc);

        while(true){


            if(countdown == 0) break;

            //Check first ready-to-compute process
            MPI::COMM_WORLD.Recv(&ready, 1, MPI::BOOL, MPI_ANY_SOURCE, 0, status);

            //Logs
            logsfile << "Remaining sims: " << PPS::plist.size()  << " process countdown: " << countdown << std::endl;

            //Send a 0 status to all the process to stop
            if(ready){
                if(PPS::plist.size() == 0 ){
                    start = EXIT_PROCESS;
                    MPI::COMM_WORLD.Send(&start, 1, MPI::INT, status.Get_source(), 0);
                    countdown = countdown - 1;
                }else{
                    //Prepare him to receive the params and start the sim (an int that contains the simulation number (-1 = exit))
                    start = PPS::plist.size() - 1;
                    MPI::COMM_WORLD.Send(&start, 1, MPI::INT, status.Get_source(), 0);

                    temp = PPS::plist.back();

                    //temp.N = status.Get_source() * 10;

                    //Deploy the parameterer struct
                    MPI::COMM_WORLD.Send(&temp, 1, MPIPPSTRUCT, status.Get_source(), 0);

                    //Pullout the parameter struct from the list
                    plist.pop_back();
                }
            }
            ready = false;
        }

        logsfile.close();



    }else{

        int status;
        bool ready = true;
        struct parameters recvparams;

        while(true){
            status = EXIT_PROCESS;
            //Send with a point to point that you are free
            MPI::COMM_WORLD.Send(&ready, 1, MPI::BOOL, 0, 0);

            //receive status value to exit or to receive a new params struct to start new sim
            MPI::COMM_WORLD.Recv(&status, 1, MPI::INT, 0, 0);

            if(status != EXIT_PROCESS){
                //wait to receive parameters


                //std::this_thread::sleep_for(std::chrono::seconds(PPS::pid));

                MPI::COMM_WORLD.Recv(&recvparams, 1, MPIPPSTRUCT, 0, 0);
                //Start sim
                //std::cout << "//////////////////////////////////////////////////////////////////////////////////"<< std::endl;
                //std::cout << "SAY HI: "<< PPS::pid << std::endl;
                //print_params(recvparams);
                //std::cout << "STARTING REAL SIM"<< std::endl;
//......... part of the code is omitted here .........
Developer: gitter-badger, Project: Potts-Simulation, Lines of code: 101, Source file: parallel_scheduler.cpp


Example 16: while

template<class T_GRID> int FLOOD_FILL_MPI<T_GRID>::
Synchronize_Colors()
{
    if(mpi_grid.threaded_grid) return Synchronize_Colors_Threaded();
    ARRAY<RANGE<typename T_PARALLEL_GRID::VECTOR_INT> > boundary_regions;
    mpi_grid.Find_Boundary_Regions(boundary_regions,RANGE<typename T_PARALLEL_GRID::VECTOR_INT>::Zero_Box(),false,RANGE<VECTOR<int,1> >(-1,0),false,true,local_grid);
    // figure out which colors are global
    int global_color_count=0;
    ARRAY<int,VECTOR<int,1> > color_map(-1,number_of_regions);color_map(-1)=-1;color_map(0)=0;
    {ARRAY<bool,VECTOR<int,1> > color_is_global(-1,number_of_regions);
    Find_Global_Colors(color_is_global,RANGE<typename T_PARALLEL_GRID::VECTOR_INT>::Centered_Box());
    for(int color=1;color<=number_of_regions;color++)if(color_is_global(color)) color_map(color)=++global_color_count;}

    // send numbers of global colors to everyone
    ARRAY<int> global_color_counts(mpi_grid.number_of_processes);
    mpi_grid.comm->Allgather(&global_color_count,1,MPI_UTILITIES::Datatype<int>(),&global_color_counts(1),1,MPI_UTILITIES::Datatype<int>());
    int total_global_colors=ARRAYS_COMPUTATIONS::Sum(global_color_counts);
    int global_color_offset=ARRAYS_COMPUTATIONS::Sum(global_color_counts.Prefix(mpi_grid.rank));
    LOG::cout<<"initial colors: "<<number_of_regions<<" total, "<<global_color_count<<" out of "<<total_global_colors<<" global"<<std::endl;
    if(!total_global_colors){color_ranks.Clean_Memory();return 0;}

    ARRAY<MPI_PACKAGE> packages;
    ARRAY<T_ARRAYS_INT> colors_copy(boundary_regions.m);
    // send left (front) colors
    ARRAY<MPI::Request> send_requests;
    for(int side=1;side<=T_PARALLEL_GRID::number_of_faces_per_cell;side+=2)if(mpi_grid.side_neighbor_ranks(side)!=MPI::PROC_NULL){
        Resize_Helper(colors_copy(side),local_grid,boundary_regions(side));
        Translate_Local_Colors_To_Global_Colors(color_map,colors_copy(side),boundary_regions(side),global_color_offset);
        MPI_PACKAGE package=mpi_grid.Package_Cell_Data(colors_copy(side),boundary_regions(side));
        packages.Append(package);
        send_requests.Append(package.Isend(*mpi_grid.comm,mpi_grid.side_neighbor_ranks(side),mpi_grid.Get_Send_Tag(mpi_grid.side_neighbor_directions(side))));}
    // receive right (back) colors and initialize union find
    UNION_FIND<> union_find(total_global_colors);
    {ARRAY<MPI::Request> recv_requests;
    for(int side=2;side<=T_PARALLEL_GRID::number_of_faces_per_cell;side+=2)if(mpi_grid.side_neighbor_ranks(side)!=MPI::PROC_NULL){
        Resize_Helper(colors_copy(side),local_grid,boundary_regions(side));
        MPI_PACKAGE package=mpi_grid.Package_Cell_Data(colors_copy(side),boundary_regions(side));
        packages.Append(package);
        recv_requests.Append(package.Irecv(*mpi_grid.comm,mpi_grid.side_neighbor_ranks(side),mpi_grid.Get_Recv_Tag(mpi_grid.side_neighbor_directions(side))));}
    MPI::Status status;
    while(MPI_UTILITIES::Wait_Any(recv_requests,status)){
        int side;for(side=2;side<=T_PARALLEL_GRID::number_of_faces_per_cell;side+=2)if(mpi_grid.Get_Recv_Tag(mpi_grid.side_neighbor_directions(side))==status.Get_tag()) break;
        Find_Color_Matches(color_map,union_find,colors_copy(side),boundary_regions(side),global_color_offset);}}

    // synchronize union find
    UNION_FIND<> final_union_find;
    {ARRAY<char> union_find_buffer(MPI_UTILITIES::Pack_Size(union_find,*mpi_grid.comm)+1);
    {int position=0;MPI_UTILITIES::Pack(union_find,union_find_buffer,position,*mpi_grid.comm);}
    MPI::Datatype union_find_type=MPI::PACKED.Create_contiguous(union_find_buffer.m);union_find_type.Commit();
    MPI::Op union_find_merge_op;union_find_merge_op.Init(Union_Find_Merge_Op,true);
    ARRAY<char> final_union_find_buffer(union_find_buffer.m);
    union_find_merge_op_comm=mpi_grid.comm;
    mpi_grid.comm->Allreduce(union_find_buffer.Get_Array_Pointer(),final_union_find_buffer.Get_Array_Pointer(),1,union_find_type,union_find_merge_op);
    {int position=0;MPI_UTILITIES::Unpack(final_union_find,final_union_find_buffer,position,*mpi_grid.comm);}
    union_find_type.Free();union_find_merge_op.Free();}

    // fix color map for global colors
    number_of_regions=0;
    ARRAY<int> global_to_final_color_map(total_global_colors);
    for(int i=1;i<=total_global_colors;i++){
        int root=final_union_find.Find(i);
        if(!global_to_final_color_map(root)) global_to_final_color_map(root)=++number_of_regions;
        global_to_final_color_map(i)=global_to_final_color_map(root);}
    for(int i=1;i<=color_map.domain.max_corner.x;i++)if(color_map(i)>0) color_map(i)=global_to_final_color_map(color_map(i)+global_color_offset);

    // find list of processes corresponding to each color
    int end=0;
    color_ranks.Clean_Memory();
    color_ranks.Resize(number_of_regions);
    for(int r=0;r<mpi_grid.number_of_processes;r++){
        int start=end+1;end+=global_color_counts(r+1);
        for(int i=start;i<=end;i++)color_ranks(global_to_final_color_map(i)).Append_Unique(r);}
    for(int color=1;color<=color_ranks.m;color++) assert(color_ranks(color).m>1 || mpi_grid.side_neighbor_ranks.Contains(mpi_grid.rank));

    // remap colors
    Remap_Colors(color_map,RANGE<typename T_PARALLEL_GRID::VECTOR_INT>::Centered_Box());

    LOG::cout<<"final colors: "<<color_ranks.m<<" global, "<<number_of_regions-color_ranks.m<<" local"<<std::endl;

    // remap color_touches_uncolorable
    if(color_touches_uncolorable){
        ARRAY<bool> new_color_touches_uncolorable(number_of_regions);
        for(int i=1;i<=color_touches_uncolorable->m;i++)if(color_map(i)>0) new_color_touches_uncolorable(color_map(i))|=(*color_touches_uncolorable)(i);
        color_touches_uncolorable->Exchange(new_color_touches_uncolorable);
        // synchronize color_touches_uncolorable, TODO: this could be merged with above communication
        ARRAY<bool> global_color_touches_uncolorable(color_ranks.m);
        ARRAY<bool>::Get(global_color_touches_uncolorable,*color_touches_uncolorable);
        mpi_grid.comm->Allreduce(&global_color_touches_uncolorable(1),&(*color_touches_uncolorable)(1),color_ranks.m,MPI_UTILITIES::Datatype<bool>(),MPI::LOR);}

    // finish
    MPI_UTILITIES::Wait_All(send_requests);
    MPI_PACKAGE::Free_All(packages);

    return color_ranks.m;
}
Developer: acrlakshman, Project: physbam_public, Lines of code: 95, Source file: FLOOD_FILL_MPI.cpp


Example 17: main

int main(int argc, char * argv[]){

	int tag, send_tag;
        int to,from;
	int st_count, st_source, st_tag;
	double start_time = 0.0;
	double end_time = 0.0;
	MPI::Status status;

	MPI::Init(argc, argv);
	int rank = MPI::COMM_WORLD.Get_rank();
	int size = MPI::COMM_WORLD.Get_size();
	MPI_Barrier(MPI_COMM_WORLD);
	start_time = MPI_Wtime();
	int option;

	opterr = 0;
	int N = 0;
	string web_file;

	while ((option = getopt(argc, argv, "l:n:"))!= -1)
	{
		switch (option)
		{
			case 'n':
				N = atoi(optarg);
			break;
			case 'l':
				web_file = string(optarg);
			break;
			case '?':
				if (optopt == 'n')
					cerr<< "Option -"<<char(optopt)<<" requires an argument." <<endl;
			        else if (isprint (optopt))
					cerr<< "Unknown option `-"<<char(optopt)<<"'.\n"<<endl;
				else
					cerr<<  "Unknown option character `"<<std::hex<<optopt<<"'."<<endl;
		}
	}

	vector<string> URLs;
	char buffer[1024];
	string line;
	system("rm -fr /tmp/xiw412/");
	system("mkdir /tmp/xiw412/");

	if(rank == 0)
	{	
		fstream fread_file(web_file.c_str(), ios::in);
		while (getline(fread_file, line)){
		URLs.push_back(line);
		}
	}

	if(rank == 0)
	{
		to = 0;
		send_tag = 0;
		int round = 0;

		while(round * size < URLs.size())
		{
			for(int i = round * size; i < (round + 1) * size && i < URLs.size(); i++)
			{
				sprintf(buffer, "%s", URLs[i].c_str());
				
				cout << rank << ":"<< "sending " << buffer << endl;
				MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, i%size, send_tag);
				to++;
				send_tag++;
			}

		
			tag = MPI::ANY_TAG;
			from = MPI::ANY_SOURCE;
			MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);
			st_count = status.Get_count(MPI::CHAR);
			st_source = status.Get_source();
			st_tag = status.Get_tag();
			
			string result("");
			result = parse(buffer, N);
			strcpy(buffer,result.c_str());

			MPI::COMM_WORLD.Send(buffer,1024, MPI::CHAR, 0, st_tag);
			
			for(int i = round * size; i < (round + 1) * size && i < URLs.size(); i++)
			{
				tag = MPI::ANY_TAG;
				from = MPI::ANY_SOURCE;
				MPI::COMM_WORLD.Recv(buffer, 1024, MPI::CHAR, from, tag, status);
				st_count = status.Get_count(MPI::CHAR);
				st_source = status.Get_source();
				st_tag = status.Get_tag();

				cout << rank <<":" << "received from "<<st_source<<endl<< buffer << endl;
			}

			round++;
		}
//......... part of the code is omitted here .........
Developer: wangamanda, Project: advanced-programming, Lines of code: 101, Source file: q4.cpp


Example 18: manager_process

void manager_process(const MPI::Intracomm &comm_world, const int manager_rank, const int worker_size, std::string &maskName, std::string &imgDir, std::string &outDir, bool overwrite) {
	// first get the list of files to process
   	std::vector<std::string> filenames;
	std::vector<std::string> seg_output;
	std::vector<std::string> features_output;
	uint64_t t1, t0;

	t0 = cci::common::event::timestampInUS();
	getFiles(maskName, imgDir, outDir, filenames, seg_output, features_output, overwrite);

	t1 = cci::common::event::timestampInUS();
	printf("Manager ready at %d, file read took %lu us\n", manager_rank, t1 - t0);
	comm_world.Barrier();

	// now start the loop to listen for messages
	int curr = 0;
	int total = filenames.size();
	MPI::Status status;
	int worker_id;
	char ready;
	char *input;
	char *mask;
	char *output;
	int inputlen;
	int masklen;
	int outputlen;
	while (curr < total) {
		usleep(1000);

		if (comm_world.Iprobe(MPI_ANY_SOURCE, TAG_CONTROL, status)) {
/* where is it coming from */
			worker_id=status.Get_source();
			comm_world.Recv(&ready, 1, MPI::CHAR, worker_id, TAG_CONTROL);
//			printf("manager received request from worker %d\n",worker_id);
			if (worker_id == manager_rank) continue;

			if(ready == WORKER_READY) {
				// tell worker that manager is ready
				comm_world.Send(&MANAGER_READY, 1, MPI::CHAR, worker_id, TAG_CONTROL);
//				printf("manager signal transfer\n");
/* send real data */
				inputlen = filenames[curr].size() + 1;  // add one to create the zero-terminated string
				masklen = seg_output[curr].size() + 1;
				outputlen = features_output[curr].size() + 1;
				input = new char[inputlen];
				memset(input, 0, sizeof(char) * inputlen);
				strncpy(input, filenames[curr].c_str(), inputlen);
				mask = new char[masklen];
				memset(mask, 0, sizeof(char) * masklen);
				strncpy(mask, seg_output[curr].c_str(), masklen);
				output = new char[outputlen];
				memset(output, 0, sizeof(char) * outputlen);
				strncpy(output, features_output[curr].c_str(), outputlen);

				comm_world.Send(&inputlen, 1, MPI::INT, worker_id, TAG_METADATA);
				comm_world.Send(&masklen, 1, MPI::INT, worker_id, TAG_METADATA);
				comm_world.Send(&outputlen, 1, MPI::INT, worker_id, TAG_METADATA);

				// now send the actual string data
				comm_world.Send(input, inputlen, MPI::CHAR, worker_id, TAG_DATA);
				comm_world.Send(mask, masklen, MPI::CHAR, worker_id, TAG_DATA);
				comm_world.Send(output, outputlen, MPI::CHAR, worker_id, TAG_DATA);
				curr++;

				delete [] input;
				delete [] mask;
				delete [] output;

			}

			if (curr % 100 == 1) {
				printf("[ MANAGER STATUS ] %d tasks remaining.\n", total - curr);
			}

		}
	}
/* tell everyone to quit */
	int active_workers = worker_size;
	while (active_workers > 0) {
		usleep(1000);

		if (comm_world.Iprobe(MPI_ANY_SOURCE, TAG_CONTROL, status)) {
		/* where is it coming from */
			worker_id=status.Get_source();
			comm_world.Recv(&ready, 1, MPI::CHAR, worker_id, TAG_CONTROL);
//			printf("manager received request from worker %d\n",worker_id);
			if (worker_id == manager_rank) continue;

			if(ready == WORKER_READY) {
				comm_world.Send(&MANAGER_FINISHED, 1, MPI::CHAR, worker_id, TAG_CONTROL);
//				printf("manager signal finished\n");
				--active_workers;
			}
		}
	}
}
Developer: SBU-BMI, Project: nscale, Lines of code: 96, Source file: nu-features.cpp
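manager_process above only shows the manager's half of the protocol. A hedged sketch of the matching worker loop might look like the following; it is not the original nu-features.cpp worker, and it simply assumes that the same constants (WORKER_READY, MANAGER_READY, TAG_CONTROL, TAG_METADATA, TAG_DATA) and the <vector> header are visible:

// Hedged sketch of the worker side of the manager/worker protocol above.
// Not the original nscale code; assumes the same shared constants.
void worker_process(const MPI::Intracomm &comm_world, const int manager_rank) {
	char flag;
	while (true) {
		// announce that this worker is idle
		comm_world.Send(&WORKER_READY, 1, MPI::CHAR, manager_rank, TAG_CONTROL);
		comm_world.Recv(&flag, 1, MPI::CHAR, manager_rank, TAG_CONTROL);
		if (flag != MANAGER_READY) break;        // MANAGER_FINISHED: no more work

		int inputlen, masklen, outputlen;
		comm_world.Recv(&inputlen, 1, MPI::INT, manager_rank, TAG_METADATA);
		comm_world.Recv(&masklen, 1, MPI::INT, manager_rank, TAG_METADATA);
		comm_world.Recv(&outputlen, 1, MPI::INT, manager_rank, TAG_METADATA);

		std::vector<char> input(inputlen), mask(masklen), output(outputlen);
		comm_world.Recv(input.data(), inputlen, MPI::CHAR, manager_rank, TAG_DATA);
		comm_world.Recv(mask.data(), masklen, MPI::CHAR, manager_rank, TAG_DATA);
		comm_world.Recv(output.data(), outputlen, MPI::CHAR, manager_rank, TAG_DATA);

		// ... run the segmentation / feature extraction step on these paths ...
	}
}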


示例19: main


                      
