
C++ mpi::Intracomm Class Code Examples


This article collects and summarizes typical usage examples of the C++ mpi::Intracomm class. If you have been wondering what the Intracomm class is for, how to use it, or where to find working examples of it, the curated class examples below may help.



Twenty Intracomm class code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
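Before the project excerpts, here is a minimal self-contained sketch (not taken from any of the projects below) of the most common Intracomm operations. It assumes an MPI installation that still ships the C++ bindings, which were deprecated in MPI-2.2 and removed in MPI-3.0:

#include <iostream>
#include <mpi.h>

int main(int argc, char* argv[]) {
	MPI::Init(argc, argv);

	// MPI::COMM_WORLD is the predefined MPI::Intracomm spanning all ranks.
	MPI::Intracomm& world = MPI::COMM_WORLD;
	int rank = world.Get_rank();   // this process's id, 0 .. size-1
	int size = world.Get_size();   // total number of processes

	// Root broadcasts one integer to every rank in the communicator.
	int value = (rank == 0) ? 42 : 0;
	world.Bcast(&value, 1, MPI::INT, 0);
	std::cout << "rank " << rank << "/" << size
	          << " received " << value << std::endl;

	MPI::Finalize();
	return 0;
}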

Example 1: main

#include <cstdlib>
#include <ctime>
#include <iostream>
#include <mpi.h>
using namespace std;

// Assumed from the rest of dz5z10.cpp (not shown in this excerpt):
const int COLS = 4;   // matrix dimensions; placeholder values
const int ROWS = 4;
int powerSum = 0;     // computed elsewhere in the original file
void pline() { cout << "----------------" << endl; }  // assumed helper

int main(int argc, char* argv[]) {
	MPI::Init(argc, argv);

	int rank = MPI::COMM_WORLD.Get_rank();
	int size = MPI::COMM_WORLD.Get_size();
	if (size < 2) MPI::COMM_WORLD.Abort(1);
	if (size < 1+COLS+ROWS) MPI::COMM_WORLD.Abort(1);

	MPI::Group globalGroup = MPI::COMM_WORLD.Get_group();

	if (0 == rank) {
		int matrix[COLS][ROWS], xirtam[ROWS][COLS];

		srand(time(0));
		for (int i=0; i<COLS; i++)
			for (int j=0; j<ROWS; j++) {
				matrix[i][j] = 9 * (double)rand() / (double)RAND_MAX;  // digit in 0..9
				xirtam[j][i] = matrix[i][j];                           // transpose
			}

		cout << "random matrix: " << endl;
		for (int i=0; i<COLS; i++) {
			for (int j=0; j<ROWS; j++)
				cout << matrix[i][j] << " ";
			cout << endl;
		}

		// Create() below is collective over COMM_WORLD, so rank 0 must
		// participate too; it joins no group and receives MPI::COMM_NULL.
		MPI::COMM_WORLD.Create(MPI::GROUP_EMPTY);
	}
	else {
		MPI::Group group;
		MPI::Intracomm comm;

		// Split the non-root ranks into an odd group (A) and an even group (B).
		int* ranksA = new int[size];
		int* ranksB = new int[size];
		int j=0, k=0;
		for (int i=1; i<size; i++)
			if (i % 2) ranksA[j++] = i;
			else ranksB[k++] = i;

		if (rank % 2)
			group = globalGroup.Incl(j, ranksA);   // j odd ranks collected above
		else
			group = globalGroup.Incl(k, ranksB);   // k even ranks collected above

		comm = MPI::COMM_WORLD.Create(group);
		int newRank = comm.Get_rank();

		pline(); cout << rank << ", " << newRank << ", " << powerSum << endl;

		delete[] ranksA;
		delete[] ranksB;
		group.Free();
		comm.Free();
	}

	globalGroup.Free();
	MPI::Finalize();

	return 0;
}
Author: aaleksandar, Project: si4mps, Lines: 60, Source: dz5z10.cpp


Example 2: GetBlockData

void LocalScalar3D<real>::Dump(BlockManager& blockManager, const int step, const char* label) {
	ImposeBoundaryCondition(blockManager);
	MPI::Intracomm comm = blockManager.getCommunicator();

	ostringstream ossFileNameTime;
	ossFileNameTime << "./BIN/";
	mkdir(ossFileNameTime.str().c_str(), 0755);

#ifdef _BLOCK_IS_LARGE_
#else
#endif
	for (int id = 0; id < blockManager.getNumBlock(); ++id) {
		BlockBase* block = blockManager.getBlock(id);

		::Vec3i size = block->getSize();
		Vec3d origin = block->getOrigin();
		Vec3d blockSize = block->getBlockSize();
		Vec3d cellSize = block->getCellSize();
		int level = block->getLevel();

		ostringstream ossFileName;
		ossFileName << "./BIN/";
		ossFileName << "dump-";
		ossFileName << label;
		ossFileName << "-";
		ossFileName.width(5);
		ossFileName.setf(ios::fixed);
		ossFileName.fill('0');
		ossFileName << comm.Get_rank();
		ossFileName << "-";
		ossFileName.width(5);
		ossFileName.setf(ios::fixed);
		ossFileName.fill('0');
		ossFileName << id;
		ossFileName << "-";
		ossFileName.width(10);
		ossFileName.setf(ios::fixed);
		ossFileName.fill('0');
		ossFileName << step;
		ossFileName << ".bin";

		int cx = size.x + 2*vc;
		int cy = size.y + 2*vc;
		int cz = size.z + 2*vc;
		int iNE = 1;

		real* pData = GetBlockData(block);

		ofstream ofs;
		ofs.open(ossFileName.str().c_str(), ios::out | ios::binary);
		ofs.write((char*)&size.x, sizeof(int));
		ofs.write((char*)&size.y, sizeof(int));
		ofs.write((char*)&size.z, sizeof(int));
		ofs.write((char*)&vc    , sizeof(int));
		ofs.write((char*)&iNE   , sizeof(int));
		ofs.write((char*)pData  , sizeof(real)*cx*cy*cz);
		ofs.close();
	}
}
Author: avr-aics-riken, Project: BCMTools, Lines: 59, Source: LocalScalar3D.cpp


Example 3: getDofNumbering

 /** \brief
  * In many situations each rank computes a number of local DOFs, and all
  * ranks then want to know the global number of DOFs as well as the
  * starting displacement of the DOF numbering on each rank.
  *
  * \param[in]   mpiComm        The MPI communicator.
  * \param[in]   nRankDofs      The number of local DOFs.
  * \param[out]  rStartDofs     Displacement of the DOF numbering. On rank n
  *                             this is the sum of the local DOF counts on
  *                             ranks 0 to n - 1.
  * \param[out]  nOverallDofs   Global sum of nRankDofs; equal on all
  *                             ranks.
  */
 inline void getDofNumbering(MPI::Intracomm& mpiComm,
                             int nRankDofs,
                             int& rStartDofs,
                             int& nOverallDofs)
 {
   rStartDofs = 0;
   nOverallDofs = 0;
   mpiComm.Scan(&nRankDofs, &rStartDofs, 1, MPI_INT, MPI_SUM);
   rStartDofs -= nRankDofs;
   mpiComm.Allreduce(&nRankDofs, &nOverallDofs, 1, MPI_INT, MPI_SUM);
 }
Author: spraetor, Project: amdis2, Lines: 24, Source: MpiHelper.hpp
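A hypothetical usage sketch for the helper above (invented values, for illustration only): if rank n contributes n + 1 local DOFs, the Scan yields the exclusive prefix sum n(n+1)/2 as that rank's displacement, and the Allreduce gives every rank the total p(p+1)/2 for p ranks. In AMDiS the helper may live in a namespace; the sketch assumes it is visible as declared above.

#include <iostream>
#include <mpi.h>
#include "MpiHelper.hpp"   // assumed include path for getDofNumbering()

int main(int argc, char* argv[]) {
	MPI::Init(argc, argv);
	MPI::Intracomm& mpiComm = MPI::COMM_WORLD;

	int nRankDofs = mpiComm.Get_rank() + 1;   // pretend rank n owns n+1 DOFs
	int rStartDofs = 0, nOverallDofs = 0;
	getDofNumbering(mpiComm, nRankDofs, rStartDofs, nOverallDofs);

	// With p ranks, rank n now holds rStartDofs = n*(n+1)/2 and every
	// rank sees nOverallDofs = p*(p+1)/2.
	std::cout << "rank " << mpiComm.Get_rank() << ": start = " << rStartDofs
	          << ", total = " << nOverallDofs << std::endl;

	MPI::Finalize();
	return 0;
}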


Example 4: init_workers

// not necessary to create a new comm object
MPI::Intracomm init_workers(const MPI::Intracomm &comm_world, int managerid) {
	// get old group
	MPI::Group world_group = comm_world.Get_group();
	// create new group from old group
	int worker_size = comm_world.Get_size() - 1;
	int *workers = new int[worker_size];
	for (int i = 0, id = 0; i < worker_size; ++i, ++id) {
		if (id == managerid) ++id;  // skip the manager id
		workers[i] = id;
	}
	MPI::Group worker_group = world_group.Incl(worker_size, workers);
	delete [] workers;
	return comm_world.Create(worker_group);
}
Author: SBU-BMI, Project: nscale, Lines: 15, Source: nu-features.cpp
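A hypothetical caller sketch, not part of the nscale project: since MPI_Comm_create is collective over comm_world, the manager rank must call init_workers() as well, and because it is not a member of the worker group its call returns MPI::COMM_NULL.

#include <mpi.h>
// init_workers() as defined in the example above is assumed visible here.

int main(int argc, char* argv[]) {
	MPI::Init(argc, argv);
	const int managerid = 0;

	// Every rank of COMM_WORLD must execute this collective call.
	MPI::Intracomm workers = init_workers(MPI::COMM_WORLD, managerid);
	if (workers == MPI::COMM_NULL) {
		// manager rank: it is excluded from the worker communicator
	} else {
		int wrank = workers.Get_rank();  // 0 .. world_size-2, manager skipped
		(void)wrank;                     // e.g. dispatch work by wrank here
		workers.Free();
	}

	MPI::Finalize();
	return 0;
}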


Example 5: computeNAtomTotal

   /*
   * Compute, store and return total number of atoms on all processors.
   */
   void AtomStorage::computeNAtomTotal(MPI::Intracomm& communicator)
   {
      // If nAtomTotal is already set, do nothing and return.
      // if (nAtomTotal_.isSet()) return;

      int nAtomLocal = nAtom();
      int nAtomTotal = 0;
      communicator.Reduce(&nAtomLocal, &nAtomTotal, 1, 
                          MPI::INT, MPI::SUM, 0);
      if (communicator.Get_rank() !=0) {
         nAtomTotal = 0;
      }
      nAtomTotal_.set(nAtomTotal);
   }
Author: tdunn19, Project: simpatico, Lines: 17, Source: AtomStorage.cpp


Example 6: setIoCommunicator

 void MpiFileIo::setIoCommunicator(MPI::Intracomm& communicator)
 {
    communicatorPtr_ = &communicator; 
    if (communicator.Get_rank() == 0) {
       isIoProcessor_ = true;
    } else {
       isIoProcessor_ = false;
    }
 }
Author: TaherGhasimakbari, Project: simpatico, Lines: 9, Source: MpiFileIo.cpp


Example 7: iSend

   /*
   * Send a block (nonblocking)
   */
   void MemoryOArchive::iSend(MPI::Intracomm& comm, MPI::Request& req, int dest)
   {
      int  comm_size = comm.Get_size();
      int  myRank = comm.Get_rank();

      // Preconditions
      if (dest > comm_size - 1 || dest < 0) {
         UTIL_THROW("Destination rank out of bounds");
      }
      if (dest == myRank) {
         UTIL_THROW("Source and desination identical");
      }

      size_t  sendBytes = cursor_ - buffer_;
      size_t* sizePtr = (size_t*)buffer_;
      *sizePtr = sendBytes;
      req = comm.Isend(buffer_, sendBytes, MPI::UNSIGNED_CHAR, dest, 5);
   }
Author: TaherGhasimakbari, Project: simpatico, Lines: 21, Source: MemoryOArchive.cpp


Example 8: recv

/*
* Receive a block.
*/
void PackedData::recv(MPI::Intracomm& comm, int source)
{
    MPI::Request request;
    int  myRank     = comm.Get_rank();
    int  comm_size  = comm.Get_size();

    // Preconditions
    if (source > comm_size - 1 || source < 0) {
        UTIL_THROW("Source rank out of bounds");
    }
    if (source == myRank) {
        UTIL_THROW("Source and desination identical");
    }

    request = comm.Irecv(begin_, capacity_, MPI::UNSIGNED_CHAR, source, 5);
    request.Wait();
    cursor_ = begin_;

}
Author: jglaser, Project: simpatico, Lines: 22, Source: PackedData.cpp


Example 9: sendRecv

   /*
   * Send and receive buffer.
   */
   void Buffer::sendRecv(MPI::Intracomm& comm, int source, int dest)
   {

      MPI::Request request[2];
      int  sendBytes = 0;
      int  myRank    = comm.Get_rank();
      int  comm_size = comm.Get_size();

      // Preconditions
      if (dest > comm_size - 1 || dest < 0) {
         UTIL_THROW("Destination rank out of bounds");
      }
      if (source > comm_size - 1 || source < 0) {
         UTIL_THROW("Source rank out of bounds");
      }
      if (dest == myRank) {
         UTIL_THROW("Destination and my rank are identical");
      }
      if (source == myRank) {
         UTIL_THROW("Source and my rank are identical");
      }

      // Start nonblocking receive.
      request[0] = comm.Irecv(recvBufferBegin_, bufferCapacity_ , 
                              MPI::CHAR, source, 5);

      // Start nonblocking send.
      sendBytes = sendPtr_ - sendBufferBegin_;
      request[1] = comm.Isend(sendBufferBegin_, sendBytes , MPI::CHAR, dest, 5);

      // Wait for completion of receive.
      request[0].Wait();
      recvPtr_ = recvBufferBegin_;

      // Wait for completion of send.
      request[1].Wait();

      // Update statistics.
      if (sendBytes > maxSendLocal_) {
         maxSendLocal_ = sendBytes;
      }
   }
Author: pombredanne, Project: simpatico, Lines: 45, Source: Buffer.cpp


Example 10: recv

   /*
   * Receive a buffer.
   */
   void Buffer::recv(MPI::Intracomm& comm, int source)
   {
      MPI::Request request;
      int  myRank     = comm.Get_rank();
      int  comm_size  = comm.Get_size();

      // Preconditions
      if (source > comm_size - 1 || source < 0) {
         UTIL_THROW("Source rank out of bounds");
      }
      if (source == myRank) {
         UTIL_THROW("Source and destination identical");
      }

      request = comm.Irecv(recvBufferBegin_, bufferCapacity_, 
                           MPI::CHAR, source, 5);
      request.Wait();
      recvType_ = NONE;
      recvPtr_ = recvBufferBegin_;
   }
Author: pombredanne, Project: simpatico, Lines: 23, Source: Buffer.cpp


Example 11: send

/*
* Send a block.
*/
void PackedData::send(MPI::Intracomm& comm, int dest)
{
    MPI::Request request;
    int  sendBytes = 0;
    int  comm_size = comm.Get_size();
    int  myRank = comm.Get_rank();

    // Preconditions
    if (dest > comm_size - 1 || dest < 0) {
        UTIL_THROW("Destination rank out of bounds");
    }
    if (dest == myRank) {
        UTIL_THROW("Source and desination identical");
    }

    sendBytes = cursor_ - begin_;
    request = comm.Isend(begin_, sendBytes, MPI::UNSIGNED_CHAR, dest, 5);
    request.Wait();

}
Author: jglaser, Project: simpatico, Lines: 23, Source: PackedData.cpp


Example 12: reduce

   /*
   * Reduce (add) distributions from multiple MPI processors.
   */
   void Distribution::reduce(MPI::Intracomm& communicator, int root)
   {
  
      long* totHistogram = new long[nBin_]; 
      communicator.Reduce(histogram_.cArray(), totHistogram, nBin_, MPI::LONG, MPI::SUM, root);
      if (communicator.Get_rank() == root) {
         for (int i=0; i < nBin_; ++i) {
            histogram_[i] = totHistogram[i];
         }
      } else { 
         for (int i=0; i < nBin_; ++i) {
            histogram_[i] = 0;
         }
      }
      delete[] totHistogram;   // allocated with new[]

      long totSample; 
      communicator.Reduce(&nSample_, &totSample, 1, MPI::LONG, MPI::SUM, root);
      if (communicator.Get_rank() == root) {
         nSample_ = totSample;
      } else {
         nSample_ = 0;
      }

      long totReject; 
      communicator.Reduce(&nReject_, &totReject, 1, MPI::LONG, MPI::SUM, root);
      if (communicator.Get_rank() == root) {
         nReject_ = totReject;
      } else {
         nReject_ = 0;
      }

   }
Author: TaherGhasimakbari, Project: simpatico, Lines: 36, Source: Distribution.cpp


Example 13: bcast

   /*
   * Broadcast a buffer.
   */
   void Buffer::bcast(MPI::Intracomm& comm, int source)
   {
      int comm_size = comm.Get_size();
      int myRank = comm.Get_rank();
      if (source > comm_size - 1 || source < 0) {
         UTIL_THROW("Source rank out of bounds");
      }

      int sendBytes;
      if (myRank == source) {
         sendBytes = sendPtr_ - sendBufferBegin_;
         comm.Bcast(&sendBytes, 1, MPI::INT, source);
         comm.Bcast(sendBufferBegin_, sendBytes, MPI::CHAR, source);
         sendPtr_ = sendBufferBegin_;
         sendType_ = NONE;
      } else {
         comm.Bcast(&sendBytes, 1, MPI::INT, source);
         comm.Bcast(recvBufferBegin_, sendBytes, MPI::CHAR, source);
         recvPtr_ = recvBufferBegin_;
         recvType_ = NONE;
      }
      if (sendBytes > maxSendLocal_) {
         maxSendLocal_ = sendBytes;
      }

   }
Author: pombredanne, Project: simpatico, Lines: 29, Source: Buffer.cpp


Example 14: resample_popsizes_mh

// Metropolis-Hastings population size resampling; not used anymore
void resample_popsizes_mh(ArgModel *model, const LocalTrees *trees,
                       bool sample_popsize_recomb, double heat) {
    list<PopsizeConfigParam> &l = model->popsize_config.params;
    double curr_like = sample_popsize_recomb ? calc_arg_prior(model, trees) :
        calc_arg_prior_recomb_integrate(model, trees, NULL, NULL, NULL);
#ifdef ARGWEAVER_MPI
    MPI::Intracomm *comm = model->mc3.group_comm;
    int rank = comm->Get_rank();
    comm->Reduce(rank == 0 ? MPI_IN_PLACE : &curr_like,
                 &curr_like, 1, MPI::DOUBLE, MPI_SUM, 0);
#endif
    for (int rep=0; rep < model->popsize_config.numsample; rep++) {
        int idx=0;
        for (list<PopsizeConfigParam>::iterator it = l.begin();
             it != l.end(); it++) {
            curr_like =
                resample_single_popsize_mh(model, trees, sample_popsize_recomb,
                                           heat, it, curr_like, idx++);
        }
    }

}
Author: mjhubisz, Project: argweaver, Lines: 23, Source: est_popsize.cpp


Example 15: broadcast

/// Broadcast the octree information to the other ranks.
void BCMOctree::broadcast(MPI::Intracomm& comm)
{
  assert(comm.Get_rank() == 0);
  rootGrid->broadcast(comm);

  int numLeafNode = leafNodeArray.size();
  int ibuf[2];
  ibuf[0] = numLeafNode;
  ibuf[1] = ordering;
  comm.Bcast(&ibuf, 2, MPI::INT, 0);

  size_t size = Pedigree::GetSerializeSize();
  unsigned char* buf = new unsigned char[size * numLeafNode];

  size_t ip = 0;
  for (int id = 0; id < rootGrid->getSize(); id++) {
    packPedigrees(rootNodes[id], ip, buf);
  }

  comm.Bcast(buf, size*numLeafNode, MPI::BYTE, 0);
  delete[] buf;
}
Author: avr-aics-riken, Project: BCMTools, Lines: 23, Source: BCMOctree.cpp


Example 16: recv

   /*
   * Receive a block.
   */
   void MemoryIArchive::recv(MPI::Intracomm& comm, int source)
   {
      int  myRank     = comm.Get_rank();
      int  comm_size  = comm.Get_size();

      // Preconditions
      if (source > comm_size - 1 || source < 0) {
         UTIL_THROW("Source rank out of bounds");
      }
      if (source == myRank) {
         UTIL_THROW("Source and desination identical");
      }

      size_t recvCapacity = capacity_ + sizeof(size_t);
      comm.Recv(buffer_, recvCapacity, MPI::UNSIGNED_CHAR, source, 5);

      begin_ = buffer_ + sizeof(size_t);
      cursor_ = begin_;

      size_t* sizePtr = (size_t*) buffer_;
      size_t  size = *sizePtr;
      end_  = buffer_ + size;
   }
Author: jglaser, Project: simpatico, Lines: 26, Source: MemoryIArchive.cpp


Example 17: temp

template <class IT, class NT>
FullyDistSpVec<IT, IT> FullyDistSpVec<IT, NT>::sort()
{
	MPI::Intracomm World = commGrid->GetWorld();
	FullyDistSpVec<IT,IT> temp(commGrid);
	IT nnz = getlocnnz(); 
	pair<NT,IT> * vecpair = new pair<NT,IT>[nnz];
	int nprocs = World.Get_size();
	int rank = World.Get_rank();

	IT * dist = new IT[nprocs];
	dist[rank] = nnz;
	World.Allgather(MPI::IN_PLACE, 1, MPIType<IT>(), dist, 1, MPIType<IT>());
	IT sizeuntil = accumulate(dist, dist+rank, 0);
	for(IT i=0; i< nnz; ++i)
	{
		vecpair[i].first = num[i];	// we'll sort wrt numerical values
		vecpair[i].second = ind[i] + sizeuntil;	
	}
	SpParHelper::MemoryEfficientPSort(vecpair, nnz, dist, World);

	vector< IT > nind(nnz);
	vector< IT > nnum(nnz);
	for(IT i=0; i< nnz; ++i)
	{
		num[i] = vecpair[i].first;	// sorted range (change the object itself)
		nind[i] = ind[i];		// make sure the sparsity distribution is the same
		nnum[i] = vecpair[i].second;	// inverse permutation stored as numerical values
	}
	delete [] vecpair;
	delete [] dist;

	temp.NOT_FOUND = NOT_FOUND;
	temp.glen = glen;
	temp.ind = nind;
	temp.num = nnum;
	return temp;
}
Author: harperj, Project: KDTSpecializer, Lines: 37, Source: FullyDistSpVec.cpp


Example 18: send

   /*
   * Send a buffer.
   */
   void Buffer::send(MPI::Intracomm& comm, int dest)
   {
      MPI::Request request;
      int  sendBytes = 0;
      int  comm_size = comm.Get_size();
      int  myRank = comm.Get_rank();

      // Preconditions
      if (dest > comm_size - 1 || dest < 0) {
         UTIL_THROW("Destination rank out of bounds");
      }
      if (dest == myRank) {
         UTIL_THROW("Source and destination identical");
      }

      sendBytes = sendPtr_ - sendBufferBegin_;
      request = comm.Isend(sendBufferBegin_, sendBytes, MPI::CHAR, dest, 5);
      request.Wait();

      // Update statistics.
      if (sendBytes > maxSendLocal_) {
         maxSendLocal_ = sendBytes;
      }
   }
Author: pombredanne, Project: simpatico, Lines: 27, Source: Buffer.cpp


Example 19: outofrangeexception

template <class IT, class NT>
FullyDistVec<IT,NT> FullyDistSpVec<IT,NT>::operator() (const FullyDistVec<IT,IT> & ri) const
{
	MPI::Intracomm World = commGrid->GetWorld();
	// FullyDistVec ( shared_ptr<CommGrid> grid, IT globallen, NT initval, NT id);
	FullyDistVec<IT,NT> Indexed(ri.commGrid, ri.glen, zero, zero);
	int nprocs = World.Get_size();
        unordered_map<IT, IT> revr_map;       // inverted index that maps indices of *this to indices of output
	vector< vector<IT> > data_req(nprocs);
	IT locnnz = ri.LocArrSize();

	// ABAB: Input sanity check
	int local = 1;
	int whole = 1;
	for(IT i=0; i < locnnz; ++i)
	{
		if(ri.arr[i] >= glen || ri.arr[i] < 0)
		{
			local = 0;
		} 
	}
	World.Allreduce( &local, &whole, 1, MPI::INT, MPI::BAND);
	if(whole == 0)
	{
		throw outofrangeexception();
	}

	for(IT i=0; i < locnnz; ++i)
	{
		IT locind;
		int owner = Owner(ri.arr[i], locind);	// numerical values in ri are 0-based
		data_req[owner].push_back(locind);
                revr_map.insert(typename unordered_map<IT, IT>::value_type(locind, i));
	}
	IT * sendbuf = new IT[locnnz];
	int * sendcnt = new int[nprocs];
	int * sdispls = new int[nprocs];
	for(int i=0; i<nprocs; ++i)
		sendcnt[i] = data_req[i].size();

	int * rdispls = new int[nprocs];
	int * recvcnt = new int[nprocs];
	World.Alltoall(sendcnt, 1, MPI::INT, recvcnt, 1, MPI::INT);	// share the request counts 

	sdispls[0] = 0;
	rdispls[0] = 0;
	for(int i=0; i<nprocs-1; ++i)
	{
		sdispls[i+1] = sdispls[i] + sendcnt[i];
		rdispls[i+1] = rdispls[i] + recvcnt[i];
	}
	IT totrecv = accumulate(recvcnt,recvcnt+nprocs,0);
	IT * recvbuf = new IT[totrecv];

	for(int i=0; i<nprocs; ++i)
	{
		copy(data_req[i].begin(), data_req[i].end(), sendbuf+sdispls[i]);
		vector<IT>().swap(data_req[i]);
	}
	World.Alltoallv(sendbuf, sendcnt, sdispls, MPIType<IT>(), recvbuf, recvcnt, rdispls, MPIType<IT>());  // request data
		
	// We will return the requested data, 
	// our return can be at most as big as the request
	// and smaller if we are missing some elements 
	IT * indsback = new IT[totrecv];
	NT * databack = new NT[totrecv];		

	int * ddispls = new int[nprocs];
	copy(rdispls, rdispls+nprocs, ddispls);
	for(int i=0; i<nprocs; ++i)
	{
		// this is not the most efficient method because it scans ind vector nprocs = sqrt(p) times
		IT * it = set_intersection(recvbuf+rdispls[i], recvbuf+rdispls[i]+recvcnt[i], ind.begin(), ind.end(), indsback+rdispls[i]);
		recvcnt[i] = (it - (indsback+rdispls[i]));	// update with size of the intersection
	
		IT vi = 0;
		for(int j = rdispls[i]; j < rdispls[i] + recvcnt[i]; ++j)	// fetch the numerical values
		{
			// indsback is a subset of ind
			while(indsback[j] > ind[vi]) 
				++vi;
			databack[j] = num[vi++];
		}
	}
		
	DeleteAll(recvbuf, ddispls);
	NT * databuf = new NT[ri.LocArrSize()];

	World.Alltoall(recvcnt, 1, MPI::INT, sendcnt, 1, MPI::INT);	// share the response counts, overriding request counts 
	World.Alltoallv(indsback, recvcnt, rdispls, MPIType<IT>(), sendbuf, sendcnt, sdispls, MPIType<IT>());  // send indices
	World.Alltoallv(databack, recvcnt, rdispls, MPIType<NT>(), databuf, sendcnt, sdispls, MPIType<NT>());  // send data
	DeleteAll(rdispls, recvcnt, indsback, databack);

	// Now create the output from databuf (holds numerical values) and sendbuf (holds indices)
	// arr is already resized during its construction
	for(int i=0; i<nprocs; ++i)
	{
		// data will come globally sorted from processors 
		// i.e. ind owned by proc_i is always smaller than 
		// ind owned by proc_j for j < i
		for(int j=sdispls[i]; j< sdispls[i]+sendcnt[i]; ++j)	
//......... (remainder of this function omitted) .........
Author: harperj, Project: KDTSpecializer, Lines: 101, Source: FullyDistSpVec.cpp


Example 20: fit_steepest_descent

  // currently only hacked for spheres, with radius and sd as two parameters
  bool HipGISAXS::fit_steepest_descent(real_t zcut,
          real_t radius_min, real_t radius_max, real_t radius_num,
          real_t sd_min, real_t sd_max, real_t sd_num,
          unsigned int dim, MPI::Intracomm& world_comm,
          int x_min, int x_max, int x_step) {
    int mpi_rank = world_comm.Get_rank();

    if(!init_steepest_fit(world_comm, zcut)) return false;

    int num_alphai = 0, num_phi = 0, num_tilt = 0;

    real_t alphai_min, alphai_max, alphai_step;
    HiGInput::instance().scattering_alphai(alphai_min, alphai_max, alphai_step);
    if(alphai_max < alphai_min) alphai_max = alphai_min;
    if(alphai_min == alphai_max || alphai_step == 0) num_alphai = 1;
    else num_alphai = (alphai_max - alphai_min) / alphai_step + 1;

    real_t phi_min, phi_max, phi_step;
    HiGInput::instance().scattering_inplanerot(phi_min, phi_max, phi_step);
    if(phi_step == 0) num_phi = 1;
    else num_phi = (phi_max - phi_min) / phi_step + 1;

    real_t tilt_min, tilt_max, tilt_step;
    HiGInput::instance().scattering_tilt(tilt_min, tilt_max, tilt_step);
    if(tilt_step == 0) num_tilt = 1;
    else num_tilt = (tilt_max - tilt_min) / tilt_step + 1;

    std::cout << "**                    Num alphai: " << num_alphai << std::endl
          << "**                       Num phi: " << num_phi << std::endl
          << "**                      Num tilt: " << num_tilt << std::endl;

    // prepare parameters

    std::vector<std::vector<real_t> > params;
    int num_params = 2;
    std::vector<real_t> temp;
    real_t deltap = 0.0;
    if(radius_num <= 1)
      temp.push_back(radius_min);
    else {
      deltap = fabs(radius_max - radius_min) / (radius_num - 1);
      for(int i = 0; i < radius_num; ++ i) {
        temp.push_back(radius_min + i * deltap);
      } // for
    } // if-else
    params.push_back(temp);
    temp.clear();
    if(sd_num <= 1)
      temp.push_back(sd_min);
    else {
      deltap = fabs(sd_max - sd_min) / (sd_num - 1);
      for(int i = 0; i < sd_num; ++ i) {
        temp.push_back(sd_min + i * deltap);
      } // for
    } // if-else
    params.push_back(temp);
    temp.clear();

    // this will work only on one shape and one structure

    const real_t err_threshold = 1e-8;
    const unsigned int max_iter = 200;

    std::vector<real_t> param_vals;
    //param_vals.push_back(16.0);
    //param_vals.push_back(6.0);
    param_vals.push_back(23.0);
    param_vals.push_back(2.0);
    std::vector<real_t> param_deltas;
    param_deltas.push_back(0.05);
    param_deltas.push_back(0.05);
    real_t gamma_const = 0.05;

    real_t qdeltay = QGrid::instance().delta_y();

    real_t alpha_i = alphai_min;
    // high level of parallelism here (alphai, phi, tilt) for dynamicity ...
    for(int i = 0; i < num_alphai; i ++, alpha_i += alphai_step) {
      real_t alphai = alpha_i * PI_ / 180;
      real_t phi = phi_min;
      for(int j = 0; j < num_phi; j ++, phi += phi_step) {
        real_t tilt = tilt_min;
        for(int k = 0; k < num_tilt; k ++, tilt += tilt_step) {

          std::cout << "-- Computing reference GISAXS "
                << i * num_phi * num_tilt + j * num_tilt + k + 1 << " / "
                << num_alphai * num_phi * num_tilt
                << " [alphai = " << alpha_i << ", phi = " << phi
                << ", tilt = " << tilt << "] ..." << std::endl;

          /* run the reference gisaxs simulation using input params */
          real_t* ref_data = NULL;
          if(!run_gisaxs(alpha_i, alphai, phi, tilt, ref_data, world_comm)) {
            if(mpi_rank == 0) std::cerr << "error: could not finish successfully" << std::endl;
            return false;
          } // if

          if(dim != 1) {
            std::cerr << "uh-oh: only 1D is supported for now" << std::endl;
//......... (remainder of this function omitted) .........
Author: HipGISAXS, Project: HipGISAXS, Lines: 101, Source: fitting_steepest_descent.cpp



Note: The mpi::Intracomm class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this compilation without permission.

