#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(int argc, const char *argv[])
{
    // initialize MPI (NULL arguments are permitted since MPI-2)
    MPI_Init(NULL, NULL);
    // get the number of processes in the communicator
    int rank = 0, world = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &world);
    // get the rank of this process
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    // rank 0 sends a random quantity of random numbers to rank 1
    int number_amount;
    if(rank == 0)
    {
        const int MAX_NUMBERS = 10;
        int numbers[MAX_NUMBERS];
        // plain C rand() keeps the example self-contained
        srand((unsigned)time(NULL));
        // choose how many numbers to send: 0 .. MAX_NUMBERS-1
        number_amount = rand() % MAX_NUMBERS;
        // fill the buffer with values in [-10, 10]
        for(int i = 0; i < number_amount; i++)
            numbers[i] = rand() % 21 - 10;
        // send the buffer to rank 1 with tag 0
        MPI_Send(numbers, number_amount, MPI_INT, 1, 0, MPI_COMM_WORLD);
        printf("0 sends %d numbers to 1\n", number_amount);
        for(int i = 0; i < number_amount; i++)
            printf("%d, ", numbers[i]);
        printf("\n");
    }
    else if(rank == 1)
    {
        MPI_Status status;
        // The standard way to receive a message of unknown size is to probe,
        // query the element count, allocate, and only then receive:
        //   MPI_Probe(0, 0, MPI_COMM_WORLD, &status);
        //   MPI_Get_count(&status, MPI_INT, &number_amount);
        //   int *numbers = malloc(number_amount * sizeof(int));
        //   MPI_Recv(numbers, number_amount, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        // MPI_ProbeRecv below is not part of the MPI standard; it is assumed to
        // be a project-specific helper that performs that probe / allocate /
        // receive sequence in a single call.
        void *numbers = NULL;
        MPI_ProbeRecv(&numbers, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        // the status still records how many elements arrived
        number_amount = 0;
        MPI_Get_count(&status, MPI_INT, &number_amount);
        printf("1 received %d numbers from 0. Message source = %d, "
               "tag = %d\n", number_amount, status.MPI_SOURCE, status.MPI_TAG);
        // print the received numbers
        printf("Try to print numbers...\n");
        int *tmp = (int *)numbers;
        for(int i = 0; i < number_amount; i++)
            printf("%d, ", tmp[i]);
        printf("\n");
        free(numbers);
    }
    // end the MPI session
    MPI_Finalize();
    return 0;
}
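For reference, the probe-then-size-then-receive sequence that the MPI_ProbeRecv call above is assumed to wrap can be written with standard MPI calls only. The helper below is a minimal sketch, not code from the example; the name recv_unknown_size is made up for illustration.

#include <mpi.h>
#include <stdlib.h>

/* Receive an MPI_INT message of unknown length from `source` with `tag`.
 * Returns a malloc'd buffer and stores the element count in *count;
 * the caller is responsible for free()ing the buffer. */
static int *recv_unknown_size(int source, int tag, MPI_Comm comm, int *count)
{
    MPI_Status status;
    /* block until a matching message is pending, without receiving it yet */
    MPI_Probe(source, tag, comm, &status);
    /* ask how many MPI_INT elements the pending message contains */
    MPI_Get_count(&status, MPI_INT, count);
    int *buf = (int *)malloc((size_t)(*count) * sizeof(int));
    /* the buffer is now guaranteed to be large enough */
    MPI_Recv(buf, *count, MPI_INT, source, tag, comm, &status);
    return buf;
}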
/*
* Implements a blocking receive operation.
* mpi_recv(?Source,?Tag,-Data).
*/
static YAP_Bool
mpi_recv(term_t YAP_ARG1,...) {
YAP_Term t1 = YAP_Deref(YAP_ARG1),
t2 = YAP_Deref(YAP_ARG2),
t3 = YAP_Deref(YAP_ARG3),
t4;
int tag, orig;
int len=0;
MPI_Status status;
//The third argument (data) must be unbound
if(!YAP_IsVarTerm(t3)) {
return false;
}
/* The first argument (Source) must be bound to an integer
(the rank of the source) or left unbound (i.e. any source
is OK) */
if (YAP_IsVarTerm(t1)) orig = MPI_ANY_SOURCE;
else if( !YAP_IsIntTerm(t1) ) return false;
else orig = YAP_IntOfTerm(t1);
/* The second argument must be bound to an integer (the tag)
or left unbound (i.e. any tag is OK) */
if (YAP_IsVarTerm(t2)) tag = MPI_ANY_TAG;
else if( !YAP_IsIntTerm(t2) ) return false;
else tag = YAP_IntOfTerm( t2 );
CONT_TIMER();
// probe for the size of the incoming term
if( MPI_CALL(MPI_Probe( orig, tag, MPI_COMM_WORLD, &status )) != MPI_SUCCESS) {
PAUSE_TIMER();
return false;
}
if( MPI_CALL(MPI_Get_count( &status, MPI_CHAR, &len )) != MPI_SUCCESS ||
status.MPI_TAG==MPI_UNDEFINED ||
status.MPI_SOURCE==MPI_UNDEFINED) {
PAUSE_TIMER();
return false;
}
//realloc memory buffer
change_buffer_size((size_t)(len+1));
BUFFER_LEN=len;
// Already know the source from MPI_Probe()
if( orig == MPI_ANY_SOURCE ) {
orig = status.MPI_SOURCE;
if( !YAP_Unify(t1, YAP_MkIntTerm(orig))) {
PAUSE_TIMER();
return false;
}
}
// Already know the tag from MPI_Probe()
if( tag == MPI_ANY_TAG ) {
tag = status.MPI_TAG;
if( !YAP_Unify(t2, YAP_MkIntTerm(status.MPI_TAG))) {
PAUSE_TIMER();
return false;
}
}
// Receive the message as a string
if( MPI_CALL(MPI_Recv( BUFFER_PTR, BUFFER_LEN, MPI_CHAR, orig, tag,
MPI_COMM_WORLD, &status )) != MPI_SUCCESS ) {
/* Getting in here should never happen; it means that the first
package (containing size) was sent properly, but there was a glitch with
the actual content! */
PAUSE_TIMER();
return false;
}
#ifdef DEBUG
write_msg(__FUNCTION__,__FILE__,__LINE__,"%s(%s,%u, MPI_CHAR,%d,%d)\n",__FUNCTION__,BUFFER_PTR, BUFFER_LEN, orig, tag);
#endif
MSG_RECV(BUFFER_LEN);
t4=string2term(BUFFER_PTR,&BUFFER_LEN);
PAUSE_TIMER();
return(YAP_Unify(YAP_ARG3,t4));
}
Developer: friguzzi, Project: mpi, Lines of code: 79, Source file: pl_mpi.c
Example 5: do_MPImaster_cluster
/* Master distributes work to slaves */
void do_MPImaster_cluster(WorkPtr work) {
#ifdef MPI
FILE * checkfile;
int i,k,nlen,tranche,client,maxlen,err,rlen,maxload,minload;
int bound[2];
int checkpoint;
int round=0;
int w=(SEQELTWIDTH/2);
int *last_sent, *last_got;
MPI_Status status;
int seq_data[2];
maxlen = MPIBUFRECSZ*num_seqs;
buffer = (int32_t *) calloc(maxlen,sizeof(int32_t));
last_sent = (int *) calloc(numprocs,sizeof(int));
last_got = (int *) calloc(numprocs,sizeof(int));
bzero(last_sent,sizeof(int)*numprocs);
bzero(last_got,sizeof(int)*numprocs);
seq_data[0]=num_seqs;
seq_data[1]=data_size;
mpierr(MPI_Bcast(seq_data,2,MPI_INT,0,MPI_COMM_WORLD));
mpierr(MPI_Bcast(seqInfo,2*num_seqs,MPI_INT,0,MPI_COMM_WORLD));
mpierr(MPI_Bcast(data,data_size,MPI_SHORT,0,MPI_COMM_WORLD));
/* divide work up */
tranche = (num_seqs+8)/16/numprocs;
/* Now wait for requests from slaves */
for(i=0; i<num_seqs; i=i+tranche) {
round++;
if (round < prog_opts.restore) continue;
checkpoint = 0;
bound[0]=i;
bound[1]=MIN(num_seqs,i+tranche);
if (prog_opts.checkpoint) bound[1]=-bound[1];
//printf("Master waits for client answer <%d,%d>\n",bound[0],bound[1]);
merr =MPI_Recv(&client, 1,
MPI_INT, MPI_ANY_SOURCE, WORKTAG, MPI_COMM_WORLD,&status);
mpierr(merr);
//printf("Master gets note from client %d\n",client);
if (client < 0) { // client wants to send checkpoint
client = -client;
checkpoint = 1;
}
// The round previously sent to the client is done
last_got[client] = last_sent[client];
// Record the current round sent to the client
last_sent[client]=round;
// DBG printf("Master sends new work to client %d\n",client);
merr =MPI_Send(bound, 2, MPI_INT, client, WORKTAG, MPI_COMM_WORLD);
mpierr(merr);
if (prog_opts.checkpoint) {
if (checkpoint) {
printf("Master to receive checkpoint\n");
err = MPI_Recv(buffer, maxlen, MPI_INT, MPI_ANY_SOURCE,
ANSTAG, MPI_COMM_WORLD, &status);
mpierr(err);
MPI_Get_count(&status, MPI_INT, &rlen);
MergeSlaveClusterTable(buffer,rlen);
checkfile = fopen(prog_opts.checkpoint,"w");
for(k=1; k<numprocs; k++)
fprintf(checkfile,"%d\n",last_got[k]);
show_clusters(checkfile);
fclose(checkfile);
}
printf("Slave %d sent tranche %d of %d\n",
client,round,(num_seqs+1)/tranche);
}
}
bound[0]=-1;
for(i=1; i< numprocs; i++) {
// Tell them no more work
merr =
MPI_Recv(&client, 1, MPI_INT, MPI_ANY_SOURCE,
WORKTAG, MPI_COMM_WORLD,&status);
//printf("Client %d told to finish\n",client);
mpierr(merr);
if (client < 0)
client = -client;
mpierr(MPI_Send(bound, 2, MPI_INT, client, WORKTAG, MPI_COMM_WORLD));
err = MPI_Recv(buffer, maxlen, MPI_INT, MPI_ANY_SOURCE,
ANSTAG, MPI_COMM_WORLD, &status);
mpierr(err);
MPI_Get_count(&status, MPI_INT, &rlen);
MergeSlaveClusterTable(buffer,rlen);
}
#endif
}
Developer: shaze, Project: wcdest, Lines of code: 92, Source file: mpistuff.c
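The function above shows only the master's half of the work-distribution protocol. A worker's side, under the same WORKTAG/ANSTAG conventions, would look roughly like the sketch below; the function name and the commented-out compute/report steps are assumptions for illustration, not code from wcdest.

#include <mpi.h>

/* WORKTAG and ANSTAG are the tags used by the master above; their real
 * definitions live in the project's headers, the values here are placeholders. */
#ifndef WORKTAG
#define WORKTAG 1
#define ANSTAG  2
#endif

/* Hypothetical worker-side loop matching do_MPImaster_cluster (sketch only). */
static void slave_loop_sketch(int myrank)
{
    int bound[2];
    MPI_Status status;
    for (;;) {
        /* announce this rank and ask the master (rank 0) for a tranche of work;
         * sending a negative rank would tell the master a checkpoint follows */
        MPI_Send(&myrank, 1, MPI_INT, 0, WORKTAG, MPI_COMM_WORLD);
        /* receive the assigned sequence range */
        MPI_Recv(bound, 2, MPI_INT, 0, WORKTAG, MPI_COMM_WORLD, &status);
        if (bound[0] < 0)
            break;                  /* bound[0] == -1 means: no more work */
        /* ... cluster sequences bound[0] .. abs(bound[1]) here ... */
    }
    /* A real worker would now send its serialized cluster table with tag
     * ANSTAG; the master sizes that message with MPI_Get_count. */
}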
Example 6: PCSetUp_Redistribute
//......... part of this code has been omitted here .........
nsends = 0;
for (i=rstart; i<rend; i++) {
if (i < nmap->range[j]) j = 0;
for (; j<size; j++) {
if (i < nmap->range[j+1]) {
if (!nprocs[j]++) nsends++;
owner[i-rstart] = j;
break;
}
}
}
/* inform other processors of number of messages and max length*/
ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,nprocs,&nrecvs);CHKERRQ(ierr);
ierr = PetscGatherMessageLengths(comm,nsends,nrecvs,nprocs,&onodes1,&olengths1);CHKERRQ(ierr);
ierr = PetscSortMPIIntWithArray(nrecvs,onodes1,olengths1);CHKERRQ(ierr);
recvtotal = 0; for (i=0; i<nrecvs; i++) recvtotal += olengths1[i];
/* post receives: rvalues - rows I will own; count - nu */
ierr = PetscMalloc3(recvtotal,PetscInt,&rvalues,nrecvs,PetscInt,&source,nrecvs,MPI_Request,&recv_waits);CHKERRQ(ierr);
count = 0;
for (i=0; i<nrecvs; i++) {
ierr = MPI_Irecv((rvalues+count),olengths1[i],MPIU_INT,onodes1[i],tag,comm,recv_waits+i);CHKERRQ(ierr);
count += olengths1[i];
}
/* do sends:
1) starts[i] gives the starting index in svalues for stuff going to
the ith processor
*/
ierr = PetscMalloc3(cnt,PetscInt,&svalues,nsends,MPI_Request,&send_waits,size,PetscInt,&starts);CHKERRQ(ierr);
starts[0] = 0;
for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
for (i=0; i<cnt; i++) {
svalues[starts[owner[i]]++] = rows[i];
}
for (i=0; i<cnt; i++) rows[i] = rows[i] - rstart;
red->drows = drows;
red->dcnt = dcnt;
ierr = PetscFree(rows);CHKERRQ(ierr);
starts[0] = 0;
for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
count = 0;
for (i=0; i<size; i++) {
if (nprocs[i]) {
ierr = MPI_Isend(svalues+starts[i],nprocs[i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
}
}
/* wait on receives */
count = nrecvs;
slen = 0;
while (count) {
ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
/* unpack receives into our local space */
ierr = MPI_Get_count(&recv_status,MPIU_INT,&n);CHKERRQ(ierr);
slen += n;
count--;
}
if (slen != recvtotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Total message lengths %D not expected %D",slen,recvtotal);
ierr = ISCreateGeneral(comm,slen,rvalues,PETSC_COPY_VALUES,&red->is);CHKERRQ(ierr);
/* free up all work space */
ierr = PetscFree(olengths1);CHKERRQ(ierr);
ierr = PetscFree(onodes1);CHKERRQ(ierr);
ierr = PetscFree3(rvalues,source,recv_waits);CHKERRQ(ierr);
ierr = PetscFree2(nprocs,owner);CHKERRQ(ierr);
if (nsends) { /* wait on sends */
ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
ierr = PetscFree(send_status);CHKERRQ(ierr);
}
ierr = PetscFree3(svalues,send_waits,starts);CHKERRQ(ierr);
ierr = PetscLayoutDestroy(&map);CHKERRQ(ierr);
ierr = PetscLayoutDestroy(&nmap);CHKERRQ(ierr);
ierr = VecCreateMPI(comm,slen,PETSC_DETERMINE,&red->b);CHKERRQ(ierr);
ierr = VecDuplicate(red->b,&red->x);CHKERRQ(ierr);
ierr = MatGetVecs(pc->pmat,&tvec,PETSC_NULL);CHKERRQ(ierr);
ierr = VecScatterCreate(tvec,red->is,red->b,PETSC_NULL,&red->scatter);CHKERRQ(ierr);
ierr = VecDestroy(&tvec);CHKERRQ(ierr);
ierr = MatGetSubMatrix(pc->pmat,red->is,red->is,MAT_INITIAL_MATRIX,&tmat);CHKERRQ(ierr);
ierr = KSPSetOperators(red->ksp,tmat,tmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
ierr = MatDestroy(&tmat);CHKERRQ(ierr);
}
/* get diagonal portion of matrix */
ierr = PetscMalloc(red->dcnt*sizeof(PetscScalar),&red->diag);CHKERRQ(ierr);
ierr = MatGetVecs(pc->pmat,&diag,PETSC_NULL);CHKERRQ(ierr);
ierr = MatGetDiagonal(pc->pmat,diag);CHKERRQ(ierr);
ierr = VecGetArrayRead(diag,&d);CHKERRQ(ierr);
for (i=0; i<red->dcnt; i++) {
red->diag[i] = 1.0/d[red->drows[i]];
}
ierr = VecRestoreArrayRead(diag,&d);CHKERRQ(ierr);
ierr = VecDestroy(&diag);CHKERRQ(ierr);
ierr = KSPSetUp(red->ksp);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
//......... part of this code has been omitted here .........
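Stripped of the PETSc error handling, the communication pattern in PCSetUp_Redistribute is the usual one for variable-length exchanges: post nonblocking receives, send each processor its list, then drain the receives with MPI_Waitany and let MPI_Get_count report how long each message actually was. A bare-MPI sketch of the draining step, with illustrative names only:

#include <mpi.h>

/* Drain `nrecvs` previously posted nonblocking receives and return the
 * total number of MPI_INT elements delivered, using MPI_Get_count to
 * learn how long each individual message actually was. */
static int drain_receives(int nrecvs, MPI_Request *recv_waits)
{
    int total = 0;
    for (int left = nrecvs; left > 0; left--) {
        int idx, n;
        MPI_Status status;
        /* wait for whichever outstanding receive completes next */
        MPI_Waitany(nrecvs, recv_waits, &idx, &status);
        /* the sender chose the length; recover it from the status */
        MPI_Get_count(&status, MPI_INT, &n);
        total += n;
    }
    return total;
}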
* also, catch error states and die later, after clean shutdown of workers.
*
* When a recoverable error occurs, have_work = FALSE, xstatus !=
* eslOK, and errmsg is set to an informative message. No more
* errmsg's can be received after the first one. We wait for all the
* workers to clear their work units, then send them shutdown signals,
* then finally print our errmsg and exit.
*
* Unrecoverable errors just crash us out with p7_Fail().
*/
wi = 1;
while (have_work || nproc_working)
{
if (have_work)
{
if ((status = esl_msa_Read(cfg->afp, &msa)) == eslOK)
{
cfg->nali++;
ESL_DPRINTF1(("MPI master read MSA %s\n", msa->name == NULL? "" : msa->name));
}
else
{
have_work = FALSE;
if (status == eslEFORMAT) { xstatus = eslEFORMAT; snprintf(errmsg, eslERRBUFSIZE, "Alignment file parse error:\n%s\n", cfg->afp->errbuf); }
else if (status == eslEINVAL) { xstatus = eslEFORMAT; snprintf(errmsg, eslERRBUFSIZE, "Alignment file parse error:\n%s\n", cfg->afp->errbuf); }
else if (status != eslEOF) { xstatus = status; snprintf(errmsg, eslERRBUFSIZE, "Alignment file read unexpectedly failed with code %d\n", status); }
ESL_DPRINTF1(("MPI master has run out of MSAs (having read %d)\n", cfg->nali));
}
}
if ((have_work && nproc_working == cfg->nproc-1) || (!have_work && nproc_working > 0))
{
if (MPI_Probe(MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &mpistatus) != 0) { MPI_Finalize(); p7_Fail("mpi probe failed"); }
if (MPI_Get_count(&mpistatus, MPI_PACKED, &n) != 0) { MPI_Finalize(); p7_Fail("mpi get count failed"); }
wi = mpistatus.MPI_SOURCE;
ESL_DPRINTF1(("MPI master sees a result of %d bytes from worker %d\n", n, wi));
if (n > bn) {
if ((buf = realloc(buf, sizeof(char) * n)) == NULL) p7_Fail("reallocation failed");
bn = n;
}
if (MPI_Recv(buf, bn, MPI_PACKED, wi, 0, MPI_COMM_WORLD, &mpistatus) != 0) { MPI_Finalize(); p7_Fail("mpi recv failed"); }
ESL_DPRINTF1(("MPI master has received the buffer\n"));
/* If we're in a recoverable error state, we're only clearing worker results;
* just receive them, don't unpack them or print them.
* But if our xstatus is OK, go ahead and process the result buffer.
*/
if (xstatus == eslOK)
{
pos = 0;
if (MPI_Unpack(buf, bn, &pos, &xstatus, 1, MPI_INT, MPI_COMM_WORLD) != 0) { MPI_Finalize(); p7_Fail("mpi unpack failed");}
if (xstatus == eslOK) /* worker reported success. Get the HMM. */
{
ESL_DPRINTF1(("MPI master sees that the result buffer contains an HMM\n"));
if (p7_hmm_MPIUnpack(buf, bn, &pos, MPI_COMM_WORLD, &(cfg->abc), &hmm) != eslOK) { MPI_Finalize(); p7_Fail("HMM unpack failed"); }
ESL_DPRINTF1(("MPI master has unpacked the HMM\n"));
if (cfg->postmsafile != NULL) {
if (esl_msa_MPIUnpack(cfg->abc, buf, bn, &pos, MPI_COMM_WORLD, &postmsa) != eslOK) { MPI_Finalize(); p7_Fail("postmsa unpack failed");}
}
entropy = p7_MeanMatchRelativeEntropy(hmm, bg);
if ((status = output_result(cfg, errmsg, msaidx[wi], msalist[wi], hmm, postmsa, entropy)) != eslOK) xstatus = status;
esl_msa_Destroy(postmsa); postmsa = NULL;
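The master fragment above expects each worker to reply with a single MPI_PACKED buffer whose first packed item is an integer status code, optionally followed by the HMM and posterior MSA. A minimal sketch of the packing side of that convention is shown below; it packs only the leading status integer, and the helper name is an assumption rather than HMMER code.

#include <mpi.h>

/* Sketch of a worker reporting a status code back to the master with
 * MPI_PACKED, matching the MPI_Unpack of `xstatus` on the master side.
 * A real worker would continue packing the HMM (and optional MSA) into
 * `buf` before sending. */
static void send_status_sketch(int xstatus, MPI_Comm comm)
{
    char buf[64];
    int  pos = 0;
    /* pack the status integer at the front of the buffer */
    MPI_Pack(&xstatus, 1, MPI_INT, buf, (int)sizeof buf, &pos, comm);
    /* tag 0 matches the master's MPI_Probe(MPI_ANY_SOURCE, 0, ...) */
    MPI_Send(buf, pos, MPI_PACKED, 0, 0, comm);
}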
/**
* This method contains most of the MPI code that coordinates the efforts
* among the crawlers. This method doesn't return until the root MPI process
* receives a SIGTERM signal.
*
* One of the crawlers is designated the root crawler based on its MPI rank.
* The root crawler instantiates a KeyspaceMapping object to coordinate the
* keyspace mapping among the crawlers. In reality the root crawler forks its
* tripcode generating thread and continues to listen for KeyspacePool
* requests in the main thread.
*
* When the root crawler receives a SIGTERM signal, it signals all of the
* crawlers to finish their current pools and optionally serialize the
* KeyspaceMapping object to disk to allow for the search to be resumed in the
* future.
*
* \fixme Catching the SIGTERM signal in a thread that makes MPI calls might
* not be safe. See section 2.9.2 of the MPI specification.
*/
void TripcodeCrawler::run()
{
int worldRank, worldSize;
MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
MPI_Comm_size(MPI_COMM_WORLD, &worldSize);
if(worldRank == ROOT_RANK)
{
/// \todo Spawn a thread so the root process can compute tripcodes and
/// coordinate the threads at the same time.
while(true)
{
cout << "doing things" << endl;
MPI_Status status;
// blocking receive for requests for keyspace pools
MPI_Recv(NULL, 0, MPI_INT, MPI_ANY_SOURCE, KEYSPACE_REQUEST, MPI_COMM_WORLD, &status);
/// \todo Need to document the ownership of a lot of these buffers.
// construct a KeyspacePool object suitable for serialization and
// transmission
assert(m_keyspaceMapping != NULL);
KeyspacePool *keyspacePool = m_keyspaceMapping->getNextPool();
size_t poolDataSize;
uint8_t *poolData = keyspacePool->serialize(&poolDataSize);
// blocking response to keyspace pool request with serialized
// KeyspacePool object
MPI_Send(poolData, static_cast<int>(poolDataSize), MPI_BYTE, status.MPI_SOURCE, KEYSPACE_RESPONSE, MPI_COMM_WORLD);
delete keyspacePool;
delete[] poolData; // assumes KeyspacePool::serialize() allocates the buffer with new[]
}
}
else
{
while(true)
{
MPI_Status status;
// request a new keyspace pool
MPI_Send(NULL, 0, MPI_INT, ROOT_RANK, KEYSPACE_REQUEST, MPI_COMM_WORLD);
// receive the serialized KeyspacePool object
MPI_Probe(ROOT_RANK, KEYSPACE_RESPONSE, MPI_COMM_WORLD, &status);
// MPI_Get_count writes into an int; convert to size_t afterwards
int poolDataCount = 0;
MPI_Get_count(&status, MPI_BYTE, &poolDataCount);
size_t poolDataSize = static_cast<size_t>(poolDataCount);
uint8_t *poolData = new uint8_t[poolDataSize];
MPI_Recv(poolData, poolDataCount, MPI_BYTE, ROOT_RANK, KEYSPACE_RESPONSE, MPI_COMM_WORLD, &status);
KeyspacePool *keyspacePool = KeyspacePoolFactory::singleton()->createKeyspacePool(poolData, poolDataSize);
delete[] poolData; /// \todo We might want to explore the speed benefit
/// of a custom memory allocator here and a few other places.
TripcodeContainer tripcodes, matches;
KeyBlock *currentBlock;
while((currentBlock = keyspacePool->getNextBlock()) != NULL)
{
m_tripcodeAlgorithm->computeTripcodes(currentBlock, &tripcodes);
m_matchingAlgorithm->matchTripcodes(&tripcodes, &matches);
}
// TODO: send TripcodeSearchResult to ROOT_RANK
delete keyspacePool;
// TODO: check for termination signal
}
}
}