
C++ MPI_CHECK Function Code Examples


This article collects typical usage examples of the C++ MPI_CHECK function found in open-source code. If you have been wondering what MPI_CHECK does, how to call it, or what real uses of it look like, the hand-picked examples below should help.

Twenty MPI_CHECK code examples are shown below, sorted by popularity by default. Upvoting the examples you find useful helps the site recommend better C++ samples.
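MPI_CHECK is not part of the MPI standard; each of the projects below defines its own macro for checking the return code of an MPI call. As a point of reference, here is a minimal sketch of the wrap-the-call style seen in examples such as getHostRank and startSend below. All names in it are illustrative assumptions, not the definition used by any particular project:

/* Hypothetical abort-on-error MPI_CHECK; real projects define their own variants. */
#include <mpi.h>
#include <stdio.h>

#define MPI_CHECK(call)                                                  \
    do {                                                                 \
        int mpi_check_err = (call);                                      \
        if (mpi_check_err != MPI_SUCCESS) {                              \
            char msg[MPI_MAX_ERROR_STRING];                              \
            int msg_len = 0;                                             \
            MPI_Error_string(mpi_check_err, msg, &msg_len);              \
            fprintf(stderr, "%s:%d: MPI error: %s\n",                    \
                    __FILE__, __LINE__, msg);                            \
            MPI_Abort(MPI_COMM_WORLD, mpi_check_err);                    \
        }                                                                \
    } while (0)

int main(int argc, char **argv)
{
    int rank = 0;
    MPI_CHECK(MPI_Init(&argc, &argv));
    MPI_CHECK(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
    printf("rank %d: all checked calls succeeded\n", rank);
    MPI_CHECK(MPI_Finalize());
    return 0;
}

A macro like this keeps error handling out of the call sites, which is why the examples below can chain many MPI calls without explicit if/else blocks.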

Example 1: MPI_Get_accumulate_C_Wrapper

int MPI_Get_accumulate_C_Wrapper (void *origin_addr, int origin_count,
	MPI_Datatype origin_datatype, void *result_addr, int result_count,
	MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,
	int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win)
{
	int ierror;
	int origin_datatype_size, result_datatype_size, target_datatype_size;

	ierror = PMPI_Type_size(origin_datatype, &origin_datatype_size);
	MPI_CHECK(ierror, PMPI_Type_size);
	
	ierror = PMPI_Type_size(result_datatype, &result_datatype_size);
	MPI_CHECK(ierror, PMPI_Type_size);
	
	ierror = PMPI_Type_size(target_datatype, &target_datatype_size);
	MPI_CHECK(ierror, PMPI_Type_size);
	
	TRACE_MPIEVENT(LAST_READ_TIME, MPI_GET_ACCUMULATE_EV, EVT_BEGIN, target_rank, ((origin_datatype_size * origin_count) + (target_datatype_size * target_count)), EMPTY, target_datatype_size * target_disp, origin_addr);
	ierror = PMPI_Get_accumulate (origin_addr, origin_count, origin_datatype, result_addr, result_count, result_datatype, target_rank, target_disp, target_count, target_datatype, op, win);
	TRACE_MPIEVENT(TIME, MPI_GET_ACCUMULATE_EV, EVT_END, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY);

	updateStats_OTHER(global_mpi_stats);

	return ierror;
}
Developer: bsc-performance-tools | Project: extrae | Lines: 24 | Source: mpi_wrapper_1sided_c.c
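In the Extrae wrappers (examples 1 and 13), MPI_CHECK instead receives the return code of a PMPI call together with the name of the routine that produced it. Extrae's actual definition is not reproduced here; a hedged sketch of what such a two-argument check could look like:

/* Hypothetical two-argument check in the spirit of the wrapper above;
 * Extrae's real macro may report the error differently. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

#define MPI_CHECK(ierror, routine)                                       \
    do {                                                                 \
        if ((ierror) != MPI_SUCCESS) {                                   \
            fprintf(stderr, "%s failed at %s:%d with code %d\n",         \
                    #routine, __FILE__, __LINE__, (ierror));             \
            exit(EXIT_FAILURE);                                          \
        }                                                                \
    } while (0)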


Example 2: getHostRank

/*! gets hostRank
 *
 * The process with MPI rank 0 is the master: it builds a map from hostname
 * to the number of processes already known on that host.
 * Every other rank sends its hostname via MPI_Send and receives its
 * host rank from the master.
 */
int getHostRank( )
{
    char hostname[MPI_MAX_PROCESSOR_NAME];
    int length;
    int hostRank;

    int totalnodes;
    int myrank;

    MPI_CHECK( MPI_Get_processor_name( hostname, &length ) );
    cleanHostname( hostname );
    hostname[length++] = '\0';

    //int totalnodes;

    MPI_CHECK( MPI_Comm_size( MPI_COMM_WORLD, &totalnodes ) );
    MPI_CHECK( MPI_Comm_rank( MPI_COMM_WORLD, &myrank ) );

    if ( myrank == 0 )
    {

        std::map<std::string, int> hosts;
        hosts[hostname] = 0;
        hostRank = 0;
        for ( int rank = 1; rank < totalnodes; ++rank )
        {

            MPI_CHECK( MPI_Recv( hostname, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, rank, gridHostnameTag, MPI_COMM_WORLD, MPI_STATUS_IGNORE ) );

            //printf("Hostname: %s\n", hostname);
            int hostrank = 0;
            if ( hosts.count( hostname ) > 0 ) hostrank = hosts[hostname] + 1;

            MPI_CHECK( MPI_Send( &hostrank, 1, MPI_INT, rank, gridHostRankTag, MPI_COMM_WORLD ) );

            hosts[hostname] = hostrank;


        }

    }
    else
    {
        MPI_CHECK( MPI_Send( hostname, length, MPI_CHAR, 0, gridHostnameTag, MPI_COMM_WORLD ) );

        MPI_CHECK( MPI_Recv( &hostRank, 1, MPI_INT, 0, gridHostRankTag, MPI_COMM_WORLD, MPI_STATUS_IGNORE ) );
        // if(hostRank!=0) hostRank--; //!\todo fix mpi hostrank start with 1
    }

    return hostRank;
}
Developer: Heikman | Project: picongpu | Lines: 59 | Source: main.cpp


Example 3: startSend

    MPI_Request* startSend(uint32_t ex, const char *send_data, size_t send_data_count, uint32_t tag)
    {
        MPI_Request *request = new MPI_Request;

        MPI_CHECK(MPI_Isend(
                            (void*) send_data,
                            send_data_count,
                            MPI_CHAR,
                            ExchangeTypeToRank(ex),
                            gridExchangeTag + tag,
                            topology,
                            request));

        return request;
    }
Developer: BenjaminW3 | Project: picongpu | Lines: 15 | Source: CommunicatorMPI.hpp


Example 4: startReceive

    MPI_Request* startReceive(uint32_t ex, char *recv_data, size_t recv_data_max, uint32_t tag)
    {

        MPI_Request *request = new MPI_Request;

        MPI_CHECK(MPI_Irecv(
                            recv_data,
                            recv_data_max,
                            MPI_CHAR,
                            ExchangeTypeToRank(ex),
                            gridExchangeTag + tag,
                            topology,
                            request));

        return request;
    }
Developer: AK9527lq | Project: picongpu | Lines: 16 | Source: CommunicatorMPI.hpp


Example 5: participate

    /* Activate participation in the reduce algorithm.
     * Must be called from every MPI process; this function uses global blocking MPI calls.
     * @param isActive true if this MPI rank should take part in the reduce operation, else false
     */
    void participate(bool isActive)
    {
        /* free the old communicator if init is called again */
        if (isMPICommInitialized)
        {
            MPI_CHECK(MPI_Comm_free(&comm));
            mpiRank = -1;
            numRanks = 0;
            isMPICommInitialized = false;
        }

        int countRanks;
        MPI_CHECK(MPI_Comm_size(MPI_COMM_WORLD, &countRanks));
        int reduceRank[countRanks];
        int groupRanks[countRanks];
        MPI_CHECK(MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank));

        if (!isActive)
            mpiRank = -1;

        MPI_CHECK(MPI_Allgather(&mpiRank, 1, MPI_INT, reduceRank, 1, MPI_INT, MPI_COMM_WORLD));

        for (int i = 0; i < countRanks; ++i)
        {
            if (reduceRank[i] != -1)
            {
                groupRanks[numRanks] = reduceRank[i];
                numRanks++;
            }
        }

        MPI_Group group;
        MPI_Group newgroup;
        MPI_CHECK(MPI_Comm_group(MPI_COMM_WORLD, &group));
        MPI_CHECK(MPI_Group_incl(group, numRanks, groupRanks, &newgroup));

        MPI_CHECK(MPI_Comm_create(MPI_COMM_WORLD, newgroup, &comm));

        if (mpiRank != -1)
        {
            MPI_CHECK(MPI_Comm_rank(comm, &mpiRank));
            isMPICommInitialized = true;
        }
    }
Developer: CodeLemon | Project: picongpu | Lines: 48 | Source: MPIReduce.hpp


Example 6: SeedRandGen

/*
 * Seed random generator.
 */
void SeedRandGen(MPI_Comm testComm)
{
        unsigned int randomSeed;

        if (rank == 0) {
#ifdef _WIN32
                rand_s(&randomSeed);
#else
                struct timeval randGenTimer;
                gettimeofday(&randGenTimer, (struct timezone *)NULL);
                randomSeed = randGenTimer.tv_usec;
#endif
        }
        MPI_CHECK(MPI_Bcast(&randomSeed, 1, MPI_INT, 0,
                            testComm), "cannot broadcast random seed value");
        srandom(randomSeed);
}
Developer: AlvaroAguilera | Project: ior | Lines: 20 | Source: utilities.c
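IOR's utilities pass MPI_CHECK the MPI call itself plus a human-readable message, as in the MPI_Bcast call above. The sketch below only illustrates that shape and is not IOR's actual definition:

/* Hypothetical call-plus-message check; IOR's real MPI_CHECK may differ. */
#include <mpi.h>
#include <stdio.h>

#define MPI_CHECK(mpi_call, msg)                                         \
    do {                                                                 \
        int mpi_check_rc = (mpi_call);                                   \
        if (mpi_check_rc != MPI_SUCCESS) {                               \
            fprintf(stderr, "%s:%d: %s (MPI error %d)\n",                \
                    __FILE__, __LINE__, (msg), mpi_check_rc);            \
            MPI_Abort(MPI_COMM_WORLD, mpi_check_rc);                     \
        }                                                                \
    } while (0)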


Example 7: rsadp

error_t rsadp(const RsaPrivateKey *key, const Mpi *c, Mpi *m)
{
   error_t error;
   Mpi m1;
   Mpi m2;
   Mpi h;

   //The ciphertext representative c shall be between 0 and n - 1
   if(mpiCompInt(c, 0) < 0 || mpiComp(c, &key->n) >= 0)
      return ERROR_OUT_OF_RANGE;

   //Initialize multiple-precision integers
   mpiInit(&m1);
   mpiInit(&m2);
   mpiInit(&h);

   //Use the Chinese remainder algorithm?
   if(key->n.size && key->p.size && key->q.size &&
      key->dp.size && key->dq.size && key->qinv.size)
   {
      //Compute m1 = c ^ dP mod p
      MPI_CHECK(mpiExpMod(&m1, c, &key->dp, &key->p));
      //Compute m2 = c ^ dQ mod q
      MPI_CHECK(mpiExpMod(&m2, c, &key->dq, &key->q));
      //Let h = (m1 - m2) * qInv mod p
      MPI_CHECK(mpiSub(&h, &m1, &m2));
      MPI_CHECK(mpiMulMod(&h, &h, &key->qinv, &key->p));
      //Let m = m2 + q * h
      MPI_CHECK(mpiMul(m, &key->q, &h));
      MPI_CHECK(mpiAdd(m, m, &m2));
   }
   //Use modular exponentiation?
   else if(key->n.size && key->d.size)
   {
      //Let m = c ^ d mod n
      error = mpiExpMod(m, c, &key->d, &key->n);
   }
   //Invalid parameters?
   else
   {
      //Report an error
      error = ERROR_INVALID_PARAMETER;
   }

end:
   //Free previously allocated memory
   mpiFree(&m1);
   mpiFree(&m2);
   mpiFree(&h);

   //Return status code
   return error;
}
Developer: frankzzcn | Project: M2_SE_RTOS_Project | Lines: 53 | Source: rsa.c
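Note that in rsadp above (and in ecLoadDomainParameters, example 10) "mpi" stands for multiple-precision integer, not the Message Passing Interface: MPI_CHECK records the status of each mpi* call in the local error variable and jumps to the end: cleanup label on failure. A hedged sketch of that goto-on-error pattern, assuming an error_t type as in the example and a NO_ERROR success constant (an assumption here):

/* Hypothetical goto-on-error check; it relies on a local `error_t error;`
 * variable and an `end:` label at the call site, exactly as in rsadp above.
 * The library's real macro may be spelled differently. */
#define MPI_CHECK(f)              \
    do {                          \
        error = (f);              \
        if (error != NO_ERROR)    \
            goto end;             \
    } while (0)

This style lets a long chain of arithmetic steps share a single cleanup path that frees every temporary, whether or not the computation succeeded.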


Example 8: send_steal_batch

/*
   Send a batch of work until to a stealer
   batch: info about the batch.  This function will free memory
   finish: true if we should notify target that this is last to send
 */
static adlb_code
send_steal_batch(steal_cb_state *batch, bool finish)
{
  int count = (int)batch->size;
  struct packed_steal_resp hdr = { .count = count, .last = finish };
  SEND(&hdr, sizeof(hdr), MPI_BYTE, batch->stealer_rank,
       ADLB_TAG_RESPONSE_STEAL_COUNT);

  if (count == 0)
    return ADLB_SUCCESS;

  struct packed_steal_work packed[count];
  for (int i = 0; i < count; i++)
  {
    xlb_pack_steal_work(&(packed[i]), batch->work_units[i]);
  }
 
  // Store requests for wait
  
  MPI_Request reqs[count + 1];

  DEBUG("[%i] sending batch size %zu", xlb_s.layout.rank, batch->size);
  ISEND(packed, (int)sizeof(packed[0]) * count, MPI_BYTE,
       batch->stealer_rank, ADLB_TAG_RESPONSE_STEAL, &reqs[0]);

  for (int i = 0; i < count; i++)
  {
    DEBUG("stolen payload: %s", (char*) batch->work_units[i]->payload);
    xlb_work_unit *unit = batch->work_units[i];
    ISEND(unit->payload, unit->length, MPI_BYTE,
         batch->stealer_rank, ADLB_TAG_RESPONSE_STEAL, &reqs[i+1]);
  }

  // Wait until MPI confirms sends have completed
  int rc = MPI_Waitall(count + 1, reqs, MPI_STATUSES_IGNORE);
  MPI_CHECK(rc);

  for (int i = 0; i < count; i++)
  {
    xlb_work_unit_free(batch->work_units[i]);
  }
  
  batch->size = 0;
  return ADLB_SUCCESS;
}
Developer: JohnPJenkins | Project: swift-t | Lines: 50 | Source: steal.c


Example 9: mpi_io_shared

/*
 * mpi_io_shared
 *
 * creates a single-shared-file
 * writes with independent-io
 * reads with independent-io
 * writes with collective-io
 * reads with collective-io
 */
int mpi_io_shared (char *path, int size, int rank)
{
    MPI_File fh;
    char filepath[512];
    MPI_Offset offset;
    MPI_Status status;
    void *buf;
    int bufcount = BYTES_PER_RANK;
    int rc;

    buf = malloc(bufcount);
    if (!buf) { return 0; }

    memset(buf, 0xa, bufcount);

    sprintf(filepath, "%s/%s", path, "cp-bench-mpio-shared");
    rc = MPI_File_open(MPI_COMM_WORLD,
                       filepath,
                       (MPI_MODE_CREATE|MPI_MODE_RDWR|MPI_MODE_DELETE_ON_CLOSE),
                       MPI_INFO_NULL,
                       &fh);
    MPI_CHECK(rc,"MPI_File_open");

    /* Indep Write */
    offset = rank * bufcount;
    rc = MPI_File_write_at(fh,offset,buf,bufcount,MPI_BYTE,&status);
    MPI_CHECK(rc,"MPI_File_write_at");

    MPI_Barrier(MPI_COMM_WORLD);

    /* Indep Read */
    offset = ((rank+1)%size) * bufcount;
    rc = MPI_File_read_at(fh,offset,buf,bufcount,MPI_BYTE,&status);
    MPI_CHECK(rc,"MPI_File_read_at");

    /* Collective Write */
    offset = rank * bufcount;
    rc = MPI_File_write_at_all(fh, offset, buf, bufcount, MPI_BYTE, &status);
    MPI_CHECK(rc,"MPI_File_write_at_all");

    /* Collective Read */
    offset = ((rank+1)%size) * bufcount;
    rc = MPI_File_read_at_all(fh, offset, buf, bufcount, MPI_BYTE, &status);
    MPI_CHECK(rc,"MPI_File_read_at_all");

    rc = MPI_File_close(&fh);
    MPI_CHECK(rc,"MPI_File_close");

    free(buf);

    return 1;
}
Developer: daidong | Project: darshan-neo4j | Lines: 61 | Source: io-sample.c


Example 10: ecLoadDomainParameters

error_t ecLoadDomainParameters(EcDomainParameters *params, const EcCurveInfo *curveInfo)
{
   error_t error;

   //Debug message
   TRACE_DEBUG("Loading %s EC domain parameters...\r\n", curveInfo->name);

   //Curve type
   params->type = curveInfo->type;

   //Import prime modulus
   MPI_CHECK(mpiReadRaw(&params->p, curveInfo->p, curveInfo->pLen));
   //Import parameter a
   MPI_CHECK(mpiReadRaw(&params->a, curveInfo->a, curveInfo->aLen));
   //Import parameter b
   MPI_CHECK(mpiReadRaw(&params->b, curveInfo->b, curveInfo->bLen));
   //Import the x-coordinate of the base point G
   MPI_CHECK(mpiReadRaw(&params->g.x, curveInfo->gx, curveInfo->gxLen));
   //Import the y-coordinate of the base point G
   MPI_CHECK(mpiReadRaw(&params->g.y, curveInfo->gy, curveInfo->gyLen));
   //Import base point order q
   MPI_CHECK(mpiReadRaw(&params->q, curveInfo->q, curveInfo->qLen));

   //Normalize base point G
   MPI_CHECK(mpiSetValue(&params->g.z, 1));

   //Fast modular reduction
   params->mod = curveInfo->mod;

   //Debug message
   TRACE_DEBUG("  p:\r\n");
   TRACE_DEBUG_MPI("    ", &params->p);
   TRACE_DEBUG("  a:\r\n");
   TRACE_DEBUG_MPI("    ", &params->a);
   TRACE_DEBUG("  b:\r\n");
   TRACE_DEBUG_MPI("    ", &params->b);
   TRACE_DEBUG("  Gx:\r\n");
   TRACE_DEBUG_MPI("    ", &params->g.x);
   TRACE_DEBUG("  Gy:\r\n");
   TRACE_DEBUG_MPI("    ", &params->g.y);
   TRACE_DEBUG("  q:\r\n");
   TRACE_DEBUG_MPI("    ", &params->q);

end:
   //Return status code
   return error;
}
Developer: nandojve | Project: embedded | Lines: 47 | Source: ec.c


Example 11: TEE_BigIntNeg

void TEE_BigIntNeg(TEE_BigInt *dest, const TEE_BigInt *src)
{
	mbedtls_mpi mpi_dest;

	get_mpi(&mpi_dest, dest);

	if (dest != src) {
		mbedtls_mpi mpi_src;

		get_const_mpi(&mpi_src, src);

		MPI_CHECK(mbedtls_mpi_copy(&mpi_dest, &mpi_src));

		put_mpi(&mpi_src);
	}

	mpi_dest.s *= -1;

	put_mpi(&mpi_dest);
}
Developer: prime-zeng | Project: optee_os | Lines: 20 | Source: tee_api_arith_mpi.c
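In the OP-TEE arithmetic examples (11, 14, 19 and 20), MPI_CHECK guards mbedtls_mpi_* calls, which return 0 on success and a negative error code otherwise. The sketch below assumes the simplest possible policy, panicking on any failure; OP-TEE's actual macro in tee_api_arith_mpi.c may behave differently:

/* Hypothetical check for mbedtls_mpi_* return codes; panicking on any
 * error is an assumption, not necessarily what OP-TEE really does. */
#define MPI_CHECK(rc)             \
    do {                          \
        if ((rc) != 0)            \
            TEE_Panic(0);         \
    } while (0)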


Example 12: executeIntern

    bool executeIntern()
    {
        if (this->isFinished())
            return true;

        if (this->request == NULL)
            throw std::runtime_error("request was NULL (call executeIntern after freed");
        
        int flag=0;
        MPI_CHECK(MPI_Test(this->request, &flag, &(this->status)));

        if (flag) //finished
        {
            delete this->request;
            this->request = NULL;
            setFinished();
            return true;
        }
        return false;
    }
Developer: Heikman | Project: picongpu | Lines: 20 | Source: TaskReceiveMPI.hpp


Example 13: MPI_Compare_and_swap_C_Wrapper

int MPI_Compare_and_swap_C_Wrapper (void *origin_addr, void *compare_addr,
  void *result_addr, MPI_Datatype datatype, int target_rank,
  MPI_Aint target_disp, MPI_Win win)
{
	int ierror;
	int datatype_size;

	ierror = PMPI_Type_size(datatype, &datatype_size);
	MPI_CHECK(ierror, PMPI_Type_size);

	TRACE_MPIEVENT (LAST_READ_TIME, MPI_COMPARE_AND_SWAP_EV, EVT_BEGIN, target_rank,
	  (datatype_size * target_disp), EMPTY, EMPTY, origin_addr);
	ierror = PMPI_Compare_and_swap (origin_addr, compare_addr, result_addr,
	  datatype, target_rank, target_disp, win);
	TRACE_MPIEVENT (TIME, MPI_COMPARE_AND_SWAP_EV, EVT_END, EMPTY, EMPTY,
	  EMPTY, EMPTY, EMPTY);

	updateStats_OTHER(global_mpi_stats);

	return ierror;
}
Developer: bsc-performance-tools | Project: extrae | Lines: 21 | Source: mpi_wrapper_1sided_c.c


Example 14: TEE_BigIntConvertToOctetString

TEE_Result TEE_BigIntConvertToOctetString(uint8_t *buffer, uint32_t *bufferLen,
					  const TEE_BigInt *bigInt)
{
	TEE_Result res = TEE_SUCCESS;
	mbedtls_mpi mpi;
	size_t sz;

	get_mpi(&mpi, bigInt);

	sz = mbedtls_mpi_size(&mpi);
	if (sz <= *bufferLen)
		MPI_CHECK(mbedtls_mpi_write_binary(&mpi, buffer, sz));
	else
		res = TEE_ERROR_SHORT_BUFFER;

	*bufferLen = sz;

	mbedtls_mpi_free(&mpi);

	return res;
}
Developer: pascal-brand-st-dev | Project: optee_os | Lines: 21 | Source: tee_api_arith_mpi.c


Example 15: MPI_Gatherv_linear

/** Currently the Open MPI implementation doesn't have a non-linear
    implementation of gatherv. In theory we could adapt gather to do
    this? I think you could actually call gather and then just re-orient
    the memory, although it would require 2x the memory. */
int
MPI_Gatherv_linear( void *sbuf, int scnt, MPI_Datatype sdt,
                    void *rbuf, int *rcnts, int *displs, MPI_Datatype rdt,
                    int root, MPI_Comm comm )
{
	int rc = MPI_SUCCESS, rank, src, size, extent, reqcnt = 0;
	MPI_Request *reqs = NULL;

	assert( ( rcnts != NULL ) && ( displs != NULL ) );

	MPI_CHECK( rc = MPI_Comm_rank( comm, &rank ) );
	MPI_CHECK( rc = MPI_Comm_size( comm, &size ) );
	MPI_CHECK( rc = MPI_Type_size( rdt, &extent ) );

	if( rank != root ){

		MPI_CHECK( rc = MPI_Send( sbuf, scnt, sdt, root, GATHERV_TAG,
                                  comm ) );
	} else {
		NULL_CHECK( reqs = malloc( sizeof( MPI_Request ) *
                                   ( size - 1 ) ) );

		for( src = 0 ; src < size ; src++ ){

            if( src == root ){
				memmove( rbuf + ( displs[ src ] * extent ),
                         sbuf, extent * rcnts[ src ] );
				
				continue;
			}


			MPI_CHECK( rc = MPI_Irecv( rbuf +
                                       ( displs[ src ] * extent ),	
                                       rcnts[ src ], rdt, src,
                                       GATHERV_TAG, comm,
                                       &reqs[ reqcnt++ ] ) );
		}

		MPI_CHECK( rc = MPI_Waitall( reqcnt, reqs,
                                     MPI_STATUSES_IGNORE ) );

	}

	free( reqs );

	return rc;
}
Developer: bryanmills | Project: srmpi | Lines: 51 | Source: opt_gatherv.c


Example 16: SeekOffset_MPIIO

static IOR_offset_t
SeekOffset_MPIIO(MPI_File       fd,
                 IOR_offset_t   offset,
                 IOR_param_t  * param)
{
    int          offsetFactor,
                 tasksPerFile;
    IOR_offset_t tempOffset;

    tempOffset = offset;

    if (param->filePerProc) {
        offsetFactor = 0;
        tasksPerFile = 1;
    } else {
        offsetFactor = (rank + rankOffset) % param->numTasks;
        tasksPerFile = param->numTasks;
    }
    if (param->useFileView) {
        /* recall that offsets in a file view are
           counted in units of transfer size */
        if (param->filePerProc) {
            tempOffset = tempOffset / param->transferSize;
        } else {
            /* 
             * this formula finds a file view offset for a task
             * from an absolute offset
             */
            tempOffset = ((param->blockSize / param->transferSize)
                          * (tempOffset / (param->blockSize * tasksPerFile)))
                         + (((tempOffset % (param->blockSize * tasksPerFile))
                          - (offsetFactor * param->blockSize))
                           / param->transferSize);
        }
    }
    MPI_CHECK(MPI_File_seek(fd, tempOffset, MPI_SEEK_SET),
              "cannot seek offset");
    return(offset);
} /* SeekOffset_MPIIO() */
Developer: gcongiu | Project: E10 | Lines: 39 | Source: aiori-MPIIO.c


Example 17: MPI_Alltoallv_linear

int
MPI_Alltoallv_linear( void *sbuf, int *scnts, int *sdispls, MPI_Datatype sdt,
                      void *rbuf, int *rcnts, int *rdispls, MPI_Datatype rdt,
                      MPI_Comm comm )
{
	int rc = MPI_SUCCESS, size, rank, sndextent, rcvextent, reqcnt = 0;
	int i;
	MPI_Request *reqs = NULL;

	MPI_CHECK( rc = MPI_Comm_rank( comm, &rank ) );
	MPI_CHECK( rc = MPI_Comm_size( comm, &size ) );
	MPI_CHECK( rc = MPI_Type_size( rdt, &rcvextent ) );
	MPI_CHECK( rc = MPI_Type_size( sdt, &sndextent ) );

	NULL_CHECK( reqs = malloc( sizeof( MPI_Request ) *
                               ( size - 1 ) * 2 ) );

	memmove( rbuf + ( rdispls[ rank ] * rcvextent ),
             sbuf + ( sdispls[ rank ] * sndextent ),
             rcnts[ rank ] * rcvextent );

	for( i = 0 ; i < size ; i++ ){

		if( i == rank )
			continue;

		MPI_CHECK( rc = MPI_Isend( sbuf + ( sdispls[ i ] * sndextent ),
                                   scnts[ i ], sdt, i, ALLTOALL_TAG,
                                   comm, &reqs[ reqcnt++ ] ) );

		MPI_CHECK( rc = MPI_Irecv( rbuf + ( rdispls[ i ] * rcvextent ),
                                   rcnts[ i ], rdt, i, ALLTOALL_TAG,
                                   comm, &reqs[ reqcnt++ ] ) );
	}

	MPI_CHECK( rc = MPI_Waitall( reqcnt, reqs, MPI_STATUSES_IGNORE ) );

	free( reqs );

	return rc;
}
Developer: bryanmills | Project: srmpi | Lines: 41 | Source: linear_alltoallv.c


Example 18: Share_MISC_Operations

void Share_MISC_Operations (void)
{
	int res, i, max;
	int tmp2[3], tmp[3] = { Rusage_Events_Found, MPI_Stats_Events_Found, Memusage_Events_Found };
	int tmp_in[RUSAGE_EVENTS_COUNT], tmp_out[RUSAGE_EVENTS_COUNT];
	int tmp2_in[MPI_STATS_EVENTS_COUNT], tmp2_out[MPI_STATS_EVENTS_COUNT];
	int tmp3_in[MEMUSAGE_EVENTS_COUNT], tmp3_out[MEMUSAGE_EVENTS_COUNT];
	int tmp_misc[MAX_MISC_INDEX];

	res = MPI_Reduce (inuse, tmp_misc, MAX_MISC_INDEX, MPI_INT, MPI_BOR, 0,
		MPI_COMM_WORLD);
	MPI_CHECK(res, MPI_Reduce, "Sharing MISC operations #1");
	for (i = 0; i < MAX_MISC_INDEX; i++)
		inuse[i] = tmp_misc[i];

	res = MPI_Reduce (tmp, tmp2, 3, MPI_INT, MPI_BOR, 0, MPI_COMM_WORLD);
	MPI_CHECK(res, MPI_Reduce, "Sharing MISC operations #2");
	Rusage_Events_Found = tmp2[0];
	MPI_Stats_Events_Found = tmp2[1];
	Memusage_Events_Found = tmp2[2];

	for (i = 0; i < RUSAGE_EVENTS_COUNT; i++)
		tmp_in[i] = GetRusage_Labels_Used[i];
	res = MPI_Reduce (tmp_in, tmp_out, RUSAGE_EVENTS_COUNT, MPI_INT, MPI_BOR, 0, MPI_COMM_WORLD);
	MPI_CHECK(res, MPI_Reduce, "Sharing MISC operations #3");
	for (i = 0; i < RUSAGE_EVENTS_COUNT; i++)
		GetRusage_Labels_Used[i] = tmp_out[i];

	for (i = 0; i < MPI_STATS_EVENTS_COUNT; i++)
		tmp2_in[i] = MPI_Stats_Labels_Used[i];
	res = MPI_Reduce (tmp2_in, tmp2_out, MPI_STATS_EVENTS_COUNT, MPI_INT, MPI_BOR, 0, MPI_COMM_WORLD);
	MPI_CHECK(res, MPI_Reduce, "Sharing MISC operations #4");
	for (i = 0; i < MPI_STATS_EVENTS_COUNT; i++)
		MPI_Stats_Labels_Used[i] = tmp2_out[i];

	for (i = 0; i < MEMUSAGE_EVENTS_COUNT; i++)
		tmp3_in[i] = Memusage_Labels_Used[i];
	res = MPI_Reduce (tmp3_in, tmp3_out, MEMUSAGE_EVENTS_COUNT, MPI_INT, MPI_BOR, 0, MPI_COMM_WORLD);
	MPI_CHECK(res, MPI_Reduce, "Sharing MISC operations #6");
	for (i = 0; i < MEMUSAGE_EVENTS_COUNT; i++)
		Memusage_Labels_Used[i] = tmp3_out[i];

	res = MPI_Reduce (&MaxClusterId, &max, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
	MPI_CHECK(res, MPI_Reduce, "Sharing MISC operations #7");
	MaxClusterId = max;
}
Developer: gllort | Project: extrae | Lines: 46 | Source: misc_prv_events.c


Example 19: TEE_BigIntDiv

void TEE_BigIntDiv(TEE_BigInt *dest_q, TEE_BigInt *dest_r,
		   const TEE_BigInt *op1, const TEE_BigInt *op2)
{
	mbedtls_mpi mpi_dest_q;
	mbedtls_mpi mpi_dest_r;
	mbedtls_mpi mpi_op1;
	mbedtls_mpi mpi_op2;
	mbedtls_mpi *pop1 = &mpi_op1;
	mbedtls_mpi *pop2 = &mpi_op2;

	get_mpi(&mpi_dest_q, dest_q);
	get_mpi(&mpi_dest_r, dest_r);

	if (op1 == dest_q)
		pop1 = &mpi_dest_q;
	else if (op1 == dest_r)
		pop1 = &mpi_dest_r;
	else
		get_const_mpi(&mpi_op1, op1);

	if (op2 == dest_q)
		pop2 = &mpi_dest_q;
	else if (op2 == dest_r)
		pop2 = &mpi_dest_r;
	else if (op2 == op1)
		pop2 = pop1;
	else
		get_const_mpi(&mpi_op2, op2);

	MPI_CHECK(mbedtls_mpi_div_mpi(&mpi_dest_q, &mpi_dest_r, pop1, pop2));

	put_mpi(&mpi_dest_q);
	put_mpi(&mpi_dest_r);
	if (pop1 == &mpi_op1)
		put_mpi(&mpi_op1);
	if (pop2 == &mpi_op2)
		put_mpi(&mpi_op2);
}
Developer: prime-zeng | Project: optee_os | Lines: 38 | Source: tee_api_arith_mpi.c


Example 20: TEE_BigIntShiftRight

void TEE_BigIntShiftRight(TEE_BigInt *dest, const TEE_BigInt *op, size_t bits)
{
	mbedtls_mpi mpi_dest;
	mbedtls_mpi mpi_op;

	get_mpi(&mpi_dest, dest);

	if (dest == op) {
		MPI_CHECK(mbedtls_mpi_shift_r(&mpi_dest, bits));
		goto out;
	}

	get_mpi(&mpi_op, op);

	if (mbedtls_mpi_size(&mpi_dest) >= mbedtls_mpi_size(&mpi_op)) {
		MPI_CHECK(mbedtls_mpi_copy(&mpi_dest, &mpi_op));
		MPI_CHECK(mbedtls_mpi_shift_r(&mpi_dest, bits));
	} else {
		mbedtls_mpi mpi_t;

		get_mpi(&mpi_t, NULL);

		/*
		 * We're using a temporary buffer to avoid the corner case
		 * where destination is unexpectedly overflowed by up to
		 * @bits number of bits.
		 */
		MPI_CHECK(mbedtls_mpi_copy(&mpi_t, &mpi_op));
		MPI_CHECK(mbedtls_mpi_shift_r(&mpi_t, bits));
		MPI_CHECK(mbedtls_mpi_copy(&mpi_dest, &mpi_t));

		mbedtls_mpi_free(&mpi_t);
	}

	mbedtls_mpi_free(&mpi_op);

out:
	MPI_CHECK(copy_mpi_to_bigint(&mpi_dest, dest));
	mbedtls_mpi_free(&mpi_dest);
}
Developer: pascal-brand-st-dev | Project: optee_os | Lines: 40 | Source: tee_api_arith_mpi.c



Note: the MPI_CHECK examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and any further distribution or use must follow the corresponding project's license. Please do not reproduce without permission.

