
C++ MPI_Alltoall Function Code Examples


This article collects typical usage examples of the MPI_Alltoall function in C/C++. If you are looking for the concrete usage of MPI_Alltoall, how to call it, or working example code, the curated snippets below may help.



The following presents 20 code examples of the MPI_Alltoall function, sorted by popularity by default.

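For reference, MPI_Alltoall performs an all-to-all scatter/gather: each rank sends sendcount elements to every rank in the communicator and receives recvcount elements from every rank, so the send and receive buffers must each hold count * size elements. Below is a minimal, self-contained sketch of a basic call (the buffer contents and the choice of MPI_INT are illustrative assumptions, not taken from any of the examples that follow):

/* Minimal MPI_Alltoall sketch: one MPI_INT per destination rank (illustrative). */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size, i;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* One element destined for each rank (including ourselves). */
    int *sendbuf = (int *) malloc(size * sizeof(int));
    int *recvbuf = (int *) malloc(size * sizeof(int));
    for (i = 0; i < size; ++i)
        sendbuf[i] = rank * 100 + i;   /* element i goes to rank i */

    /* Each rank sends sendbuf[i] to rank i and receives recvbuf[i] from rank i. */
    MPI_Alltoall(sendbuf, 1, MPI_INT, recvbuf, 1, MPI_INT, MPI_COMM_WORLD);

    for (i = 0; i < size; ++i)
        printf("rank %d received %d from rank %d\n", rank, recvbuf[i], i);

    free(sendbuf);
    free(recvbuf);
    MPI_Finalize();
    return 0;
}

Note that sendcount and recvcount are per-destination counts, not totals, which is why the examples below allocate buffers proportional to the number of processes.
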
Example 1: exch_addr

int exch_addr(void)
{
    MPI_Status status;

	int i, rc;

    rc = MPI_Alltoall((void *)conn.qp_num, sizeof(uint32_t), MPI_BYTE, 
            (void *)rbuf.qp_num, sizeof(uint32_t), MPI_BYTE, l_state.world_comm);
   
    assert(!rc); 
    rc = MPI_Alltoall((void *)conn.lid, sizeof(uint16_t), MPI_BYTE, 
            (void *)rbuf.lid, sizeof(uint16_t), MPI_BYTE, l_state.world_comm);
    assert(!rc); 

#ifdef DEBUG
    for (i = 0; i < nprocs; i++) {
        if (me == i)
            continue;
        fprintf(stdout,"[%d] Remote QP %d, Remote LID %u, Rkey %u, Lkey %u\n"
                " LBuf %p, RBuf %p\n", 
                me, rbuf.qp_num[i], rbuf.lid[i], rbuf.rkey[i], lbuf.mr->lkey,
                lbuf.buf, rbuf.buf[i]);
        fflush(stdout);
    }
#endif

    return 0;
}
Author: bcernohous, Project: ga, Lines: 28, Source: openib.c


Example 2: apply

static void apply(const plan *ego_, R *I, R *O)
{
     const P *ego = (const P *) ego_;
     plan_rdft *cld1, *cld2, *cld2rest, *cld3;

     /* transpose locally to get contiguous chunks */
     cld1 = (plan_rdft *) ego->cld1;
     if (cld1) {
	  cld1->apply(ego->cld1, I, O);
	  
	  /* transpose chunks globally */
	  if (ego->equal_blocks)
	       MPI_Alltoall(O, ego->send_block_sizes[0], FFTW_MPI_TYPE,
			    I, ego->recv_block_sizes[0], FFTW_MPI_TYPE,
			    ego->comm);
	  else
	       MPI_Alltoallv(O, ego->send_block_sizes, ego->send_block_offsets,
			     FFTW_MPI_TYPE,
			     I, ego->recv_block_sizes, ego->recv_block_offsets,
			     FFTW_MPI_TYPE,
			     ego->comm);
     }
     else { /* TRANSPOSED_IN, no need to destroy input */
	  /* transpose chunks globally */
	  if (ego->equal_blocks)
	       MPI_Alltoall(I, ego->send_block_sizes[0], FFTW_MPI_TYPE,
			    O, ego->recv_block_sizes[0], FFTW_MPI_TYPE,
			    ego->comm);
	  else
	       MPI_Alltoallv(I, ego->send_block_sizes, ego->send_block_offsets,
			     FFTW_MPI_TYPE,
			     O, ego->recv_block_sizes, ego->recv_block_offsets,
			     FFTW_MPI_TYPE,
			     ego->comm);
	  I = O; /* final transpose (if any) is in-place */
     }
     
     /* transpose locally, again, to get ordinary row-major */
     cld2 = (plan_rdft *) ego->cld2;
     if (cld2) {
	  cld2->apply(ego->cld2, I, O);
	  cld2rest = (plan_rdft *) ego->cld2rest;
	  if (cld2rest) { /* leftover from unequal block sizes */
	       cld2rest->apply(ego->cld2rest,
			       I + ego->rest_Ioff, O + ego->rest_Ooff);
	       cld3 = (plan_rdft *) ego->cld3;
	       if (cld3)
		    cld3->apply(ego->cld3, O, O);
	       /* else TRANSPOSED_OUT is true and user wants O transposed */
	  }
     }
}
Author: 376473984, Project: fftw3, Lines: 52, Source: transpose-alltoall.c


Example 3: main

int main( int argc, char* argv[] )
{
  int i, j;
  int myrank, nprocs;
  char *sbuf,  *rbuf;
  int dsize;

  MPI_Init( &argc, &argv );
  
  MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
  MPI_Comm_size( MPI_COMM_WORLD, &nprocs );
  MPI_Type_size(DATATYPE, &dsize);

  sbuf=(char*)malloc(SIZE*dsize*nprocs);
  rbuf=(char*)malloc(SIZE*dsize*nprocs);

  for( i=0; i<REPEAT; i++ )
    {
      MPI_Alltoall( sbuf, SIZE, DATATYPE,
		    rbuf, SIZE, DATATYPE,
		    MPI_COMM_WORLD );
    }

  MPI_Finalize();
  return 0;
}
Author: nerscadmin, Project: IPM, Lines: 26, Source: main.c


Example 4: execute_predefined_op

static inline void execute_predefined_op(int opnum, void* args, void* scratch) {
	if (opnum == -1) {
		MPI_Barrier(G_GOAL_WorldComm);
	}
	else if (opnum == -2) {
		struct bcast_args* bc = (struct bcast_args*) args;
		MPI_Bcast(bc->buffer, bc->count, MPI_BYTE, bc->root, G_GOAL_WorldComm);
	}
	else if (opnum == -3) {
		struct scatter_args* sc = (struct scatter_args*) args;
		MPI_Scatter(sc->sendbuffer, sc->count, MPI_BYTE, sc->recvbuffer, sc->count, MPI_BYTE, sc->root, G_GOAL_WorldComm);
	}
	else if (opnum == -4) {
		struct scatter_args* ga = (struct scatter_args*) args;
		MPI_Gather(ga->sendbuffer, ga->count, MPI_BYTE, ga->recvbuffer, ga->count, MPI_BYTE, ga->root, G_GOAL_WorldComm);
	}
	else if (opnum == -5) {
		struct alltoall_args* aa = (struct alltoall_args*) args;
		MPI_Alltoall(aa->sendbuffer, aa->count, MPI_BYTE, aa->recvbuffer, aa->count, MPI_BYTE, G_GOAL_WorldComm);
	}
	else if (opnum == -99) {
		/* dummy op - do nothing */
	}
	else  {
		printf("Predefined op number %i is not implemented yet\n", opnum);
	}
}
Author: sriram87, Project: mpi-goal, Lines: 27, Source: GOAL_Tran_MPI.hpp


Example 5: transpose_mpi_out_of_place

/* Out-of-place version of transpose_mpi (or rather, in place using
   a scratch array): */
static void transpose_mpi_out_of_place(transpose_mpi_plan p, int el_size,
				       TRANSPOSE_EL_TYPE *local_data,
				       TRANSPOSE_EL_TYPE *work)
{
     local_transpose_copy(local_data, work, el_size, p->local_nx, p->ny);

     if (p->all_blocks_equal)
	  MPI_Alltoall(work, p->send_block_size * el_size, p->el_type,
		       local_data, p->recv_block_size * el_size, p->el_type,
		       p->comm);
     else {
	  int i, n_pes = p->n_pes;

	  for (i = 0; i < n_pes; ++i) {
	       p->send_block_sizes[i] *= el_size;
	       p->recv_block_sizes[i] *= el_size;
	       p->send_block_offsets[i] *= el_size;
	       p->recv_block_offsets[i] *= el_size;
	  }
	  MPI_Alltoallv(work, p->send_block_sizes, p->send_block_offsets,
			p->el_type,
			local_data, p->recv_block_sizes, p->recv_block_offsets,
			p->el_type,
			p->comm);
	  for (i = 0; i < n_pes; ++i) {
	       p->send_block_sizes[i] /= el_size;
	       p->recv_block_sizes[i] /= el_size;
	       p->send_block_offsets[i] /= el_size;
	       p->recv_block_offsets[i] /= el_size;
	  }
     }

     do_permutation(local_data, p->perm_block_dest, p->num_perm_blocks,
		    p->perm_block_size * el_size);
}
Author: JonBoley, Project: peaqb-fast, Lines: 37, Source: transpose_mpi.c


Example 6: main

int main(int argc, char** argv)
{
  // Initialize MPI
  MPI_Init(&argc, &argv);

  int size, rank;

  // Figure out the number of processes and our rank in the world group
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (size % 2) {
    printf("Need an even number of processes\n");
    MPI_Finalize();
    return 1;
  }

  // setup new communicators
  MPI_Comm twocomm;
  MPI_Comm_split(MPI_COMM_WORLD, rank/2, rank%2, &twocomm);

  int senddata[2], recvdata[2];
  senddata[(rank+1)%2] = rank;
  senddata[rank%2] = 0;
  MPI_Alltoall(senddata, 1, MPI_INT, recvdata, 1, MPI_INT, twocomm);

  // print to tty
  printf("process %i: received %i\n", rank, recvdata[(rank+1)%2]);

  // close down MPI
  MPI_Finalize();

  // ay-oh-kay
  return 0;
}
Author: hgranlund, Project: tma4280, Lines: 35, Source: mpiping-collective.c


Example 7: MADRE_exchange

void MADRE_exchange(MC* mc, int *myRecvCount, int *mySendCount){
  int i;
  Particle *p;
  p = mc->particles;
  //cache blockLength
  int blockLength = MADRE_BLOCK_LENGTH;

  /* MADRE_pack should have constructed an integer number of blocks */
  assert(mc->nparticles % (int)MADRE_BLOCK_LENGTH == 0);
  int liveBlocks = mc->nparticles/blockLength;
  for (i=0; i<liveBlocks; ++i) destRanks[i] = p[i*blockLength].proc;

  /* By default, this was set to zero */
  myRecvCount[mc->mype] = mySendCount[mc->mype];

  /* Organize destIndices by proc-rank order */
  displ[0] = 0; 
  for (i=1;i<(mc->nprocs);++i) displ[i] = displ[i-1] + myRecvCount[i-1]/blockLength;

  /* Alltoall where each proc can start receiving particles to get destIndices */
  MPI_Alltoall(displ, 1, MPI_INT, sdispl, 1, MPI_INT, MPI_COMM_WORLD);

  for (i=0; i<liveBlocks; ++i){
    destIndices[i]= sdispl[p[i*blockLength].proc];
    sdispl[p[i*blockLength].proc]++;
  }

  MADRE_redistribute(MADRE_particle, liveBlocks, destRanks, destIndices); 

  mc->nparticles = isum(myRecvCount, mc->nprocs);
  /* Each proc should have an integer number of blocks after exchanges */
  assert(mc->nparticles % (int)MADRE_BLOCK_LENGTH == 0);
}
Author: shamouda, Project: x10-applications, Lines: 33, Source: MC_Comm.c


Example 8: main

int main( int argc, char **argv )
{
    int send[4], recv[4];
    int rank, size, k;
    
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    
    if (size != 4) {
        printf("Error!:# of processors must be equal to 4\n");
        printf("Programm aborting....\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    for (k=0;k<size;k++) send[k] = (k+1) + rank*size;
    
    printf("%d : send = %d %d %d %d\n", rank, send[0], send[1], send[2], send[3]);
    
    MPI_Alltoall(send, 1, MPI_INT, recv, 1, MPI_INT, MPI_COMM_WORLD);
    
    printf("%d : recv = %d %d %d %d\n", rank, recv[0], recv[1], recv[2], recv[3]);
    
    MPI_Finalize();
    return 0;
}
Author: arnabd88, Project: CIVL-CS6110, Lines: 25, Source: matTrans.c


Example 9: exchangetest

/* run an exchange test with msgsz bytes per proc with bytes transferred
 * actually nproc*msgsz per exchange (all-to-all).
 */
double exchangetest(int iters, int msgsz) {
  int64_t starttime, endtime;
  int i;
  char *sendbuf, *recvbuf;

  sendbuf = malloc(msgsz*nproc);
  recvbuf = malloc(msgsz*nproc);

  if (sendbuf == NULL || recvbuf == NULL) {
    fprintf(stderr, "malloc");
    exit(-1);
  }

  barrier();

  starttime = getMicrosecondTimeStamp();
  for (i=0; i<iters; i++) {
    MPI_Alltoall(sendbuf, msgsz, MPI_CHAR, 
		 recvbuf, msgsz, MPI_CHAR, MPI_COMM_WORLD);
  }
  endtime = getMicrosecondTimeStamp();

  free(sendbuf);
  free(recvbuf);

  return (endtime-starttime);
}
Author: AbheekG, Project: chapel, Lines: 30, Source: testmpiperf.c


Example 10: main

int main(int argc, char *argv[])
{
    int rank, size;
    int chunk = 128;
    int i;
    int *sb;
    int *rb;
    int status, gstatus;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    for (i = 1; i < argc; ++i) {
        if (argv[i][0] != '-')
            continue;
        switch (argv[i][1]) {
        case 'm':
            chunk = atoi(argv[++i]);
            break;
        default:
            fprintf(stderr, "Unrecognized argument %s\n", argv[i]);
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
    }

    sb = (int *) malloc(size * chunk * sizeof(int));
    if (!sb) {
        perror("can't allocate send buffer");
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    rb = (int *) malloc(size * chunk * sizeof(int));
    if (!rb) {
        perror("can't allocate recv buffer");
        free(sb);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    for (i = 0; i < size * chunk; ++i) {
        sb[i] = rank + 1;
        rb[i] = 0;
    }

    /* fputs("Before MPI_Alltoall\n",stdout); */

    /* This should really send MPI_CHAR, but since sb and rb were allocated
     * as chunk*size*sizeof(int), the buffers are large enough */
    status = MPI_Alltoall(sb, chunk, MPI_INT, rb, chunk, MPI_INT, MPI_COMM_WORLD);

    /* fputs("Before MPI_Allreduce\n",stdout); */

    MTest_Finalize(status);

    free(sb);
    free(rb);

    MPI_Finalize();

    return MTestReturnValue(status);
}
Author: NexMirror, Project: MPICH, Lines: 59, Source: coll13.c


Example 11: mpi_alltoall

void mpi_alltoall (void *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
		   void *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, 
		   MPI_Fint *comm, MPI_Fint *__ierr)
{
  *__ierr = MPI_Alltoall (sendbuf, *sendcount, MPI_Type_f2c(*sendtype),
			  recvbuf, *recvcount, MPI_Type_f2c(*recvtype), 
			  MPI_Comm_f2c (*comm));
}
Author: JeremyFyke, Project: cime, Lines: 8, Source: f_wrappers_pmpi.c


Example 12: ReAllocateRasterBlock

int ReAllocateRasterBlock( void * SendBuf, int SendCount, MPI_Datatype SendType,
                           void * RecvBuf, int RecvCount, MPI_Datatype RecvType,
                           MPI_Comm Comm )
{
    return MPI_Alltoall(SendBuf, SendCount, SendType,
                        RecvBuf, RecvCount, RecvType,
                        Comm);
}
Author: htoooth, Project: hpgc_new, Lines: 8, Source: mpioperator.reallocate.cpp


Example 13: all_to_all

 static void all_to_all(const communicator& comm, const std::vector<T>& in, std::vector<T>& out, int n = 1)
 {
   // NB: this will fail if T is a vector
   MPI_Alltoall(Datatype::address(const_cast<T&>(in[0])), n,
                Datatype::datatype(),
                Datatype::address(out[0]), n,
                Datatype::datatype(),
                comm);
 }
Author: SINTEFMedtek, Project: VTK, Lines: 9, Source: collectives.hpp


Example 14: kmr_exchange_sizes

int
kmr_exchange_sizes(KMR *mr, long *sbuf, long *rbuf)
{
    MPI_Comm comm = mr->comm;
    int cc;
    cc = MPI_Alltoall(sbuf, 1, MPI_LONG, rbuf, 1, MPI_LONG, comm);
    assert(cc == MPI_SUCCESS);
    return MPI_SUCCESS;
}
Author: tnishinaga, Project: kmr, Lines: 9, Source: kmratoa.c


Example 15: FC_FUNC

FC_FUNC( mpi_alltoall , MPI_ALLTOALL )
                        ( void *sendbuf, int *sendcount, int *sendtype,
			  void *recvbuf, int *recvcount, int *recvtype,
                          int *comm, int *ierror )
{
  *ierror=MPI_Alltoall(sendbuf, *sendcount, *sendtype,
		       recvbuf, *recvcount, *recvtype,
		       *comm);
}
Author: ACME-Climate, Project: cime, Lines: 9, Source: collective.c


Example 16: PCU_Comm_Peers

int binGraph::exchange_edges(uint64_t m_read, uint64_t* read_edges,
                             int32_t* ranks,etype t)
{
  int32_t* scounts = (int32_t*)malloc(PCU_Comm_Peers()*sizeof(int32_t));
  int32_t* rcounts = (int32_t*)malloc(PCU_Comm_Peers()*sizeof(int32_t));
  int32_t* sdispls = (int32_t*)malloc(PCU_Comm_Peers()*sizeof(int32_t));
  int32_t* sdispls_cpy = (int32_t*)malloc(PCU_Comm_Peers()*sizeof(int32_t));
  int32_t* rdispls = (int32_t*)malloc(PCU_Comm_Peers()*sizeof(int32_t));
  for (int i = 0; i < PCU_Comm_Peers(); ++i)
  {
    scounts[i] = 0;
    rcounts[i] = 0;
    sdispls[i] = 0;
    sdispls_cpy[i] = 0;
    rdispls[i] = 0;
  }

  uint64_t n_per_rank = num_global_verts / PCU_Comm_Peers() + 1;
  for (uint64_t i = 0; i < m_read*2; i+=2)
  {
    uint64_t vert = read_edges[i];
    int vert_task = ranks[vert];
    scounts[vert_task] += 2;
  }

  MPI_Alltoall(scounts, 1, MPI_INT32_T,
               rcounts, 1, MPI_INT32_T, PCU_Get_Comm());

  for (uint64_t i = 1; i < PCU_Comm_Peers(); ++i) {
    sdispls[i] = sdispls[i-1] + scounts[i-1];
    sdispls_cpy[i] = sdispls[i];
    rdispls[i] = rdispls[i-1] + rcounts[i-1];
  }
 
  int32_t total_send = sdispls[PCU_Comm_Peers()-1] + scounts[PCU_Comm_Peers()-1];
  int32_t total_recv = rdispls[PCU_Comm_Peers()-1] + rcounts[PCU_Comm_Peers()-1];
  uint64_t* sendbuf = (uint64_t*)malloc(total_send*sizeof(uint64_t));
  edge_list[t] = (uint64_t*)malloc(total_recv*sizeof(uint64_t));
  num_local_edges[t] = total_recv / 2;

  for (uint64_t i = 0; i < m_read*2; i+=2)
  {
    uint64_t vert1 = read_edges[i];
    uint64_t vert2 = read_edges[i+1];
    int vert_task = ranks[vert1];

    sendbuf[sdispls_cpy[vert_task]++] = vert1;
    sendbuf[sdispls_cpy[vert_task]++] = vert2;
  }

  MPI_Alltoallv(sendbuf, scounts, sdispls, MPI_UINT64_T,
                edge_list[t], rcounts, rdispls, MPI_UINT64_T, PCU_Get_Comm());
  free(sendbuf);

  return 0;
}
Author: SCOREC, Project: EnGPar, Lines: 56, Source: binGraph.cpp


Example 17: transpose

void transpose(Real** recv, Real** send) {
  int i;
  for (i = 0; i < mpi_work; i++) {
    MPI_Alltoall(send[i], mpi_work, MPI_DOUBLE,
                 recv[i], mpi_work, MPI_DOUBLE, MPI_COMM_WORLD);
  }
  for (i = 0; i < mpi_size; i++) {
    local_transpose(recv, i*mpi_work);
  }
}
Author: CSaehle, Project: TMA4280, Lines: 10, Source: parallel2.c


Example 18: measure_delayed_Alltoall

double measure_delayed_Alltoall(int send_count, MPI_Datatype send_dt, int recv_count, MPI_Datatype recv_dt, double delay, int node)
{
  double start_time, end_time;

  start_time = start_synchronization();
  if( get_measurement_rank() == node )  while( wtime() < start_time + delay ) ;
  MPI_Alltoall(get_send_buffer(), send_count, send_dt,
	       get_recv_buffer(), recv_count, recv_dt, get_measurement_comm());
  end_time = stop_synchronization();
  return end_time - start_time;
}
Author: jonarbo, Project: KUBE, Lines: 11, Source: delayed.c


Example 19: MPI_Alltoall

// Given how many numbers each process is sending to the other processes, find
// out how many numbers you are receiving from each process. This function
// returns an array of counts indexed on the rank of the process from which it
// will receive the numbers.
int *get_recv_amounts_per_proc(int *send_amounts_per_proc, int world_size) {
  int *recv_amounts_per_proc = (int *)malloc(sizeof(int) * world_size);

  // Perform an Alltoall for the send counts. This will send the send counts
  // from each process and place them in the recv_amounts_per_proc array of
  // the receiving processes to let them know how many numbers they will
  // receive when binning occurs.
  MPI_Alltoall(send_amounts_per_proc, 1, MPI_INT, recv_amounts_per_proc, 1,
               MPI_INT, MPI_COMM_WORLD);
  return recv_amounts_per_proc;
}
Author: Shtkddud123, Project: New, Lines: 15, Source: bin.c


Example 20: timing_basic_alltoall_nelements

void timing_basic_alltoall_nelements( int DIM1, int procs, int loop, char* testname, MPI_Comm local_communicator) {
      
  float* send_array;
  float* recv_array;
  
  int myrank;
  int base, typesize, bytes, i;
  char method[50];

  send_array = malloc( DIM1 * procs * sizeof(float));
  recv_array = malloc( DIM1 * procs * sizeof(float));

  MPI_Comm_rank( local_communicator, &myrank );

  base = myrank * DIM1 + 1;
  utilities_fill_unique_array_1D_float( &send_array[0], DIM1, base );

  if ( myrank == 0 ) {
    snprintf(method, 50, "reference");
        
    MPI_Type_size( MPI_FLOAT, &typesize );
    bytes = typesize * DIM1 * procs;

    timing_init( testname, &method[0], bytes );
  }

  for( i=0 ; i<loop ; i++ ) {
    MPI_Alltoall(&send_array[0], DIM1, MPI_FLOAT, &recv_array[0], DIM1, MPI_FLOAT, local_communicator );
    MPI_Alltoall(&recv_array[0], DIM1, MPI_FLOAT, &send_array[0], DIM1, MPI_FLOAT, local_communicator );
    if ( myrank == 0 ) {
      timing_record(3);
    }
  }

  if ( myrank == 0 ) {
    timing_print( 1 );
  }

  free(send_array);
  free(recv_array);
}     
Author: fredrikbk, Project: libpack, Lines: 41, Source: timing_basic.c



Note: The MPI_Alltoall function examples in this article were compiled from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult the corresponding project's license before distributing or reusing the code; do not reproduce without permission.

