
C++ PetscObjectGetComm Function Code Examples


This article collects typical usage examples of the C++ function PetscObjectGetComm. If you have been asking yourself how PetscObjectGetComm is used in C++, what its typical call sites look like, or where to find real code that uses it, the curated examples below should help.



The following presents 20 code examples of the PetscObjectGetComm function, ordered by popularity by default.
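Before the examples, here is a minimal self-contained sketch of the basic calling pattern: every PETSc object (Vec, Mat, PC, DM, ...) can be cast to PetscObject, and PetscObjectGetComm returns the MPI communicator the object lives on. This sketch is illustrative only, not one of the collected examples, and assumes a standard PETSc build:

#include <petscvec.h>

int main(int argc, char **argv)
{
  PetscErrorCode ierr;
  Vec            x;
  MPI_Comm       comm;
  PetscMPIInt    rank;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL);if (ierr) return ierr;
  ierr = VecCreate(PETSC_COMM_WORLD, &x);CHKERRQ(ierr);
  ierr = VecSetSizes(x, PETSC_DECIDE, 10);CHKERRQ(ierr);
  ierr = VecSetFromOptions(x);CHKERRQ(ierr);

  /* retrieve the communicator the vector was created on */
  ierr = PetscObjectGetComm((PetscObject)x, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF, "[%d] retrieved the communicator of x\n", rank);CHKERRQ(ierr);

  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}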

Example 1: PCGAMGCreateGraph

PetscErrorCode PCGAMGCreateGraph(const Mat Amat, Mat *a_Gmat)
{
  PetscErrorCode ierr;
  PetscInt       Istart,Iend,Ii,jj,kk,ncols,nloc,NN,MM,bs;
  PetscMPIInt    rank, size;
  MPI_Comm       comm;
  Mat            Gmat;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)Amat,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(Amat, &Istart, &Iend);CHKERRQ(ierr);
  ierr = MatGetSize(Amat, &MM, &NN);CHKERRQ(ierr);
  ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);
  nloc = (Iend-Istart)/bs;

#if defined PETSC_GAMG_USE_LOG
  ierr = PetscLogEventBegin(petsc_gamg_setup_events[GRAPH],0,0,0,0);CHKERRQ(ierr);
#endif
  if (bs > 1) {
    const PetscScalar *vals;
    const PetscInt    *idx;
    PetscInt          *d_nnz, *o_nnz;
    /* count nnz, there is sparsity in here so this might not be enough */
    ierr = PetscMalloc1(nloc, &d_nnz);CHKERRQ(ierr);
    ierr = PetscMalloc1(nloc, &o_nnz);CHKERRQ(ierr);
    for (Ii = Istart, jj = 0; Ii < Iend; Ii += bs, jj++) {
      d_nnz[jj] = 0;
      for (kk=0; kk<bs; kk++) {
        ierr = MatGetRow(Amat,Ii+kk,&ncols,0,0);CHKERRQ(ierr);
        if (ncols > d_nnz[jj]) {
          d_nnz[jj] = ncols; /* very pessimistic but could be too low in theory */
          o_nnz[jj] = ncols;
          if (d_nnz[jj] > nloc) d_nnz[jj] = nloc;
          if (o_nnz[jj] > (NN/bs-nloc)) o_nnz[jj] = NN/bs-nloc;
        }
        ierr = MatRestoreRow(Amat,Ii+kk,&ncols,0,0);CHKERRQ(ierr);
      }
    }

    /* get scalar copy (norms) of matrix -- AIJ specific!!! */
    ierr = MatCreateAIJ(comm, nloc, nloc, PETSC_DETERMINE, PETSC_DETERMINE,0, d_nnz, 0, o_nnz, &Gmat);CHKERRQ(ierr);

    ierr = PetscFree(d_nnz);CHKERRQ(ierr);
    ierr = PetscFree(o_nnz);CHKERRQ(ierr);

    for (Ii = Istart; Ii < Iend; Ii++) {
      PetscInt dest_row = Ii/bs;
      ierr = MatGetRow(Amat,Ii,&ncols,&idx,&vals);CHKERRQ(ierr);
      for (jj=0; jj<ncols; jj++) {
        PetscInt    dest_col = idx[jj]/bs;
        PetscScalar sv       = PetscAbs(PetscRealPart(vals[jj]));
        ierr = MatSetValues(Gmat,1,&dest_row,1,&dest_col,&sv,ADD_VALUES);CHKERRQ(ierr);
      }
      ierr = MatRestoreRow(Amat,Ii,&ncols,&idx,&vals);CHKERRQ(ierr);
    }
    ierr = MatAssemblyBegin(Gmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(Gmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  } else {
    /* just copy scalar matrix - abs() not taken here but scaled later */
    ierr = MatDuplicate(Amat, MAT_COPY_VALUES, &Gmat);CHKERRQ(ierr);
  }

#if defined PETSC_GAMG_USE_LOG
  ierr = PetscLogEventEnd(petsc_gamg_setup_events[GRAPH],0,0,0,0);CHKERRQ(ierr);
#endif

  *a_Gmat = Gmat;
  PetscFunctionReturn(0);
}
Developer: ZJLi2013 | Project: petsc | Lines: 71 | Source: tools.c


Example 2: TaoVecGetSubVec

/*@C
  TaoVecGetSubVec - Gets a subvector using the IS

  Input Parameters:
+ vfull - the full vector
. is - the index set for the subvector
. reduced_type - the method TAO is using for subsetting (TAO_SUBSET_SUBVEC, TAO_SUBSET_MASK,  TAO_SUBSET_MATRIXFREE)
- maskvalue - the value to set the unused vector elements to (for TAO_SUBSET_MASK or TAO_SUBSET_MATRIXFREE)

  Output Parameters:
. vreduced - the subvector

  Notes:
  maskvalue should usually be 0.0, unless a pointwise divide will be used.

@*/
PetscErrorCode TaoVecGetSubVec(Vec vfull, IS is, TaoSubsetType reduced_type, PetscReal maskvalue, Vec *vreduced)
{
  PetscErrorCode ierr;
  PetscInt       nfull,nreduced,nreduced_local,rlow,rhigh,flow,fhigh;
  PetscInt       i,nlocal;
  PetscReal      *fv,*rv;
  const PetscInt *s;
  IS             ident;
  VecType        vtype;
  VecScatter     scatter;
  MPI_Comm       comm;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(vfull,VEC_CLASSID,1);
  PetscValidHeaderSpecific(is,IS_CLASSID,2);

  ierr = VecGetSize(vfull, &nfull);CHKERRQ(ierr);
  ierr = ISGetSize(is, &nreduced);CHKERRQ(ierr);

  if (nreduced == nfull) {
    ierr = VecDestroy(vreduced);CHKERRQ(ierr);
    ierr = VecDuplicate(vfull,vreduced);CHKERRQ(ierr);
    ierr = VecCopy(vfull,*vreduced);CHKERRQ(ierr);
  } else {
    switch (reduced_type) {
    case TAO_SUBSET_SUBVEC:
      ierr = VecGetType(vfull,&vtype);CHKERRQ(ierr);
      ierr = VecGetOwnershipRange(vfull,&flow,&fhigh);CHKERRQ(ierr);
      ierr = ISGetLocalSize(is,&nreduced_local);CHKERRQ(ierr);
      ierr = PetscObjectGetComm((PetscObject)vfull,&comm);CHKERRQ(ierr);
      if (*vreduced) {
        ierr = VecDestroy(vreduced);CHKERRQ(ierr);
      }
      ierr = VecCreate(comm,vreduced);CHKERRQ(ierr);
      ierr = VecSetType(*vreduced,vtype);CHKERRQ(ierr);

      ierr = VecSetSizes(*vreduced,nreduced_local,nreduced);CHKERRQ(ierr);
      ierr = VecGetOwnershipRange(*vreduced,&rlow,&rhigh);CHKERRQ(ierr);
      ierr = ISCreateStride(comm,nreduced_local,rlow,1,&ident);CHKERRQ(ierr);
      ierr = VecScatterCreate(vfull,is,*vreduced,ident,&scatter);CHKERRQ(ierr);
      ierr = VecScatterBegin(scatter,vfull,*vreduced,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterEnd(scatter,vfull,*vreduced,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterDestroy(&scatter);CHKERRQ(ierr);
      ierr = ISDestroy(&ident);CHKERRQ(ierr);
      break;

    case TAO_SUBSET_MASK:
    case TAO_SUBSET_MATRIXFREE:
      /* vr[i] = vf[i]   if i in is
       vr[i] = 0       otherwise */
      if (!*vreduced) {
        ierr = VecDuplicate(vfull,vreduced);CHKERRQ(ierr);
      }

      ierr = VecSet(*vreduced,maskvalue);CHKERRQ(ierr);
      ierr = ISGetLocalSize(is,&nlocal);CHKERRQ(ierr);
      ierr = VecGetOwnershipRange(vfull,&flow,&fhigh);CHKERRQ(ierr);
      ierr = VecGetArray(vfull,&fv);CHKERRQ(ierr);
      ierr = VecGetArray(*vreduced,&rv);CHKERRQ(ierr);
      ierr = ISGetIndices(is,&s);CHKERRQ(ierr);
      if (nlocal > (fhigh-flow)) SETERRQ2(PETSC_COMM_WORLD,1,"IS local size %d > Vec local size %d",nlocal,fhigh-flow);
      for (i=0;i<nlocal;i++) {
        rv[s[i]-flow] = fv[s[i]-flow];
      }
      ierr = ISRestoreIndices(is,&s);CHKERRQ(ierr);
      ierr = VecRestoreArray(vfull,&fv);CHKERRQ(ierr);
      ierr = VecRestoreArray(*vreduced,&rv);CHKERRQ(ierr);
      break;
    }
  }
  PetscFunctionReturn(0);
}
Developer: tom-klotz | Project: petsc | Lines: 88 | Source: isutil.c
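A hedged usage sketch of TaoVecGetSubVec based on the documentation above: gather half of each process's locally owned entries into a new, genuinely smaller vector. The setup code is illustrative, not taken from the PETSc sources, and it assumes TaoVecGetSubVec is declared via petsctao.h:

#include <petsctao.h>   /* assumed to declare TaoVecGetSubVec */

/* Illustrative: extract the first half of each process's rows into vsub */
static PetscErrorCode ExtractHalf(MPI_Comm comm)
{
  PetscErrorCode ierr;
  Vec            vfull, vsub = NULL;
  IS             is;
  PetscInt       lo, hi;

  PetscFunctionBegin;
  ierr = VecCreateMPI(comm, PETSC_DECIDE, 10, &vfull);CHKERRQ(ierr);
  ierr = VecSet(vfull, 1.0);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(vfull, &lo, &hi);CHKERRQ(ierr);
  /* index set selecting the first half of the locally owned range */
  ierr = ISCreateStride(comm, (hi - lo)/2, lo, 1, &is);CHKERRQ(ierr);
  /* TAO_SUBSET_SUBVEC builds a smaller vector via VecScatter, per the docs above */
  ierr = TaoVecGetSubVec(vfull, is, TAO_SUBSET_SUBVEC, 0.0, &vsub);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  ierr = VecDestroy(&vfull);CHKERRQ(ierr);
  ierr = VecDestroy(&vsub);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

With TAO_SUBSET_MASK or TAO_SUBSET_MATRIXFREE the result instead stays full-sized, with unselected entries set to maskvalue (0.0 here).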


Example 3: MatIncreaseOverlapSplit_Single

/*
 * Increase overlap for the sub-matrix across the sub-communicator;
 * the sub-matrix can be a graph or a numerical matrix
 * */
PetscErrorCode  MatIncreaseOverlapSplit_Single(Mat mat,IS *is,PetscInt ov)
{
  PetscInt         i,nindx,*indices_sc,*indices_ov,localsize,*localsizes_sc,localsize_tmp;
  PetscInt         *indices_ov_rd,nroots,nleaves,*localoffsets,*indices_recv,*sources_sc,*sources_sc_rd;
  const PetscInt   *indices;
  PetscMPIInt      srank,ssize,issamecomm,k,grank;
  IS               is_sc,allis_sc,partitioning;
  MPI_Comm         gcomm,dcomm,scomm;
  PetscSF          sf;
  PetscSFNode      *remote;
  Mat              *smat;
  MatPartitioning  part;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  /* get a sub-communicator before calling the individual MatIncreaseOverlap,
   * since the sub-communicator may be changed
   * */
  ierr = PetscObjectGetComm((PetscObject)(*is),&dcomm);CHKERRQ(ierr);
  /*make a copy before the original one is deleted*/
  ierr = PetscCommDuplicate(dcomm,&scomm,NULL);CHKERRQ(ierr);
  /*get a global communicator, where mat should be a global matrix  */
  ierr = PetscObjectGetComm((PetscObject)mat,&gcomm);CHKERRQ(ierr);
  /*increase overlap on each individual subdomain*/
  ierr = (*mat->ops->increaseoverlap)(mat,1,is,ov);CHKERRQ(ierr);
  /*compare communicators */
  ierr = MPI_Comm_compare(gcomm,scomm,&issamecomm);CHKERRQ(ierr);
  /* if the sub-communicator is the same as the global communicator,
   * user does not want to use a sub-communicator
   * */
  if(issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) PetscFunctionReturn(0);
  /* if the sub-communicator is petsc_comm_self,
   * the user also does not care about the sub-communicator
   * */
  ierr = MPI_Comm_compare(scomm,PETSC_COMM_SELF,&issamecomm);CHKERRQ(ierr);
  if(issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT){PetscFunctionReturn(0);}
  /*local rank, size in a sub-communicator  */
  ierr = MPI_Comm_rank(scomm,&srank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(scomm,&ssize);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(gcomm,&grank);CHKERRQ(ierr);
  /*create a new IS based on sub-communicator
   * since the old IS is often based on petsc_comm_self
   * */
  ierr = ISGetLocalSize(*is,&nindx);CHKERRQ(ierr);
  ierr = PetscCalloc1(nindx,&indices_sc);CHKERRQ(ierr);
  ierr = ISGetIndices(*is,&indices);CHKERRQ(ierr);
  ierr = PetscMemcpy(indices_sc,indices,sizeof(PetscInt)*nindx);CHKERRQ(ierr);
  ierr = ISRestoreIndices(*is,&indices);CHKERRQ(ierr);
  /*we do not need it any more*/
  ierr = ISDestroy(is);CHKERRQ(ierr);
  /*create a index set based on the sub communicator  */
  ierr = ISCreateGeneral(scomm,nindx,indices_sc,PETSC_OWN_POINTER,&is_sc);CHKERRQ(ierr);
  /*gather all indices within  the sub communicator*/
  ierr = ISAllGather(is_sc,&allis_sc);CHKERRQ(ierr);
  ierr = ISDestroy(&is_sc);CHKERRQ(ierr);
  /* gather local sizes */
  ierr = PetscMalloc1(ssize,&localsizes_sc);CHKERRQ(ierr);
  /*get individual local sizes for all index sets*/
  ierr = MPI_Gather(&nindx,1,MPIU_INT,localsizes_sc,1,MPIU_INT,0,scomm);CHKERRQ(ierr);
  /*only root does these computations */
  if(!srank){
   /*get local size for the big index set*/
   ierr = ISGetLocalSize(allis_sc,&localsize);CHKERRQ(ierr);
   ierr = PetscCalloc2(localsize,&indices_ov,localsize,&sources_sc);CHKERRQ(ierr);
   ierr = PetscCalloc2(localsize,&indices_ov_rd,localsize,&sources_sc_rd);CHKERRQ(ierr);
   ierr = ISGetIndices(allis_sc,&indices);CHKERRQ(ierr);
   ierr = PetscMemcpy(indices_ov,indices,sizeof(PetscInt)*localsize);CHKERRQ(ierr);
   ierr = ISRestoreIndices(allis_sc,&indices);CHKERRQ(ierr);
   /*we do not need it any more */
   ierr = ISDestroy(&allis_sc);CHKERRQ(ierr);
   /*assign corresponding sources */
   localsize_tmp = 0;
   for(k=0; k<ssize; k++){
     for(i=0; i<localsizes_sc[k]; i++){
       sources_sc[localsize_tmp++] = k;
     }
   }
   /*record where indices come from */
   ierr = PetscSortIntWithArray(localsize,indices_ov,sources_sc);CHKERRQ(ierr);
   /*count local sizes for reduced indices */
   ierr = PetscMemzero(localsizes_sc,sizeof(PetscInt)*ssize);CHKERRQ(ierr);
   /*initialize the first entry*/
   if(localsize){
	 indices_ov_rd[0] = indices_ov[0];
	 sources_sc_rd[0] = sources_sc[0];
	 localsizes_sc[sources_sc[0]]++;
   }
   localsize_tmp = 1;
   /*remove duplicate integers */
   for(i=1; i<localsize; i++){
	 if(indices_ov[i] != indices_ov[i-1]){
	   indices_ov_rd[localsize_tmp]   = indices_ov[i];
	   sources_sc_rd[localsize_tmp++] = sources_sc[i];
	   localsizes_sc[sources_sc[i]]++;
	 }
   }
//......... part of the code omitted here .........
Developer: mchandra | Project: petsc | Lines: 101 | Source: overlapsplit.c
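For comparison, the public entry point that this kind of routine ultimately backs is MatIncreaseOverlap(), which grows index sets through the nonzero pattern of a matrix (as used by PCASM). A minimal hedged sketch, assuming A is an assembled parallel AIJ matrix owned by the caller:

#include <petscmat.h>

/* Illustrative sketch: start from the locally owned rows of A and add one
   level of overlap through the matrix connectivity. */
static PetscErrorCode GrowSubdomain(Mat A)
{
  PetscErrorCode ierr;
  PetscInt       rstart, rend;
  IS             is;

  PetscFunctionBegin;
  ierr = MatGetOwnershipRange(A, &rstart, &rend);CHKERRQ(ierr);
  /* subdomain index sets are sequential, as in PCASM */
  ierr = ISCreateStride(PETSC_COMM_SELF, rend - rstart, rstart, 1, &is);CHKERRQ(ierr);
  /* add one level of overlap: rows reachable via the nonzero pattern */
  ierr = MatIncreaseOverlap(A, 1, &is, 1);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}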


Example 4: PCSetUp_Redistribute

static PetscErrorCode PCSetUp_Redistribute(PC pc)
{
  PC_Redistribute   *red = (PC_Redistribute*)pc->data;
  PetscErrorCode    ierr;
  MPI_Comm          comm;
  PetscInt          rstart,rend,i,nz,cnt,*rows,ncnt,dcnt,*drows;
  PetscLayout       map,nmap;
  PetscMPIInt       size,imdex,tag,n;
  PetscInt          *source = PETSC_NULL;
  PetscMPIInt       *nprocs = PETSC_NULL,nrecvs;
  PetscInt          j,nsends;
  PetscInt          *owner = PETSC_NULL,*starts = PETSC_NULL,count,slen;
  PetscInt          *rvalues,*svalues,recvtotal;
  PetscMPIInt       *onodes1,*olengths1;
  MPI_Request       *send_waits = PETSC_NULL,*recv_waits = PETSC_NULL;
  MPI_Status        recv_status,*send_status;
  Vec               tvec,diag;
  Mat               tmat;
  const PetscScalar *d;

  PetscFunctionBegin;
  if (pc->setupcalled) {
    ierr = KSPGetOperators(red->ksp,PETSC_NULL,&tmat,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatGetSubMatrix(pc->pmat,red->is,red->is,MAT_REUSE_MATRIX,&tmat);CHKERRQ(ierr);
    ierr = KSPSetOperators(red->ksp,tmat,tmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
  } else {
    PetscInt NN;

    ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
    ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
    ierr = PetscObjectGetNewTag((PetscObject)pc,&tag);CHKERRQ(ierr);

    /* count non-diagonal rows on process */
    ierr = MatGetOwnershipRange(pc->mat,&rstart,&rend);CHKERRQ(ierr);
    cnt  = 0;
    for (i=rstart; i<rend; i++) {
      ierr = MatGetRow(pc->mat,i,&nz,PETSC_NULL,PETSC_NULL);CHKERRQ(ierr);
      if (nz > 1) cnt++;
      ierr = MatRestoreRow(pc->mat,i,&nz,PETSC_NULL,PETSC_NULL);CHKERRQ(ierr);
    }
    ierr = PetscMalloc(cnt*sizeof(PetscInt),&rows);CHKERRQ(ierr);
    ierr = PetscMalloc((rend - rstart - cnt)*sizeof(PetscInt),&drows);CHKERRQ(ierr);

    /* list non-diagonal rows on process */
    cnt  = 0; dcnt = 0;
    for (i=rstart; i<rend; i++) {
      ierr = MatGetRow(pc->mat,i,&nz,PETSC_NULL,PETSC_NULL);CHKERRQ(ierr);
      if (nz > 1) rows[cnt++] = i;
      else drows[dcnt++] = i - rstart;
      ierr = MatRestoreRow(pc->mat,i,&nz,PETSC_NULL,PETSC_NULL);CHKERRQ(ierr);
    }

    /* create PetscLayout for non-diagonal rows on each process */
    ierr = PetscLayoutCreate(comm,&map);CHKERRQ(ierr);
    ierr = PetscLayoutSetLocalSize(map,cnt);CHKERRQ(ierr);
    ierr = PetscLayoutSetBlockSize(map,1);CHKERRQ(ierr);
    ierr = PetscLayoutSetUp(map);CHKERRQ(ierr);
    rstart = map->rstart;
    rend   = map->rend;

    /* create PetscLayout for load-balanced non-diagonal rows on each process */
    ierr = PetscLayoutCreate(comm,&nmap);CHKERRQ(ierr);
    ierr = MPI_Allreduce(&cnt,&ncnt,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
    ierr = PetscLayoutSetSize(nmap,ncnt);CHKERRQ(ierr);
    ierr = PetscLayoutSetBlockSize(nmap,1);CHKERRQ(ierr);
    ierr = PetscLayoutSetUp(nmap);CHKERRQ(ierr);

    ierr = MatGetSize(pc->pmat,&NN,PETSC_NULL);CHKERRQ(ierr);
    ierr = PetscInfo2(pc,"Number of diagonal rows eliminated %d, percentage eliminated %g\n",NN-ncnt,((PetscReal)(NN-ncnt))/((PetscReal)(NN)));CHKERRQ(ierr);
    /*
        this code is taken from VecScatterCreate_PtoS()
        Determines what rows need to be moved where to
        load balance the non-diagonal rows
    */
    /*  count number of contributors to each processor */
    ierr = PetscMalloc2(size,PetscMPIInt,&nprocs,cnt,PetscInt,&owner);CHKERRQ(ierr);
    ierr = PetscMemzero(nprocs,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
    j      = 0;
    nsends = 0;
    for (i=rstart; i<rend; i++) {
      if (i < nmap->range[j]) j = 0;
      for (; j<size; j++) {
        if (i < nmap->range[j+1]) {
          if (!nprocs[j]++) nsends++;
          owner[i-rstart] = j;
          break;
        }
      }
    }
    /* inform other processors of number of messages and max length*/
    ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,nprocs,&nrecvs);CHKERRQ(ierr);
    ierr = PetscGatherMessageLengths(comm,nsends,nrecvs,nprocs,&onodes1,&olengths1);CHKERRQ(ierr);
    ierr = PetscSortMPIIntWithArray(nrecvs,onodes1,olengths1);CHKERRQ(ierr);
    recvtotal = 0; for (i=0; i<nrecvs; i++) recvtotal += olengths1[i];

    /* post receives:  rvalues - rows I will own; count - nu */
    ierr = PetscMalloc3(recvtotal,PetscInt,&rvalues,nrecvs,PetscInt,&source,nrecvs,MPI_Request,&recv_waits);CHKERRQ(ierr);
    count  = 0;
    for (i=0; i<nrecvs; i++) {
      ierr  = MPI_Irecv((rvalues+count),olengths1[i],MPIU_INT,onodes1[i],tag,comm,recv_waits+i);CHKERRQ(ierr);
//......... part of the code omitted here .........
Developer: erdc-cm | Project: petsc-dev | Lines: 101 | Source: redistribute.c


Example 5: VecView_MPI_Draw_DA1d

PetscErrorCode VecView_MPI_Draw_DA1d(Vec xin,PetscViewer v)
{
  DM                da;
  PetscErrorCode    ierr;
  PetscMPIInt       rank,size,tag1,tag2;
  PetscInt          i,n,N,step,istart,isize,j,nbounds;
  MPI_Status        status;
  PetscReal         coors[4],ymin,ymax,min,max,xmin,xmax,tmp,xgtmp;
  const PetscScalar *array,*xg;
  PetscDraw         draw;
  PetscBool         isnull,showpoints = PETSC_FALSE;
  MPI_Comm          comm;
  PetscDrawAxis     axis;
  Vec               xcoor;
  DMDABoundaryType  bx;
  const PetscReal   *bounds;
  PetscInt          *displayfields;
  PetscInt          k,ndisplayfields;
  PetscBool         hold;

  PetscFunctionBegin;
  ierr = PetscViewerDrawGetDraw(v,0,&draw);CHKERRQ(ierr);
  ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr); if (isnull) PetscFunctionReturn(0);
  ierr = PetscViewerDrawGetBounds(v,&nbounds,&bounds);CHKERRQ(ierr);

  ierr = VecGetDM(xin,&da);CHKERRQ(ierr);
  if (!da) SETERRQ(PetscObjectComm((PetscObject)xin),PETSC_ERR_ARG_WRONG,"Vector not generated from a DMDA");

  ierr = PetscOptionsGetBool(NULL,"-draw_vec_mark_points",&showpoints,NULL);CHKERRQ(ierr);

  ierr = DMDAGetInfo(da,0,&N,0,0,0,0,0,&step,0,&bx,0,0,0);CHKERRQ(ierr);
  ierr = DMDAGetCorners(da,&istart,0,0,&isize,0,0);CHKERRQ(ierr);
  ierr = VecGetArrayRead(xin,&array);CHKERRQ(ierr);
  ierr = VecGetLocalSize(xin,&n);CHKERRQ(ierr);
  n    = n/step;

  /* get coordinates of nodes */
  ierr = DMGetCoordinates(da,&xcoor);CHKERRQ(ierr);
  if (!xcoor) {
    ierr = DMDASetUniformCoordinates(da,0.0,1.0,0.0,0.0,0.0,0.0);CHKERRQ(ierr);
    ierr = DMGetCoordinates(da,&xcoor);CHKERRQ(ierr);
  }
  ierr = VecGetArrayRead(xcoor,&xg);CHKERRQ(ierr);

  ierr = PetscObjectGetComm((PetscObject)xin,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /*
      Determine the min and max x coordinate in plot
  */
  if (!rank) {
    xmin = PetscRealPart(xg[0]);
  }
  if (rank == size-1) {
    xmax = PetscRealPart(xg[n-1]);
  }
  ierr = MPI_Bcast(&xmin,1,MPIU_REAL,0,comm);CHKERRQ(ierr);
  ierr = MPI_Bcast(&xmax,1,MPIU_REAL,size-1,comm);CHKERRQ(ierr);

  ierr = DMDASelectFields(da,&ndisplayfields,&displayfields);CHKERRQ(ierr);
  for (k=0; k<ndisplayfields; k++) {
    j    = displayfields[k];
    ierr = PetscViewerDrawGetDraw(v,k,&draw);CHKERRQ(ierr);
    ierr = PetscDrawCheckResizedWindow(draw);CHKERRQ(ierr);

    /*
        Determine the min and max y coordinate in plot
    */
    min = 1.e20; max = -1.e20;
    for (i=0; i<n; i++) {
      if (PetscRealPart(array[j+i*step]) < min) min = PetscRealPart(array[j+i*step]);
      if (PetscRealPart(array[j+i*step]) > max) max = PetscRealPart(array[j+i*step]);
    }
    if (min + 1.e-10 > max) {
      min -= 1.e-5;
      max += 1.e-5;
    }
    if (j < nbounds) {
      min = PetscMin(min,bounds[2*j]);
      max = PetscMax(max,bounds[2*j+1]);
    }

    ierr = MPI_Reduce(&min,&ymin,1,MPIU_REAL,MPIU_MIN,0,comm);CHKERRQ(ierr);
    ierr = MPI_Reduce(&max,&ymax,1,MPIU_REAL,MPIU_MAX,0,comm);CHKERRQ(ierr);

    ierr = PetscViewerDrawGetHold(v,&hold);CHKERRQ(ierr);
    if (!hold) {
      ierr = PetscDrawSynchronizedClear(draw);CHKERRQ(ierr);
    }
    ierr = PetscViewerDrawGetDrawAxis(v,k,&axis);CHKERRQ(ierr);
    ierr = PetscLogObjectParent((PetscObject)draw,(PetscObject)axis);CHKERRQ(ierr);
    if (!rank) {
      const char *title;

      ierr = PetscDrawAxisSetLimits(axis,xmin,xmax,ymin,ymax);CHKERRQ(ierr);
      ierr = PetscDrawAxisDraw(axis);CHKERRQ(ierr);
      ierr = PetscDrawGetCoordinates(draw,coors,coors+1,coors+2,coors+3);CHKERRQ(ierr);
      ierr = DMDAGetFieldName(da,j,&title);CHKERRQ(ierr);
      if (title) {ierr = PetscDrawSetTitle(draw,title);CHKERRQ(ierr);}
//......... part of the code omitted here .........
Developer: hsahasra | Project: petsc-magma-dense-mat | Lines: 101 | Source: gr1.c


Example 6: PCGAMGcoarsen_GEO

PetscErrorCode PCGAMGcoarsen_GEO(PC a_pc,Mat *a_Gmat,PetscCoarsenData **a_llist_parent)
{
  PetscErrorCode ierr;
  PC_MG          *mg      = (PC_MG*)a_pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  PetscInt       Istart,Iend,nloc,kk,Ii,ncols;
  PetscMPIInt    rank,size;
  IS             perm;
  GAMGNode       *gnodes;
  PetscInt       *permute;
  Mat            Gmat  = *a_Gmat;
  MPI_Comm       comm;
  MatCoarsen     crs;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)a_pc,&comm);CHKERRQ(ierr);
#if defined PETSC_USE_LOG
  ierr = PetscLogEventBegin(PC_GAMGCoarsen_GEO,0,0,0,0);CHKERRQ(ierr);
#endif
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(Gmat, &Istart, &Iend);CHKERRQ(ierr);
  nloc = (Iend-Istart);

  /* create random permutation with sort for geo-mg */
  ierr = PetscMalloc1(nloc, &gnodes);CHKERRQ(ierr);
  ierr = PetscMalloc1(nloc, &permute);CHKERRQ(ierr);

  for (Ii=Istart; Ii<Iend; Ii++) { /* locals only? */
    ierr = MatGetRow(Gmat,Ii,&ncols,0,0);CHKERRQ(ierr);
    {
      PetscInt lid = Ii - Istart;
      gnodes[lid].lid    = lid;
      gnodes[lid].degree = ncols;
    }
    ierr = MatRestoreRow(Gmat,Ii,&ncols,0,0);CHKERRQ(ierr);
  }
  /* randomize */
  srand(1); /* make deterministic */
  if (PETSC_TRUE) {
    PetscBool *bIndexSet;
    ierr = PetscMalloc1(nloc, &bIndexSet);CHKERRQ(ierr);
    for (Ii = 0; Ii < nloc; Ii++) bIndexSet[Ii] = PETSC_FALSE;
    for (Ii = 0; Ii < nloc; Ii++) {
      PetscInt iSwapIndex = rand()%nloc;
      if (!bIndexSet[iSwapIndex] && iSwapIndex != Ii) {
        GAMGNode iTemp = gnodes[iSwapIndex];
        gnodes[iSwapIndex]    = gnodes[Ii];
        gnodes[Ii]            = iTemp;
        bIndexSet[Ii]         = PETSC_TRUE;
        bIndexSet[iSwapIndex] = PETSC_TRUE;
      }
    }
    ierr = PetscFree(bIndexSet);CHKERRQ(ierr);
  }
  /* only sort locals */
  qsort(gnodes, nloc, sizeof(GAMGNode), petsc_geo_mg_compare);
  /* create IS of permutation */
  for (kk=0; kk<nloc; kk++) permute[kk] = gnodes[kk].lid; /* locals only */
  ierr = ISCreateGeneral(PETSC_COMM_SELF, nloc, permute, PETSC_OWN_POINTER, &perm);CHKERRQ(ierr);

  ierr = PetscFree(gnodes);CHKERRQ(ierr);

  /* get MIS aggs */

  ierr = MatCoarsenCreate(comm, &crs);CHKERRQ(ierr);
  ierr = MatCoarsenSetType(crs, MATCOARSENMIS);CHKERRQ(ierr);
  ierr = MatCoarsenSetGreedyOrdering(crs, perm);CHKERRQ(ierr);
  ierr = MatCoarsenSetAdjacency(crs, Gmat);CHKERRQ(ierr);
  ierr = MatCoarsenSetVerbose(crs, pc_gamg->verbose);CHKERRQ(ierr);
  ierr = MatCoarsenSetStrictAggs(crs, PETSC_FALSE);CHKERRQ(ierr);
  ierr = MatCoarsenApply(crs);CHKERRQ(ierr);
  ierr = MatCoarsenGetData(crs, a_llist_parent);CHKERRQ(ierr);
  ierr = MatCoarsenDestroy(&crs);CHKERRQ(ierr);

  ierr = ISDestroy(&perm);CHKERRQ(ierr);
#if defined PETSC_USE_LOG
  ierr = PetscLogEventEnd(PC_GAMGCoarsen_GEO,0,0,0,0);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}
Developer: 00liujj | Project: petsc | Lines: 81 | Source: geo.c


Example 7: PCGAMGProlongator_GEO

PetscErrorCode PCGAMGProlongator_GEO(PC pc,const Mat Amat,const Mat Gmat,PetscCoarsenData *agg_lists,Mat *a_P_out)
{
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  const PetscInt verbose  = pc_gamg->verbose;
  const PetscInt dim      = pc_gamg->data_cell_cols, data_cols = pc_gamg->data_cell_cols;
  PetscErrorCode ierr;
  PetscInt       Istart,Iend,nloc,my0,jj,kk,ncols,nLocalSelected,bs,*clid_flid;
  Mat            Prol;
  PetscMPIInt    rank, size;
  MPI_Comm       comm;
  IS             selected_2,selected_1;
  const PetscInt *selected_idx;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)Amat,&comm);CHKERRQ(ierr);
#if defined PETSC_USE_LOG
  ierr = PetscLogEventBegin(PC_GAMGProlongator_GEO,0,0,0,0);CHKERRQ(ierr);
#endif
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(Amat, &Istart, &Iend);CHKERRQ(ierr);
  ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);
  nloc = (Iend-Istart)/bs; my0 = Istart/bs; 
  if ((Iend-Istart) % bs) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"(Iend %D - Istart %D) % bs %D",Iend,Istart,bs);

  /* get 'nLocalSelected' */
  ierr = PetscCDGetMIS(agg_lists, &selected_1);CHKERRQ(ierr);
  ierr = ISGetSize(selected_1, &jj);CHKERRQ(ierr);
  ierr = PetscMalloc1(jj, &clid_flid);CHKERRQ(ierr);
  ierr = ISGetIndices(selected_1, &selected_idx);CHKERRQ(ierr);
  for (kk=0,nLocalSelected=0; kk<jj; kk++) {
    PetscInt lid = selected_idx[kk];
    if (lid<nloc) {
      ierr = MatGetRow(Gmat,lid+my0,&ncols,0,0);CHKERRQ(ierr);
      if (ncols>1) clid_flid[nLocalSelected++] = lid; /* filter out singletons */
      ierr = MatRestoreRow(Gmat,lid+my0,&ncols,0,0);CHKERRQ(ierr);
    }
  }
  ierr = ISRestoreIndices(selected_1, &selected_idx);CHKERRQ(ierr);
  ierr = ISDestroy(&selected_1);CHKERRQ(ierr); /* this is selected_1 in serial */

  /* create prolongator, create P matrix */
  ierr = MatCreate(comm, &Prol);CHKERRQ(ierr);
  ierr = MatSetSizes(Prol,nloc*bs,nLocalSelected*bs,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Prol, bs, bs);CHKERRQ(ierr);
  ierr = MatSetType(Prol, MATAIJ);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(Prol,3*data_cols,NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Prol,3*data_cols,NULL,3*data_cols,NULL);CHKERRQ(ierr);
  /* ierr = MatCreateAIJ(comm,  */
  /*                      nloc*bs, nLocalSelected*bs, */
  /*                      PETSC_DETERMINE, PETSC_DETERMINE, */
  /*                      3*data_cols, NULL,  */
  /*                      3*data_cols, NULL, */
  /*                      &Prol); */
  /* CHKERRQ(ierr); */

  /* can get all points "removed" - but not on geomg */
  ierr =  MatGetSize(Prol, &kk, &jj);CHKERRQ(ierr);
  if (jj==0) {
    if (verbose) PetscPrintf(comm,"[%d]%s ERROR: no selected points on coarse grid\n",rank,__FUNCT__);
    ierr = PetscFree(clid_flid);CHKERRQ(ierr);
    ierr = MatDestroy(&Prol);CHKERRQ(ierr);
    *a_P_out = NULL;  /* out */
    PetscFunctionReturn(0);
  }

  {
    PetscReal *coords;
    PetscInt  data_stride;
    PetscInt  *crsGID = NULL;
    Mat       Gmat2;

    if (dim != data_cols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"dim %D != data_cols %D",dim,data_cols);
    /* grow ghost data for better coarse grid cover of fine grid */
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET5],0,0,0,0);CHKERRQ(ierr);
#endif
    /* messy method, squares graph and gets some data */
    ierr = getGIDsOnSquareGraph(nLocalSelected, clid_flid, Gmat, &selected_2, &Gmat2, &crsGID);CHKERRQ(ierr);
    /* llist is now not valid wrt squared graph, but will work as iterator in 'triangulateAndFormProl' */
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET5],0,0,0,0);CHKERRQ(ierr);
#endif
    /* create global vector of coordinates in 'coords' */
    if (size > 1) {
      ierr = PCGAMGGetDataWithGhosts(Gmat2, dim, pc_gamg->data, &data_stride, &coords);CHKERRQ(ierr);
    } else {
      coords      = (PetscReal*)pc_gamg->data;
      data_stride = pc_gamg->data_sz/pc_gamg->data_cell_cols;
    }
    ierr = MatDestroy(&Gmat2);CHKERRQ(ierr);

    /* triangulate */
    if (dim == 2) {
      PetscReal metric,tm;
#if defined PETSC_GAMG_USE_LOG
      ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET6],0,0,0,0);CHKERRQ(ierr);
#endif
      ierr = triangulateAndFormProl(selected_2, data_stride, coords,nLocalSelected, clid_flid, agg_lists, crsGID, bs, Prol, &metric);CHKERRQ(ierr);
//......... part of the code omitted here .........
Developer: 00liujj | Project: petsc | Lines: 101 | Source: geo.c


Example 8: triangulateAndFormProl

static PetscErrorCode triangulateAndFormProl(IS  selected_2, /* list of selected local IDs, includes selected ghosts */
                                             const PetscInt data_stride,
                                             const PetscReal coords[], /* column vector of local coordinates w/ ghosts */
                                             const PetscInt nselected_1, /* number of selected local IDs (locals only) */
                                             const PetscInt clid_lid_1[],
                                             const PetscCoarsenData *agg_lists_1, /* selected_1 vertices of aggregate unselected vertices */
                                             const PetscInt crsGID[],
                                             const PetscInt bs,
                                             Mat a_Prol, /* prolongation operator (output) */
                                             PetscReal *a_worst_best) /* measure of worst missed fine vertex, 0 is no misses */
{
#if defined(PETSC_HAVE_TRIANGLE)
  PetscErrorCode       ierr;
  PetscInt             jj,tid,tt,idx,nselected_2;
  struct triangulateio in,mid;
  const PetscInt       *selected_idx_2;
  PetscMPIInt          rank,size;
  PetscInt             Istart,Iend,nFineLoc,myFine0;
  int                  kk,nPlotPts,sid;
  MPI_Comm             comm;
  PetscReal            tm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)a_Prol,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = ISGetSize(selected_2, &nselected_2);CHKERRQ(ierr);
  if (nselected_2 == 1 || nselected_2 == 2) { /* 0 happens on idle processors */
    *a_worst_best = 100.0; /* this will cause a stop, but not globalized (should not happen) */
  } else *a_worst_best = 0.0;
  ierr = MPI_Allreduce(a_worst_best, &tm, 1, MPIU_REAL, MPIU_MAX, comm);CHKERRQ(ierr);
  if (tm > 0.0) {
    *a_worst_best = 100.0;
    PetscFunctionReturn(0);
  }
  ierr     = MatGetOwnershipRange(a_Prol, &Istart, &Iend);CHKERRQ(ierr);
  nFineLoc = (Iend-Istart)/bs; myFine0 = Istart/bs;
  nPlotPts = nFineLoc; /* locals */
  /* triangle */
  /* Define input points - in*/
  in.numberofpoints          = nselected_2;
  in.numberofpointattributes = 0;
  /* get nselected points */
  ierr = PetscMalloc1(2*(nselected_2), &in.pointlist);CHKERRQ(ierr);
  ierr = ISGetIndices(selected_2, &selected_idx_2);CHKERRQ(ierr);

  for (kk=0,sid=0; kk<nselected_2; kk++,sid += 2) {
    PetscInt lid = selected_idx_2[kk];
    in.pointlist[sid]   = coords[lid];
    in.pointlist[sid+1] = coords[data_stride + lid];
    if (lid>=nFineLoc) nPlotPts++;
  }
  if (sid != 2*nselected_2) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"sid %D != 2*nselected_2 %D",sid,nselected_2);

  in.numberofsegments      = 0;
  in.numberofedges         = 0;
  in.numberofholes         = 0;
  in.numberofregions       = 0;
  in.trianglelist          = 0;
  in.segmentmarkerlist     = 0;
  in.pointattributelist    = 0;
  in.pointmarkerlist       = 0;
  in.triangleattributelist = 0;
  in.trianglearealist      = 0;
  in.segmentlist           = 0;
  in.holelist              = 0;
  in.regionlist            = 0;
  in.edgelist              = 0;
  in.edgemarkerlist        = 0;
  in.normlist              = 0;

  /* triangulate */
  mid.pointlist = 0;            /* Not needed if -N switch used. */
  /* Not needed if -N switch used or number of point attributes is zero: */
  mid.pointattributelist = 0;
  mid.pointmarkerlist    = 0; /* Not needed if -N or -B switch used. */
  mid.trianglelist       = 0;    /* Not needed if -E switch used. */
  /* Not needed if -E switch used or number of triangle attributes is zero: */
  mid.triangleattributelist = 0;
  mid.neighborlist          = 0; /* Needed only if -n switch used. */
  /* Needed only if segments are output (-p or -c) and -P not used: */
  mid.segmentlist = 0;
  /* Needed only if segments are output (-p or -c) and -P and -B not used: */
  mid.segmentmarkerlist = 0;
  mid.edgelist          = 0;    /* Needed only if -e switch used. */
  mid.edgemarkerlist    = 0; /* Needed if -e used and -B not used. */
  mid.numberoftriangles = 0;

  /* Triangulate the points.  Switches are chosen to read and write a  */
  /*   PSLG (p), preserve the convex hull (c), number everything from  */
  /*   zero (z), assign a regional attribute to each element (A), and  */
  /*   produce an edge list (e), a Voronoi diagram (v), and a triangle */
  /*   neighbor list (n).                                            */
  if (nselected_2 != 0) { /* inactive processor */
    char args[] = "npczQ"; /* c is needed ? */
    triangulate(args, &in, &mid, (struct triangulateio*) NULL);
    /* output .poly files for 'showme' */
    if (!PETSC_TRUE) {
      static int level = 1;
      FILE       *file; char fname[32];
//......... part of the code omitted here .........
Developer: 00liujj | Project: petsc | Lines: 101 | Source: geo.c


Example 9: getGIDsOnSquareGraph

static PetscErrorCode getGIDsOnSquareGraph(const PetscInt nselected_1,const PetscInt clid_lid_1[],const Mat Gmat1,IS *a_selected_2,Mat *a_Gmat_2,PetscInt **a_crsGID)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size;
  PetscInt       *crsGID, kk,my0,Iend,nloc;
  MPI_Comm       comm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)Gmat1,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(Gmat1,&my0,&Iend);CHKERRQ(ierr); /* AIJ */
  nloc = Iend - my0; /* this does not change */

  if (size == 1) { /* not much to do in serial */
    ierr = PetscMalloc1(nselected_1, &crsGID);CHKERRQ(ierr);
    for (kk=0; kk<nselected_1; kk++) crsGID[kk] = kk;
    *a_Gmat_2 = 0;
    ierr      = ISCreateGeneral(PETSC_COMM_SELF,nselected_1,clid_lid_1,PETSC_COPY_VALUES,a_selected_2);CHKERRQ(ierr);
  } else {
    PetscInt    idx,num_fine_ghosts,num_crs_ghost,myCrs0;
    Mat_MPIAIJ  *mpimat2;
    Mat         Gmat2;
    Vec         locState;
    PetscScalar *cpcol_state;

    /* scan my coarse zero gid, set 'lid_state' with coarse GID */
    kk = nselected_1;
    MPI_Scan(&kk, &myCrs0, 1, MPIU_INT, MPIU_SUM, comm);
    myCrs0 -= nselected_1;

    if (a_Gmat_2) { /* output */
      /* grow graph to get wider set of selected vertices to cover fine grid, invalidates 'llist' */
      ierr      = MatTransposeMatMult(Gmat1, Gmat1, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &Gmat2);CHKERRQ(ierr);
      *a_Gmat_2 = Gmat2; /* output */
    } else Gmat2 = Gmat1;  /* use local to get crsGIDs at least */
    /* get coarse grid GIDS for selected (locals and ghosts) */
    mpimat2 = (Mat_MPIAIJ*)Gmat2->data;
    ierr    = MatGetVecs(Gmat2, &locState, 0);CHKERRQ(ierr);
    ierr    = VecSet(locState, (PetscScalar)(PetscReal)(-1));CHKERRQ(ierr); /* set with UNKNOWN state */
    for (kk=0; kk<nselected_1; kk++) {
      PetscInt    fgid = clid_lid_1[kk] + my0;
      PetscScalar v    = (PetscScalar)(kk+myCrs0);
      ierr = VecSetValues(locState, 1, &fgid, &v, INSERT_VALUES);CHKERRQ(ierr); /* set with PID */
    }
    ierr = VecAssemblyBegin(locState);CHKERRQ(ierr);
    ierr = VecAssemblyEnd(locState);CHKERRQ(ierr);
    ierr = VecScatterBegin(mpimat2->Mvctx,locState,mpimat2->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr =   VecScatterEnd(mpimat2->Mvctx,locState,mpimat2->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecGetLocalSize(mpimat2->lvec, &num_fine_ghosts);CHKERRQ(ierr);
    ierr = VecGetArray(mpimat2->lvec, &cpcol_state);CHKERRQ(ierr);
    for (kk=0,num_crs_ghost=0; kk<num_fine_ghosts; kk++) {
      if ((PetscInt)PetscRealPart(cpcol_state[kk]) != -1) num_crs_ghost++;
    }
    ierr = PetscMalloc1((nselected_1+num_crs_ghost), &crsGID);CHKERRQ(ierr); /* output */
    {
      PetscInt *selected_set;
      ierr = PetscMalloc1((nselected_1+num_crs_ghost), &selected_set);CHKERRQ(ierr);
      /* do ghost of 'crsGID' */
      for (kk=0,idx=nselected_1; kk<num_fine_ghosts; kk++) {
        if ((PetscInt)PetscRealPart(cpcol_state[kk]) != -1) {
          PetscInt cgid = (PetscInt)PetscRealPart(cpcol_state[kk]);
          selected_set[idx] = nloc + kk;
          crsGID[idx++]     = cgid;
        }
      }
      if (idx != (nselected_1+num_crs_ghost)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"idx %D != (nselected_1 %D + num_crs_ghost %D)",idx,nselected_1,num_crs_ghost);
      ierr = VecRestoreArray(mpimat2->lvec, &cpcol_state);CHKERRQ(ierr);
      /* do locals in 'crsGID' */
      ierr = VecGetArray(locState, &cpcol_state);CHKERRQ(ierr);
      for (kk=0,idx=0; kk<nloc; kk++) {
        if ((PetscInt)PetscRealPart(cpcol_state[kk]) != -1) {
          PetscInt cgid = (PetscInt)PetscRealPart(cpcol_state[kk]);
          selected_set[idx] = kk;
          crsGID[idx++]     = cgid;
        }
      }
      if (idx != nselected_1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"idx %D != nselected_1 %D",idx,nselected_1);
      ierr = VecRestoreArray(locState, &cpcol_state);CHKERRQ(ierr);

      if (a_selected_2 != 0) { /* output */
        ierr = ISCreateGeneral(PETSC_COMM_SELF,(nselected_1+num_crs_ghost),selected_set,PETSC_OWN_POINTER,a_selected_2);CHKERRQ(ierr);
      } else {
        ierr = PetscFree(selected_set);CHKERRQ(ierr);
      }
    }
    ierr = VecDestroy(&locState);CHKERRQ(ierr);
  }
  *a_crsGID = crsGID; /* output */
  PetscFunctionReturn(0);
}
Developer: 00liujj | Project: petsc | Lines: 91 | Source: geo.c


Example 10: MatPartitioningApply_Parmetis_Private

static PetscErrorCode MatPartitioningApply_Parmetis_Private(MatPartitioning part, PetscBool useND, IS *partitioning)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;
  PetscInt                 *locals = NULL;
  Mat                      mat     = part->adj,amat,pmat;
  PetscBool                flg;
  PetscInt                 bs = 1;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)mat,MATMPIADJ,&flg);CHKERRQ(ierr);
  if (flg) {
    amat = mat;
    ierr = PetscObjectReference((PetscObject)amat);CHKERRQ(ierr);
  } else {
    /* bs indicates if the converted matrix is "reduced" from the original and hence the
       resulting partition results need to be stretched to match the original matrix */
    ierr = MatConvert(mat,MATMPIADJ,MAT_INITIAL_MATRIX,&amat);CHKERRQ(ierr);
    if (amat->rmap->n > 0) bs = mat->rmap->n/amat->rmap->n;
  }
  ierr = MatMPIAdjCreateNonemptySubcommMat(amat,&pmat);CHKERRQ(ierr);
  ierr = MPI_Barrier(PetscObjectComm((PetscObject)part));CHKERRQ(ierr);

  if (pmat) {
    MPI_Comm   pcomm,comm;
    Mat_MPIAdj *adj     = (Mat_MPIAdj*)pmat->data;
    PetscInt   *vtxdist = pmat->rmap->range;
    PetscInt   *xadj    = adj->i;
    PetscInt   *adjncy  = adj->j;
    PetscInt   *NDorder = NULL;
    PetscInt   itmp     = 0,wgtflag=0, numflag=0, ncon=1, nparts=part->n, options[24], i, j;
    real_t     *tpwgts,*ubvec,itr=0.1;
    int        status;

    ierr = PetscObjectGetComm((PetscObject)pmat,&pcomm);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
    /* check that matrix has no diagonal entries */
    {
      PetscInt rstart;
      ierr = MatGetOwnershipRange(pmat,&rstart,NULL);CHKERRQ(ierr);
      for (i=0; i<pmat->rmap->n; i++) {
        for (j=xadj[i]; j<xadj[i+1]; j++) {
          if (adjncy[j] == i+rstart) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Row %d has diagonal entry; Parmetis forbids diagonal entry",i+rstart);
        }
      }
    }
#endif

    ierr = PetscMalloc1(pmat->rmap->n,&locals);CHKERRQ(ierr);

    if (adj->values && !part->vertex_weights)
      wgtflag = 1;
    if (part->vertex_weights && !adj->values)
      wgtflag = 2;
    if (part->vertex_weights && adj->values)
      wgtflag = 3;

    if (PetscLogPrintInfo) {itmp = pmetis->printout; pmetis->printout = 127;}
    ierr = PetscMalloc1(ncon*nparts,&tpwgts);CHKERRQ(ierr);
    for (i=0; i<ncon; i++) {
      for (j=0; j<nparts; j++) {
        if (part->part_weights) {
          tpwgts[i*nparts+j] = part->part_weights[i*nparts+j];
        } else {
          tpwgts[i*nparts+j] = 1./nparts;
        }
      }
    }
    ierr = PetscMalloc1(ncon,&ubvec);CHKERRQ(ierr);
    for (i=0; i<ncon; i++) {
      ubvec[i] = 1.05;
    }
    /* This sets the defaults */
    options[0] = 0;
    for (i=1; i<24; i++) {
      options[i] = -1;
    }
    /* Duplicate the communicator to be sure that ParMETIS attribute caching does not interfere with PETSc. */
    ierr = MPI_Comm_dup(pcomm,&comm);CHKERRQ(ierr);
    if (useND) {
      PetscInt    *sizes, *seps, log2size, subd, *level;
      PetscMPIInt size;
      idx_t       mtype = PARMETIS_MTYPE_GLOBAL, rtype = PARMETIS_SRTYPE_2PHASE, p_nseps = 1, s_nseps = 1;
      real_t      ubfrac = 1.05;

      ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
      ierr = PetscMalloc1(pmat->rmap->n,&NDorder);CHKERRQ(ierr);
      ierr = PetscMalloc3(2*size,&sizes,4*size,&seps,size,&level);CHKERRQ(ierr);
      PetscStackCallParmetis(ParMETIS_V32_NodeND,((idx_t*)vtxdist,(idx_t*)xadj,(idx_t*)adjncy,(idx_t*)part->vertex_weights,(idx_t*)&numflag,&mtype,&rtype,&p_nseps,&s_nseps,&ubfrac,NULL/* seed */,NULL/* dbglvl */,(idx_t*)NDorder,(idx_t*)(sizes),&comm));
      log2size = PetscLog2Real(size);
      subd = PetscPowInt(2,log2size);
      ierr = MatPartitioningSizesToSep_Private(subd,sizes,seps,level);CHKERRQ(ierr);
      for (i=0;i<pmat->rmap->n;i++) {
        PetscInt loc;

        ierr = PetscFindInt(NDorder[i],2*subd,seps,&loc);CHKERRQ(ierr);
        if (loc < 0) {
          loc = -(loc+1);
          if (loc%2) { /* part of subdomain */
            locals[i] = loc/2;
//......... part of the code omitted here .........
Developer: firedrakeproject | Project: petsc | Lines: 101 | Source: pmetis.c


Example 11: main

int main(int argc,char **args)
{
  Mat            C,Credundant;
  MatInfo        info;
  PetscMPIInt    rank,size,subsize;
  PetscInt       i,j,m = 3,n = 2,low,high,iglobal;
  PetscInt       Ii,J,ldim,nsubcomms;
  PetscErrorCode ierr;
  PetscBool      flg_info,flg_mat;
  PetscScalar    v,one = 1.0;
  Vec            x,y;
  MPI_Comm       subcomm;

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  ierr = PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  n    = 2*size;

  ierr = MatCreate(PETSC_COMM_WORLD,&C);CHKERRQ(ierr);
  ierr = MatSetSizes(C,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);CHKERRQ(ierr);
  ierr = MatSetFromOptions(C);CHKERRQ(ierr);
  ierr = MatSetUp(C);CHKERRQ(ierr);

  /* Create the matrix for the five point stencil, YET AGAIN */
  for (i=0; i<m; i++) {
    for (j=2*rank; j<2*rank+2; j++) {
      v = -1.0;  Ii = j + n*i;
      if (i>0)   {J = Ii - n; ierr = MatSetValues(C,1,&Ii,1,&J,&v,INSERT_VALUES);CHKERRQ(ierr);}
      if (i<m-1) {J = Ii + n; ierr = MatSetValues(C,1,&Ii,1,&J,&v,INSERT_VALUES);CHKERRQ(ierr);}
      if (j>0)   {J = Ii - 1; ierr = MatSetValues(C,1,&Ii,1,&J,&v,INSERT_VALUES);CHKERRQ(ierr);}
      if (j<n-1) {J = Ii + 1; ierr = MatSetValues(C,1,&Ii,1,&J,&v,INSERT_VALUES);CHKERRQ(ierr);}
      v = 4.0; ierr = MatSetValues(C,1,&Ii,1,&Ii,&v,INSERT_VALUES);CHKERRQ(ierr);
    }
  }

  /* Add extra elements (to illustrate variants of MatGetInfo) */
  Ii   = n; J = n-2; v = 100.0;
  ierr = MatSetValues(C,1,&Ii,1,&J,&v,INSERT_VALUES);CHKERRQ(ierr);
  Ii   = n-2; J = n; v = 100.0;
  ierr = MatSetValues(C,1,&Ii,1,&J,&v,INSERT_VALUES);CHKERRQ(ierr);

  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* Form vectors */
  ierr = MatCreateVecs(C,&x,&y);CHKERRQ(ierr);
  ierr = VecGetLocalSize(x,&ldim);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(x,&low,&high);CHKERRQ(ierr);
  for (i=0; i<ldim; i++) {
    iglobal = i + low;
    v       = one*((PetscReal)i) + 100.0*rank;
    ierr    = VecSetValues(x,1,&iglobal,&v,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = VecAssemblyBegin(x);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(x);CHKERRQ(ierr);

  ierr = MatMult(C,x,y);CHKERRQ(ierr);

  ierr = PetscOptionsHasName(NULL,NULL,"-view_info",&flg_info);CHKERRQ(ierr);
  if (flg_info)  {
    ierr = PetscViewerPushFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_INFO);CHKERRQ(ierr);
    ierr = MatView(C,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  
    ierr = MatGetInfo(C,MAT_GLOBAL_SUM,&info);CHKERRQ(ierr);
    ierr = PetscViewerASCIIPrintf(PETSC_VIEWER_STDOUT_WORLD,"matrix information (global sums):\nnonzeros = %D, allocated nonzeros = %D\n",(PetscInt)info.nz_used,(PetscInt)info.nz_allocated);CHKERRQ(ierr);
    ierr = MatGetInfo (C,MAT_GLOBAL_MAX,&info);CHKERRQ(ierr);
    ierr = PetscViewerASCIIPrintf(PETSC_VIEWER_STDOUT_WORLD,"matrix information (global max):\nnonzeros = %D, allocated nonzeros = %D\n",(PetscInt)info.nz_used,(PetscInt)info.nz_allocated);CHKERRQ(ierr);
  }
  
  ierr = PetscOptionsHasName(NULL,NULL,"-view_mat",&flg_mat);CHKERRQ(ierr);
  if (flg_mat) {
    ierr = MatView(C,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  }

  /* Test MatCreateRedundantMatrix() */
  nsubcomms = size;
  ierr = PetscOptionsGetInt(NULL,NULL,"-nsubcomms",&nsubcomms,NULL);CHKERRQ(ierr);
  ierr = MatCreateRedundantMatrix(C,nsubcomms,MPI_COMM_NULL,MAT_INITIAL_MATRIX,&Credundant);CHKERRQ(ierr);
  ierr = MatCreateRedundantMatrix(C,nsubcomms,MPI_COMM_NULL,MAT_REUSE_MATRIX,&Credundant);CHKERRQ(ierr);

  ierr = PetscObjectGetComm((PetscObject)Credundant,&subcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(subcomm,&subsize);CHKERRQ(ierr);
    
  if (subsize==2 && flg_mat) {
    ierr = PetscViewerASCIIPrintf(PETSC_VIEWER_STDOUT_(subcomm),"\n[%d] Credundant:\n",rank);CHKERRQ(ierr);
    ierr = MatView(Credundant,PETSC_VIEWER_STDOUT_(subcomm));CHKERRQ(ierr);
  }
  ierr = MatDestroy(&Credundant);CHKERRQ(ierr);
   
  /* Test MatCreateRedundantMatrix() with user-provided subcomm */
  {
    PetscSubcomm psubcomm;

    ierr = PetscSubcommCreate(PETSC_COMM_WORLD,&psubcomm);CHKERRQ(ierr);
    ierr = PetscSubcommSetNumber(psubcomm,nsubcomms);CHKERRQ(ierr);
    ierr = PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_CONTIGUOUS);CHKERRQ(ierr);
    /* enable runtime switch of psubcomm type, e.g., '-psubcomm_type interlaced */
    ierr = PetscSubcommSetFromOptions(psubcomm);CHKERRQ(ierr);

//......... part of the code omitted here .........
Developer: firedrakeproject | Project: petsc | Lines: 101 | Source: ex9.c


Example 12: DMPlexDistribute

/*@C
  DMPlexDistribute - Distributes the mesh and any associated sections.

  Not Collective

  Input Parameters:
+ dm  - The original DMPlex object
. partitioner - The partitioning package, or NULL for the default
- overlap - The overlap of partitions, 0 is the default

  Output Parameters:
+ sf - The PetscSF used for point distribution
- parallelMesh - The distributed DMPlex object, or NULL

  Note: If the mesh was not distributed, the return value is NULL.

  The user can control the definition of adjacency for the mesh using DMPlexGetAdjacencyUseCone() and
  DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
  representation on the mesh.

  Level: intermediate

.keywords: mesh, elements
.seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
@*/
PetscErrorCode DMPlexDistribute(DM dm, const char partitioner[], PetscInt overlap, PetscSF *sf, DM *dmParallel)
{
  DM_Plex               *mesh   = (DM_Plex*) dm->data, *pmesh;
  MPI_Comm              comm;
//......... part of the code omitted here .........
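The listing is truncated here, but the documentation block above fixes the calling convention for this PETSc version. A hypothetical usage sketch follows; passing NULL for the partitioner (to get the default, as the docs above allow) and NULL for the point SF (because it is not needed here) are assumptions made for illustration:

#include <petscdmplex.h>

/* Hypothetical usage of DMPlexDistribute per the documentation above:
   default partitioner, no overlap, point SF not requested. */
static PetscErrorCode DistributeMesh(DM *dm)
{
  PetscErrorCode ierr;
  DM             dmDist = NULL;

  PetscFunctionBegin;
  ierr = DMPlexDistribute(*dm, NULL, 0, NULL, &dmDist);CHKERRQ(ierr);
  if (dmDist) {   /* NULL output means the mesh was not distributed */
    ierr = DMDestroy(dm);CHKERRQ(ierr);
    *dm  = dmDist;
  }
  PetscFunctionReturn(0);
}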
