/*@
ISAllGather - Given an index set (IS) on each processor, generates a large
index set (same on each processor) by concatenating together each
processor's index set.
Collective on IS
Input Parameter:
. is - the distributed index set
Output Parameter:
. isout - the concatenated index set (same on all processors)
Notes:
ISAllGather() is clearly not scalable for large index sets.
The IS created on each processor must be created with a common
communicator (e.g., PETSC_COMM_WORLD). If the index sets were created
with PETSC_COMM_SELF, this routine will not work as expected, since
each process will simply get back a copy of its own index set.
The communicator of the output IS is PETSC_COMM_SELF.
Level: intermediate
Concepts: gather^index sets
Concepts: index sets^gathering to all processors
Concepts: IS^gathering to all processors
.seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock()
@*/
PetscErrorCode ISAllGather(IS is,IS *isout)
{
PetscErrorCode ierr;
PetscInt *indices,n,i,N,step,first;
const PetscInt *lindices;
MPI_Comm comm;
PetscMPIInt size,*sizes = NULL,*offsets = NULL,nn;
PetscBool stride;
PetscFunctionBegin;
PetscValidHeaderSpecific(is,IS_CLASSID,1);
PetscValidPointer(isout,2);
ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr);
ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr);
ierr = PetscObjectTypeCompare((PetscObject)is,ISSTRIDE,&stride);CHKERRQ(ierr);
if (size == 1 && stride) { /* should handle parallel ISStride also */
ierr = ISStrideGetInfo(is,&first,&step);CHKERRQ(ierr);
ierr = ISCreateStride(PETSC_COMM_SELF,n,first,step,isout);CHKERRQ(ierr);
} else {
ierr = PetscMalloc2(size,&sizes,size,&offsets);CHKERRQ(ierr);
ierr = PetscMPIIntCast(n,&nn);CHKERRQ(ierr);
ierr = MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);CHKERRQ(ierr);
offsets[0] = 0;
for (i=1; i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
N = offsets[size-1] + sizes[size-1];
ierr = PetscMalloc1(N,&indices);CHKERRQ(ierr);
ierr = ISGetIndices(is,&lindices);CHKERRQ(ierr);
ierr = MPI_Allgatherv((void*)lindices,nn,MPIU_INT,indices,sizes,offsets,MPIU_INT,comm);CHKERRQ(ierr);
ierr = ISRestoreIndices(is,&lindices);CHKERRQ(ierr);
ierr = PetscFree2(sizes,offsets);CHKERRQ(ierr);
ierr = ISCreateGeneral(PETSC_COMM_SELF,N,indices,PETSC_OWN_POINTER,isout);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
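/* Hedged usage sketch (not part of the original source): every rank contributes
   two illustrative indices through an IS on PETSC_COMM_WORLD and every rank
   receives the full concatenation in a sequential IS. */
static PetscErrorCode ExampleISAllGather(void)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  PetscInt       idx[2];
  IS             islocal,isall;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  idx[0] = 2*rank; idx[1] = 2*rank+1;
  ierr = ISCreateGeneral(PETSC_COMM_WORLD,2,idx,PETSC_COPY_VALUES,&islocal);CHKERRQ(ierr);
  ierr = ISAllGather(islocal,&isall);CHKERRQ(ierr); /* isall lives on PETSC_COMM_SELF */
  ierr = ISDestroy(&islocal);CHKERRQ(ierr);
  ierr = ISDestroy(&isall);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}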
/* DMLabelGetStratumBounds - Returns the smallest point and one past the largest point
   in the stratum associated with the given label value; both outputs are set to 0
   when the value has no nonempty stratum. */
PetscErrorCode DMLabelGetStratumBounds(DMLabel label, PetscInt value, PetscInt *start, PetscInt *end)
{
PetscInt v;
PetscErrorCode ierr;
PetscFunctionBegin;
if (start) {PetscValidPointer(start, 3); *start = 0;}
if (end) {PetscValidPointer(end, 4); *end = 0;}
for (v = 0; v < label->numStrata; ++v) {
const PetscInt *points;
if (label->stratumValues[v] != value) continue;
ierr = DMLabelMakeValid_Private(label, v);CHKERRQ(ierr);
if (label->stratumSizes[v] <= 0) break;
ierr = ISGetIndices(label->points[v],&points);CHKERRQ(ierr);
if (start) *start = points[0];
if (end) *end = points[label->stratumSizes[v]-1]+1;
ierr = ISRestoreIndices(label->points[v],&points);CHKERRQ(ierr);
break;
}
PetscFunctionReturn(0);
}
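/* Hedged usage sketch (not part of the original source): query the point range
   of the vertex stratum (depth 0) of a DMPlex through its depth label. Assumes
   dm is an existing DMPlex carrying a depth label. */
static PetscErrorCode ExampleGetVertexBounds(DM dm, PetscInt *vStart, PetscInt *vEnd)
{
  DMLabel        depth;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexGetDepthLabel(dm,&depth);CHKERRQ(ierr);
  ierr = DMLabelGetStratumBounds(depth,0,vStart,vEnd);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}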
/*@
ISSetPermutation - Informs the index set that it is a permutation.
Logically Collective on IS
Input Parameter:
. is - the index set
Level: intermediate
Concepts: permutation
Concepts: index sets^permutation
Notes:
The debug version of the libraries (./configure --with-debugging=1) checks if the
index set is actually a permutation. The optimized version just believes you.
.seealso: ISPermutation()
@*/
PetscErrorCode ISSetPermutation(IS is)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(is,IS_CLASSID,1);
#if defined(PETSC_USE_DEBUG)
{
PetscMPIInt size;
PetscErrorCode ierr;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)is),&size);CHKERRQ(ierr);
if (size == 1) {
PetscInt i,n,*idx;
const PetscInt *iidx;
ierr = ISGetSize(is,&n);CHKERRQ(ierr);
ierr = PetscMalloc1(n,&idx);CHKERRQ(ierr);
ierr = ISGetIndices(is,&iidx);CHKERRQ(ierr);
ierr = PetscMemcpy(idx,iidx,n*sizeof(PetscInt));CHKERRQ(ierr);
ierr = PetscSortInt(n,idx);CHKERRQ(ierr);
for (i=0; i<n; i++) {
if (idx[i] != i) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Index set is not a permutation");
}
ierr = PetscFree(idx);CHKERRQ(ierr);
ierr = ISRestoreIndices(is,&iidx);CHKERRQ(ierr);
}
}
#endif
is->isperm = PETSC_TRUE;
PetscFunctionReturn(0);
}
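/* Hedged usage sketch (not part of the original source): build a small explicit
   ordering, mark it as a permutation, and use it where a permutation IS is
   required (e.g. MatPermute()). The ordering below is illustrative only. */
static PetscErrorCode ExampleSetPermutation(void)
{
  const PetscInt order[3] = {2,0,1};
  IS             perm;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = ISCreateGeneral(PETSC_COMM_SELF,3,order,PETSC_COPY_VALUES,&perm);CHKERRQ(ierr);
  ierr = ISSetPermutation(perm);CHKERRQ(ierr);
  /* ... pass perm to routines such as MatPermute(A,perm,perm,&B) ... */
  ierr = ISDestroy(&perm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}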
/*@
VecISSet - Sets the elements of a vector, specified by an index set, to a constant
Input Parameters:
+ V - the vector
. S - the locations in the vector
- c - the constant
Level: advanced
.seealso: VecSet()
@*/
PetscErrorCode VecISSet(Vec V,IS S, PetscScalar c)
{
PetscErrorCode ierr;
PetscInt nloc,low,high,i;
const PetscInt *s;
PetscScalar *v;
PetscFunctionBegin;
PetscValidHeaderSpecific(V,VEC_CLASSID,1);
PetscValidHeaderSpecific(S,IS_CLASSID,2);
PetscValidType(V,1);
PetscCheckSameComm(V,1,S,2);
ierr = VecGetOwnershipRange(V, &low, &high);CHKERRQ(ierr);
ierr = ISGetLocalSize(S,&nloc);CHKERRQ(ierr);
ierr = ISGetIndices(S, &s);CHKERRQ(ierr);
ierr = VecGetArray(V,&v);CHKERRQ(ierr);
for (i=0; i<nloc; i++){
v[s[i]-low] = c;
}
ierr = ISRestoreIndices(S, &s);CHKERRQ(ierr);
ierr = VecRestoreArray(V,&v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
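/* Hedged usage sketch (not part of the original source): set the first few
   locally owned entries of a parallel vector to zero through an index set of
   global indices. The choice of entries is illustrative only. */
static PetscErrorCode ExampleVecISSet(Vec x)
{
  IS             rows;
  PetscInt       low,high,nloc;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecGetOwnershipRange(x,&low,&high);CHKERRQ(ierr);
  nloc = PetscMin(3,high-low);
  ierr = ISCreateStride(PETSC_COMM_SELF,nloc,low,1,&rows);CHKERRQ(ierr);
  ierr = VecISSet(x,rows,0.0);CHKERRQ(ierr);
  ierr = ISDestroy(&rows);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}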
/*@
ISConcatenate - Forms a new IS by locally concatenating the indices from an IS list without reordering.
Collective on comm.
Input Parameters:
+ comm - communicator of the concatenated IS.
. len - size of islist array (nonnegative)
- islist - array of index sets
Output Parameters:
. isout - the concatenated index set; empty if len is zero
Notes: The semantics of calling this on comm imply that the communicators of the members of islist also contain this rank.
Level: intermediate
.seealso: ISDifference(), ISSum(), ISExpand()
Concepts: index sets^concatenation
Concepts: IS^concatenation
@*/
PetscErrorCode ISConcatenate(MPI_Comm comm, PetscInt len, const IS islist[], IS *isout)
{
PetscErrorCode ierr;
PetscInt i,n,N;
const PetscInt *iidx;
PetscInt *idx;
PetscFunctionBegin;
PetscValidPointer(islist,3);
#if defined(PETSC_USE_DEBUG)
for (i = 0; i < len; ++i) {
PetscValidHeaderSpecific(islist[i], IS_CLASSID, 3);
}
#endif
PetscValidPointer(isout, 4);
if (!len) {
ierr = ISCreateStride(comm, 0,0,0, isout); CHKERRQ(ierr);
PetscFunctionReturn(0);
}
if(len < 0) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Negative array length: %D", len);
N = 0;
for(i = 0; i < len; ++i) {
ierr = ISGetLocalSize(islist[i], &n); CHKERRQ(ierr);
N += n;
}
ierr = PetscMalloc(sizeof(PetscInt)*N, &idx); CHKERRQ(ierr);
N = 0;
for(i = 0; i < len; ++i) {
ierr = ISGetLocalSize(islist[i], &n); CHKERRQ(ierr);
ierr = ISGetIndices(islist[i], &iidx); CHKERRQ(ierr);
ierr = PetscMemcpy(idx+N,iidx, sizeof(PetscInt)*n); CHKERRQ(ierr);
N += n;
}
ierr = ISCreateGeneral(comm, N, idx, PETSC_OWN_POINTER, isout); CHKERRQ(ierr);
PetscFunctionReturn(0);
}
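/* Hedged usage sketch (not part of the original source): concatenate two stride
   index sets, rank-locally and without reordering, into one IS on the same
   communicator. The strides are illustrative only. */
static PetscErrorCode ExampleISConcatenate(void)
{
  IS             list[2],cat;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = ISCreateStride(PETSC_COMM_WORLD,3,0,1,&list[0]);CHKERRQ(ierr);   /* 0,1,2 on each rank    */
  ierr = ISCreateStride(PETSC_COMM_WORLD,3,10,1,&list[1]);CHKERRQ(ierr);  /* 10,11,12 on each rank */
  ierr = ISConcatenate(PETSC_COMM_WORLD,2,list,&cat);CHKERRQ(ierr);       /* 0,1,2,10,11,12 locally */
  ierr = ISDestroy(&list[0]);CHKERRQ(ierr);
  ierr = ISDestroy(&list[1]);CHKERRQ(ierr);
  ierr = ISDestroy(&cat);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}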
static PetscErrorCode MatPartitioningApply_Hierarchical(MatPartitioning part,IS *partitioning)
{
MatPartitioning_Hierarchical *hpart = (MatPartitioning_Hierarchical*)part->data;
const PetscInt *fineparts_indices, *coarseparts_indices;
PetscInt *parts_indices,i,j,mat_localsize;
Mat mat = part->adj,adj,sadj;
PetscBool flg;
PetscInt bs = 1;
MatPartitioning finePart, coarsePart;
PetscInt *coarse_vertex_weights = 0;
PetscMPIInt size,rank;
MPI_Comm comm,scomm;
IS destination,fineparts_temp;
ISLocalToGlobalMapping mapping;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectGetComm((PetscObject)part,&comm);CHKERRQ(ierr);
ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
ierr = PetscObjectTypeCompare((PetscObject)mat,MATMPIADJ,&flg);CHKERRQ(ierr);
if (flg) {
adj = mat;
ierr = PetscObjectReference((PetscObject)adj);CHKERRQ(ierr);
}else {
/* bs indicates if the converted matrix is "reduced" from the original and hence the
resulting partition results need to be stretched to match the original matrix */
ierr = MatConvert(mat,MATMPIADJ,MAT_INITIAL_MATRIX,&adj);CHKERRQ(ierr);
if (adj->rmap->n > 0) bs = mat->rmap->n/adj->rmap->n;
}
/*local size of mat*/
mat_localsize = adj->rmap->n;
/* check parameters */
/* how many small subdomains we want from a given 'big' subdomain */
if (!hpart->Nfineparts) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"must set the number of small subdomains for each big subdomain\n");
if (!hpart->Ncoarseparts && !part->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"must set either the number of coarse parts or the total number of parts\n");
if (part->n && part->n%hpart->Nfineparts != 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,
"total number of parts %D cannot be divided by the number of fine parts %D\n",part->n,hpart->Nfineparts);
if (part->n) {
hpart->Ncoarseparts = part->n/hpart->Nfineparts;
} else {
part->n = hpart->Ncoarseparts*hpart->Nfineparts;
}
/* we do not support this case currently, but this restriction should be
* removed in the future
* */
if (hpart->Ncoarseparts > size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_SUP,"we do not support the number of coarse parts %D > the number of processes %D\n",hpart->Ncoarseparts,size);
/*create a coarse partitioner */
ierr = MatPartitioningCreate(comm,&coarsePart);CHKERRQ(ierr);
/* if the coarse partitioning type was not set, use ParMETIS by default */
if (!hpart->coarseparttype) {
ierr = MatPartitioningSetType(coarsePart,MATPARTITIONINGPARMETIS);CHKERRQ(ierr);
} else {
ierr = MatPartitioningSetType(coarsePart,hpart->coarseparttype);CHKERRQ(ierr);
}
ierr = MatPartitioningSetAdjacency(coarsePart,adj);CHKERRQ(ierr);
ierr = MatPartitioningSetNParts(coarsePart, hpart->Ncoarseparts);CHKERRQ(ierr);
/*copy over vertex weights */
if(part->vertex_weights){
ierr = PetscMalloc(sizeof(PetscInt)*mat_localsize,&coarse_vertex_weights);CHKERRQ(ierr);
ierr = PetscMemcpy(coarse_vertex_weights,part->vertex_weights,sizeof(PetscInt)*mat_localsize);CHKERRQ(ierr);
ierr = MatPartitioningSetVertexWeights(coarsePart,coarse_vertex_weights);CHKERRQ(ierr);
}
/*It looks nontrivial to support part weights */
/*if(part->part_weights){
ierr = PetscMalloc(sizeof(part->part_weights)*1,&coarse_partition_weights);CHKERRQ(ierr);
ierr = PetscMemcpy(coarse_partition_weights,part->part_weights,sizeof(part->part_weights)*1);CHKERRQ(ierr);
ierr = MatPartitioningSetPartitionWeights(coarsePart,coarse_partition_weights);CHKERRQ(ierr);
}*/
ierr = MatPartitioningApply(coarsePart,&hpart->coarseparts);CHKERRQ(ierr);
ierr = MatPartitioningDestroy(&coarsePart);CHKERRQ(ierr);
/* In the current implementation, destination should be the same as hpart->coarseparts,
* and this interface is preserved to deal with the case hpart->Ncoarseparts > size in the
* future.
* */
ierr = MatPartitioningHierarchical_DetermineDestination(part,hpart->coarseparts,0,hpart->Ncoarseparts,&destination);CHKERRQ(ierr);
/* create a sub-matrix*/
ierr = MatPartitioningHierarchical_AssembleSubdomain(adj,destination,&sadj,&mapping);CHKERRQ(ierr);
ierr = ISDestroy(&destination);CHKERRQ(ierr);
ierr = PetscObjectGetComm((PetscObject)sadj,&scomm);CHKERRQ(ierr);
/*create a fine partitioner */
ierr = MatPartitioningCreate(scomm,&finePart);CHKERRQ(ierr);
/* if the fine partitioning type was not set, use ParMETIS by default */
if (!hpart->fineparttype) {
ierr = MatPartitioningSetType(finePart,MATPARTITIONINGPARMETIS);CHKERRQ(ierr);
} else {
ierr = MatPartitioningSetType(finePart,hpart->fineparttype);CHKERRQ(ierr);
}
ierr = MatPartitioningSetAdjacency(finePart,sadj);CHKERRQ(ierr);
ierr = MatPartitioningSetNParts(finePart, hpart->Nfineparts);CHKERRQ(ierr);
ierr = MatPartitioningApply(finePart,&fineparts_temp);CHKERRQ(ierr);
ierr = MatDestroy(&sadj);CHKERRQ(ierr);
ierr = MatPartitioningDestroy(&finePart);CHKERRQ(ierr);
ierr = MatPartitioningHierarchical_ReassembleFineparts(adj,fineparts_temp,mapping,&hpart->fineparts);CHKERRQ(ierr);
ierr = ISDestroy(&fineparts_temp);CHKERRQ(ierr);
ierr = ISLocalToGlobalMappingDestroy(&mapping);CHKERRQ(ierr);
ierr = ISGetIndices(hpart->fineparts,&fineparts_indices);CHKERRQ(ierr);
ierr = ISGetIndices(hpart->coarseparts,&coarseparts_indices);CHKERRQ(ierr);
ierr = PetscMalloc1(bs*adj->rmap->n,&parts_indices);CHKERRQ(ierr);
/* ... the remainder of this routine is omitted in the original listing ... */
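/* Hedged usage sketch (not part of the original source): drive the hierarchical
   partitioner above through the public MatPartitioning interface. Assumes A is
   an existing parallel Mat and that this PETSc version provides the
   MATPARTITIONINGHIERARCH type and the MatPartitioningHierarchicalSetNfineparts()
   setter; if not, the fine/coarse part counts would have to be set through the
   corresponding runtime options instead. */
static PetscErrorCode ExampleHierarchicalPartitioning(Mat A, IS *partis)
{
  MatPartitioning part;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = MatPartitioningCreate(PetscObjectComm((PetscObject)A),&part);CHKERRQ(ierr);
  ierr = MatPartitioningSetAdjacency(part,A);CHKERRQ(ierr);
  ierr = MatPartitioningSetType(part,MATPARTITIONINGHIERARCH);CHKERRQ(ierr);
  ierr = MatPartitioningSetNParts(part,8);CHKERRQ(ierr);                  /* total number of parts */
  ierr = MatPartitioningHierarchicalSetNfineparts(part,2);CHKERRQ(ierr);  /* fine parts per coarse part (assumed API) */
  ierr = MatPartitioningApply(part,partis);CHKERRQ(ierr);
  ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}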