Actual source code: iscoloring.c

  1: #define PETSCVEC_DLL

 3:  #include "petscsys.h"
 4:  #include "petscis.h"

  6: const char *ISColoringTypes[] = {"global","ghosted","ISColoringType","IS_COLORING_",0};

 10: /*@
 11:    ISColoringDestroy - Destroys a coloring context.

 13:    Collective on ISColoring

 15:    Input Parameter:
 16: .  iscoloring - the coloring context

 18:    Level: advanced

 20: .seealso: ISColoringView(), MatGetColoring()
 21: @*/
 22: PetscErrorCode  ISColoringDestroy(ISColoring iscoloring)
 23: {
 24:   PetscInt i;

 29:   if (--iscoloring->refct > 0) return(0);

 31:   if (iscoloring->is) {
 32:     for (i=0; i<iscoloring->n; i++) {
 33:       ISDestroy(iscoloring->is[i]);
 34:     }
 35:     PetscFree(iscoloring->is);
 36:   }
 37:   PetscFree(iscoloring->colors);
 38:   PetscCommDestroy(&iscoloring->comm);
 39:   PetscFree(iscoloring);
 40:   return(0);
 41: }

 45: /*@C
 46:    ISColoringView - Views a coloring context.

 48:    Collective on ISColoring

 50:    Input Parameters:
 51: +  iscoloring - the coloring context
 52: -  viewer - the viewer

 54:    Level: advanced

 56: .seealso: ISColoringDestroy(), ISColoringGetIS(), MatGetColoring()
 57: @*/
 58: PetscErrorCode  ISColoringView(ISColoring iscoloring,PetscViewer viewer)
 59: {
 60:   PetscInt       i;
 62:   PetscTruth     iascii;
 63:   IS             *is;

 67:   if (!viewer) {
 68:     PetscViewerASCIIGetStdout(iscoloring->comm,&viewer);
 69:   }

 72:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
 73:   if (iascii) {
 74:     MPI_Comm    comm;
 75:     PetscMPIInt rank;
 76:     PetscObjectGetComm((PetscObject)viewer,&comm);
 77:     MPI_Comm_rank(comm,&rank);
 78:     PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Number of colors %d\n",rank,iscoloring->n);
 79:     PetscViewerFlush(viewer);
 80:   } else {
 81:     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for ISColoring",((PetscObject)viewer)->type_name);
 82:   }

 84:   ISColoringGetIS(iscoloring,PETSC_IGNORE,&is);
 85:   for (i=0; i<iscoloring->n; i++) {
 86:     ISView(iscoloring->is[i],viewer);
 87:   }
 88:   ISColoringRestoreIS(iscoloring,&is);
 89:   return(0);
 90: }

 94: /*@C
 95:    ISColoringGetIS - Extracts index sets from the coloring context

 97:    Collective on ISColoring 

 99:    Input Parameter:
100: .  iscoloring - the coloring context

102:    Output Parameters:
103: +  nn - number of index sets in the coloring context
104: -  is - array of index sets

106:    Level: advanced

108: .seealso: ISColoringRestoreIS(), ISColoringView()
109: @*/
110: PetscErrorCode  ISColoringGetIS(ISColoring iscoloring,PetscInt *nn,IS *isis[])
111: {


117:   if (nn)  *nn  = iscoloring->n;
118:   if (isis) {
119:     if (!iscoloring->is) {
120:       PetscInt        *mcolors,**ii,nc = iscoloring->n,i,base, n = iscoloring->N;
121:       ISColoringValue *colors = iscoloring->colors;
122:       IS              *is;

124: #if defined(PETSC_USE_DEBUG)
125:       for (i=0; i<n; i++) {
126:         if (((PetscInt)colors[i]) >= nc) {
127:           SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Coloring value is out of range: index %d value %d number of colors %d",(int)i,(int)colors[i],(int)nc);
128:         }
129:       }
130: #endif
131: 
132:       /* generate the lists of nodes for each color */
133:       PetscMalloc(nc*sizeof(PetscInt),&mcolors);
134:       PetscMemzero(mcolors,nc*sizeof(PetscInt));
135:       for (i=0; i<n; i++) {
136:         mcolors[colors[i]]++;
137:       }

139:       PetscMalloc(nc*sizeof(PetscInt*),&ii);
140:       PetscMalloc(n*sizeof(PetscInt),&ii[0]);
141:       for (i=1; i<nc; i++) {
142:         ii[i] = ii[i-1] + mcolors[i-1];
143:       }
144:       PetscMemzero(mcolors,nc*sizeof(PetscInt));

146:       if (iscoloring->ctype == IS_COLORING_GLOBAL){
147:         MPI_Scan(&iscoloring->N,&base,1,MPIU_INT,MPI_SUM,iscoloring->comm);
148:         base -= iscoloring->N;
149:         for (i=0; i<n; i++) {
150:           ii[colors[i]][mcolors[colors[i]]++] = i + base; /* global idx */
151:         }
152:       } else if (iscoloring->ctype == IS_COLORING_GHOSTED){
153:         for (i=0; i<n; i++) {
154:           ii[colors[i]][mcolors[colors[i]]++] = i;   /* local idx */
155:         }
156:       } else {
157:         SETERRQ(PETSC_ERR_SUP,"Not provided for this ISColoringType");
158:       }
159: 
160:       PetscMalloc(nc*sizeof(IS),&is);
161:       for (i=0; i<nc; i++) {
162:         ISCreateGeneral(iscoloring->comm,mcolors[i],ii[i],is+i);
163:       }

165:       iscoloring->is   = is;
166:       PetscFree(ii[0]);
167:       PetscFree(ii);
168:       PetscFree(mcolors);
169:     }
170:     *isis = iscoloring->is;
171:   }
172:   return(0);
173: }

177: /*@C
178:    ISColoringRestoreIS - Restores the index sets extracted from the coloring context

180:    Collective on ISColoring 

182:    Input Parameters:
183: +  iscoloring - the coloring context
184: -  is - array of index sets

186:    Level: advanced

188: .seealso: ISColoringGetIS(), ISColoringView()
189: @*/
190: PetscErrorCode  ISColoringRestoreIS(ISColoring iscoloring,IS *is[])
191: {
194: 
195:   /* currently nothing is done here */

197:   return(0);
198: }
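
/*
   Usage sketch (not part of the original file; the helper name is made up for
   illustration): extract the per-color index sets from an existing coloring
   and view them.  Error checking is omitted, matching the condensed style of
   this listing; "coloring" is assumed to come from ISColoringCreate() or
   MatGetColoring().
*/
PetscErrorCode ISColoringGetISExample(ISColoring coloring,PetscViewer viewer)
{
  PetscInt nc,i;
  IS       *isa;

  ISColoringGetIS(coloring,&nc,&isa);   /* builds the IS array on first use */
  for (i=0; i<nc; i++) {
    ISView(isa[i],viewer);              /* isa[i] holds the nodes of color i */
  }
  ISColoringRestoreIS(coloring,&isa);   /* the ISs stay owned by the coloring */
  return(0);
}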


203: /*@C
204:     ISColoringCreate - Generates an ISColoring context from lists (provided 
205:     by each processor) of colors for each node.

207:     Collective on MPI_Comm

209:     Input Parameters:
210: +   comm - communicator for the processors creating the coloring
211: .   ncolors - max color value
212: .   n - number of nodes on this processor
213: -   colors - array containing the colors for this processor, color
214:              numbers begin at 0. In C/C++ this array must have been obtained with PetscMalloc()
215:              and should NOT be freed by the caller (ISColoringDestroy() will free it).

217:     Output Parameter:
218: .   iscoloring - the resulting coloring data structure

220:     Options Database Key:
221: .   -is_coloring_view - Activates ISColoringView()

223:    Level: advanced
224:    
225:     Notes: By default the coloring type is set to IS_COLORING_GLOBAL

227: .seealso: MatColoringCreate(), ISColoringView(), ISColoringDestroy(), ISColoringSetType()

229: @*/
230: PetscErrorCode  ISColoringCreate(MPI_Comm comm,PetscInt ncolors,PetscInt n,const ISColoringValue colors[],ISColoring *iscoloring)
231: {
233:   PetscMPIInt    size,rank,tag;
234:   PetscInt       base,top,i;
235:   PetscInt       nc,ncwork;
236:   PetscTruth     flg;
237:   MPI_Status     status;

240:   if (ncolors != PETSC_DECIDE && ncolors > IS_COLORING_MAX) {
241:     if (ncolors > 65535) {
242:       SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Max color value exceeds 65535 limit. This number is unrealistic. Perhaps a bug in code?\nCurrent max: %d user requested: %d",IS_COLORING_MAX,ncolors);
243:     } else {
244:       SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Max color value exceeds limit. Perhaps reconfigure PETSc with --with-is-color-value-type=short?\n Current max: %d user requested: %d",IS_COLORING_MAX,ncolors);
245:     }
246:   }
247:   PetscNew(struct _n_ISColoring,iscoloring);
248:   PetscCommDuplicate(comm,&(*iscoloring)->comm,&tag);
249:   comm = (*iscoloring)->comm;

251:   /* compute the number of the first node on my processor */
252:   MPI_Comm_size(comm,&size);

254:   /* should use MPI_Scan() */
255:   MPI_Comm_rank(comm,&rank);
256:   if (!rank) {
257:     base = 0;
258:     top  = n;
259:   } else {
260:     MPI_Recv(&base,1,MPIU_INT,rank-1,tag,comm,&status);
261:     top = base+n;
262:   }
263:   if (rank < size-1) {
264:     MPI_Send(&top,1,MPIU_INT,rank+1,tag,comm);
265:   }

267:   /* compute the total number of colors */
268:   ncwork = 0;
269:   for (i=0; i<n; i++) {
270:     if (ncwork < colors[i]) ncwork = colors[i];
271:   }
272:   ncwork++;
273:   MPI_Allreduce(&ncwork,&nc,1,MPIU_INT,MPI_MAX,comm);
274:   if (nc > ncolors) SETERRQ2(PETSC_ERR_ARG_INCOMP,"Number of colors passed in %D is less than the actual number of colors in array %D",ncolors,nc);
275:   (*iscoloring)->n      = nc;
276:   (*iscoloring)->is     = 0;
277:   (*iscoloring)->colors = (ISColoringValue *)colors;
278:   (*iscoloring)->N      = n;
279:   (*iscoloring)->refct  = 1;
280:   (*iscoloring)->ctype  = IS_COLORING_GLOBAL;

282:   PetscOptionsHasName(PETSC_NULL,"-is_coloring_view",&flg);
283:   if (flg) {
284:     PetscViewer viewer;
285:     PetscViewerASCIIGetStdout((*iscoloring)->comm,&viewer);
286:     ISColoringView(*iscoloring,viewer);
287:   }
288:   PetscInfo1(0,"Number of colors %d\n",nc);
289:   return(0);
290: }
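
/*
   Usage sketch (not part of the original file; the helper name and the
   round-robin color assignment are made up for illustration): build a coloring
   from a locally owned array of colors, view it, and destroy it.  The colors[]
   array must come from PetscMalloc() and is taken over by the ISColoring, as
   noted in the manual page above.  Error checking is omitted as elsewhere in
   this listing.
*/
PetscErrorCode ISColoringCreateExample(MPI_Comm comm,PetscInt n,PetscInt ncolors)
{
  ISColoringValue *colors;
  ISColoring      coloring;
  PetscInt        i;

  PetscMalloc(n*sizeof(ISColoringValue),&colors);
  for (i=0; i<n; i++) colors[i] = (ISColoringValue)(i % ncolors); /* hypothetical assignment */
  ISColoringCreate(comm,ncolors,n,colors,&coloring);  /* coloring now owns colors[] */
  ISColoringView(coloring,PETSC_NULL);                /* NULL viewer means stdout of comm */
  ISColoringDestroy(coloring);                        /* also frees colors[] */
  return(0);
}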

294: /*@
295:     ISPartitioningToNumbering - Takes an ISPartitioning and on each processor
296:     generates an IS that contains a new global node number for each index based
297:     on the partitioning.

299:     Collective on IS

301:     Input Parameter:
302: .   partitioning - a partitioning as generated by MatPartitioningApply()

304:     Output Parameter:
305: .   is - on each processor the index set that defines the global numbers 
306:          (in the new numbering) for all the nodes currently (before the partitioning) 
307:          on that processor

309:    Level: advanced

311: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningCount()

313: @*/
314: PetscErrorCode  ISPartitioningToNumbering(IS part,IS *is)
315: {
316:   MPI_Comm       comm;
317:   PetscInt       i,np,npt,n,*starts = PETSC_NULL,*sums = PETSC_NULL,*lsizes = PETSC_NULL,*newi = PETSC_NULL;
318:   const PetscInt *indices = PETSC_NULL;

322:   PetscObjectGetComm((PetscObject)part,&comm);

324:   /* count the number of partitions, i.e., virtual processors */
325:   ISGetLocalSize(part,&n);
326:   ISGetIndices(part,&indices);
327:   np = 0;
328:   for (i=0; i<n; i++) {
329:     np = PetscMax(np,indices[i]);
330:   }
331:   MPI_Allreduce(&np,&npt,1,MPIU_INT,MPI_MAX,comm);
332:   np = npt+1; /* so that it looks like an MPI_Comm_size output */

334:   /*
335:         lsizes - number of elements of each partition on this particular processor
336:         sums - total number of "previous" nodes for any particular partition
337:         starts - global number of first element in each partition on this processor
338:   */
339:   PetscMalloc3(np,PetscInt,&lsizes,np,PetscInt,&starts,np,PetscInt,&sums);
340:   PetscMemzero(lsizes,np*sizeof(PetscInt));
341:   for (i=0; i<n; i++) {
342:     lsizes[indices[i]]++;
343:   }
344:   MPI_Allreduce(lsizes,sums,np,MPIU_INT,MPI_SUM,comm);
345:   MPI_Scan(lsizes,starts,np,MPIU_INT,MPI_SUM,comm);
346:   for (i=0; i<np; i++) {
347:     starts[i] -= lsizes[i];
348:   }
349:   for (i=1; i<np; i++) {
350:     sums[i]    += sums[i-1];
351:     starts[i]  += sums[i-1];
352:   }

354:   /* 
355:       For each local index give it the new global number
356:   */
357:   PetscMalloc(n*sizeof(PetscInt),&newi);
358:   for (i=0; i<n; i++) {
359:     newi[i] = starts[indices[i]]++;
360:   }
361:   PetscFree3(lsizes,starts,sums);

363:   ISRestoreIndices(part,&indices);
364:   ISCreateGeneral(comm,n,newi,is);
365:   PetscFree(newi);
366:   ISSetPermutation(*is);
367:   return(0);
368: }
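
/*
   Usage sketch (not part of the original file; the helper name is made up for
   illustration): turn the IS produced by MatPartitioningApply() into the new
   global numbering and print it.  Error checking is omitted as elsewhere in
   this listing.
*/
PetscErrorCode ISPartitioningToNumberingExample(IS part)
{
  MPI_Comm comm;
  IS       newnum;

  PetscObjectGetComm((PetscObject)part,&comm);
  ISPartitioningToNumbering(part,&newnum);  /* newnum[i] = new global number of local node i */
  ISView(newnum,PETSC_VIEWER_STDOUT_(comm));
  ISDestroy(newnum);
  return(0);
}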

372: /*@
373:     ISPartitioningCount - Takes an ISPartitioning and determines the number of 
374:     resulting elements on each (partition) process

376:     Collective on IS

378:     Input Parameters:
379: +   partitioning - a partitioning as generated by MatPartitioningApply()
380: -   len - length of the array count; this is the total number of partitions

382:     Output Parameter:
383: .   count - array of length size, to contain the number of elements assigned
384:         to each partition, where size is the number of partitions generated
385:          (see notes below).

387:    Level: advanced

389:     Notes:
390:         By default the number of partitions generated (and thus the length
391:         of count) is the size of the communicator associated with IS,
392:         but it can be set with MatPartitioningSetNParts(). The resulting array
393:         of lengths can, for instance, serve as input to PCBJacobiSetTotalBlocks().


396: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningToNumbering(),
397:         MatPartitioningSetNParts()

399: @*/
400: PetscErrorCode  ISPartitioningCount(IS part,PetscInt len,PetscInt count[])
401: {
402:   MPI_Comm       comm;
403:   PetscInt       i,n,*lsizes;
404:   const PetscInt *indices;
406:   PetscMPIInt    npp;

409:   PetscObjectGetComm((PetscObject)part,&comm);

411:   /* count the number of partitions */
412:   ISGetLocalSize(part,&n);
413:   ISGetIndices(part,&indices);
414: #if defined(PETSC_USE_DEBUG)
415:   {
416:     PetscInt np = 0,npt;
417:     for (i=0; i<n; i++) {
418:       np = PetscMax(np,indices[i]);
419:     }
420:     MPI_Allreduce(&np,&npt,1,MPIU_INT,MPI_MAX,comm);
421:     np = npt+1; /* so that it looks like an MPI_Comm_size output */
422:     if (np > len) SETERRQ2(PETSC_ERR_ARG_SIZ,"Length of count array %D is less than number of partitions %D",len,np);
423:   }
424: #endif

426:   /*
427:         lsizes - number of elements of each partition on this particular processor
428:         sums - total number of "previous" nodes for any particular partition
429:         starts - global number of first element in each partition on this processor
430:   */
431:   PetscMalloc(len*sizeof(PetscInt),&lsizes);
432:   PetscMemzero(lsizes,len*sizeof(PetscInt));
433:   for (i=0; i<n; i++) {
434:     lsizes[indices[i]]++;
435:   }
436:   ISRestoreIndices(part,&indices);
437:   npp  = PetscMPIIntCast(len);
438:   MPI_Allreduce(lsizes,count,npp,MPIU_INT,MPI_SUM,comm);
439:   PetscFree(lsizes);
440:   return(0);
441: }
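
/*
   Usage sketch (not part of the original file; the helper name is made up for
   illustration): count how many elements land in each partition.  Here nparts
   is assumed to equal the number of partitions (by default the communicator
   size, see the notes above); error checking is omitted as elsewhere in this
   listing.
*/
PetscErrorCode ISPartitioningCountExample(IS part,PetscInt nparts)
{
  MPI_Comm comm;
  PetscInt *count,i;

  PetscObjectGetComm((PetscObject)part,&comm);
  PetscMalloc(nparts*sizeof(PetscInt),&count);
  ISPartitioningCount(part,nparts,count);   /* count[i] = elements assigned to partition i */
  for (i=0; i<nparts; i++) {
    PetscPrintf(comm,"partition %D: %D elements\n",i,count[i]);
  }
  PetscFree(count);
  return(0);
}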

445: /*@
446:     ISAllGather - Given an index set (IS) on each processor, generates a large 
447:     index set (same on each processor) by concatenating together each
448:     processor's index set.

450:     Collective on IS

452:     Input Parameter:
453: .   is - the distributed index set

455:     Output Parameter:
456: .   isout - the concatenated index set (same on all processors)

458:     Notes: 
459:     ISAllGather() is clearly not scalable for large index sets.

461:     The IS created on each processor must be created with a common
462:     communicator (e.g., PETSC_COMM_WORLD). If the index sets were created 
463:     with PETSC_COMM_SELF, this routine will not work as expected, since 
464:     each process will generate its own new IS that consists only of
465:     itself.

467:     The communicator for this new IS is PETSC_COMM_SELF

469:     Level: intermediate

471:     Concepts: gather^index sets
472:     Concepts: index sets^gathering to all processors
473:     Concepts: IS^gathering to all processors

475: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGatherIndices()
476: @*/
477: PetscErrorCode  ISAllGather(IS is,IS *isout)
478: {
480:   PetscInt       *indices,n,i,N,step,first;
481:   const PetscInt *lindices;
482:   MPI_Comm       comm;
483:   PetscMPIInt    size,*sizes = PETSC_NULL,*offsets = PETSC_NULL,nn;
484:   PetscTruth     stride;


490:   PetscObjectGetComm((PetscObject)is,&comm);
491:   MPI_Comm_size(comm,&size);
492:   ISGetLocalSize(is,&n);
493:   ISStride(is,&stride);
494:   if (size == 1 && stride) { /* should handle parallel ISStride also */
495:     ISStrideGetInfo(is,&first,&step);
496:     ISCreateStride(PETSC_COMM_SELF,n,first,step,isout);
497:   } else {
498:     PetscMalloc2(size,PetscMPIInt,&sizes,size,PetscMPIInt,&offsets);
499: 
500:     nn   = PetscMPIIntCast(n);
501:     MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);
502:     offsets[0] = 0;
503:     for (i=1;i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
504:     N = offsets[size-1] + sizes[size-1];
505: 
506:     PetscMalloc(N*sizeof(PetscInt),&indices);
507:     ISGetIndices(is,&lindices);
508:     MPI_Allgatherv((void*)lindices,nn,MPIU_INT,indices,sizes,offsets,MPIU_INT,comm);
509:     ISRestoreIndices(is,&lindices);
510:     PetscFree2(sizes,offsets);

512:     ISCreateGeneral(PETSC_COMM_SELF,N,indices,isout);
513:     PetscFree(indices);
514:   }
515:   return(0);
516: }
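
/*
   Usage sketch (not part of the original file; the helper name is made up for
   illustration): gather a distributed IS so every process holds the full,
   concatenated set.  Error checking is omitted as elsewhere in this listing.
*/
PetscErrorCode ISAllGatherExample(IS is)
{
  IS       isall;
  PetscInt N;

  ISAllGather(is,&isall);           /* isall lives on PETSC_COMM_SELF */
  ISGetLocalSize(isall,&N);         /* N = sum of the local sizes of is */
  ISView(isall,PETSC_VIEWER_STDOUT_SELF);
  ISDestroy(isall);
  return(0);
}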

520: /*@C
521:     ISAllGatherIndices - Given a set of integers on each processor, generates a large 
522:     set (same on each processor) by concatenating together each processor's integers

524:     Collective on MPI_Comm

526:     Input Parameters:
527: +   comm - communicator to share the indices
528: .   n - local size of set
529: -   lindices - local indices

531:     Output Parameters:
532: +   outN - total number of indices
533: -   outindices - all of the integers

535:     Notes: 
536:     ISAllGatherIndices() is clearly not scalable for large index sets.


539:     Level: intermediate

541:     Concepts: gather^index sets
542:     Concepts: index sets^gathering to all processors
543:     Concepts: IS^gathering to all processors

545: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
546: @*/
547: PetscErrorCode  ISAllGatherIndices(MPI_Comm comm,PetscInt n,const PetscInt lindices[],PetscInt *outN,PetscInt *outindices[])
548: {
550:   PetscInt       *indices,i,N;
551:   PetscMPIInt    size,*sizes = PETSC_NULL,*offsets = PETSC_NULL,nn;

554:   MPI_Comm_size(comm,&size);
555:   PetscMalloc2(size,PetscMPIInt,&sizes,size,PetscMPIInt,&offsets);
556: 
557:   nn   = PetscMPIIntCast(n);
558:   MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);
559:   offsets[0] = 0;
560:   for (i=1;i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
561:   N    = offsets[size-1] + sizes[size-1];

563:   PetscMalloc(N*sizeof(PetscInt),&indices);
564:   MPI_Allgatherv((void*)lindices,nn,MPIU_INT,indices,sizes,offsets,MPIU_INT,comm);
565:   PetscFree2(sizes,offsets);

567:   *outindices = indices;
568:   if (outN) *outN = N;
569:   return(0);
570: }
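
/*
   Usage sketch (not part of the original file; the helper name and the local
   index values are made up for illustration): gather raw integer lists from
   all processes.  The gathered array is owned by the caller and must be freed
   with PetscFree(); error checking is omitted as elsewhere in this listing.
*/
PetscErrorCode ISAllGatherIndicesExample(MPI_Comm comm)
{
  PetscMPIInt rank;
  PetscInt    local[2],*all,N;

  MPI_Comm_rank(comm,&rank);
  local[0] = 2*rank; local[1] = 2*rank+1;      /* hypothetical local indices */
  ISAllGatherIndices(comm,2,local,&N,&all);    /* all[] = indices concatenated by rank, N = total */
  PetscFree(all);
  return(0);
}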



576: /*@C
577:     ISAllGatherColors - Given a set of colors on each processor, generates a large 
578:     set (same on each processor) by concatenating together each processor's colors

580:     Collective on MPI_Comm

582:     Input Parameters:
583: +   comm - communicator to share the indices
584: .   n - local size of set
585: -   lindices - local colors

587:     Output Parameters:
588: +   outN - total number of indices
589: -   outindices - all of the colors

591:     Notes: 
592:     ISAllGatherColors() is clearly not scalable for large index sets.


595:     Level: intermediate

597:     Concepts: gather^index sets
598:     Concepts: index sets^gathering to all processors
599:     Concepts: IS^gathering to all processors

601: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather(), ISAllGatherIndices()
602: @*/
603: PetscErrorCode  ISAllGatherColors(MPI_Comm comm,PetscInt n,ISColoringValue *lindices,PetscInt *outN,ISColoringValue *outindices[])
604: {
605:   ISColoringValue *indices;
606:   PetscErrorCode  ierr;
607:   PetscInt        i,N;
608:   PetscMPIInt     size,*offsets = PETSC_NULL,*sizes = PETSC_NULL, nn = n;

611:   MPI_Comm_size(comm,&size);
612:   PetscMalloc2(size,PetscMPIInt,&sizes,size,PetscMPIInt,&offsets);
613: 
614:   MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);
615:   offsets[0] = 0;
616:   for (i=1;i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
617:   N    = offsets[size-1] + sizes[size-1];

620:   PetscMalloc((N+1)*sizeof(ISColoringValue),&indices);
621:   MPI_Allgatherv(lindices,(PetscMPIInt)n,MPIU_COLORING_VALUE,indices,sizes,offsets,MPIU_COLORING_VALUE,comm);
622:   PetscFree2(sizes,offsets);

623:   *outindices = indices;
624:   if (outN) *outN = N;
625:   return(0);
626: }

630: /*@
631:     ISComplement - Given a sequential index set (IS), generates the complement index set,
632:        that is, all indices that are NOT in the given set.

634:     Collective on IS

636:     Input Parameters:
637: +   is - the index set
638: .   nmin - the first index desired in the complement
639: -   nmax - the largest index desired in the complement (note that all indices in is must be greater or equal to nmin and less than nmax)

641:     Output Parameter:
642: .   isout - the complement

644:     Notes:  The communicator for this new IS is the same as for the input IS

646:       For a parallel IS, this will generate the local part of the complement on each process

648:       To generate the entire complement (on each process) of a parallel IS, first call ISAllGather() and then
649:     call this routine.

651:     Level: intermediate

653:     Concepts: gather^index sets
654:     Concepts: index sets^gathering to all processors
655:     Concepts: IS^gathering to all processors

657: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGatherIndices(), ISAllGather()
658: @*/
659: PetscErrorCode  ISComplement(IS is,PetscInt nmin,PetscInt nmax,IS *isout)
660: {
662:   const PetscInt *indices;
663:   PetscInt       n,i,j,cnt,*nindices;
664:   PetscTruth     sorted;

669:   if (nmin < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"nmin %D cannot be negative",nmin);
670:   if (nmin > nmax) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"nmin %D cannot be greater than nmax %D",nmin,nmax);
671:   ISSorted(is,&sorted);
672:   if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONG,"Index set must be sorted");

674:   ISGetLocalSize(is,&n);
675:   ISGetIndices(is,&indices);
676: #if defined(PETSC_USE_DEBUG)
677:   for (i=0; i<n; i++) {
678:     if (indices[i] <  nmin) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Index %D's value %D is smaller than minimum given %D",i,indices[i],nmin);
679:     if (indices[i] >= nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Index %D's value %D is larger than maximum given %D",i,indices[i],nmax);
680:   }
681: #endif
682:   PetscMalloc((nmax - n)*sizeof(PetscInt),&nindices);
683:   cnt = 0;
684:   j   = nmin;
685:   for (i=0; i<n; i++) {
686:     for (; j<indices[i]; j++) {
687:       nindices[cnt++] = j;
688:     }
689:     j++;
690:   }
691:   for (; j<nmax; j++) {
692:     nindices[cnt++] = j;
693:   }
694:   if (cnt != nmax-nmin-n) SETERRQ2(PETSC_ERR_PLIB,"Number of entries found in complement %D does not match expected %D",cnt,nmax-nmin-n);
695:   ISCreateGeneral(((PetscObject)is)->comm,nmax-nmin-n,nindices,isout);
696:   PetscFree(nindices);
697:   return(0);
698: }
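
/*
   Usage sketch (not part of the original file; the helper name and the index
   values are made up for illustration): complement of a small sorted
   sequential IS within [0,6).  Error checking is omitted as elsewhere in this
   listing.
*/
PetscErrorCode ISComplementExample(void)
{
  PetscInt idx[] = {1,3,4};
  IS       is,isc;

  ISCreateGeneral(PETSC_COMM_SELF,3,idx,&is);
  ISComplement(is,0,6,&isc);                 /* isc = {0,2,5} */
  ISView(isc,PETSC_VIEWER_STDOUT_SELF);
  ISDestroy(isc);
  ISDestroy(is);
  return(0);
}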