Actual source code: sbaijov.c

  1: #define PETSCMAT_DLL

  3: /*
  4:    Routines to compute overlapping regions of a parallel MPI matrix.
  5:    Used for finding submatrices that are shared across processors.
  6: */
 7:  #include "../src/mat/impls/sbaij/mpi/mpisbaij.h"
 8:  #include "petscbt.h"

 10: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,PetscInt,IS*);
 11: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,PetscInt*,PetscInt,PetscInt*,PetscBT*);

 15: PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
 16: {
 18:   PetscInt       i,N=C->cmap->N, bs=C->rmap->bs;
 19:   IS             *is_new;

 22:   PetscMalloc(is_max*sizeof(IS),&is_new);
 23:   /* Convert the indices into block format */
 24:   ISCompressIndicesGeneral(N,bs,is_max,is,is_new);
 25:   if (ov < 0){ SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified\n");}
 26:   for (i=0; i<ov; ++i) {
 27:     MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);
 28:   }
 29:   for (i=0; i<is_max; i++) {ISDestroy(is[i]);}
 30:   ISExpandIndicesGeneral(N,bs,is_max,is_new,is);
 31:   for (i=0; i<is_max; i++) {ISDestroy(is_new[i]);}
 32:   PetscFree(is_new);
 33:   return(0);
 34: }
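/*
   Usage sketch (illustrative only, not part of this file): the routine above is
   not called directly but dispatched through the generic interface, e.g. when an
   additive Schwarz preconditioner enlarges its local subdomains:

       IS is[1];
       ISCreateStride(PETSC_COMM_SELF,nlocal,first,1,&is[0]);
       MatIncreaseOverlap(C,1,is,2);     <-- grow the index set by an overlap of 2

   Here C is a MATMPISBAIJ matrix, and nlocal/first are placeholders for the local
   subdomain size and starting index; the point indices in is[] are compressed to
   block indices by ISCompressIndicesGeneral() before the overlap loop runs.
*/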

 36: typedef enum {MINE,OTHER} WhoseOwner;
 37: /*  data1, odata1 and odata2 are packed in the format (for communication):
 38:        data[0]          = is_max, no of is 
 39:        data[1]          = size of is[0]
 40:         ...
 41:        data[is_max]     = size of is[is_max-1]
 42:        data[is_max + 1] = data(is[0])
 43:         ...
 44:        data[is_max+1+sum(size of is[k]), k=0,...,i-1] = data(is[i])
 45:         ...
 46:    data2 is packed in the format (for creating output is[]):
 47:        data[0]          = is_max, no of is 
 48:        data[1]          = size of is[0]
 49:         ...
 50:        data[is_max]     = size of is[is_max-1]
 51:        data[is_max + 1] = data(is[0])
 52:         ...
 53:        data[is_max + 1 + Mbs*i] = data(is[i])
 54:         ...
 55: */
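/* Worked example (illustrative, not from the original source): with is_max = 2,
   is[0] = {3,7} and is[1] = {5}, data1 is packed as
       {2, 2, 1, 3, 7, 5}
   i.e. the header {is_max, size of is[0], size of is[1]} followed by the
   concatenated indices; data2 instead reserves a fixed stride of Mbs entries per
   index set, so
       data2[0] = 2, data2[1] = size of is[0], data2[2] = size of is[1],
       data2[3 .. 3+Mbs-1]       holds the indices of is[0],
       data2[3+Mbs .. 3+2*Mbs-1] holds the indices of is[1].
*/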
 58: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
 59: {
 60:   Mat_MPISBAIJ  *c = (Mat_MPISBAIJ*)C->data;
 62:   PetscMPIInt    size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len;
 63:   const PetscInt *idx_i;
 64:   PetscInt       idx,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i,
 65:                  Mbs,i,j,k,*odata1,*odata2,
 66:                  proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
 67:   PetscInt       proc_end=0,*iwork,len_unused,nodata2;
 68:   PetscInt       ois_max; /* max no. of is[] over all processors */
 69:   char           *t_p;
 70:   MPI_Comm       comm;
 71:   MPI_Request    *s_waits1,*s_waits2,r_req;
 72:   MPI_Status     *s_status,r_status;
 73:   PetscBT        *table;  /* mark indices of this processor's is[] */
 74:   PetscBT        table_i;
 75:   PetscBT        otable; /* mark indices of other processors' is[] */
 76:   PetscInt       bs=C->rmap->bs,Bn = c->B->cmap->n,Bnbs = Bn/bs,*Bowners;
 77:   IS             garray_local,garray_gl;

 80:   comm = ((PetscObject)C)->comm;
 81:   size = c->size;
 82:   rank = c->rank;
 83:   Mbs  = c->Mbs;

 85:   PetscObjectGetNewTag((PetscObject)C,&tag1);
 86:   PetscObjectGetNewTag((PetscObject)C,&tag2);

 88:   /* create tables used in
 89:      step 1: table[i] - mark c->garray of proc [i]
 90:      step 3: table[i] - mark indices of is[i] when whose=MINE     
 91:              table[0] - mark indices of is[] when whose=OTHER */
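  /* Note: table[] and its bit storage share one allocation below -- the first len
     entries hold the PetscBT handles, t_p points to the byte area that follows,
     and each table[i] gets its own (Mbs/PETSC_BITS_PER_BYTE+1)-byte slice. */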
 92:   len = PetscMax(is_max, size);
 93:   len_max = len*sizeof(PetscBT) + (Mbs/PETSC_BITS_PER_BYTE+1)*len*sizeof(char) + 1;
 94:   PetscMalloc(len_max,&table);
 95:   t_p  = (char *)(table + len);
 96:   for (i=0; i<len; i++) {
 97:     table[i]  = t_p  + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
 98:   }

100:   MPI_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm);
101: 
102:   /* 1. Send this processor's is[] to other processors */
103:   /*---------------------------------------------------*/
104:   /* allocate spaces */
105:   PetscMalloc(is_max*sizeof(PetscInt),&n);
106:   len = 0;
107:   for (i=0; i<is_max; i++) {
108:     ISGetLocalSize(is[i],&n[i]);
109:     len += n[i];
110:   }
111:   if (!len) {
112:     is_max = 0;
113:   } else {
114:     len += 1 + is_max; /* max length of data1 for one processor */
115:   }

117: 
118:   PetscMalloc((size*len+1)*sizeof(PetscInt),&data1);
119:   PetscMalloc(size*sizeof(PetscInt*),&data1_start);
120:   for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

122:   PetscMalloc((size*4+1)*sizeof(PetscInt),&len_s);
123:   btable  = (PetscInt*)(len_s + size);
124:   iwork   = btable + size;
125:   Bowners = iwork + size;

127:   /* gather c->garray from all processors */
128:   ISCreateGeneral(comm,Bnbs,c->garray,&garray_local);
129:   ISAllGather(garray_local, &garray_gl);
130:   ISDestroy(garray_local);
131:   MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm);
132:   Bowners[0] = 0;
133:   for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];
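  /* Bowners[i] .. Bowners[i+1] now delimit processor [i]'s garray entries within garray_gl */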
134: 
135:   if (is_max){
136:     /* lookup table ctable which maps block row index to owning proc_id */
137:     PetscMalloc(Mbs*sizeof(PetscInt),&ctable);
138:     for (proc_id=0,j=0; proc_id<size; proc_id++) {
139:       for (; j<C->rmap->range[proc_id+1]/bs; j++) {
140:         ctable[j] = proc_id;
141:       }
142:     }

144:     /* bit tables marking c->garray of each processor */
145:     ISGetIndices(garray_gl,&idx_i);
146:     for (i=0; i<size; i++){
147:       table_i = table[i];
148:       PetscBTMemzero(Mbs,table_i);
149:       for (j = Bowners[i]; j<Bowners[i+1]; j++){ /* go through B cols of proc[i]*/
150:         PetscBTSet(table_i,idx_i[j]);
151:       }
152:     }
153:     ISRestoreIndices(garray_gl,&idx_i);
154:   }  /* if (is_max) */
155:   ISDestroy(garray_gl);
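  /* When this processor has any is[] (is_max > 0), table[i] now marks the block
     columns appearing in the off-diagonal part (garray) of processor [i]; in
     step 1 below an index owned by proc_end is forwarded only to proc_end itself
     and to lower-ranked processes whose garray marks it. */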

157:   /* evaluate communication - message to whom, length, and buffer space */
158:   for (i=0; i<size; i++) len_s[i] = 0;
159: 
160:   /* header of data1 */
161:   for (proc_id=0; proc_id<size; proc_id++){
162:     iwork[proc_id] = 0;
163:     *data1_start[proc_id] = is_max;
164:     data1_start[proc_id]++;
165:     for (j=0; j<is_max; j++) {
166:       if (proc_id == rank){
167:         *data1_start[proc_id] = n[j];
168:       } else {
169:         *data1_start[proc_id] = 0;
170:       }
171:       data1_start[proc_id]++;
172:     }
173:   }
174: 
175:   for (i=0; i<is_max; i++) {
176:     ISGetIndices(is[i],&idx_i);
177:     for (j=0; j<n[i]; j++){
178:       idx = idx_i[j];
179:       *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
180:       proc_end = ctable[idx];
181:       for (proc_id=0;  proc_id<=proc_end; proc_id++){ /* for others to process */
182:         if (proc_id == rank ) continue; /* done before this loop */
183:         if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx))
184:           continue;   /* no need for sending idx to [proc_id] */
185:         *data1_start[proc_id] = idx; data1_start[proc_id]++;
186:         len_s[proc_id]++;
187:       }
188:     }
189:     /* update header data */
190:     for (proc_id=0; proc_id<size; proc_id++){
191:       if (proc_id== rank) continue;
192:       *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
193:       iwork[proc_id] = len_s[proc_id] ;
194:     }
195:     ISRestoreIndices(is[i],&idx_i);
196:   }
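  /* At this point, for each remote process, the header slot data1[proc_id*len + 1 + i]
     holds how many indices of is[i] were copied into that process' message, and
     len_s[proc_id] is the running total of copied indices (the header length is
     added below). */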

198:   nrqs = 0; nrqr = 0;
199:   for (i=0; i<size; i++){
200:     data1_start[i] = data1 + i*len;
201:     if (len_s[i]){
202:       nrqs++;
203:       len_s[i] += 1 + is_max; /* add no. of header msg */
204:     }
205:   }

207:   for (i=0; i<is_max; i++) {
208:     ISDestroy(is[i]);
209:   }
210:   PetscFree(n);
211:   PetscFree(ctable);

213:   /* Determine the number of messages to expect, their lengths, and the from-ids */
214:   PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&nrqr);
215:   PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);
216: 
217:   /*  Now  post the sends */
218:   PetscMalloc(2*size*sizeof(MPI_Request),&s_waits1);
219:   s_waits2 = s_waits1 + size;
220:   k = 0;
221:   for (proc_id=0; proc_id<size; proc_id++){  /* send data1 to processor [proc_id] */
222:     if (len_s[proc_id]){
223:       MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k);
224:       k++;
225:     }
226:   }

228:   /* 2. Receive other's is[] and process. Then send back */
229:   /*-----------------------------------------------------*/
230:   len = 0;
231:   for (i=0; i<nrqr; i++){
232:     if (len_r1[i] > len) len = len_r1[i];
233:   }
234:   PetscFree(len_r1);
235:   PetscFree(id_r1);

237:   for (proc_id=0; proc_id<size; proc_id++)
238:     len_s[proc_id] = iwork[proc_id] = 0;
239: 
240:   PetscMalloc((len+1)*sizeof(PetscInt),&odata1);
241:   PetscMalloc(size*sizeof(PetscInt**),&odata2_ptr);
242:   PetscBTCreate(Mbs,otable);

244:   len_max = ois_max*(Mbs+1);  /* max space storing all is[] for each receive */
245:   len_est = 2*len_max; /* estimated space of storing is[] for all receiving messages */
246:   PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);
247:   nodata2 = 0;       /* nodata2+1: number of chunks allocated and tracked in odata2_ptr[] */
248:   odata2_ptr[nodata2] = odata2;
249:   len_unused = len_est; /* unused space in the array odata2_ptr[nodata2]-- needs to be >= len_max  */
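  /* Replies assembled below are written into odata2 bump-pointer style: each processed
     message occupies the next len entries of the current chunk and must remain intact
     until the MPI_Waitall on s_waits2; whenever the space left in a chunk drops below
     len_max (the worst case for one reply), a new chunk of len_est entries is allocated
     and recorded in odata2_ptr[] so it can be freed at the end. */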
250: 
251:   k = 0;
252:   while (k < nrqr){
253:     /* Receive messages */
254:     MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);
255:     if (flag){
256:       MPI_Get_count(&r_status,MPIU_INT,&len);
257:       proc_id = r_status.MPI_SOURCE;
258:       MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
259:       MPI_Wait(&r_req,&r_status);

261:       /*  Process messages */
262:       /*  make sure there is enough unused space in odata2 array */
263:       if (len_unused < len_max){ /* allocate more space for odata2 */
264:         PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);
265:         odata2_ptr[++nodata2] = odata2;
266:         len_unused = len_est;
267:       }

269:       MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);
270:       len = 1 + odata2[0];
271:       for (i=0; i<odata2[0]; i++){
272:         len += odata2[1 + i];
273:       }

275:       /* Send messages back */
276:       MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);
277:       k++;
278:       odata2     += len;
279:       len_unused -= len;
280:       len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
281:     }
282:   }
283:   PetscFree(odata1);
284:   PetscBTDestroy(otable);

286:   /* 3. Do local work on this processor's is[] */
287:   /*-------------------------------------------*/
288:   /* make sure there is enough unused space in odata2(=data) array */
289:   len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
290:   if (len_unused < len_max){ /* allocate more space for odata2 */
291:     PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);
292:     odata2_ptr[++nodata2] = odata2;
293:     len_unused = len_est;
294:   }

296:   data = odata2;
297:   MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);
298:   PetscFree(data1_start);

300:   /* 4. Receive work done on other processors, then merge */
301:   /*------------------------------------------------------*/
302:   /* get max length of the reply messages this processor expects to recv */
303:   MPI_Allreduce(len_s,iwork,size,MPIU_INT,MPI_MAX,comm);
304:   PetscMalloc((iwork[rank]+1)*sizeof(PetscInt),&data2);
305:   PetscFree(len_s);
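  /* Each reply received below is in the data2 format described above: for every index
     set i, the remotely found indices are appended to data (fixed stride of Mbs entries
     per set), with table[i] used to skip entries already present. */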

307:   k = 0;
308:   while (k < nrqs){
309:     /* Receive messages */
310:     MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);
311:     if (flag){
312:       MPI_Get_count(&r_status,MPIU_INT,&len);
313:       proc_id = r_status.MPI_SOURCE;
314:       MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
315:       MPI_Wait(&r_req,&r_status);
316:       if (len > 1+is_max){ /* Add data2 into data */
317:         data2_i = data2 + 1 + is_max;
318:         for (i=0; i<is_max; i++){
319:           table_i = table[i];
320:           data_i  = data + 1 + is_max + Mbs*i;
321:           isz     = data[1+i];
322:           for (j=0; j<data2[1+i]; j++){
323:             col = data2_i[j];
324:             if (!PetscBTLookupSet(table_i,col)) {data_i[isz++] = col;}
325:           }
326:           data[1+i] = isz;
327:           if (i < is_max - 1) data2_i += data2[1+i];
328:         }
329:       }
330:       k++;
331:     }
332:   }
333:   PetscFree(data2);
334:   PetscFree(table);

336:   /* wait for phase 1 sends to complete, then free the send buffer */
337:   PetscMalloc(size*sizeof(MPI_Status),&s_status);
338:   if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status);}
339:   PetscFree(data1);
340: 
341:   /* wait for phase 2 sends to complete */
342:   if (nrqr){MPI_Waitall(nrqr,s_waits2,s_status);}
343:   PetscFree(s_waits1);
344:   PetscFree(s_status);

346:   /* 5. Create new is[] */
347:   /*--------------------*/
348:   for (i=0; i<is_max; i++) {
349:     data_i = data + 1 + is_max + Mbs*i;
350:     ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,is+i);
351:   }
352:   for (k=0; k<=nodata2; k++){
353:     PetscFree(odata2_ptr[k]);
354:   }
355:   PetscFree(odata2_ptr);

357:   return(0);
358: }

362: /*  
363:    MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap_MPISBAIJ_Once() to do
364:        the work on the local processor.

366:      Inputs:
367:       C      - a MATMPISBAIJ matrix;
368:       data   - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
369:       whose  - whose is[] to be processed, 
370:                MINE:  this processor's is[]
371:                OTHER: other processor's is[]
372:      Output:  
373:        nidx  - whose = MINE:
374:                      holds input and newly found indices in the same format as data
375:                whose = OTHER:
376:                      only holds the newly found indices
377:        table - table[i]: mark the indices of is[i], i=0,...,is_max-1. Used only in the case 'whose=MINE'.
378: */
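/* Layout example (illustrative): with is_max = 2, Mbs = 10 and whose = MINE,
   nidx[1] and nidx[2] hold the new sizes, the indices of is[0] go to nidx[3..12]
   and those of is[1] to nidx[13..22] (a fixed stride of Mbs per set); with
   whose = OTHER the output sets are packed back to back instead. */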
379: /* Would computation be reduced by swapping the loop 'for each is' and 'for each row'? */
380: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,PetscInt *data,PetscInt whose,PetscInt *nidx,PetscBT *table)
381: {
382:   Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
383:   Mat_SeqSBAIJ   *a = (Mat_SeqSBAIJ*)(c->A)->data;
384:   Mat_SeqBAIJ    *b = (Mat_SeqBAIJ*)(c->B)->data;
386:   PetscInt       row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
387:   PetscInt       a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
388:   PetscBT        table0;  /* mark the indices of input is[] for look up */
389:   PetscBT        table_i; /* points to the i-th table. When whose=OTHER, a single table is used for all is[] */
390: 
392:   Mbs = c->Mbs; mbs = a->mbs;
393:   ai = a->i; aj = a->j;
394:   bi = b->i; bj = b->j;
395:   garray = c->garray;
396:   rstart = c->rstartbs;
397:   is_max = data[0];

399:   PetscBTCreate(Mbs,table0);
400: 
401:   nidx[0] = is_max;
402:   idx_i   = data + is_max + 1; /* ptr to input is[0] array */
403:   nidx_i  = nidx + is_max + 1; /* ptr to output is[0] array */
404:   for (i=0; i<is_max; i++) { /* for each is */
405:     isz  = 0;
406:     n = data[1+i]; /* size of input is[i] */

408:     /* initialize and set table_i (mark idx and nidx) and table0 (only mark idx) */
409:     if (whose == MINE){ /* process this processor's is[] */
410:       table_i = table[i];
411:       nidx_i  = nidx + 1+ is_max + Mbs*i;
412:     } else {            /* process other processor's is[] - only use one temp table */
413:       table_i = table[0];
414:     }
415:     PetscBTMemzero(Mbs,table_i);
416:     PetscBTMemzero(Mbs,table0);
417:     if (n==0) {
418:        nidx[1+i] = 0; /* size of new is[i] */
419:        continue;
420:     }

422:     isz0 = 0; col_max = 0;
423:     for (j=0; j<n; j++){
424:       col = idx_i[j];
425:       if (col >= Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"index col %D >= Mbs %D",col,Mbs);
426:       if (!PetscBTLookupSet(table_i,col)) {
427:         PetscBTSet(table0,col);
428:         if (whose == MINE) {nidx_i[isz0] = col;}
429:         if (col_max < col) col_max = col;
430:         isz0++;
431:       }
432:     }
433: 
434:     if (whose == MINE) {isz = isz0;}
435:     k = 0;  /* no. of indices from input is[i] that have been examined */
436:     for (row=0; row<mbs; row++){
437:       a_start = ai[row]; a_end = ai[row+1];
438:       b_start = bi[row]; b_end = bi[row+1];
439:       if (PetscBTLookup(table0,row+rstart)){ /* row is on input is[i]:
440:                                                 do row search: collect all col in this row */
441:         for (l = a_start; l<a_end ; l++){ /* Amat */
442:           col = aj[l] + rstart;
443:           if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
444:         }
445:         for (l = b_start; l<b_end ; l++){ /* Bmat */
446:           col = garray[bj[l]];
447:           if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
448:         }
449:         k++;
450:         if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
451:       } else { /* row is not on input is[i]:
452:                   do col search: add the row to nidx_i if any of its cols is in the input is[i] */
453:         for (l = a_start; l<a_end ; l++){ /* Amat */
454:           col = aj[l] + rstart;
455:           if (col > col_max) break;
456:           if (PetscBTLookup(table0,col)){
457:             if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
458:             break; /* for (l = a_start; l<a_end; l++) */
459:           }
460:         }
461:         for (l = b_start; l<b_end ; l++){ /* Bmat */
462:           col = garray[bj[l]];
463:           if (col > col_max) break;
464:           if (PetscBTLookup(table0,col)){
465:             if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
466:             break; /* for (l = b_start; l<b_end; l++) */
467:           }
468:         }
469:       }
470:     }
471: 
472:     if (i < is_max - 1){
473:       idx_i  += n;   /* ptr to input is[i+1] array */
474:       nidx_i += isz; /* ptr to output is[i+1] array */
475:     }
476:     nidx[1+i] = isz; /* size of new is[i] */
477:   } /* for each is */
478:   PetscBTDestroy(table0);
479: 
480:   return(0);
481: }