Actual source code: mmdense.c

#define PETSCMAT_DLL

/*
   Support for the parallel dense matrix vector multiply
*/
#include "../src/mat/impls/dense/mpi/mpidense.h"

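/*
   Builds the scatter context that gathers all N entries of the distributed
   input vector into the local work vector mdn->lvec; the parallel dense
   multiply needs the entire vector on every process, since each process
   stores complete rows (all N columns) of the matrix.
*/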
PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
  IS           from,to;
  Vec          gvec;

  /* Create local vector that is used to scatter into */
  VecCreateSeq(PETSC_COMM_SELF,mat->cmap->N,&mdn->lvec);

  /* Create temporary index sets for building the scatter */
  ISCreateStride(((PetscObject)mat)->comm,mat->cmap->N,0,1,&from);
  ISCreateStride(PETSC_COMM_SELF,mat->cmap->N,0,1,&to);

  /* Create temporary global vector to generate scatter context */
  /* n    = mdn->cowners[mdn->rank+1] - mdn->cowners[mdn->rank]; */

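  /* gvec is given a PETSC_NULL array: only its parallel layout is needed to
     build the scatter context, never its values */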
  VecCreateMPIWithArray(((PetscObject)mat)->comm,mdn->nvec,mat->cmap->N,PETSC_NULL,&gvec);

  /* Generate the scatter context */
  VecScatterCreate(gvec,from,mdn->lvec,to,&mdn->Mvctx);
  PetscLogObjectParent(mat,mdn->Mvctx);
  PetscLogObjectParent(mat,mdn->lvec);
  PetscLogObjectParent(mat,from);
  PetscLogObjectParent(mat,to);
  PetscLogObjectParent(mat,gvec);

  ISDestroy(to);
  ISDestroy(from);
  VecDestroy(gvec);
  return(0);
}

EXTERN PetscErrorCode MatGetSubMatrices_MPIDense_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);

PetscErrorCode MatGetSubMatrices_MPIDense(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  PetscInt nmax,nstages_local,nstages,i,pos,max_no;

  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    PetscMalloc((ismax+1)*sizeof(Mat),submat);
  }
  /* Determine the number of stages in which the submatrices are extracted */
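  /* nmax caps the number of index sets handled per stage so that one stage's
     index traffic (nmax*N*sizeof(PetscInt)) stays within roughly 20,000,000
     bytes */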
  nmax          = 20*1000000 / (C->cmap->N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax)?1:0);

  /* Make sure every processor loops through the nstages */
  MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);
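  /* The local extraction routine below communicates globally, so every rank
     must take part in every stage; ranks that have run out of index sets
     still call it, with max_no = 0 */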


  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    MatGetSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);
    pos += max_no;
  }
  return(0);
}
/* -------------------------------------------------------------------------*/
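/*
   Extracts the requested sequential dense submatrices in two communication
   phases: each process first sends the global row numbers it needs to the
   owning processes (tag0), and the owners reply with the corresponding row
   values (tag1). Locally owned rows are copied directly, with no
   communication.
*/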
PetscErrorCode MatGetSubMatrices_MPIDense_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIDense   *c = (Mat_MPIDense*)C->data;
  Mat            A = c->A;
  Mat_SeqDense   *a = (Mat_SeqDense*)A->data,*mat;
  PetscMPIInt    rank,size,tag0,tag1,idex,end,i;
  PetscInt       N = C->cmap->N,rstart = C->rmap->rstart,count;
  const PetscInt **irow,**icol,*irow_i;
  PetscInt       *nrow,*ncol,*w1,*w3,*w4,*rtable,start;
  PetscInt       **sbuf1,m,j,k,l,ct1,**rbuf1,row,proc;
  PetscInt       nrqs,msz,**ptr,*ctr,*pa,*tmp,bsz,nrqr;
  PetscInt       is_no,jmax,**rmap,*rmap_i;
  PetscInt       len,ctr_j,*sbuf1_j,*rbuf1_i;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm       comm;
  PetscScalar    **rbuf2,**sbuf2;
  PetscTruth     sorted;

  comm   = ((PetscObject)C)->comm;
  tag0   = ((PetscObject)C)->tag;
  size   = c->size;
  rank   = c->rank;
  m      = C->rmap->N;

  /* Get a new tag to keep the communication clean */
  PetscObjectGetNewTag((PetscObject)C,&tag1);

  /* Check that the row and column index sets are sorted */
  for (i=0; i<ismax; i++) {
    ISSorted(isrow[i],&sorted);
    if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
    ISSorted(iscol[i],&sorted);
    if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
  }

  len    = 2*ismax*(sizeof(PetscInt*)+sizeof(PetscInt)) + (m+1)*sizeof(PetscInt);
  PetscMalloc(len,&irow);
  icol   = irow + ismax;
  nrow   = (PetscInt*)(icol + ismax);
  ncol   = nrow + ismax;
  rtable = ncol + ismax;

  for (i=0; i<ismax; i++) {
    ISGetIndices(isrow[i],&irow[i]);
    ISGetIndices(iscol[i],&icol[i]);
    ISGetLocalSize(isrow[i],&nrow[i]);
    ISGetLocalSize(iscol[i],&ncol[i]);
  }

  /* Create a lookup table for the mapping: global row -> owning process */
  for (i=0,j=0; i<size; i++) {
    jmax = C->rmap->range[i+1];
    for (; j<jmax; j++) {
      rtable[j] = i;
    }
  }
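  /* e.g., with two processes owning rows 0..4 and 5..9, rtable becomes
     {0,0,0,0,0,1,1,1,1,1}, so rtable[row] is the rank that owns row */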

  /* Evaluate the communication: to whom to send messages, how long they are,
     and how much buffer space is required; based on this, buffers are
     allocated and the data copied into them */
  PetscMalloc(size*4*sizeof(PetscInt),&w1); /* message sizes */
  w3     = w1 + 2*size;      /* number of IS that need to be sent to proc i */
  w4     = w3 + size;        /* temp work space used in determining w1, w3 */
  PetscMemzero(w1,size*3*sizeof(PetscInt)); /* initialize work vector */
  for (i=0; i<ismax; i++) {
    PetscMemzero(w4,size*sizeof(PetscInt)); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[2*j] += w4[j]; w3[j]++; }
    }
  }

  nrqs       = 0; /* number of outgoing messages */
  msz        = 0; /* total message length (over all procs) */
  w1[2*rank] = 0; /* no message sent to self */
  w3[rank]   = 0;
  for (i=0; i<size; i++) {
    if (w1[2*i]) { w1[2*i+1] = 1; nrqs++; } /* there exists a message to proc i */
  }
  PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa); /* list of the processes we send to */
  for (i=0,j=0; i<size; i++) {
    if (w1[2*i]) { pa[j] = i; j++; }
  }

  /* Each message has a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j        = pa[i];
    w1[2*j] += w1[2*j+1] + 2*w3[j];
    msz     += w1[2*j];
  }
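  /* Layout of the request sent to process j (nis = sbuf1[j][0]):
       sbuf1[j][0]                     - number of index sets in the message
       sbuf1[j][2*k-1], sbuf1[j][2*k]  - (IS number, row count) for set k
       sbuf1[j][2*nis+1 ...]           - the requested global row numbers */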
  /* Do a global reduction to determine how many messages to expect */
  PetscMaxSum(comm,w1,&bsz,&nrqr);

  /* Allocate memory for recv buffers (none may be needed if nrqr = 0) */
  len      = (nrqr+1)*sizeof(PetscInt*) + nrqr*bsz*sizeof(PetscInt);
  PetscMalloc(len,&rbuf1);
  rbuf1[0] = (PetscInt*)(rbuf1 + nrqr);
  for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  PetscMalloc((nrqr+1)*sizeof(MPI_Request),&r_waits1);
  for (i=0; i<nrqr; ++i) {
    MPI_Irecv(rbuf1[i],bsz,MPIU_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);
  }

  /* Allocate memory for outgoing messages */
  len   = 2*size*sizeof(PetscInt*) + 2*msz*sizeof(PetscInt) + size*sizeof(PetscInt);
  PetscMalloc(len,&sbuf1);
  ptr   = sbuf1 + size; /* pointers into the data in the outgoing buffers */
  PetscMemzero(sbuf1,2*size*sizeof(PetscInt*));
  /* allocate memory for outgoing data + buf to receive the first reply */
  tmp   = (PetscInt*)(ptr + size);
  ctr   = tmp + 2*msz;

  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j         = pa[i];
      iptr     += ict;
      sbuf1[j]  = iptr;
      ict       = w1[2*j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    PetscMemzero(ctr,size*sizeof(PetscInt));
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) { /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buffer */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* could optimize this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);
  for (i=0; i<nrqs; ++i) {
    j = pa[i];
    MPI_Isend(sbuf1[j],w1[2*j],MPIU_INT,j,tag0,comm,s_waits1+i);
  }

  /* Post receives to capture the row data from the other procs */
  PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);
  PetscMalloc((nrqs+1)*sizeof(PetscScalar*),&rbuf2);
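  /* w1[2*j] is the total number of integers sent to process j: a header of
     1 + 2*sbuf1[j][0] entries plus one entry per requested row. Stripping
     the header leaves the row count; each row returns N scalar values. */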
  for (i=0; i<nrqs; i++) {
    j     = pa[i];
    count = (w1[2*j] - (2*sbuf1[j][0] + 1))*N;
    PetscMalloc((count+1)*sizeof(PetscScalar),&rbuf2[i]);
    MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);
  }

  /* Receive the messages (row numbers), then pack and send off the
     row values to the requesting processes */

  PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);
  PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);
  PetscMalloc((nrqr+1)*sizeof(PetscScalar*),&sbuf2);

  {
    PetscScalar *sbuf2_i,*v_start;
    PetscInt    s_proc;
    for (i=0; i<nrqr; ++i) {
      MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);
      s_proc  = r_status1[i].MPI_SOURCE; /* process to send the values back to */
      rbuf1_i = rbuf1[idex];             /* actual message from s_proc */
      /* no of rows = end - start; start indexes the first row number in the
         buffer (just past the header), end is the length of the message */
      start   = 2*rbuf1_i[0] + 1;
      MPI_Get_count(r_status1+i,MPIU_INT,&end);
      /* allocate memory sufficient to hold all the row values */
      PetscMalloc((end-start)*N*sizeof(PetscScalar),&sbuf2[idex]);
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
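      /* a->v is stored column-major with leading dimension C->rmap->n, so the
         entries of one local row are strided by C->rmap->n; each row is
         packed as N contiguous values in the reply buffer */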
      for (j=start; j<end; j++) {
        row     = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k=0; k<N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++; v_start += C->rmap->n;
        }
      }
      /* Now send off the data */
      MPI_Isend(sbuf2[idex],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);
    }
  }
  /* End send-recv of IS + row numbers */
  PetscFree(r_status1);
  PetscFree(r_waits1);
  PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);
  if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status1);}
  PetscFree(s_status1);
  PetscFree(s_waits1);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqDense*)(submats[i]->data);
      if ((submats[i]->rmap->n != nrow[i]) || (submats[i]->cmap->n != ncol[i])) {
        SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix, wrong size");
      }
      PetscMemzero(mat->v,submats[i]->rmap->n*submats[i]->cmap->n*sizeof(PetscScalar));
      submats[i]->factor = C->factor;
    }
  } else {
    for (i=0; i<ismax; i++) {
      MatCreate(PETSC_COMM_SELF,submats+i);
      MatSetSizes(submats[i],nrow[i],ncol[i],nrow[i],ncol[i]);
      MatSetType(submats[i],((PetscObject)A)->type_name);
      MatSeqDenseSetPreallocation(submats[i],PETSC_NULL);
    }
  }

  /* Assemble the matrices */
  {
    PetscInt    col;
    PetscScalar *imat_v,*mat_v,*imat_vi,*mat_vi;

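    /* Fill in the locally owned rows first: both the source block a->v
       (leading dimension C->rmap->n) and the submatrix (leading dimension
       m = nrow[i]) are stored column-major, hence the strided indexing */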
    for (i=0; i<ismax; i++) {
      mat    = (Mat_SeqDense*)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j=0; j<m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k=0; k<ncol[i]; k++) {
            col          = icol[i][k];
            imat_vi[k*m] = mat_vi[col*C->rmap->n];
          }
        }
      }
    }
  }

  /* Create the row map: this maps a row of C to the corresponding row of
     each submat (a very expensive operation wrt memory usage) */
  len     = (1+ismax)*sizeof(PetscInt*) + ismax*C->rmap->N*sizeof(PetscInt);
  PetscMalloc(len,&rmap);
  rmap[0] = (PetscInt*)(rmap + ismax);
  PetscMemzero(rmap[0],ismax*C->rmap->N*sizeof(PetscInt));
  for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + C->rmap->N; }
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }
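  /* For example, if isrow[i] selects the global rows {3,7,9}, then
     rmap[i][3] = 0, rmap[i][7] = 1, rmap[i][9] = 2 */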

  /* Now receive the row values and assemble the rest of the matrix */
  PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);

  {
    PetscInt    is_max,tmp1,col,*sbuf1_i,is_sz;
    PetscScalar *rbuf2_i,*imat_v,*imat_vi;

    for (tmp1=0; tmp1<nrqs; tmp1++) { /* for each message */
      MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max+1;
      rbuf2_i = rbuf2[i];
      for (j=1; j<=is_max; j++) { /* for each IS belonging to the message */
        is_no  = sbuf1_i[2*j-1];
        is_sz  = sbuf1_i[2*j];
        mat    = (Mat_SeqDense*)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k=0; k<is_sz; k++,rbuf2_i+=N) { /* for each row */
          row     = sbuf1_i[ct1]; ct1++;
          row     = rmap_i[row];
          imat_vi = imat_v + row;
          for (l=0; l<ncol[is_no]; l++) { /* for each col */
            col          = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End send-recv of row values */
  PetscFree(r_status2);
  PetscFree(r_waits2);
  PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);
  if (nrqr) {MPI_Waitall(nrqr,s_waits2,s_status2);}
  PetscFree(s_status2);
  PetscFree(s_waits2);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ISRestoreIndices(isrow[i],irow+i);
    ISRestoreIndices(iscol[i],icol+i);
  }

  /* Destroy allocated memory */
  PetscFree(irow);
  PetscFree(w1);
  PetscFree(pa);


  for (i=0; i<nrqs; ++i) {
    PetscFree(rbuf2[i]);
  }
  PetscFree(rbuf2);
  PetscFree(sbuf1);
  PetscFree(rbuf1);

  for (i=0; i<nrqr; ++i) {
    PetscFree(sbuf2[i]);
  }

  PetscFree(sbuf2);
  PetscFree(rmap);

  for (i=0; i<ismax; i++) {
    MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);
  }

  return(0);
}