Actual source code: mpimatmatmult.c
1: #define PETSCMAT_DLL
3: /*
4: Defines matrix-matrix product routines for pairs of MPIAIJ matrices
5: C = A * B
6: */
7: #include "../src/mat/impls/aij/seq/aij.h"
8: #include "../src/mat/utils/freespace.h"
9: #include "../src/mat/impls/aij/mpi/mpiaij.h"
10: #include "petscbt.h"
11: #include "../src/mat/impls/dense/mpi/mpidense.h"
15: PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill, Mat *C)
16: {
20: if (scall == MAT_INITIAL_MATRIX){
21: MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);/* numeric product is computed as well */
22: } else if (scall == MAT_REUSE_MATRIX){
23: MatMatMultNumeric_MPIAIJ_MPIAIJ(A,B,*C);
24: } else {
25: SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
26: }
27: return(0);
28: }
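/* Illustrative usage sketch (not part of the original source; the helper name is made up):
   a first MatMatMult() call with MAT_INITIAL_MATRIX reaches the symbolic routine below, which
   also computes the numeric product; later calls with MAT_REUSE_MATRIX rerun only the numeric
   product, reusing the data attached to C. */
PetscErrorCode MatMatMultSketch_MPIAIJ(Mat A,Mat B,PetscInt nsteps,Mat *C)
{
  PetscInt i;

  MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,C);     /* symbolic + numeric product */
  for (i=0; i<nsteps; i++) {
    /* ... update the numerical values of A and/or B, keeping their nonzero patterns ... */
    MatMatMult(A,B,MAT_REUSE_MATRIX,2.0,C);     /* numeric product only */
  }
  return(0);
}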
32: PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void *ptr)
33: {
34: PetscErrorCode ierr;
35: Mat_MatMatMultMPI *mult=(Mat_MatMatMultMPI*)ptr;
38: PetscFree(mult->startsj);
39: PetscFree(mult->bufa);
40: if (mult->isrowa){ISDestroy(mult->isrowa);}
41: if (mult->isrowb){ISDestroy(mult->isrowb);}
42: if (mult->iscolb){ISDestroy(mult->iscolb);}
43: if (mult->C_seq){MatDestroy(mult->C_seq);}
44: if (mult->A_loc){MatDestroy(mult->A_loc); }
45: if (mult->B_seq){MatDestroy(mult->B_seq);}
46: if (mult->B_loc){MatDestroy(mult->B_loc);}
47: if (mult->B_oth){MatDestroy(mult->B_oth);}
48: PetscFree(mult->abi);
49: PetscFree(mult->abj);
50: PetscFree(mult);
51: return(0);
52: }
54: EXTERN PetscErrorCode MatDestroy_AIJ(Mat);
57: PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
58: {
59: PetscErrorCode ierr;
60: PetscContainer container;
61: Mat_MatMatMultMPI *mult=PETSC_NULL;
64: PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);
65: if (container) {
66: PetscContainerGetPointer(container,(void **)&mult);
67: } else {
68: SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
69: }
70: A->ops->destroy = mult->MatDestroy;
71: PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);
72: (*A->ops->destroy)(A);
73: PetscContainerDestroy(container);
74: return(0);
75: }
79: PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M) {
80: PetscErrorCode ierr;
81: Mat_MatMatMultMPI *mult;
82: PetscContainer container;
85: PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);
86: if (container) {
87: PetscContainerGetPointer(container,(void **)&mult);
88: } else {
89: SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
90: }
91: /* Note: the container is not duplicated, because it requires deep copying of
92: several large data sets (see PetscContainerDestroy_Mat_MatMatMultMPI()).
93: These data sets are only used for repeated calls of MatMatMultNumeric().
94: *M is unlikely to be used in this way, so we create *M in plain MPIAIJ format */
95: (*mult->MatDuplicate)(A,op,M);
96: (*M)->ops->destroy = mult->MatDestroy; /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's container! */
97: (*M)->ops->duplicate = mult->MatDuplicate; /* = MatDuplicate_MPIAIJ */
98: return(0);
99: }
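/* Illustrative sketch (not part of the original source; the struct, key, and function names are
   placeholders) of the PetscContainer pattern used throughout this file: a private struct is
   attached to a Mat under a string key with PetscObjectCompose() and recovered later with
   PetscObjectQuery(); PetscContainerSetUserDestroy() can be used, as above, to free the struct
   when the container is finally destroyed. */
typedef struct { PetscInt nused; } Sketch_Ctx;

PetscErrorCode SketchAttachContext(Mat C)
{
  PetscContainer container;
  Sketch_Ctx     *ctx;

  PetscNew(Sketch_Ctx,&ctx);
  PetscContainerCreate(PETSC_COMM_SELF,&container);
  PetscContainerSetPointer(container,ctx);
  PetscObjectCompose((PetscObject)C,"Sketch_Ctx",(PetscObject)container);
  PetscContainerDestroy(container);  /* C now holds the only reference to the container */

  /* ... later, any routine that has access to C can recover the pointer ... */
  PetscObjectQuery((PetscObject)C,"Sketch_Ctx",(PetscObject *)&container);
  if (container) { PetscContainerGetPointer(container,(void **)&ctx); }
  return(0);
}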
103: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
104: {
105: PetscErrorCode ierr;
106: PetscInt start,end;
107: Mat_MatMatMultMPI *mult;
108: PetscContainer container;
109:
111: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend){
112: SETERRQ4(PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
113: }
114: PetscNew(Mat_MatMatMultMPI,&mult);
117: /* create a seq matrix B_seq = submatrix of B consisting of the rows of B that correspond to the nonzero columns of A */
117: MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&mult->isrowb,&mult->iscolb,&mult->brstart,&mult->B_seq);
119: /* create a seq matrix A_loc = submatrix of A consisting of all local rows of A */
120: start = A->rmap->rstart; end = A->rmap->rend;
121: ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&mult->isrowa);
122: MatGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,&mult->isrowa,&mult->isrowb,&mult->A_loc);
124: /* compute C_seq = A_loc * B_seq */
125: MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_INITIAL_MATRIX,fill,&mult->C_seq);
127: /* create mpi matrix C by concatenating C_seq */
128: PetscObjectReference((PetscObject)mult->C_seq); /* prevent C_seq from being destroyed by MatMerge() */
129: MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_INITIAL_MATRIX,C);
130:
131: /* attach the supporting struct to C for reuse of symbolic C */
132: PetscContainerCreate(PETSC_COMM_SELF,&container);
133: PetscContainerSetPointer(container,mult);
134: PetscObjectCompose((PetscObject)(*C),"Mat_MatMatMultMPI",(PetscObject)container);
135: PetscContainerSetUserDestroy(container,PetscContainerDestroy_Mat_MatMatMultMPI);
136: mult->MatDestroy = (*C)->ops->destroy;
137: mult->MatDuplicate = (*C)->ops->duplicate;
139: (*C)->ops->destroy = MatDestroy_MPIAIJ_MatMatMult;
140: (*C)->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult;
141: return(0);
142: }
144: /* This routine is called ONLY when reusing the previously computed symbolic C */
147: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat B,Mat C)
148: {
149: PetscErrorCode ierr;
150: Mat *seq;
151: Mat_MatMatMultMPI *mult;
152: PetscContainer container;
155: PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject *)&container);
156: if (container) {
157: PetscContainerGetPointer(container,(void **)&mult);
158: } else {
159: SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
160: }
162: seq = &mult->B_seq;
163: MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);
164: mult->B_seq = *seq;
165:
166: seq = &mult->A_loc;
167: MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);
168: mult->A_loc = *seq;
170: MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_REUSE_MATRIX,0.0,&mult->C_seq);
172: PetscObjectReference((PetscObject)mult->C_seq);
173: MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_REUSE_MATRIX,&C);
174: return(0);
175: }
179: PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
180: {
184: if (scall == MAT_INITIAL_MATRIX){
185: MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);
186: }
187: MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);
188: return(0);
189: }
191: typedef struct {
192: Mat workB;                     /* sequential dense work matrix holding the needed off-process rows of B */
193: PetscScalar *rvalues,*svalues; /* receive and send buffers for those rows */
194: MPI_Request *rwaits,*swaits;   /* receive and send requests, one per neighboring process */
195: } MPIAIJ_MPIDense;
197: PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
198: {
199: MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx;
200: PetscErrorCode ierr;
203: if (contents->workB) {MatDestroy(contents->workB);}
204: PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);
205: PetscFree(contents);
206: return(0);
207: }
211: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
212: {
213: PetscErrorCode ierr;
214: Mat_MPIAIJ *aij = (Mat_MPIAIJ*) A->data;
215: PetscInt nz = aij->B->cmap->n;
216: PetscContainer cont;
217: MPIAIJ_MPIDense *contents;
218: VecScatter ctx = aij->Mvctx;
219: VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
220: VecScatter_MPI_General *to = ( VecScatter_MPI_General*) ctx->todata;
221: PetscInt m=A->rmap->n,n=B->cmap->n;
224: MatCreate(((PetscObject)B)->comm,C);
225: MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);
226: MatSetType(*C,MATMPIDENSE);
227: MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);
228: MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);
230: PetscContainerCreate(((PetscObject)A)->comm,&cont);
231: PetscNew(MPIAIJ_MPIDense,&contents);
232: PetscContainerSetPointer(cont,contents);
233: PetscContainerSetUserDestroy(cont,MPIAIJ_MPIDenseDestroy);
235: /* Create the work matrix used to store the off-process rows of B needed for the local product */
236: MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,PETSC_NULL,&contents->workB);
238: /* Create work arrays needed */
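/* rvalues holds B->cmap->N scalars for each of the from->starts[from->n] off-process rows of B
   that this process receives; svalues is sized the same way for the to->starts[to->n] rows it
   sends; one MPI_Request per neighboring process is needed in each direction. */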
239: PetscMalloc4(B->cmap->N*from->starts[from->n],PetscScalar,&contents->rvalues,
240: B->cmap->N*to->starts[to->n],PetscScalar,&contents->svalues,
241: from->n,MPI_Request,&contents->rwaits,
242: to->n,MPI_Request,&contents->swaits);
244: PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)cont);
245: PetscContainerDestroy(cont);
246: return(0);
247: }
249: /*
250: Performs an efficient scatter on the rows of B needed by this process; this is
251: a modification of the VecScatterBegin_() routines.
252: */
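/* The dense arrays obtained with MatGetArray() below are stored column by column, so entry
   (row,col) of B lives at b[row + nrowsB*col]; the pack and unpack loops move one full row of
   ncols values at a time between these arrays and the contiguous send/receive buffers. */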
253: PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
254: {
255: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
256: PetscErrorCode ierr;
257: PetscScalar *b,*w,*svalues,*rvalues;
258: VecScatter ctx = aij->Mvctx;
259: VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
260: VecScatter_MPI_General *to = ( VecScatter_MPI_General*) ctx->todata;
261: PetscInt i,j,k;
262: PetscInt *sindices,*sstarts,*rindices,*rstarts;
263: PetscMPIInt *sprocs,*rprocs,nrecvs;
264: MPI_Request *swaits,*rwaits;
265: MPI_Comm comm = ((PetscObject)A)->comm;
266: PetscMPIInt tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
267: MPI_Status status;
268: MPIAIJ_MPIDense *contents;
269: PetscContainer cont;
270: Mat workB;
273: PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&cont);
274: PetscContainerGetPointer(cont,(void**)&contents);
276: workB = *outworkB = contents->workB;
277: if (nrows != workB->rmap->n) SETERRQ2(PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
278: sindices = to->indices;
279: sstarts = to->starts;
280: sprocs = to->procs;
281: swaits = contents->swaits;
282: svalues = contents->svalues;
284: rindices = from->indices;
285: rstarts = from->starts;
286: rprocs = from->procs;
287: rwaits = contents->rwaits;
288: rvalues = contents->rvalues;
290: MatGetArray(B,&b);
291: MatGetArray(workB,&w);
293: for (i=0; i<from->n; i++) {
294: MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
295: }
297: for (i=0; i<to->n; i++) {
298: /* pack a message at a time */
299: CHKMEMQ;
300: for (j=0; j<sstarts[i+1]-sstarts[i]; j++){
301: for (k=0; k<ncols; k++) {
302: svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
303: }
304: }
305: CHKMEMQ;
306: MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
307: }
309: nrecvs = from->n;
310: while (nrecvs) {
311: MPI_Waitany(from->n,rwaits,&imdex,&status);
312: nrecvs--;
313: /* unpack a message at a time */
314: CHKMEMQ;
315: for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++){
316: for (k=0; k<ncols; k++) {
317: w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
318: }
319: }
320: CHKMEMQ;
321: }
322: if (to->n) {MPI_Waitall(to->n,swaits,to->sstatus);}
324: MatRestoreArray(B,&b);
325: MatRestoreArray(workB,&w);
326: MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);
327: MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);
328: return(0);
329: }
334: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
335: {
336: PetscErrorCode ierr;
337: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
338: Mat_MPIDense *bdense = (Mat_MPIDense*)B->data;
339: Mat_MPIDense *cdense = (Mat_MPIDense*)C->data;
340: Mat workB;
344: /* diagonal block of A times all local rows of B */
345: MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);
347: /* get the off-process parts of B needed to complete the product */
348: MatMPIDenseScatter(A,B,C,&workB);
350: /* off-diagonal block of A times nonlocal rows of B */
351: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);
352: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
353: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
354: return(0);
355: }
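/* Illustrative usage sketch (not part of the original source; the helper name is made up and
   MatCreateMPIDense() is only one way to build B): forming C = A*B with an MPIAIJ A and an
   MPIDENSE B dispatches to MatMatMult_MPIAIJ_MPIDense() above. */
PetscErrorCode MatMatMultDenseSketch(Mat A,PetscInt nrhs,Mat *C)
{
  Mat      B;
  PetscInt an,AN;

  MatGetLocalSize(A,PETSC_NULL,&an);
  MatGetSize(A,PETSC_NULL,&AN);
  /* the row layout of B must match the column layout of A */
  MatCreateMPIDense(((PetscObject)A)->comm,an,PETSC_DECIDE,AN,nrhs,PETSC_NULL,&B);
  /* ... fill B with MatSetValues(), then MatAssemblyBegin()/MatAssemblyEnd() ... */
  MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,C);
  MatDestroy(B);
  return(0);
}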