Actual source code: mpisbaij.c
1: #define PETSCMAT_DLL
3:  #include "../src/mat/impls/baij/mpi/mpibaij.h"
4:  #include "mpisbaij.h"
5:  #include "../src/mat/impls/sbaij/seq/sbaij.h"
7: EXTERN PetscErrorCode MatSetUpMultiply_MPISBAIJ(Mat);
8: EXTERN PetscErrorCode MatSetUpMultiply_MPISBAIJ_2comm(Mat);
9: EXTERN PetscErrorCode DisAssemble_MPISBAIJ(Mat);
10: EXTERN PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat,PetscInt,IS[],PetscInt);
11: EXTERN PetscErrorCode MatGetValues_SeqSBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],PetscScalar []);
12: EXTERN PetscErrorCode MatGetValues_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],PetscScalar []);
13: EXTERN PetscErrorCode MatSetValues_SeqSBAIJ(Mat,PetscInt,const PetscInt [],PetscInt,const PetscInt [],const PetscScalar [],InsertMode);
14: EXTERN PetscErrorCode MatSetValuesBlocked_SeqSBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
15: EXTERN PetscErrorCode MatSetValuesBlocked_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
16: EXTERN PetscErrorCode MatGetRow_SeqSBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
17: EXTERN PetscErrorCode MatRestoreRow_SeqSBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
18: EXTERN PetscErrorCode MatZeroRows_SeqSBAIJ(Mat,IS,PetscScalar*);
19: EXTERN PetscErrorCode MatZeroRows_SeqBAIJ(Mat,IS,PetscScalar *);
20: EXTERN PetscErrorCode MatGetRowMaxAbs_MPISBAIJ(Mat,Vec,PetscInt[]);
21: EXTERN PetscErrorCode MatRelax_MPISBAIJ(Mat,Vec,PetscReal,MatSORType,PetscReal,PetscInt,PetscInt,Vec);
26: PetscErrorCode MatStoreValues_MPISBAIJ(Mat mat)
27: {
28: Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
32: MatStoreValues(aij->A);
33: MatStoreValues(aij->B);
34: return(0);
35: }
41: PetscErrorCode MatRetrieveValues_MPISBAIJ(Mat mat)
42: {
43: Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
47: MatRetrieveValues(aij->A);
48: MatRetrieveValues(aij->B);
49: return(0);
50: }
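/*
   Minimal usage sketch for the pair above (illustrative only, assuming a
   matrix whose nonzero pattern is reused across assemblies):

      MatStoreValues(mat);       - save the current numerical values
      ... overwrite values, e.g. MatZeroEntries()/MatSetValues() ...
      MatRetrieveValues(mat);    - restore the saved values

   Both routines simply delegate to the local diagonal block A and the
   off-diagonal block B.
*/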
54: #define CHUNKSIZE 10
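/*
   The two macros below insert or add one scalar entry into the local
   diagonal block (A, SeqSBAIJ) or off-diagonal block (B, SeqBAIJ).  They
   locate the target block column with a short binary search narrowed to a
   linear scan; if the block column is absent and new nonzeros are allowed,
   the later blocks of the row are shifted up (reallocating through
   MatSeqXAIJReallocateAIJ when the row is full) and a zeroed bs-by-bs block
   is created to receive the value.
*/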
56: #define MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv) \
57: { \
58: \
59: brow = row/bs; \
60: rp = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
61: rmax = aimax[brow]; nrow = ailen[brow]; \
62: bcol = col/bs; \
63: ridx = row % bs; cidx = col % bs; \
64: low = 0; high = nrow; \
65: while (high-low > 3) { \
66: t = (low+high)/2; \
67: if (rp[t] > bcol) high = t; \
68: else low = t; \
69: } \
70: for (_i=low; _i<high; _i++) { \
71: if (rp[_i] > bcol) break; \
72: if (rp[_i] == bcol) { \
73: bap = ap + bs2*_i + bs*cidx + ridx; \
74: if (addv == ADD_VALUES) *bap += value; \
75: else *bap = value; \
76: goto a_noinsert; \
77: } \
78: } \
79: if (a->nonew == 1) goto a_noinsert; \
80: if (a->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
81: MatSeqXAIJReallocateAIJ(A,a->mbs,bs2,nrow,brow,bcol,rmax,aa,ai,aj,rp,ap,aimax,a->nonew,MatScalar); \
82: N = nrow++ - 1; \
83: /* shift up all the later entries in this row */ \
84: for (ii=N; ii>=_i; ii--) { \
85: rp[ii+1] = rp[ii]; \
86: PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
87: } \
88: if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar)); } \
89: rp[_i] = bcol; \
90: ap[bs2*_i + bs*cidx + ridx] = value; \
91: a_noinsert:; \
92: ailen[brow] = nrow; \
93: }
95: #define MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv) \
96: { \
97: brow = row/bs; \
98: rp = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
99: rmax = bimax[brow]; nrow = bilen[brow]; \
100: bcol = col/bs; \
101: ridx = row % bs; cidx = col % bs; \
102: low = 0; high = nrow; \
103: while (high-low > 3) { \
104: t = (low+high)/2; \
105: if (rp[t] > bcol) high = t; \
106: else low = t; \
107: } \
108: for (_i=low; _i<high; _i++) { \
109: if (rp[_i] > bcol) break; \
110: if (rp[_i] == bcol) { \
111: bap = ap + bs2*_i + bs*cidx + ridx; \
112: if (addv == ADD_VALUES) *bap += value; \
113: else *bap = value; \
114: goto b_noinsert; \
115: } \
116: } \
117: if (b->nonew == 1) goto b_noinsert; \
118: if (b->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
119: MatSeqXAIJReallocateAIJ(B,b->mbs,bs2,nrow,brow,bcol,rmax,ba,bi,bj,rp,ap,bimax,b->nonew,MatScalar); \
120: N = nrow++ - 1; \
121: /* shift up all the later entries in this row */ \
122: for (ii=N; ii>=_i; ii--) { \
123: rp[ii+1] = rp[ii]; \
124: PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
125: } \
126: if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));} \
127: rp[_i] = bcol; \
128: ap[bs2*_i + bs*cidx + ridx] = value; \
129: b_noinsert:; \
130: bilen[brow] = nrow; \
131: }
133: /* Only add/insert a(i,j) with i<=j (blocks).
134:     Any a(i,j) with i>j input by the user is ignored.
135: */
138: PetscErrorCode MatSetValues_MPISBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
139: {
140: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
141: MatScalar value;
142: PetscTruth roworiented = baij->roworiented;
144: PetscInt i,j,row,col;
145: PetscInt rstart_orig=mat->rmap->rstart;
146: PetscInt rend_orig=mat->rmap->rend,cstart_orig=mat->cmap->rstart;
147: PetscInt cend_orig=mat->cmap->rend,bs=mat->rmap->bs;
149: /* Some Variables required in the macro */
150: Mat A = baij->A;
151: Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(A)->data;
152: PetscInt *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
153: MatScalar *aa=a->a;
155: Mat B = baij->B;
156: Mat_SeqBAIJ *b = (Mat_SeqBAIJ*)(B)->data;
157: PetscInt *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
158: MatScalar *ba=b->a;
160: PetscInt *rp,ii,nrow,_i,rmax,N,brow,bcol;
161: PetscInt low,high,t,ridx,cidx,bs2=a->bs2;
162: MatScalar *ap,*bap;
164: /* for stash */
165: PetscInt n_loc, *in_loc = PETSC_NULL;
166: MatScalar *v_loc = PETSC_NULL;
170: if (!baij->donotstash){
171: if (n > baij->n_loc) {
172: PetscFree(baij->in_loc);
173: PetscFree(baij->v_loc);
174: PetscMalloc(n*sizeof(PetscInt),&baij->in_loc);
175: PetscMalloc(n*sizeof(MatScalar),&baij->v_loc);
176: baij->n_loc = n;
177: }
178: in_loc = baij->in_loc;
179: v_loc = baij->v_loc;
180: }
182: for (i=0; i<m; i++) {
183: if (im[i] < 0) continue;
184: #if defined(PETSC_USE_DEBUG)
185: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
186: #endif
187: if (im[i] >= rstart_orig && im[i] < rend_orig) { /* this processor entry */
188: row = im[i] - rstart_orig; /* local row index */
189: for (j=0; j<n; j++) {
190: if (im[i]/bs > in[j]/bs){
191: if (a->ignore_ltriangular){
192: continue; /* ignore lower triangular blocks */
193: } else {
194:           SETERRQ(PETSC_ERR_USER,"Lower triangular value cannot be set for sbaij format. To ignore these values, run with -mat_ignore_lower_triangular or call MatSetOption(mat,MAT_IGNORE_LOWER_TRIANGULAR,PETSC_TRUE)");
195: }
196: }
197: if (in[j] >= cstart_orig && in[j] < cend_orig){ /* diag entry (A) */
198: col = in[j] - cstart_orig; /* local col index */
199: brow = row/bs; bcol = col/bs;
200: if (brow > bcol) continue; /* ignore lower triangular blocks of A */
201: if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
202: MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv);
203: /* MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv); */
204: } else if (in[j] < 0) continue;
205: #if defined(PETSC_USE_DEBUG)
206: else if (in[j] >= mat->cmap->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);}
207: #endif
208: else { /* off-diag entry (B) */
209: if (mat->was_assembled) {
210: if (!baij->colmap) {
211: CreateColmap_MPIBAIJ_Private(mat);
212: }
213: #if defined (PETSC_USE_CTABLE)
214: PetscTableFind(baij->colmap,in[j]/bs + 1,&col);
215: col = col - 1;
216: #else
217: col = baij->colmap[in[j]/bs] - 1;
218: #endif
219: if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
220: DisAssemble_MPISBAIJ(mat);
221: col = in[j];
222:             /* Reinitialize the variables required by MatSetValues_SeqSBAIJ_B_Private() */
223: B = baij->B;
224: b = (Mat_SeqBAIJ*)(B)->data;
225: bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
226: ba=b->a;
227: } else col += in[j]%bs;
228: } else col = in[j];
229: if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
230: MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv);
231: /* MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv); */
232: }
233: }
234: } else { /* off processor entry */
235: if (!baij->donotstash) {
236: n_loc = 0;
237: for (j=0; j<n; j++){
238: if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
239: in_loc[n_loc] = in[j];
240: if (roworiented) {
241: v_loc[n_loc] = v[i*n+j];
242: } else {
243: v_loc[n_loc] = v[j*m+i];
244: }
245: n_loc++;
246: }
247: MatStashValuesRow_Private(&mat->stash,im[i],n_loc,in_loc,v_loc);
248: }
249: }
250: }
251: return(0);
252: }
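/*
   Illustrative call (a sketch, not taken from this file): because only the
   upper triangle is stored, a caller passes entries with i <= j, e.g.

      PetscInt    i = row, cols[2] = {row, row+1};
      PetscScalar vals[2] = {2.0, -1.0};
      MatSetValues(mat,1,&i,2,cols,vals,INSERT_VALUES);

   Lower triangular entries are ignored (or raise an error) as handled above.
*/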
256: PetscErrorCode MatSetValuesBlocked_MPISBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
257: {
258: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
259: const MatScalar *value;
260: MatScalar *barray=baij->barray;
261: PetscTruth roworiented = baij->roworiented;
262: PetscErrorCode ierr;
263: PetscInt i,j,ii,jj,row,col,rstart=baij->rstartbs;
264: PetscInt rend=baij->rendbs,cstart=baij->rstartbs,stepval;
265: PetscInt cend=baij->rendbs,bs=mat->rmap->bs,bs2=baij->bs2;
268: if(!barray) {
269: PetscMalloc(bs2*sizeof(MatScalar),&barray);
270: baij->barray = barray;
271: }
273: if (roworiented) {
274: stepval = (n-1)*bs;
275: } else {
276: stepval = (m-1)*bs;
277: }
278: for (i=0; i<m; i++) {
279: if (im[i] < 0) continue;
280: #if defined(PETSC_USE_DEBUG)
281: if (im[i] >= baij->Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
282: #endif
283: if (im[i] >= rstart && im[i] < rend) {
284: row = im[i] - rstart;
285: for (j=0; j<n; j++) {
286:       /* If n == 1 (row-oriented) or m == 1 (column-oriented), the input block is contiguous and no copy is required */
287: if ((roworiented) && (n == 1)) {
288: barray = (MatScalar*) v + i*bs2;
289: } else if((!roworiented) && (m == 1)) {
290: barray = (MatScalar*) v + j*bs2;
291: } else { /* Here a copy is required */
292: if (roworiented) {
293: value = v + i*(stepval+bs)*bs + j*bs;
294: } else {
295: value = v + j*(stepval+bs)*bs + i*bs;
296: }
297: for (ii=0; ii<bs; ii++,value+=stepval) {
298: for (jj=0; jj<bs; jj++) {
299: *barray++ = *value++;
300: }
301: }
302: barray -=bs2;
303: }
304:
305: if (in[j] >= cstart && in[j] < cend){
306: col = in[j] - cstart;
307: MatSetValuesBlocked_SeqSBAIJ(baij->A,1,&row,1,&col,barray,addv);
308: }
309: else if (in[j] < 0) continue;
310: #if defined(PETSC_USE_DEBUG)
311: else if (in[j] >= baij->Nbs) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);}
312: #endif
313: else {
314: if (mat->was_assembled) {
315: if (!baij->colmap) {
316: CreateColmap_MPIBAIJ_Private(mat);
317: }
319: #if defined(PETSC_USE_DEBUG)
320: #if defined (PETSC_USE_CTABLE)
321: { PetscInt data;
322: PetscTableFind(baij->colmap,in[j]+1,&data);
323: if ((data - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
324: }
325: #else
326: if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
327: #endif
328: #endif
329: #if defined (PETSC_USE_CTABLE)
330: PetscTableFind(baij->colmap,in[j]+1,&col);
331: col = (col - 1)/bs;
332: #else
333: col = (baij->colmap[in[j]] - 1)/bs;
334: #endif
335:           if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
336: DisAssemble_MPISBAIJ(mat);
337: col = in[j];
338: }
339: }
340: else col = in[j];
341: MatSetValuesBlocked_SeqBAIJ(baij->B,1,&row,1,&col,barray,addv);
342: }
343: }
344: } else {
345: if (!baij->donotstash) {
346: if (roworiented) {
347: MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
348: } else {
349: MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
350: }
351: }
352: }
353: }
354: return(0);
355: }
359: PetscErrorCode MatGetValues_MPISBAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
360: {
361: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
363: PetscInt bs=mat->rmap->bs,i,j,bsrstart = mat->rmap->rstart,bsrend = mat->rmap->rend;
364: PetscInt bscstart = mat->cmap->rstart,bscend = mat->cmap->rend,row,col,data;
367: for (i=0; i<m; i++) {
368: if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]); */
369: if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
370: if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
371: row = idxm[i] - bsrstart;
372: for (j=0; j<n; j++) {
373: if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative column %D",idxn[j]); */
374: if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
375: if (idxn[j] >= bscstart && idxn[j] < bscend){
376: col = idxn[j] - bscstart;
377: MatGetValues_SeqSBAIJ(baij->A,1,&row,1,&col,v+i*n+j);
378: } else {
379: if (!baij->colmap) {
380: CreateColmap_MPIBAIJ_Private(mat);
381: }
382: #if defined (PETSC_USE_CTABLE)
383: PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);
384: data --;
385: #else
386: data = baij->colmap[idxn[j]/bs]-1;
387: #endif
388: if((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
389: else {
390: col = data + idxn[j]%bs;
391: MatGetValues_SeqBAIJ(baij->B,1,&row,1,&col,v+i*n+j);
392: }
393: }
394: }
395: } else {
396: SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
397: }
398: }
399: return(0);
400: }
404: PetscErrorCode MatNorm_MPISBAIJ(Mat mat,NormType type,PetscReal *norm)
405: {
406: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
408: PetscReal sum[2],*lnorm2;
411: if (baij->size == 1) {
412: MatNorm(baij->A,type,norm);
413: } else {
414: if (type == NORM_FROBENIUS) {
415: PetscMalloc(2*sizeof(PetscReal),&lnorm2);
416: MatNorm(baij->A,type,lnorm2);
417:       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2++;            /* square of norm(A) */
418: MatNorm(baij->B,type,lnorm2);
419:       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2--;             /* square of norm(B) */
420: MPI_Allreduce(lnorm2,&sum,2,MPIU_REAL,MPI_SUM,((PetscObject)mat)->comm);
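      /* B holds only one copy of each off-diagonal-block entry of the
         symmetric matrix, so its squared norm is counted twice */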
421: *norm = sqrt(sum[0] + 2*sum[1]);
422: PetscFree(lnorm2);
423: } else if (type == NORM_INFINITY || type == NORM_1) { /* max row/column sum */
424: Mat_SeqSBAIJ *amat=(Mat_SeqSBAIJ*)baij->A->data;
425: Mat_SeqBAIJ *bmat=(Mat_SeqBAIJ*)baij->B->data;
426: PetscReal *rsum,*rsum2,vabs;
427: PetscInt *jj,*garray=baij->garray,rstart=baij->rstartbs,nz;
428: PetscInt brow,bcol,col,bs=baij->A->rmap->bs,row,grow,gcol,mbs=amat->mbs;
429: MatScalar *v;
431: PetscMalloc((2*mat->cmap->N+1)*sizeof(PetscReal),&rsum);
432: rsum2 = rsum + mat->cmap->N;
433: PetscMemzero(rsum,mat->cmap->N*sizeof(PetscReal));
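      /* For a symmetric matrix the infinity-norm equals the 1-norm (max
         column sum); each stored entry contributes to its own column sum
         and, for off-diagonal locations, to the transposed row sum as well */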
434: /* Amat */
435: v = amat->a; jj = amat->j;
436: for (brow=0; brow<mbs; brow++) {
437: grow = bs*(rstart + brow);
438: nz = amat->i[brow+1] - amat->i[brow];
439: for (bcol=0; bcol<nz; bcol++){
440: gcol = bs*(rstart + *jj); jj++;
441: for (col=0; col<bs; col++){
442: for (row=0; row<bs; row++){
443: vabs = PetscAbsScalar(*v); v++;
444: rsum[gcol+col] += vabs;
445: /* non-diagonal block */
446: if (bcol > 0 && vabs > 0.0) rsum[grow+row] += vabs;
447: }
448: }
449: }
450: }
451: /* Bmat */
452: v = bmat->a; jj = bmat->j;
453: for (brow=0; brow<mbs; brow++) {
454: grow = bs*(rstart + brow);
455: nz = bmat->i[brow+1] - bmat->i[brow];
456: for (bcol=0; bcol<nz; bcol++){
457: gcol = bs*garray[*jj]; jj++;
458: for (col=0; col<bs; col++){
459: for (row=0; row<bs; row++){
460: vabs = PetscAbsScalar(*v); v++;
461: rsum[gcol+col] += vabs;
462: rsum[grow+row] += vabs;
463: }
464: }
465: }
466: }
467: MPI_Allreduce(rsum,rsum2,mat->cmap->N,MPIU_REAL,MPI_SUM,((PetscObject)mat)->comm);
468: *norm = 0.0;
469: for (col=0; col<mat->cmap->N; col++) {
470: if (rsum2[col] > *norm) *norm = rsum2[col];
471: }
472: PetscFree(rsum);
473: } else {
474: SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
475: }
476: }
477: return(0);
478: }
482: PetscErrorCode MatAssemblyBegin_MPISBAIJ(Mat mat,MatAssemblyType mode)
483: {
484: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
486: PetscInt nstash,reallocs;
487: InsertMode addv;
490: if (baij->donotstash) {
491: return(0);
492: }
494: /* make sure all processors are either in INSERTMODE or ADDMODE */
495: MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,((PetscObject)mat)->comm);
496: if (addv == (ADD_VALUES|INSERT_VALUES)) {
497:     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted, others added");
498: }
499: mat->insertmode = addv; /* in case this processor had no cache */
501: MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
502: MatStashScatterBegin_Private(mat,&mat->bstash,baij->rangebs);
503: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
504: PetscInfo2(mat,"Stash has %D entries,uses %D mallocs.\n",nstash,reallocs);
505:   MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);
506: PetscInfo2(mat,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
507: return(0);
508: }
512: PetscErrorCode MatAssemblyEnd_MPISBAIJ(Mat mat,MatAssemblyType mode)
513: {
514: Mat_MPISBAIJ *baij=(Mat_MPISBAIJ*)mat->data;
515: Mat_SeqSBAIJ *a=(Mat_SeqSBAIJ*)baij->A->data;
517: PetscInt i,j,rstart,ncols,flg,bs2=baij->bs2;
518: PetscInt *row,*col;
519: PetscTruth other_disassembled;
520: PetscMPIInt n;
521: PetscTruth r1,r2,r3;
522: MatScalar *val;
523: InsertMode addv = mat->insertmode;
525: /* do not use 'b=(Mat_SeqBAIJ*)baij->B->data' as B can be reset in disassembly */
528: if (!baij->donotstash) {
529: while (1) {
530: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
531: if (!flg) break;
533: for (i=0; i<n;) {
534: /* Now identify the consecutive vals belonging to the same row */
535: for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
536: if (j < n) ncols = j-i;
537: else ncols = n-i;
538: /* Now assemble all these values with a single function call */
539: MatSetValues_MPISBAIJ(mat,1,row+i,ncols,col+i,val+i,addv);
540: i = j;
541: }
542: }
543: MatStashScatterEnd_Private(&mat->stash);
544: /* Now process the block-stash. Since the values are stashed column-oriented,
545: set the roworiented flag to column oriented, and after MatSetValues()
546: restore the original flags */
547: r1 = baij->roworiented;
548: r2 = a->roworiented;
549: r3 = ((Mat_SeqBAIJ*)baij->B->data)->roworiented;
550: baij->roworiented = PETSC_FALSE;
551: a->roworiented = PETSC_FALSE;
552:     ((Mat_SeqBAIJ*)baij->B->data)->roworiented = PETSC_FALSE; /* b->roworiented */
553: while (1) {
554: MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);
555: if (!flg) break;
556:
557: for (i=0; i<n;) {
558: /* Now identify the consecutive vals belonging to the same row */
559: for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
560: if (j < n) ncols = j-i;
561: else ncols = n-i;
562: MatSetValuesBlocked_MPISBAIJ(mat,1,row+i,ncols,col+i,val+i*bs2,addv);
563: i = j;
564: }
565: }
566: MatStashScatterEnd_Private(&mat->bstash);
567: baij->roworiented = r1;
568: a->roworiented = r2;
569:     ((Mat_SeqBAIJ*)baij->B->data)->roworiented = r3; /* b->roworiented */
570: }
572: MatAssemblyBegin(baij->A,mode);
573: MatAssemblyEnd(baij->A,mode);
575: /* determine if any processor has disassembled, if so we must
576:      also disassemble ourselves, in order that we may reassemble. */
577: /*
578: if nonzero structure of submatrix B cannot change then we know that
579: no processor disassembled thus we can skip this stuff
580: */
581: if (!((Mat_SeqBAIJ*)baij->B->data)->nonew) {
582: MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,((PetscObject)mat)->comm);
583: if (mat->was_assembled && !other_disassembled) {
584: DisAssemble_MPISBAIJ(mat);
585: }
586: }
588: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
589: MatSetUpMultiply_MPISBAIJ(mat); /* setup Mvctx and sMvctx */
590: }
591: ((Mat_SeqBAIJ*)baij->B->data)->compressedrow.use = PETSC_TRUE; /* b->compressedrow.use */
592: MatAssemblyBegin(baij->B,mode);
593: MatAssemblyEnd(baij->B,mode);
594:
595: PetscFree(baij->rowvalues);
596: baij->rowvalues = 0;
598: return(0);
599: }
604: static PetscErrorCode MatView_MPISBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
605: {
606: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
607: PetscErrorCode ierr;
608: PetscInt bs = mat->rmap->bs;
609: PetscMPIInt size = baij->size,rank = baij->rank;
610: PetscTruth iascii,isdraw;
611: PetscViewer sviewer;
612: PetscViewerFormat format;
615: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
616: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
617: if (iascii) {
618: PetscViewerGetFormat(viewer,&format);
619: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
620: MatInfo info;
621: MPI_Comm_rank(((PetscObject)mat)->comm,&rank);
622: MatGetInfo(mat,MAT_LOCAL,&info);
623: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D bs %D mem %D\n",
624: rank,mat->rmap->N,(PetscInt)info.nz_used*bs,(PetscInt)info.nz_allocated*bs,
625: mat->rmap->bs,(PetscInt)info.memory);
626: MatGetInfo(baij->A,MAT_LOCAL,&info);
627: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used*bs);
628: MatGetInfo(baij->B,MAT_LOCAL,&info);
629: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used*bs);
630: PetscViewerFlush(viewer);
631: PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
632: VecScatterView(baij->Mvctx,viewer);
633: return(0);
634: } else if (format == PETSC_VIEWER_ASCII_INFO) {
635: PetscViewerASCIIPrintf(viewer," block size is %D\n",bs);
636: return(0);
637: } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
638: return(0);
639: }
640: }
642: if (isdraw) {
643: PetscDraw draw;
644: PetscTruth isnull;
645: PetscViewerDrawGetDraw(viewer,0,&draw);
646: PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
647: }
649: if (size == 1) {
650: PetscObjectSetName((PetscObject)baij->A,((PetscObject)mat)->name);
651: MatView(baij->A,viewer);
652: } else {
653: /* assemble the entire matrix onto first processor. */
654: Mat A;
655: Mat_SeqSBAIJ *Aloc;
656: Mat_SeqBAIJ *Bloc;
657: PetscInt M = mat->rmap->N,N = mat->cmap->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
658: MatScalar *a;
660: /* Should this be the same type as mat? */
661: MatCreate(((PetscObject)mat)->comm,&A);
662: if (!rank) {
663: MatSetSizes(A,M,N,M,N);
664: } else {
665: MatSetSizes(A,0,0,M,N);
666: }
667: MatSetType(A,MATMPISBAIJ);
668: MatMPISBAIJSetPreallocation(A,mat->rmap->bs,0,PETSC_NULL,0,PETSC_NULL);
669: PetscLogObjectParent(mat,A);
671: /* copy over the A part */
672: Aloc = (Mat_SeqSBAIJ*)baij->A->data;
673: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
674: PetscMalloc(bs*sizeof(PetscInt),&rvals);
676: for (i=0; i<mbs; i++) {
677: rvals[0] = bs*(baij->rstartbs + i);
678: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
679: for (j=ai[i]; j<ai[i+1]; j++) {
680: col = (baij->cstartbs+aj[j])*bs;
681: for (k=0; k<bs; k++) {
682: MatSetValues_MPISBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);
683: col++; a += bs;
684: }
685: }
686: }
687: /* copy over the B part */
688: Bloc = (Mat_SeqBAIJ*)baij->B->data;
689: ai = Bloc->i; aj = Bloc->j; a = Bloc->a;
690: for (i=0; i<mbs; i++) {
691:
692: rvals[0] = bs*(baij->rstartbs + i);
693: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
694: for (j=ai[i]; j<ai[i+1]; j++) {
695: col = baij->garray[aj[j]]*bs;
696: for (k=0; k<bs; k++) {
697:           MatSetValues_MPISBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);
698: col++; a += bs;
699: }
700: }
701: }
702: PetscFree(rvals);
703: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
704: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
705: /*
706: Everyone has to call to draw the matrix since the graphics waits are
707: synchronized across all processors that share the PetscDraw object
708: */
709: PetscViewerGetSingleton(viewer,&sviewer);
710: if (!rank) {
711: PetscObjectSetName((PetscObject)((Mat_MPISBAIJ*)(A->data))->A,((PetscObject)mat)->name);
712: MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);
713: }
714: PetscViewerRestoreSingleton(viewer,&sviewer);
715: MatDestroy(A);
716: }
717: return(0);
718: }
722: PetscErrorCode MatView_MPISBAIJ(Mat mat,PetscViewer viewer)
723: {
725: PetscTruth iascii,isdraw,issocket,isbinary;
728: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
729: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
730: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);
731: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
732: if (iascii || isdraw || issocket || isbinary) {
733: MatView_MPISBAIJ_ASCIIorDraworSocket(mat,viewer);
734: } else {
735: SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported by MPISBAIJ matrices",((PetscObject)viewer)->type_name);
736: }
737: return(0);
738: }
742: PetscErrorCode MatDestroy_MPISBAIJ(Mat mat)
743: {
744: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
748: #if defined(PETSC_USE_LOG)
749: PetscLogObjectState((PetscObject)mat,"Rows=%D,Cols=%D",mat->rmap->N,mat->cmap->N);
750: #endif
751: MatStashDestroy_Private(&mat->stash);
752: MatStashDestroy_Private(&mat->bstash);
753: MatDestroy(baij->A);
754: MatDestroy(baij->B);
755: #if defined (PETSC_USE_CTABLE)
756: if (baij->colmap) {PetscTableDestroy(baij->colmap);}
757: #else
758: PetscFree(baij->colmap);
759: #endif
760: PetscFree(baij->garray);
761: if (baij->lvec) {VecDestroy(baij->lvec);}
762: if (baij->Mvctx) {VecScatterDestroy(baij->Mvctx);}
763: if (baij->slvec0) {
764: VecDestroy(baij->slvec0);
765: VecDestroy(baij->slvec0b);
766: }
767: if (baij->slvec1) {
768: VecDestroy(baij->slvec1);
769: VecDestroy(baij->slvec1a);
770: VecDestroy(baij->slvec1b);
771: }
772: if (baij->sMvctx) {VecScatterDestroy(baij->sMvctx);}
773: PetscFree(baij->rowvalues);
774: PetscFree(baij->barray);
775: PetscFree(baij->hd);
776: #if defined(PETSC_USE_MAT_SINGLE)
777: PetscFree(baij->setvaluescopy);
778: #endif
779: PetscFree(baij->in_loc);
780: PetscFree(baij->v_loc);
781: PetscFree(baij->rangebs);
782: PetscFree(baij);
784: PetscObjectChangeTypeName((PetscObject)mat,0);
785: PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C","",PETSC_NULL);
786: PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C","",PETSC_NULL);
787: PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);
788: PetscObjectComposeFunction((PetscObject)mat,"MatMPISBAIJSetPreallocation_C","",PETSC_NULL);
789: return(0);
790: }
794: PetscErrorCode MatMult_MPISBAIJ(Mat A,Vec xx,Vec yy)
795: {
796: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
798: PetscInt nt,mbs=a->mbs,bs=A->rmap->bs;
799: PetscScalar *x,*from,zero=0.0;
800:
802: VecGetLocalSize(xx,&nt);
803: if (nt != A->cmap->n) {
804: SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
805: }
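  /*
     One-scatter scheme: slvec0 = [x, B^T*x] is scattered with ADD_VALUES
     into slvec1 = [A*x, 0].  The scatter simultaneously adds the
     subdiagonal contributions B^T*x into the owning processes and gathers
     the needed ghost values of x into slvec1b, so y = slvec1a + B*slvec1b
     completes the product with a single round of communication.
  */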
807: /* diagonal part */
808: (*a->A->ops->mult)(a->A,xx,a->slvec1a);
809: VecSet(a->slvec1b,zero);
811: /* subdiagonal part */
812: (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);
814: /* copy x into the vec slvec0 */
815: VecGetArray(a->slvec0,&from);
816: VecGetArray(xx,&x);
818: PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
819: VecRestoreArray(a->slvec0,&from);
820: VecRestoreArray(xx,&x);
821:
822: VecScatterBegin(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
823: VecScatterEnd(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
824:   /* superdiagonal part */
825: (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,yy);
826: return(0);
827: }
831: PetscErrorCode MatMult_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy)
832: {
833: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
835: PetscInt nt;
838: VecGetLocalSize(xx,&nt);
839: if (nt != A->cmap->n) {
840: SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
841: }
842: VecGetLocalSize(yy,&nt);
843: if (nt != A->rmap->N) {
844:     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
845: }
847: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
848: /* do diagonal part */
849: (*a->A->ops->mult)(a->A,xx,yy);
850:   /* do superdiagonal part */
851: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
852: (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
853: /* do subdiagonal part */
854: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
855: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
856: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
858: return(0);
859: }
863: PetscErrorCode MatMultAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
864: {
865: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
867: PetscInt mbs=a->mbs,bs=A->rmap->bs;
868: PetscScalar *x,*from,zero=0.0;
869:
871: /*
872: PetscSynchronizedPrintf(((PetscObject)A)->comm," MatMultAdd is called ...\n");
873: PetscSynchronizedFlush(((PetscObject)A)->comm);
874: */
875: /* diagonal part */
876: (*a->A->ops->multadd)(a->A,xx,yy,a->slvec1a);
877: VecSet(a->slvec1b,zero);
879: /* subdiagonal part */
880: (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);
882: /* copy x into the vec slvec0 */
883: VecGetArray(a->slvec0,&from);
884: VecGetArray(xx,&x);
885: PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
886: VecRestoreArray(a->slvec0,&from);
887:
888: VecScatterBegin(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
889: VecRestoreArray(xx,&x);
890: VecScatterEnd(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
891:
892:   /* superdiagonal part */
893: (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,zz);
894:
895: return(0);
896: }
900: PetscErrorCode MatMultAdd_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy,Vec zz)
901: {
902: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
906: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
907: /* do diagonal part */
908: (*a->A->ops->multadd)(a->A,xx,yy,zz);
910:   /* do superdiagonal part */
910: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
911: (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
913: /* do subdiagonal part */
914: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
915: VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
916: VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
918: return(0);
919: }
921: /*
922: This only works correctly for square matrices where the subblock A->A is the
923: diagonal block
924: */
927: PetscErrorCode MatGetDiagonal_MPISBAIJ(Mat A,Vec v)
928: {
929: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
933: /* if (a->rmap->N != a->cmap->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); */
934: MatGetDiagonal(a->A,v);
935: return(0);
936: }
940: PetscErrorCode MatScale_MPISBAIJ(Mat A,PetscScalar aa)
941: {
942: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
946: MatScale(a->A,aa);
947: MatScale(a->B,aa);
948: return(0);
949: }
953: PetscErrorCode MatGetRow_MPISBAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
954: {
955: Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
956: PetscScalar *vworkA,*vworkB,**pvA,**pvB,*v_p;
958: PetscInt bs = matin->rmap->bs,bs2 = mat->bs2,i,*cworkA,*cworkB,**pcA,**pcB;
959: PetscInt nztot,nzA,nzB,lrow,brstart = matin->rmap->rstart,brend = matin->rmap->rend;
960: PetscInt *cmap,*idx_p,cstart = mat->rstartbs;
963: if (mat->getrowactive == PETSC_TRUE) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
964: mat->getrowactive = PETSC_TRUE;
966: if (!mat->rowvalues && (idx || v)) {
967: /*
968: allocate enough space to hold information from the longest row.
969: */
970: Mat_SeqSBAIJ *Aa = (Mat_SeqSBAIJ*)mat->A->data;
971: Mat_SeqBAIJ *Ba = (Mat_SeqBAIJ*)mat->B->data;
972: PetscInt max = 1,mbs = mat->mbs,tmp;
973: for (i=0; i<mbs; i++) {
974: tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i]; /* row length */
975: if (max < tmp) { max = tmp; }
976: }
977: PetscMalloc(max*bs2*(sizeof(PetscInt)+sizeof(PetscScalar)),&mat->rowvalues);
978: mat->rowindices = (PetscInt*)(mat->rowvalues + max*bs2);
979: }
980:
981:   if (row < brstart || row >= brend) SETERRQ(PETSC_ERR_SUP,"Only local rows");
982: lrow = row - brstart; /* local row index */
984: pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
985: if (!v) {pvA = 0; pvB = 0;}
986: if (!idx) {pcA = 0; if (!v) pcB = 0;}
987: (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
988: (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
989: nztot = nzA + nzB;
991: cmap = mat->garray;
992: if (v || idx) {
993: if (nztot) {
994: /* Sort by increasing column numbers, assuming A and B already sorted */
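      /* Three-segment merge: B entries whose global block column lies
         before the diagonal block come first, then all A entries, then the
         remaining B entries; imark records the split point in the B arrays */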
995: PetscInt imark = -1;
996: if (v) {
997: *v = v_p = mat->rowvalues;
998: for (i=0; i<nzB; i++) {
999: if (cmap[cworkB[i]/bs] < cstart) v_p[i] = vworkB[i];
1000: else break;
1001: }
1002: imark = i;
1003: for (i=0; i<nzA; i++) v_p[imark+i] = vworkA[i];
1004: for (i=imark; i<nzB; i++) v_p[nzA+i] = vworkB[i];
1005: }
1006: if (idx) {
1007: *idx = idx_p = mat->rowindices;
1008: if (imark > -1) {
1009: for (i=0; i<imark; i++) {
1010: idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1011: }
1012: } else {
1013: for (i=0; i<nzB; i++) {
1014: if (cmap[cworkB[i]/bs] < cstart)
1015: idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1016: else break;
1017: }
1018: imark = i;
1019: }
1020: for (i=0; i<nzA; i++) idx_p[imark+i] = cstart*bs + cworkA[i];
1021: for (i=imark; i<nzB; i++) idx_p[nzA+i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1022: }
1023: } else {
1024: if (idx) *idx = 0;
1025: if (v) *v = 0;
1026: }
1027: }
1028: *nz = nztot;
1029: (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1030: (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1031: return(0);
1032: }
1036: PetscErrorCode MatRestoreRow_MPISBAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1037: {
1038: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1041: if (!baij->getrowactive) {
1042: SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1043: }
1044: baij->getrowactive = PETSC_FALSE;
1045: return(0);
1046: }
1050: PetscErrorCode MatGetRowUpperTriangular_MPISBAIJ(Mat A)
1051: {
1052: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1053: Mat_SeqSBAIJ *aA = (Mat_SeqSBAIJ*)a->A->data;
1056: aA->getrow_utriangular = PETSC_TRUE;
1057: return(0);
1058: }
1061: PetscErrorCode MatRestoreRowUpperTriangular_MPISBAIJ(Mat A)
1062: {
1063: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1064: Mat_SeqSBAIJ *aA = (Mat_SeqSBAIJ*)a->A->data;
1067: aA->getrow_utriangular = PETSC_FALSE;
1068: return(0);
1069: }
1073: PetscErrorCode MatRealPart_MPISBAIJ(Mat A)
1074: {
1075: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1079: MatRealPart(a->A);
1080: MatRealPart(a->B);
1081: return(0);
1082: }
1086: PetscErrorCode MatImaginaryPart_MPISBAIJ(Mat A)
1087: {
1088: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1092: MatImaginaryPart(a->A);
1093: MatImaginaryPart(a->B);
1094: return(0);
1095: }
1099: PetscErrorCode MatZeroEntries_MPISBAIJ(Mat A)
1100: {
1101: Mat_MPISBAIJ *l = (Mat_MPISBAIJ*)A->data;
1105: MatZeroEntries(l->A);
1106: MatZeroEntries(l->B);
1107: return(0);
1108: }
1112: PetscErrorCode MatGetInfo_MPISBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1113: {
1114: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)matin->data;
1115: Mat A = a->A,B = a->B;
1117: PetscReal isend[5],irecv[5];
1120: info->block_size = (PetscReal)matin->rmap->bs;
1121: MatGetInfo(A,MAT_LOCAL,info);
1122: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1123: isend[3] = info->memory; isend[4] = info->mallocs;
1124: MatGetInfo(B,MAT_LOCAL,info);
1125: isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1126: isend[3] += info->memory; isend[4] += info->mallocs;
1127: if (flag == MAT_LOCAL) {
1128: info->nz_used = isend[0];
1129: info->nz_allocated = isend[1];
1130: info->nz_unneeded = isend[2];
1131: info->memory = isend[3];
1132: info->mallocs = isend[4];
1133: } else if (flag == MAT_GLOBAL_MAX) {
1134: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,((PetscObject)matin)->comm);
1135: info->nz_used = irecv[0];
1136: info->nz_allocated = irecv[1];
1137: info->nz_unneeded = irecv[2];
1138: info->memory = irecv[3];
1139: info->mallocs = irecv[4];
1140: } else if (flag == MAT_GLOBAL_SUM) {
1141: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,((PetscObject)matin)->comm);
1142: info->nz_used = irecv[0];
1143: info->nz_allocated = irecv[1];
1144: info->nz_unneeded = irecv[2];
1145: info->memory = irecv[3];
1146: info->mallocs = irecv[4];
1147: } else {
1148: SETERRQ1(PETSC_ERR_ARG_WRONG,"Unknown MatInfoType argument %d",(int)flag);
1149: }
1150: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
1151: info->fill_ratio_needed = 0;
1152: info->factor_mallocs = 0;
1153: return(0);
1154: }
1158: PetscErrorCode MatSetOption_MPISBAIJ(Mat A,MatOption op,PetscTruth flg)
1159: {
1160: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1161: Mat_SeqSBAIJ *aA = (Mat_SeqSBAIJ*)a->A->data;
1165: switch (op) {
1166: case MAT_NEW_NONZERO_LOCATIONS:
1167: case MAT_NEW_NONZERO_ALLOCATION_ERR:
1168: case MAT_UNUSED_NONZERO_LOCATION_ERR:
1169: case MAT_KEEP_ZEROED_ROWS:
1170: case MAT_NEW_NONZERO_LOCATION_ERR:
1171: MatSetOption(a->A,op,flg);
1172: MatSetOption(a->B,op,flg);
1173: break;
1174: case MAT_ROW_ORIENTED:
1175: a->roworiented = flg;
1176: MatSetOption(a->A,op,flg);
1177: MatSetOption(a->B,op,flg);
1178: break;
1179: case MAT_NEW_DIAGONALS:
1180: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1181: break;
1182: case MAT_IGNORE_OFF_PROC_ENTRIES:
1183: a->donotstash = flg;
1184: break;
1185: case MAT_USE_HASH_TABLE:
1186: a->ht_flag = flg;
1187: break;
1188: case MAT_HERMITIAN:
1189: if (flg) SETERRQ(PETSC_ERR_SUP,"Matrix must be symmetric");
1190: case MAT_SYMMETRIC:
1191: case MAT_STRUCTURALLY_SYMMETRIC:
1192: case MAT_SYMMETRY_ETERNAL:
1193: if (!flg) SETERRQ(PETSC_ERR_SUP,"Matrix must be symmetric");
1194: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1195: break;
1196: case MAT_IGNORE_LOWER_TRIANGULAR:
1197: aA->ignore_ltriangular = flg;
1198: break;
1199: case MAT_ERROR_LOWER_TRIANGULAR:
1200: aA->ignore_ltriangular = flg;
1201: break;
1202: case MAT_GETROW_UPPERTRIANGULAR:
1203: aA->getrow_utriangular = flg;
1204: break;
1205: default:
1206: SETERRQ1(PETSC_ERR_SUP,"unknown option %d",op);
1207: }
1208: return(0);
1209: }
1213: PetscErrorCode MatTranspose_MPISBAIJ(Mat A,MatReuse reuse,Mat *B)
1214: {
1217:   if (reuse == MAT_INITIAL_MATRIX || *B != A) {
1218: MatDuplicate(A,MAT_COPY_VALUES,B);
1219: }
1220: return(0);
1221: }
1225: PetscErrorCode MatDiagonalScale_MPISBAIJ(Mat mat,Vec ll,Vec rr)
1226: {
1227: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1228: Mat a=baij->A, b=baij->B;
1230: PetscInt nv,m,n;
1231: PetscTruth flg;
1234: if (ll != rr){
1235: VecEqual(ll,rr,&flg);
1236: if (!flg)
1237: SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"For symmetric format, left and right scaling vectors must be same\n");
1238: }
1239: if (!ll) return(0);
1241: MatGetLocalSize(mat,&m,&n);
1242: if (m != n) SETERRQ2(PETSC_ERR_ARG_SIZ,"For symmetric format, local size %d %d must be same",m,n);
1243:
1244: VecGetLocalSize(rr,&nv);
1245: if (nv!=n) SETERRQ(PETSC_ERR_ARG_SIZ,"Left and right vector non-conforming local size");
1247: VecScatterBegin(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1248:
1249: /* left diagonalscale the off-diagonal part */
1250: (*b->ops->diagonalscale)(b,ll,PETSC_NULL);
1251:
1252: /* scale the diagonal part */
1253: (*a->ops->diagonalscale)(a,ll,rr);
1255: /* right diagonalscale the off-diagonal part */
1256: VecScatterEnd(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1257: (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);
1258: return(0);
1259: }
1263: PetscErrorCode MatSetUnfactored_MPISBAIJ(Mat A)
1264: {
1265: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1269: MatSetUnfactored(a->A);
1270: return(0);
1271: }
1273: static PetscErrorCode MatDuplicate_MPISBAIJ(Mat,MatDuplicateOption,Mat *);
1277: PetscErrorCode MatEqual_MPISBAIJ(Mat A,Mat B,PetscTruth *flag)
1278: {
1279: Mat_MPISBAIJ *matB = (Mat_MPISBAIJ*)B->data,*matA = (Mat_MPISBAIJ*)A->data;
1280: Mat a,b,c,d;
1281: PetscTruth flg;
1285: a = matA->A; b = matA->B;
1286: c = matB->A; d = matB->B;
1288: MatEqual(a,c,&flg);
1289: if (flg) {
1290: MatEqual(b,d,&flg);
1291: }
1292: MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,((PetscObject)A)->comm);
1293: return(0);
1294: }
1298: PetscErrorCode MatCopy_MPISBAIJ(Mat A,Mat B,MatStructure str)
1299: {
1301: Mat_MPISBAIJ *a = (Mat_MPISBAIJ *)A->data;
1302: Mat_MPISBAIJ *b = (Mat_MPISBAIJ *)B->data;
1305: /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
1306: if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
1307: MatGetRowUpperTriangular(A);
1308: MatCopy_Basic(A,B,str);
1309: MatRestoreRowUpperTriangular(A);
1310: } else {
1311: MatCopy(a->A,b->A,str);
1312: MatCopy(a->B,b->B,str);
1313: }
1314: return(0);
1315: }
1319: PetscErrorCode MatSetUpPreallocation_MPISBAIJ(Mat A)
1320: {
1324: MatMPISBAIJSetPreallocation(A,-PetscMax(A->rmap->bs,1),PETSC_DEFAULT,0,PETSC_DEFAULT,0);
1325: return(0);
1326: }
1328: #include "petscblaslapack.h"
1331: PetscErrorCode MatAXPY_MPISBAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
1332: {
1334: Mat_MPISBAIJ *xx=(Mat_MPISBAIJ *)X->data,*yy=(Mat_MPISBAIJ *)Y->data;
1335: PetscBLASInt bnz,one=1;
1336: Mat_SeqSBAIJ *xa,*ya;
1337: Mat_SeqBAIJ *xb,*yb;
1340: if (str == SAME_NONZERO_PATTERN) {
1341: PetscScalar alpha = a;
1342: xa = (Mat_SeqSBAIJ *)xx->A->data;
1343: ya = (Mat_SeqSBAIJ *)yy->A->data;
1344: bnz = PetscBLASIntCast(xa->nz);
1345: BLASaxpy_(&bnz,&alpha,xa->a,&one,ya->a,&one);
1346: xb = (Mat_SeqBAIJ *)xx->B->data;
1347: yb = (Mat_SeqBAIJ *)yy->B->data;
1348: bnz = PetscBLASIntCast(xb->nz);
1349: BLASaxpy_(&bnz,&alpha,xb->a,&one,yb->a,&one);
1350: } else {
1351: MatGetRowUpperTriangular(X);
1352: MatAXPY_Basic(Y,a,X,str);
1353: MatRestoreRowUpperTriangular(X);
1354: }
1355: return(0);
1356: }
1360: PetscErrorCode MatGetSubMatrices_MPISBAIJ(Mat A,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *B[])
1361: {
1363: PetscInt i;
1364: PetscTruth flg;
1367: for (i=0; i<n; i++) {
1368: ISEqual(irow[i],icol[i],&flg);
1369: if (!flg) {
1370: SETERRQ(PETSC_ERR_SUP,"Can only get symmetric submatrix for MPISBAIJ matrices");
1371: }
1372: }
1373: MatGetSubMatrices_MPIBAIJ(A,n,irow,icol,scall,B);
1374: return(0);
1375: }
1376:
1378: /* -------------------------------------------------------------------*/
1379: static struct _MatOps MatOps_Values = {
1380: MatSetValues_MPISBAIJ,
1381: MatGetRow_MPISBAIJ,
1382: MatRestoreRow_MPISBAIJ,
1383: MatMult_MPISBAIJ,
1384: /* 4*/ MatMultAdd_MPISBAIJ,
1385: MatMult_MPISBAIJ, /* transpose versions are same as non-transpose */
1386: MatMultAdd_MPISBAIJ,
1387: 0,
1388: 0,
1389: 0,
1390: /*10*/ 0,
1391: 0,
1392: 0,
1393: MatRelax_MPISBAIJ,
1394: MatTranspose_MPISBAIJ,
1395: /*15*/ MatGetInfo_MPISBAIJ,
1396: MatEqual_MPISBAIJ,
1397: MatGetDiagonal_MPISBAIJ,
1398: MatDiagonalScale_MPISBAIJ,
1399: MatNorm_MPISBAIJ,
1400: /*20*/ MatAssemblyBegin_MPISBAIJ,
1401: MatAssemblyEnd_MPISBAIJ,
1402: 0,
1403: MatSetOption_MPISBAIJ,
1404: MatZeroEntries_MPISBAIJ,
1405: /*25*/ 0,
1406: 0,
1407: 0,
1408: 0,
1409: 0,
1410: /*30*/ MatSetUpPreallocation_MPISBAIJ,
1411: 0,
1412: 0,
1413: 0,
1414: 0,
1415: /*35*/ MatDuplicate_MPISBAIJ,
1416: 0,
1417: 0,
1418: 0,
1419: 0,
1420: /*40*/ MatAXPY_MPISBAIJ,
1421: MatGetSubMatrices_MPISBAIJ,
1422: MatIncreaseOverlap_MPISBAIJ,
1423: MatGetValues_MPISBAIJ,
1424: MatCopy_MPISBAIJ,
1425: /*45*/ 0,
1426: MatScale_MPISBAIJ,
1427: 0,
1428: 0,
1429: 0,
1430: /*50*/ 0,
1431: 0,
1432: 0,
1433: 0,
1434: 0,
1435: /*55*/ 0,
1436: 0,
1437: MatSetUnfactored_MPISBAIJ,
1438: 0,
1439: MatSetValuesBlocked_MPISBAIJ,
1440: /*60*/ 0,
1441: 0,
1442: 0,
1443: 0,
1444: 0,
1445: /*65*/ 0,
1446: 0,
1447: 0,
1448: 0,
1449: 0,
1450: /*70*/ MatGetRowMaxAbs_MPISBAIJ,
1451: 0,
1452: 0,
1453: 0,
1454: 0,
1455: /*75*/ 0,
1456: 0,
1457: 0,
1458: 0,
1459: 0,
1460: /*80*/ 0,
1461: 0,
1462: 0,
1463: 0,
1464: MatLoad_MPISBAIJ,
1465: /*85*/ 0,
1466: 0,
1467: 0,
1468: 0,
1469: 0,
1470: /*90*/ 0,
1471: 0,
1472: 0,
1473: 0,
1474: 0,
1475: /*95*/ 0,
1476: 0,
1477: 0,
1478: 0,
1479: 0,
1480: /*100*/0,
1481: 0,
1482: 0,
1483: 0,
1484: 0,
1485: /*105*/0,
1486: MatRealPart_MPISBAIJ,
1487: MatImaginaryPart_MPISBAIJ,
1488: MatGetRowUpperTriangular_MPISBAIJ,
1489: MatRestoreRowUpperTriangular_MPISBAIJ
1490: };
1496: PetscErrorCode MatGetDiagonalBlock_MPISBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
1497: {
1499: *a = ((Mat_MPISBAIJ *)A->data)->A;
1500: *iscopy = PETSC_FALSE;
1501: return(0);
1502: }
1508: PetscErrorCode MatMPISBAIJSetPreallocation_MPISBAIJ(Mat B,PetscInt bs,PetscInt d_nz,PetscInt *d_nnz,PetscInt o_nz,PetscInt *o_nnz)
1509: {
1510: Mat_MPISBAIJ *b;
1512: PetscInt i,mbs,Mbs,newbs = PetscAbs(bs);
1515: if (bs < 0){
1516: PetscOptionsBegin(((PetscObject)B)->comm,((PetscObject)B)->prefix,"Options for MPISBAIJ matrix","Mat");
1517: PetscOptionsInt("-mat_block_size","Set the blocksize used to store the matrix","MatMPIBAIJSetPreallocation",newbs,&newbs,PETSC_NULL);
1518: PetscOptionsEnd();
1519: bs = PetscAbs(bs);
1520: }
1521: if ((d_nnz || o_nnz) && newbs != bs) {
1522: SETERRQ(PETSC_ERR_ARG_WRONG,"Cannot change blocksize from command line if setting d_nnz or o_nnz");
1523: }
1524: bs = newbs;
1526: if (d_nz == PETSC_DECIDE || d_nz == PETSC_DEFAULT) d_nz = 3;
1527: if (o_nz == PETSC_DECIDE || o_nz == PETSC_DEFAULT) o_nz = 1;
1528: if (d_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %D",d_nz);
1529: if (o_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %D",o_nz);
1531: B->rmap->bs = B->cmap->bs = bs;
1532: PetscMapSetUp(B->rmap);
1533: PetscMapSetUp(B->cmap);
1535: if (d_nnz) {
1536: for (i=0; i<B->rmap->n/bs; i++) {
1537: if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than -1: local row %D value %D",i,d_nnz[i]);
1538: }
1539: }
1540: if (o_nnz) {
1541: for (i=0; i<B->rmap->n/bs; i++) {
1542: if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than -1: local row %D value %D",i,o_nnz[i]);
1543: }
1544: }
1545: B->preallocated = PETSC_TRUE;
1547: b = (Mat_MPISBAIJ*)B->data;
1548: mbs = B->rmap->n/bs;
1549: Mbs = B->rmap->N/bs;
1550: if (mbs*bs != B->rmap->n) {
1551:     SETERRQ2(PETSC_ERR_ARG_SIZ,"Number of local rows %D must be divisible by blocksize %D",B->rmap->n,bs);
1552: }
1554: B->rmap->bs = bs;
1555: b->bs2 = bs*bs;
1556: b->mbs = mbs;
1557: b->nbs = mbs;
1558: b->Mbs = Mbs;
1559: b->Nbs = Mbs;
1561: for (i=0; i<=b->size; i++) {
1562: b->rangebs[i] = B->rmap->range[i]/bs;
1563: }
1564: b->rstartbs = B->rmap->rstart/bs;
1565: b->rendbs = B->rmap->rend/bs;
1566:
1567: b->cstartbs = B->cmap->rstart/bs;
1568: b->cendbs = B->cmap->rend/bs;
1569:
1570: MatCreate(PETSC_COMM_SELF,&b->A);
1571: MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
1572: MatSetType(b->A,MATSEQSBAIJ);
1573: MatSeqSBAIJSetPreallocation(b->A,bs,d_nz,d_nnz);
1574: PetscLogObjectParent(B,b->A);
1576: MatCreate(PETSC_COMM_SELF,&b->B);
1577: MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);
1578: MatSetType(b->B,MATSEQBAIJ);
1579: MatSeqBAIJSetPreallocation(b->B,bs,o_nz,o_nnz);
1580: PetscLogObjectParent(B,b->B);
1582: /* build cache for off array entries formed */
1583: MatStashCreate_Private(((PetscObject)B)->comm,bs,&B->bstash);
1585: return(0);
1586: }
1590: #if defined(PETSC_HAVE_MUMPS)
1592: #endif
1593: #if defined(PETSC_HAVE_SPOOLES)
1595: #endif
1596: #if defined(PETSC_HAVE_PASTIX)
1598: #endif
1601: /*MC
1602: MATMPISBAIJ - MATMPISBAIJ = "mpisbaij" - A matrix type to be used for distributed symmetric sparse block matrices,
1603: based on block compressed sparse row format. Only the upper triangular portion of the matrix is stored.
1605: Options Database Keys:
1606: . -mat_type mpisbaij - sets the matrix type to "mpisbaij" during a call to MatSetFromOptions()
1608: Level: beginner
1610: .seealso: MatCreateMPISBAIJ
1611: M*/
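/*
   Illustrative creation sequence (a sketch, not taken from this file):

      Mat A;
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,M);
      MatSetType(A,MATMPISBAIJ);
      MatMPISBAIJSetPreallocation(A,2,3,PETSC_NULL,1,PETSC_NULL);
      ... set entries with block row <= block column, then assemble ...
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/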
1616: PetscErrorCode MatCreate_MPISBAIJ(Mat B)
1617: {
1618: Mat_MPISBAIJ *b;
1620: PetscTruth flg;
1624: PetscNewLog(B,Mat_MPISBAIJ,&b);
1625: B->data = (void*)b;
1626: PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
1628: B->ops->destroy = MatDestroy_MPISBAIJ;
1629: B->ops->view = MatView_MPISBAIJ;
1630: B->mapping = 0;
1631: B->assembled = PETSC_FALSE;
1633: B->insertmode = NOT_SET_VALUES;
1634: MPI_Comm_rank(((PetscObject)B)->comm,&b->rank);
1635: MPI_Comm_size(((PetscObject)B)->comm,&b->size);
1637: /* build local table of row and column ownerships */
1638: PetscMalloc((b->size+2)*sizeof(PetscInt),&b->rangebs);
1640: /* build cache for off array entries formed */
1641: MatStashCreate_Private(((PetscObject)B)->comm,1,&B->stash);
1642: b->donotstash = PETSC_FALSE;
1643: b->colmap = PETSC_NULL;
1644: b->garray = PETSC_NULL;
1645: b->roworiented = PETSC_TRUE;
1647: /* stuff used in block assembly */
1648: b->barray = 0;
1650: /* stuff used for matrix vector multiply */
1651: b->lvec = 0;
1652: b->Mvctx = 0;
1653: b->slvec0 = 0;
1654: b->slvec0b = 0;
1655: b->slvec1 = 0;
1656: b->slvec1a = 0;
1657: b->slvec1b = 0;
1658: b->sMvctx = 0;
1660: /* stuff for MatGetRow() */
1661: b->rowindices = 0;
1662: b->rowvalues = 0;
1663: b->getrowactive = PETSC_FALSE;
1665: /* hash table stuff */
1666: b->ht = 0;
1667: b->hd = 0;
1668: b->ht_size = 0;
1669: b->ht_flag = PETSC_FALSE;
1670: b->ht_fact = 0;
1671: b->ht_total_ct = 0;
1672: b->ht_insert_ct = 0;
1674: b->in_loc = 0;
1675: b->v_loc = 0;
1676: b->n_loc = 0;
1677: PetscOptionsBegin(((PetscObject)B)->comm,PETSC_NULL,"Options for loading MPISBAIJ matrix 1","Mat");
1678: PetscOptionsTruth("-mat_use_hash_table","Use hash table to save memory in constructing matrix","MatSetOption",PETSC_FALSE,&flg,PETSC_NULL);
1679: if (flg) {
1680: PetscReal fact = 1.39;
1681: MatSetOption(B,MAT_USE_HASH_TABLE,PETSC_TRUE);
1682: PetscOptionsReal("-mat_use_hash_table","Use hash table factor","MatMPIBAIJSetHashTableFactor",fact,&fact,PETSC_NULL);
1683: if (fact <= 1.0) fact = 1.39;
1684: MatMPIBAIJSetHashTableFactor(B,fact);
1685: PetscInfo1(B,"Hash table Factor used %5.2f\n",fact);
1686: }
1687: PetscOptionsEnd();
1689: #if defined(PETSC_HAVE_PASTIX)
1690: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpisbaij_pastix_C",
1691: "MatGetFactor_mpisbaij_pastix",
1692: MatGetFactor_mpisbaij_pastix);
1693: #endif
1694: #if defined(PETSC_HAVE_MUMPS)
1695: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpisbaij_mumps_C",
1696: "MatGetFactor_mpisbaij_mumps",
1697: MatGetFactor_mpisbaij_mumps);
1698: #endif
1699: #if defined(PETSC_HAVE_SPOOLES)
1700: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpisbaij_spooles_C",
1701: "MatGetFactor_mpisbaij_spooles",
1702: MatGetFactor_mpisbaij_spooles);
1703: #endif
1704: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
1705: "MatStoreValues_MPISBAIJ",
1706: MatStoreValues_MPISBAIJ);
1707: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
1708: "MatRetrieveValues_MPISBAIJ",
1709: MatRetrieveValues_MPISBAIJ);
1710: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
1711: "MatGetDiagonalBlock_MPISBAIJ",
1712: MatGetDiagonalBlock_MPISBAIJ);
1713: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPISBAIJSetPreallocation_C",
1714: "MatMPISBAIJSetPreallocation_MPISBAIJ",
1715: MatMPISBAIJSetPreallocation_MPISBAIJ);
1716: B->symmetric = PETSC_TRUE;
1717: B->structurally_symmetric = PETSC_TRUE;
1718: B->symmetric_set = PETSC_TRUE;
1719: B->structurally_symmetric_set = PETSC_TRUE;
1720: PetscObjectChangeTypeName((PetscObject)B,MATMPISBAIJ);
1721: return(0);
1722: }
1725: /*MC
1726: MATSBAIJ - MATSBAIJ = "sbaij" - A matrix type to be used for symmetric block sparse matrices.
1728: This matrix type is identical to MATSEQSBAIJ when constructed with a single process communicator,
1729: and MATMPISBAIJ otherwise.
1731: Options Database Keys:
1732: . -mat_type sbaij - sets the matrix type to "sbaij" during a call to MatSetFromOptions()
1734: Level: beginner
1736: .seealso: MatCreateMPISBAIJ,MATSEQSBAIJ,MATMPISBAIJ
1737: M*/
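/*
   Illustrative run-time selection (a sketch, not taken from this file):
   create the matrix generically and pick the type on the command line,

      MatCreate(comm,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,M);
      MatSetFromOptions(A);

   then run with -mat_type sbaij to obtain MATSEQSBAIJ on one process and
   MATMPISBAIJ on several.
*/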
1742: PetscErrorCode MatCreate_SBAIJ(Mat A)
1743: {
1745: PetscMPIInt size;
1748: MPI_Comm_size(((PetscObject)A)->comm,&size);
1749: if (size == 1) {
1750: MatSetType(A,MATSEQSBAIJ);
1751: } else {
1752: MatSetType(A,MATMPISBAIJ);
1753: }
1754: return(0);
1755: }
1760: /*@C
1761: MatMPISBAIJSetPreallocation - For good matrix assembly performance
1762: the user should preallocate the matrix storage by setting the parameters
1763: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
1764: performance can be increased by more than a factor of 50.
1766: Collective on Mat
1768: Input Parameters:
1769: + A - the matrix
1770: .  bs   - size of block
1771: . d_nz - number of block nonzeros per block row in diagonal portion of local
1772: submatrix (same for all local rows)
1773: .  d_nnz - array containing the number of block nonzeros in the various block rows
1774:            in the upper triangular and diagonal part of the diagonal portion of the local
1775:            submatrix (possibly different for each block row) or PETSC_NULL. You must leave room
1776:            for the diagonal entry even if it is zero.
1777: . o_nz - number of block nonzeros per block row in the off-diagonal portion of local
1778: submatrix (same for all local rows).
1779: - o_nnz - array containing the number of nonzeros in the various block rows of the
1780: off-diagonal portion of the local submatrix (possibly different for
1781: each block row) or PETSC_NULL.
1784: Options Database Keys:
1785: . -mat_no_unroll - uses code that does not unroll the loops in the
1786: block calculations (much slower)
1787: . -mat_block_size - size of the blocks to use
1789: Notes:
1791: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1792:    then it must be used on all processors that share the object for that argument.
1794: If the *_nnz parameter is given then the *_nz parameter is ignored
1796: Storage Information:
1797: For a square global matrix we define each processor's diagonal portion
1798: to be its local rows and the corresponding columns (a square submatrix);
1799: each processor's off-diagonal portion encompasses the remainder of the
1800: local matrix (a rectangular submatrix).
1802: The user can specify preallocated storage for the diagonal part of
1803: the local submatrix with either d_nz or d_nnz (not both). Set
1804: d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1805: memory allocation. Likewise, specify preallocated storage for the
1806: off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1808: You can call MatGetInfo() to get information on how effective the preallocation was;
1809: for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
1810: You can also run with the option -info and look for messages with the string
1811: malloc in them to see if additional memory allocation was needed.
1813: Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1814: the figure below we depict these three local rows and all columns (0-11).
1816: .vb
1817: 0 1 2 3 4 5 6 7 8 9 10 11
1818: -------------------
1819: row 3 | o o o d d d o o o o o o
1820: row 4 | o o o d d d o o o o o o
1821: row 5 | o o o d d d o o o o o o
1822: -------------------
1823: .ve
1824:
1825: Thus, any entries in the d locations are stored in the d (diagonal)
1826: submatrix, and any entries in the o locations are stored in the
1827: o (off-diagonal) submatrix. Note that the d matrix is stored in
1828:    MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.
1830: Now d_nz should indicate the number of block nonzeros per row in the upper triangular
1831: plus the diagonal part of the d matrix,
1832: and o_nz should indicate the number of block nonzeros per row in the o matrix.
1833: In general, for PDE problems in which most nonzeros are near the diagonal,
1834: one expects d_nz >> o_nz. For large problems you MUST preallocate memory
1835: or you will get TERRIBLE performance; see the users' manual chapter on
1836: matrices.
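   Example:
   For the three rows depicted above with bs=1, counting only the stored
   upper triangle (a sketch that assumes every marked location is nonzero):
.vb
      PetscInt d_nnz[] = {3,2,1}, o_nnz[] = {6,6,6};
      MatMPISBAIJSetPreallocation(A,1,0,d_nnz,0,o_nnz);
.ve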
1838: Level: intermediate
1840: .keywords: matrix, block, aij, compressed row, sparse, parallel
1842: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1843: @*/
1844: PetscErrorCode MatMPISBAIJSetPreallocation(Mat B,PetscInt bs,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
1845: {
1846: PetscErrorCode ierr,(*f)(Mat,PetscInt,PetscInt,const PetscInt[],PetscInt,const PetscInt[]);
1849: PetscObjectQueryFunction((PetscObject)B,"MatMPISBAIJSetPreallocation_C",(void (**)(void))&f);
1850: if (f) {
1851: (*f)(B,bs,d_nz,d_nnz,o_nz,o_nnz);
1852: }
1853: return(0);
1854: }
1858: /*@C
1859: MatCreateMPISBAIJ - Creates a sparse parallel matrix in symmetric block AIJ format
1860: (block compressed row). For good matrix assembly performance
1861: the user should preallocate the matrix storage by setting the parameters
1862: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
1863: performance can be increased by more than a factor of 50.
1865: Collective on MPI_Comm
1867: Input Parameters:
1868: + comm - MPI communicator
1869: . bs - size of blocks
1870: . m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
1871: This value should be the same as the local size used in creating the
1872: y vector for the matrix-vector product y = Ax.
1873: . n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
1874: This value should be the same as the local size used in creating the
1875: x vector for the matrix-vector product y = Ax.
1876: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
1877: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
1878: . d_nz - number of block nonzeros per block row in diagonal portion of local
1879: submatrix (same for all local rows)
1880: . d_nnz - array containing the number of block nonzeros in the various block rows
1881: in the upper triangular portion of the diagonal portion of the local submatrix
1882: (possibly different for each block row) or PETSC_NULL.
1883: You must leave room for the diagonal entry even if it is zero.
1884: . o_nz - number of block nonzeros per block row in the off-diagonal portion of local
1885: submatrix (same for all local rows).
1886: - o_nnz - array containing the number of block nonzeros in the various block rows of the
1887: off-diagonal portion of the local submatrix (possibly different for
1888: each block row) or PETSC_NULL.
1890: Output Parameter:
1891: . A - the matrix
1893: Options Database Keys:
1894: + -mat_no_unroll - uses code that does not unroll the loops in the
1895: block calculations (much slower)
1896: . -mat_block_size - size of the blocks to use
1897: - -mat_mpi - use the parallel matrix data structures even on one processor
1898: (defaults to using SeqSBAIJ format on one processor)
1900: It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
1901: MatXXXXSetPreallocation() paradigm instead of this routine directly. This is definitely
1902: true if you plan to use the external direct solvers such as SuperLU, MUMPS or Spooles.
1903: [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
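A minimal sketch of that recommended sequence (arguments as described below):
.vb
MatCreate(comm,&A);
MatSetSizes(A,m,n,M,N);
MatSetType(A,MATMPISBAIJ);
MatSetFromOptions(A);
/* each preallocation call is a no-op unless A has the matching type */
MatMPISBAIJSetPreallocation(A,bs,d_nz,d_nnz,o_nz,o_nnz);
MatSeqSBAIJSetPreallocation(A,bs,d_nz,d_nnz);
.ve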
1905: Notes:
1906: The number of rows and columns must be divisible by the blocksize.
1907: This matrix type does not support complex Hermitian operations.
1909: The user MUST specify either the local or global matrix dimensions
1910: (possibly both).
1912: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1913: then it must be used on all processors that share the object for that argument.
1915: If the *_nnz parameter is given then the *_nz parameter is ignored.
1917: Storage Information:
1918: For a square global matrix we define each processor's diagonal portion
1919: to be its local rows and the corresponding columns (a square submatrix);
1920: each processor's off-diagonal portion encompasses the remainder of the
1921: local matrix (a rectangular submatrix).
1923: The user can specify preallocated storage for the diagonal part of
1924: the local submatrix with either d_nz or d_nnz (not both). Set
1925: d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1926: memory allocation. Likewise, specify preallocated storage for the
1927: off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1929: Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1930: the figure below we depict these three local rows and all columns (0-11).
1932: .vb
1933: 0 1 2 3 4 5 6 7 8 9 10 11
1934: -------------------
1935: row 3 | o o o d d d o o o o o o
1936: row 4 | o o o d d d o o o o o o
1937: row 5 | o o o d d d o o o o o o
1938: -------------------
1939: .ve
1940:
1941: Thus, any entries in the d locations are stored in the d (diagonal)
1942: submatrix, and any entries in the o locations are stored in the
1943: o (off-diagonal) submatrix. Note that the d matrix is stored in
1944: MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.
1946: Now d_nz should indicate the number of block nonzeros per row in the upper triangular
1947: plus the diagonal part of the d matrix,
1948: and o_nz should indicate the number of block nonzeros per row in the o matrix.
1949: In general, for PDE problems in which most nonzeros are near the diagonal,
1950: one expects d_nz >> o_nz. For large problems you MUST preallocate memory
1951: or you will get TERRIBLE performance; see the users' manual chapter on
1952: matrices.
1954: Level: intermediate
1956: .keywords: matrix, block, aij, compressed row, sparse, parallel
1958: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1959: @*/
1961: PetscErrorCode MatCreateMPISBAIJ(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
1962: {
1964: PetscMPIInt size;
1967: MatCreate(comm,A);
1968: MatSetSizes(*A,m,n,M,N);
1969: MPI_Comm_size(comm,&size);
1970: if (size > 1) {
1971: MatSetType(*A,MATMPISBAIJ);
1972: MatMPISBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);
1973: } else {
1974: MatSetType(*A,MATSEQSBAIJ);
1975: MatSeqSBAIJSetPreallocation(*A,bs,d_nz,d_nnz);
1976: }
1977: return(0);
1978: }
1983: static PetscErrorCode MatDuplicate_MPISBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
1984: {
1985: Mat mat;
1986: Mat_MPISBAIJ *a,*oldmat = (Mat_MPISBAIJ*)matin->data;
1988: PetscInt len=0,nt,bs=matin->rmap->bs,mbs=oldmat->mbs;
1989: PetscScalar *array;
1992: *newmat = 0;
1993: MatCreate(((PetscObject)matin)->comm,&mat);
1994: MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
1995: MatSetType(mat,((PetscObject)matin)->type_name);
1996: PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));
1997: PetscMapCopy(((PetscObject)matin)->comm,matin->rmap,mat->rmap);
1998: PetscMapCopy(((PetscObject)matin)->comm,matin->cmap,mat->cmap);
1999:
2000: mat->factor = matin->factor;
2001: mat->preallocated = PETSC_TRUE;
2002: mat->assembled = PETSC_TRUE;
2003: mat->insertmode = NOT_SET_VALUES;
2005: a = (Mat_MPISBAIJ*)mat->data;
2006: a->bs2 = oldmat->bs2;
2007: a->mbs = oldmat->mbs;
2008: a->nbs = oldmat->nbs;
2009: a->Mbs = oldmat->Mbs;
2010: a->Nbs = oldmat->Nbs;
2013: a->size = oldmat->size;
2014: a->rank = oldmat->rank;
2015: a->donotstash = oldmat->donotstash;
2016: a->roworiented = oldmat->roworiented;
2017: a->rowindices = 0;
2018: a->rowvalues = 0;
2019: a->getrowactive = PETSC_FALSE;
2020: a->barray = 0;
2021: a->rstartbs = oldmat->rstartbs;
2022: a->rendbs = oldmat->rendbs;
2023: a->cstartbs = oldmat->cstartbs;
2024: a->cendbs = oldmat->cendbs;
2026: /* hash table stuff */
2027: a->ht = 0;
2028: a->hd = 0;
2029: a->ht_size = 0;
2030: a->ht_flag = oldmat->ht_flag;
2031: a->ht_fact = oldmat->ht_fact;
2032: a->ht_total_ct = 0;
2033: a->ht_insert_ct = 0;
2034:
2035: PetscMemcpy(a->rangebs,oldmat->rangebs,(a->size+2)*sizeof(PetscInt));
2036: MatStashCreate_Private(((PetscObject)matin)->comm,1,&mat->stash);
2037: MatStashCreate_Private(((PetscObject)matin)->comm,matin->rmap->bs,&mat->bstash);
2038: if (oldmat->colmap) {
2039: #if defined (PETSC_USE_CTABLE)
2040: PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2041: #else
2042: PetscMalloc((a->Nbs)*sizeof(PetscInt),&a->colmap);
2043: PetscLogObjectMemory(mat,(a->Nbs)*sizeof(PetscInt));
2044: PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(PetscInt));
2045: #endif
2046: } else a->colmap = 0;
2048: if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
2049: PetscMalloc(len*sizeof(PetscInt),&a->garray);
2050: PetscLogObjectMemory(mat,len*sizeof(PetscInt));
2051: PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));
2052: } else a->garray = 0;
2053:
2054: VecDuplicate(oldmat->lvec,&a->lvec);
2055: PetscLogObjectParent(mat,a->lvec);
2056: VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2057: PetscLogObjectParent(mat,a->Mvctx);
2059: VecDuplicate(oldmat->slvec0,&a->slvec0);
2060: PetscLogObjectParent(mat,a->slvec0);
2061: VecDuplicate(oldmat->slvec1,&a->slvec1);
2062: PetscLogObjectParent(mat,a->slvec1);
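/* slvec1 stores the bs*mbs local entries followed by the ghost entries;
rebuild slvec1a/slvec1b (and slvec0b) as sequential vectors that alias
that storage rather than copying it */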
2064: VecGetLocalSize(a->slvec1,&nt);
2065: VecGetArray(a->slvec1,&array);
2066: VecCreateSeqWithArray(PETSC_COMM_SELF,bs*mbs,array,&a->slvec1a);
2067: VecCreateSeqWithArray(PETSC_COMM_SELF,nt-bs*mbs,array+bs*mbs,&a->slvec1b);
2068: VecRestoreArray(a->slvec1,&array);
2069: VecGetArray(a->slvec0,&array);
2070: VecCreateSeqWithArray(PETSC_COMM_SELF,nt-bs*mbs,array+bs*mbs,&a->slvec0b);
2071: VecRestoreArray(a->slvec0,&array);
2072: PetscLogObjectParent(mat,a->slvec0);
2073: PetscLogObjectParent(mat,a->slvec1);
2074: PetscLogObjectParent(mat,a->slvec0b);
2075: PetscLogObjectParent(mat,a->slvec1a);
2076: PetscLogObjectParent(mat,a->slvec1b);
2078: /* VecScatterCopy(oldmat->sMvctx,&a->sMvctx); - not written yet, replaced by the lazy trick: */
2079: PetscObjectReference((PetscObject)oldmat->sMvctx);
2080: a->sMvctx = oldmat->sMvctx;
2081: PetscLogObjectParent(mat,a->sMvctx);
2083: MatDuplicate(oldmat->A,cpvalues,&a->A);
2084: PetscLogObjectParent(mat,a->A);
2085: MatDuplicate(oldmat->B,cpvalues,&a->B);
2086: PetscLogObjectParent(mat,a->B);
2087: PetscFListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2088: *newmat = mat;
2089: return(0);
2090: }
2092: #include petscsys.h
2096: PetscErrorCode MatLoad_MPISBAIJ(PetscViewer viewer, const MatType type,Mat *newmat)
2097: {
2098: Mat A;
2100: PetscInt i,nz,j,rstart,rend;
2101: PetscScalar *vals,*buf;
2102: MPI_Comm comm = ((PetscObject)viewer)->comm;
2103: MPI_Status status;
2104: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag,*sndcounts = 0,*browners,maxnz,*rowners,*locrowlens,mmbs;
2105: PetscInt header[4],*rowlengths = 0,M,N,m,*cols;
2106: PetscInt *procsnz = 0,jj,*mycols,*ibuf;
2107: PetscInt bs=1,Mbs,mbs,extra_rows;
2108: PetscInt *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
2109: PetscInt dcount,kmax,k,nzcount,tmp;
2110: int fd;
2111:
2113: PetscOptionsBegin(comm,PETSC_NULL,"Options for loading MPISBAIJ matrix 2","Mat");
2114: PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,PETSC_NULL);
2115: PetscOptionsEnd();
2117: MPI_Comm_size(comm,&size);
2118: MPI_Comm_rank(comm,&rank);
2119: if (!rank) {
2120: PetscViewerBinaryGetDescriptor(viewer,&fd);
2121: PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
2122: if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2123: if (header[3] < 0) {
2124: SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPISBAIJ");
2125: }
2126: }
2128: MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2129: M = header[1]; N = header[2];
2131: if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");
2133: /*
2134: This code adds extra rows to make sure the number of rows is
2135: divisible by the blocksize
2136: */
2137: Mbs = M/bs;
2138: extra_rows = bs - M + bs*(Mbs);
2139: if (extra_rows == bs) extra_rows = 0;
2140: else Mbs++;
2141: if (extra_rows && !rank) {
2142: PetscInfo(viewer,"Padding loaded matrix to match blocksize\n");
2143: }
2145: /* determine ownership of all rows */
2146: mbs = Mbs/size + ((Mbs % size) > rank);
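/* deal block rows out as evenly as possible: the first Mbs%size ranks get
one extra block row (e.g. Mbs=10, size=4 yields mbs = 3,3,2,2) */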
2147: m = mbs*bs;
2148: PetscMalloc(2*(size+2)*sizeof(PetscMPIInt),&rowners);
2149: browners = rowners + size + 1;
2150: mmbs = PetscMPIIntCast(mbs);
2151: MPI_Allgather(&mmbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);
2152: rowners[0] = 0;
2153: for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2154: for (i=0; i<=size; i++) browners[i] = rowners[i]*bs;
2155: rstart = rowners[rank];
2156: rend = rowners[rank+1];
2157:
2158: /* distribute row lengths to all processors */
2159: PetscMalloc((rend-rstart)*bs*sizeof(PetscMPIInt),&locrowlens);
2160: if (!rank) {
2161: PetscMalloc((M+extra_rows)*sizeof(PetscInt),&rowlengths);
2162: PetscBinaryRead(fd,rowlengths,M,PETSC_INT);
2163: for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2164: PetscMalloc(size*sizeof(PetscMPIInt),&sndcounts);
2165: for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2166: MPI_Scatterv(rowlengths,sndcounts,browners,MPIU_INT,locrowlens,(rend-rstart)*bs,MPIU_INT,0,comm);
2167: PetscFree(sndcounts);
2168: } else {
2169: MPI_Scatterv(0,0,0,MPIU_INT,locrowlens,(rend-rstart)*bs,MPIU_INT,0,comm);
2170: }
2171:
2172: if (!rank) { /* procs[0] */
2173: /* calculate the number of nonzeros on each processor */
2174: PetscMalloc(size*sizeof(PetscInt),&procsnz);
2175: PetscMemzero(procsnz,size*sizeof(PetscInt));
2176: for (i=0; i<size; i++) {
2177: for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2178: procsnz[i] += rowlengths[j];
2179: }
2180: }
2181: PetscFree(rowlengths);
2182:
2183: /* determine max buffer needed and allocate it */
2184: maxnz = 0;
2185: for (i=0; i<size; i++) {
2186: maxnz = PetscMax(maxnz,procsnz[i]);
2187: }
2188: PetscMalloc(maxnz*sizeof(PetscInt),&cols);
2190: /* read in my part of the matrix column indices */
2191: nz = procsnz[0];
2192: PetscMalloc(nz*sizeof(PetscInt),&ibuf);
2193: mycols = ibuf;
2194: if (size == 1) nz -= extra_rows;
2195: PetscBinaryRead(fd,mycols,nz,PETSC_INT);
2196: if (size == 1) for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }
2198: /* read in everyone else's part (except the last proc's) and ship it off */
2199: for (i=1; i<size-1; i++) {
2200: nz = procsnz[i];
2201: PetscBinaryRead(fd,cols,nz,PETSC_INT);
2202: MPI_Send(cols,nz,MPIU_INT,i,tag,comm);
2203: }
2204: /* read in the stuff for the last proc */
2205: if (size != 1) {
2206: nz = procsnz[size-1] - extra_rows; /* the extra rows are not on the disk */
2207: PetscBinaryRead(fd,cols,nz,PETSC_INT);
2208: for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2209: MPI_Send(cols,nz+extra_rows,MPIU_INT,size-1,tag,comm);
2210: }
2211: PetscFree(cols);
2212: } else { /* procs[i], i>0 */
2213: /* determine buffer space needed for message */
2214: nz = 0;
2215: for (i=0; i<m; i++) {
2216: nz += locrowlens[i];
2217: }
2218: PetscMalloc(nz*sizeof(PetscInt),&ibuf);
2219: mycols = ibuf;
2220: /* receive message of column indices*/
2221: MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);
2222: MPI_Get_count(&status,MPIU_INT,&maxnz);
2223: if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2224: }
2226: /* loop over local rows, determining number of off diagonal entries */
2227: PetscMalloc(2*(rend-rstart+1)*sizeof(PetscInt),&dlens);
2228: odlens = dlens + (rend-rstart);
2229: PetscMalloc(3*Mbs*sizeof(PetscInt),&mask);
2230: PetscMemzero(mask,3*Mbs*sizeof(PetscInt));
2231: masked1 = mask + Mbs;
2232: masked2 = masked1 + Mbs;
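/* mask[] flags block columns already seen in the current block row, while
masked1[]/masked2[] record which diagonal/off-diagonal flags were set so
only those entries need to be cleared after the row is processed */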
2233: rowcount = 0; nzcount = 0;
2234: for (i=0; i<mbs; i++) {
2235: dcount = 0;
2236: odcount = 0;
2237: for (j=0; j<bs; j++) {
2238: kmax = locrowlens[rowcount];
2239: for (k=0; k<kmax; k++) {
2240: tmp = mycols[nzcount++]/bs; /* block col. index */
2241: if (!mask[tmp]) {
2242: mask[tmp] = 1;
2243: if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp; /* entry in off-diag portion */
2244: else masked1[dcount++] = tmp; /* entry in diag portion */
2245: }
2246: }
2247: rowcount++;
2248: }
2249:
2250: dlens[i] = dcount; /* d_nzz[i] */
2251: odlens[i] = odcount; /* o_nzz[i] */
2253: /* zero out the mask elements we set */
2254: for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2255: for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2256: }
2257:
2258: /* create our matrix */
2259: MatCreate(comm,&A);
2260: MatSetSizes(A,m,m,PETSC_DETERMINE,PETSC_DETERMINE);
2261: MatSetType(A,type);
2262: MatMPISBAIJSetPreallocation(A,bs,0,dlens,0,odlens);
2263:
2264: if (!rank) {
2265: PetscMalloc(maxnz*sizeof(PetscScalar),&buf);
2266: /* read in my part of the matrix numerical values */
2267: nz = procsnz[0];
2268: vals = buf;
2269: mycols = ibuf;
2270: if (size == 1) nz -= extra_rows;
2271: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2272: if (size == 1) for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }
2274: /* insert into matrix */
2275: jj = rstart*bs;
2276: for (i=0; i<m; i++) {
2277: MatSetValues(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2278: mycols += locrowlens[i];
2279: vals += locrowlens[i];
2280: jj++;
2281: }
2283: /* read in other processors (except the last one) and ship out */
2284: for (i=1; i<size-1; i++) {
2285: nz = procsnz[i];
2286: vals = buf;
2287: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2288: MPI_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)A)->tag,comm);
2289: }
2290: /* the last proc */
2291: if (size != 1) {
2292: nz = procsnz[size-1] - extra_rows; /* the extra rows are not on the disk */
2293: vals = buf;
2294: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2295: for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2296: MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,((PetscObject)A)->tag,comm);
2297: }
2298: PetscFree(procsnz);
2300: } else {
2301: /* receive numeric values */
2302: PetscMalloc(nz*sizeof(PetscScalar),&buf);
2304: /* receive message of values*/
2305: vals = buf;
2306: mycols = ibuf;
2307: MPI_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)A)->tag,comm,&status);
2308: MPI_Get_count(&status,MPIU_SCALAR,&maxnz);
2309: if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2311: /* insert into matrix */
2312: jj = rstart*bs;
2313: for (i=0; i<m; i++) {
2314: MatSetValues_MPISBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2315: mycols += locrowlens[i];
2316: vals += locrowlens[i];
2317: jj++;
2318: }
2319: }
2321: PetscFree(locrowlens);
2322: PetscFree(buf);
2323: PetscFree(ibuf);
2324: PetscFree(rowners);
2325: PetscFree(dlens);
2326: PetscFree(mask);
2327: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
2328: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
2329: *newmat = A;
2330: return(0);
2331: }
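/*
A minimal usage sketch for the loader above (the file name is illustrative):

PetscViewer viewer;
Mat A;
PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
MatLoad(viewer,MATMPISBAIJ,&A);
PetscViewerDestroy(viewer);
*/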
2335: /*XXXXX@
2336: MatMPISBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.
2338: Input Parameters:
2339: + mat - the matrix
2340: - fact - factor
2342: Collective on Mat
2344: Level: advanced
2346: Notes:
2347: This can also be set by the command line option: -mat_use_hash_table fact
2349: .keywords: matrix, hashtable, factor, HT
2351: .seealso: MatSetOption()
2352: @XXXXX*/
2357: PetscErrorCode MatGetRowMaxAbs_MPISBAIJ(Mat A,Vec v,PetscInt idx[])
2358: {
2359: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
2360: Mat_SeqBAIJ *b = (Mat_SeqBAIJ*)(a->B)->data;
2361: PetscReal atmp;
2362: PetscReal *work,*svalues,*rvalues;
2364: PetscInt i,bs,mbs,*bi,*bj,brow,j,ncols,krow,kcol,col,row,Mbs,bcol;
2365: PetscMPIInt rank,size;
2366: PetscInt *rowners_bs,dest,count,source;
2367: PetscScalar *va;
2368: MatScalar *ba;
2369: MPI_Status stat;
2372: if (idx) SETERRQ(PETSC_ERR_SUP,"Send email to petsc-maint@mcs.anl.gov");
2373: MatGetRowMaxAbs(a->A,v,PETSC_NULL);
2374: VecGetArray(v,&va);
2376: MPI_Comm_size(((PetscObject)A)->comm,&size);
2377: MPI_Comm_rank(((PetscObject)A)->comm,&rank);
2379: bs = A->rmap->bs;
2380: mbs = a->mbs;
2381: Mbs = a->Mbs;
2382: ba = b->a;
2383: bi = b->i;
2384: bj = b->j;
2386: /* find ownerships */
2387: rowners_bs = A->rmap->range;
2389: /* each proc creates an array to be distributed */
2390: PetscMalloc(bs*Mbs*sizeof(PetscReal),&work);
2391: PetscMemzero(work,bs*Mbs*sizeof(PetscReal));
2393: /* row_max for B */
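/* B stores only the block columns to the right of the diagonal block, so by
symmetry each stored entry also belongs to a row owned by a higher-ranked
process; those contributions are collected in work[] and shipped below */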
2394: if (rank != size-1){
2395: for (i=0; i<mbs; i++) {
2396: ncols = bi[1] - bi[0]; bi++;
2397: brow = bs*i;
2398: for (j=0; j<ncols; j++){
2399: bcol = bs*(*bj);
2400: for (kcol=0; kcol<bs; kcol++){
2401: col = bcol + kcol; /* local col index */
2402: col += rowners_bs[rank+1]; /* global col index */
2403: for (krow=0; krow<bs; krow++){
2404: atmp = PetscAbsScalar(*ba); ba++;
2405: row = brow + krow; /* local row index */
2406: if (PetscRealPart(va[row]) < atmp) va[row] = atmp;
2407: if (work[col] < atmp) work[col] = atmp;
2408: }
2409: }
2410: bj++;
2411: }
2412: }
2414: /* send values to its owners */
2415: for (dest=rank+1; dest<size; dest++){
2416: svalues = work + rowners_bs[dest];
2417: count = rowners_bs[dest+1]-rowners_bs[dest];
2418: MPI_Send(svalues,count,MPIU_REAL,dest,rank,((PetscObject)A)->comm);
2419: }
2420: }
2421:
2422: /* receive values */
2423: if (rank){
2424: rvalues = work;
2425: count = rowners_bs[rank+1]-rowners_bs[rank];
2426: for (source=0; source<rank; source++){
2427: MPI_Recv(rvalues,count,MPIU_REAL,MPI_ANY_SOURCE,MPI_ANY_TAG,((PetscObject)A)->comm,&stat);
2428: /* process values */
2429: for (i=0; i<count; i++){
2430: if (PetscRealPart(va[i]) < rvalues[i]) va[i] = rvalues[i];
2431: }
2432: }
2433: }
2435: VecRestoreArray(v,&va);
2436: PetscFree(work);
2437: return(0);
2438: }
2442: PetscErrorCode MatRelax_MPISBAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2443: {
2444: Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
2446: PetscInt mbs=mat->mbs,bs=matin->rmap->bs;
2447: PetscScalar *x,*b,*ptr,zero=0.0;
2448: Vec bb1;
2449:
2451: if (its <= 0 || lits <= 0) SETERRQ2(PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D and local its %D both positive",its,lits);
2452: if (bs > 1)
2453: SETERRQ(PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");
2455: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2456: if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2457: (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2458: its--;
2459: }
2461: VecDuplicate(bb,&bb1);
2462: while (its--){
2463:
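/* a symmetric sweep needs both B*x (stored upper triangle) and B^T*x (the
implied lower triangle); packing them into slvec0/slvec1 lets one forward
scatter communicate both contributions at once */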
2464: /* lower triangular part: slvec0b = - B^T*xx */
2465: (*mat->B->ops->multtranspose)(mat->B,xx,mat->slvec0b);
2466:
2467: /* copy xx into slvec0a */
2468: VecGetArray(mat->slvec0,&ptr);
2469: VecGetArray(xx,&x);
2470: PetscMemcpy(ptr,x,bs*mbs*sizeof(MatScalar));
2471: VecRestoreArray(mat->slvec0,&ptr);
2473: VecScale(mat->slvec0,-1.0);
2475: /* copy bb into slvec1a */
2476: VecGetArray(mat->slvec1,&ptr);
2477: VecGetArray(bb,&b);
2478: PetscMemcpy(ptr,b,bs*mbs*sizeof(MatScalar));
2479: VecRestoreArray(mat->slvec1,&ptr);
2481: /* set slvec1b = 0 */
2482: VecSet(mat->slvec1b,zero);
2484: VecScatterBegin(mat->sMvctx,mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD);
2485: VecRestoreArray(xx,&x);
2486: VecRestoreArray(bb,&b);
2487: VecScatterEnd(mat->sMvctx,mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD);
2489: /* upper triangular part: bb1 = bb1 - B*x */
2490: (*mat->B->ops->multadd)(mat->B,mat->slvec1b,mat->slvec1a,bb1);
2491:
2492: /* local diagonal sweep */
2493: (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2494: }
2495: VecDestroy(bb1);
2496: } else {
2497: SETERRQ(PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2498: }
2499: return(0);
2500: }
2504: PetscErrorCode MatRelax_MPISBAIJ_2comm(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2505: {
2506: Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
2508: Vec lvec1,bb1;
2509:
2511: if (its <= 0 || lits <= 0) SETERRQ2(PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D and local its %D both positive",its,lits);
2512: if (matin->rmap->bs > 1)
2513: SETERRQ(PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");
2515: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2516: if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2517: (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2518: its--;
2519: }
2521: VecDuplicate(mat->lvec,&lvec1);
2522: VecDuplicate(bb,&bb1);
2523: while (its--){
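/* unlike MatRelax_MPISBAIJ, this variant uses two communications per
iteration: a forward scatter of xx for the B*x term and a reverse
scatter of lvec1 for the B^T*x term */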
2524: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2525:
2526: /* lower diagonal part: bb1 = bb - B^T*xx */
2527: (*mat->B->ops->multtranspose)(mat->B,xx,lvec1);
2528: VecScale(lvec1,-1.0);
2530: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2531: VecCopy(bb,bb1);
2532: VecScatterBegin(mat->Mvctx,lvec1,bb1,ADD_VALUES,SCATTER_REVERSE);
2534: /* upper diagonal part: bb1 = bb1 - B*x */
2535: VecScale(mat->lvec,-1.0);
2536: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb1,bb1);
2538: VecScatterEnd(mat->Mvctx,lvec1,bb1,ADD_VALUES,SCATTER_REVERSE);
2539:
2540: /* diagonal sweep */
2541: (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2542: }
2543: VecDestroy(lvec1);
2544: VecDestroy(bb1);
2545: } else {
2546: SETERRQ(PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2547: }
2548: return(0);
2549: }