Actual source code: mpisbaijspooles.c

#define PETSCMAT_DLL

/*
   Provides an interface to the Spooles parallel sparse solver (MPI SPOOLES)
*/

#include "../src/mat/impls/aij/seq/spooles/spooles.h"
#include "../src/mat/impls/sbaij/mpi/mpisbaij.h"

#if !defined(PETSC_USE_COMPLEX)
/*
  input:
   F:                 numeric factor
  output:
   nneg, nzero, npos: global matrix inertia, returned on all processes
*/

PetscErrorCode MatGetInertia_MPISBAIJSpooles(Mat F,int *nneg,int *nzero,int *npos)
{
  Mat_Spooles *lu = (Mat_Spooles*)F->spptr;
  int         neg,zero,pos,sbuf[3],rbuf[3];

  /* compute the inertia of the locally owned fronts, then sum over all processes */
  FrontMtx_inertia(lu->frontmtx,&neg,&zero,&pos);
  sbuf[0] = neg; sbuf[1] = zero; sbuf[2] = pos;
  MPI_Allreduce(sbuf,rbuf,3,MPI_INT,MPI_SUM,((PetscObject)F)->comm);
  *nneg  = rbuf[0]; *nzero = rbuf[1]; *npos  = rbuf[2];
  return(0);
}
#endif /* !defined(PETSC_USE_COMPLEX) */
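
/*
   A minimal usage sketch (not part of the original file), assuming an MPISBAIJ
   matrix A, an ordering perm, and a MatFactorInfo info that are set up elsewhere;
   the last call reaches the routine above through the generic MatGetInertia()
   interface and returns the same global inertia on every process:

     Mat      F;
     PetscInt nneg,nzero,npos;

     MatGetFactor(A,MAT_SOLVER_SPOOLES,MAT_FACTOR_CHOLESKY,&F);
     MatCholeskyFactorSymbolic(F,A,perm,&info);
     MatCholeskyFactorNumeric(F,A,&info);
     MatGetInertia(F,&nneg,&nzero,&npos);
*/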

/* Note: the PETSc permutation r is ignored */
PetscErrorCode MatCholeskyFactorSymbolic_MPISBAIJSpooles(Mat B,Mat A,IS r,const MatFactorInfo *info)
{
  Mat_Spooles *lu;

  lu                       = (Mat_Spooles*)(B->spptr);
  lu->options.pivotingflag = SPOOLES_NO_PIVOTING;
  lu->flg                  = DIFFERENT_NONZERO_PATTERN;
  lu->options.useQR        = PETSC_FALSE;
  lu->options.symflag      = SPOOLES_SYMMETRIC;  /* default */

  MPI_Comm_dup(((PetscObject)A)->comm,&(lu->comm_spooles));
  B->ops->choleskyfactornumeric = MatFactorNumeric_MPISpooles;
  return(0);
}

PetscErrorCode MatDestroy_MPISBAIJSpooles(Mat A)
{
  Mat_Spooles *lu = (Mat_Spooles*)A->spptr;

  if (lu->CleanUpSpooles) {
    FrontMtx_free(lu->frontmtx);
    IV_free(lu->newToOldIV);
    IV_free(lu->oldToNewIV);
    IV_free(lu->vtxmapIV);
    InpMtx_free(lu->mtxA);
    ETree_free(lu->frontETree);
    IVL_free(lu->symbfacIVL);
    SubMtxManager_free(lu->mtxmanager);
    DenseMtx_free(lu->mtxX);
    DenseMtx_free(lu->mtxY);
    MPI_Comm_free(&(lu->comm_spooles));
    if (lu->scat) {
      VecDestroy(lu->vec_spooles);
      ISDestroy(lu->iden);
      ISDestroy(lu->is_petsc);
      VecScatterDestroy(lu->scat);
    }
  }
  MatDestroy_MPISBAIJ(A);

  return(0);
}

PetscErrorCode MatFactorGetSolverPackage_mpisbaij_spooles(Mat A,const MatSolverPackage *type)
{
  *type = MAT_SOLVER_SPOOLES;
  return(0);
}

PetscErrorCode MatGetFactor_mpisbaij_spooles(Mat A,MatFactorType ftype,Mat *F)
{
  Mat_Spooles *lu;
  Mat         B;

  /* Create the factorization matrix F */
  MatCreate(((PetscObject)A)->comm,&B);
  MatSetSizes(B,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);
  MatSetType(B,((PetscObject)A)->type_name);
  MatMPISBAIJSetPreallocation(B,1,0,PETSC_NULL,0,PETSC_NULL);

  PetscNewLog(B,Mat_Spooles,&lu);
  B->spptr          = lu;
  lu->flg           = DIFFERENT_NONZERO_PATTERN;
  lu->options.useQR = PETSC_FALSE;

  if (ftype == MAT_FACTOR_CHOLESKY) {
    B->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_MPISBAIJSpooles;
    B->ops->destroy                = MatDestroy_MPISBAIJSpooles;
    PetscObjectComposeFunctionDynamic((PetscObject)B,"MatFactorGetSolverPackage_C","MatFactorGetSolverPackage_mpisbaij_spooles",MatFactorGetSolverPackage_mpisbaij_spooles);

    lu->options.pivotingflag = SPOOLES_NO_PIVOTING;
    lu->options.symflag      = SPOOLES_SYMMETRIC;
  } else SETERRQ(PETSC_ERR_SUP,"Only Cholesky for SBAIJ matrices");
  B->factor = ftype;

  MPI_Comm_dup(((PetscObject)A)->comm,&(lu->comm_spooles));

  *F = B;
  return(0);
}

/*MC
  MAT_SOLVER_SPOOLES - "spooles" - a matrix type providing direct solvers (LU and Cholesky) for distributed symmetric
  and non-symmetric matrices via the external package Spooles.

  If Spooles is installed (run config/configure.py with the option --download-spooles), this solver can be selected
  with PCFactorSetMatSolverPackage() or the options database key -pc_factor_mat_solver_package spooles.

  Options Database Keys:
+ -mat_spooles_tau <tau> - upper bound on the magnitude of the largest element in L or U
. -mat_spooles_seed <seed> - random number seed used for ordering
. -mat_spooles_msglvl <msglvl> - message output level
. -mat_spooles_ordering <BestOfNDandMS,MMD,MS,ND> - ordering used
. -mat_spooles_maxdomainsize <n> - maximum subgraph size used by Spooles orderings
. -mat_spooles_maxzeros <n> - maximum number of zeros inside a supernode
. -mat_spooles_maxsize <n> - maximum size of a supernode
. -mat_spooles_FrontMtxInfo <true,false> - print Spooles information about the computed factorization
. -mat_spooles_symmetryflag <0,1,2> - 0: SPOOLES_SYMMETRIC, 1: SPOOLES_HERMITIAN, 2: SPOOLES_NONSYMMETRIC
. -mat_spooles_patchAndGoFlag <0,1,2> - 0: no patch, 1: use PatchAndGo strategy 1, 2: use PatchAndGo strategy 2
. -mat_spooles_toosmall <dt> - drop tolerance for PatchAndGo strategy 1
. -mat_spooles_storeids <bool integer> - if nonzero, stores row and column numbers where patches were applied in an IV object
. -mat_spooles_fudge <delta> - fudge factor for rescaling diagonals with PatchAndGo strategy 2
- -mat_spooles_storevalues <bool integer> - if nonzero and PatchAndGo strategy 2 is used, store the change in diagonal value in a DV object

   Level: beginner

.seealso: MAT_SOLVER_SUPERLU, MAT_SOLVER_MUMPS, MAT_SOLVER_SUPERLU_DIST, PCFactorSetMatSolverPackage(), MatSolverPackage
M*/
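
/*
   Example usage (a sketch, not part of the original file): with an MPISBAIJ matrix the Spooles
   Cholesky solver is typically selected at run time through the options database, e.g.

     mpiexec -n 4 ./app -ksp_type preonly -pc_type cholesky \
        -pc_factor_mat_solver_package spooles -mat_spooles_symmetryflag 0

   where ./app stands for any PETSc application; the same choice can be made in code with
   PCFactorSetMatSolverPackage(pc,MAT_SOLVER_SPOOLES).
*/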