Actual source code: mpidense.c

  1: #define PETSCMAT_DLL

  3: /*
  4:    Basic functions for parallel dense matrices.
  5: */
  6: 
 7:  #include "src/mat/impls/dense/mpi/mpidense.h"

 11: PetscErrorCode MatLUFactorSymbolic_MPIDense(Mat A,IS row,IS col,MatFactorInfo *info,Mat *fact)
 12: {

 16:   MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,fact);
 17:   return(0);
 18: }

 22: PetscErrorCode MatCholeskyFactorSymbolic_MPIDense(Mat A,IS perm,MatFactorInfo *info,Mat *fact)
 23: {

 27:   MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,fact);
 28:   return(0);
 29: }

 33: /*@

 35:       MatDenseGetLocalMatrix - For a MATMPIDENSE or MATSEQDENSE matrix, returns the sequential
 36:               matrix that stores this process's part of the operator. For a sequential matrix, the matrix itself is returned.

 38:     Input Parameter:
 39: .      A - the Seq or MPI dense matrix

 41:     Output Parameter:
 42: .      B - the inner matrix

 44:     Level: intermediate

 46: @*/
 47: PetscErrorCode MatDenseGetLocalMatrix(Mat A,Mat *B)
 48: {
 49:   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data;
 51:   PetscTruth     flg;
 52:   PetscMPIInt    size;

 55:   PetscTypeCompare((PetscObject)A,MATMPIDENSE,&flg);
 56:   if (!flg) {  /* this check sucks! */
 57:     PetscTypeCompare((PetscObject)A,MATDENSE,&flg);
 58:     if (flg) {
 59:       MPI_Comm_size(A->comm,&size);
 60:       if (size == 1) flg = PETSC_FALSE;
 61:     }
 62:   }
 63:   if (flg) {
 64:     *B = mat->A;
 65:   } else {
 66:     *B = A;
 67:   }
 68:   return(0);
 69: }
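/*
   A minimal usage sketch for MatDenseGetLocalMatrix(), assuming A is an already
   assembled MATMPIDENSE (or MATSEQDENSE) matrix; error checking with CHKERRQ is
   omitted for brevity. Alocal is this process's sequential block and array points
   at its column-major storage.

     Mat         Alocal;
     PetscScalar *array;

     MatDenseGetLocalMatrix(A,&Alocal);
     MatGetArray(Alocal,&array);
     MatRestoreArray(Alocal,&array);
*/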

 73: PetscErrorCode MatGetRow_MPIDense(Mat A,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
 74: {
 75:   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data;
 77:   PetscInt       lrow,rstart = A->rmap.rstart,rend = A->rmap.rend;

 80:   if (row < rstart || row >= rend) SETERRQ(PETSC_ERR_SUP,"only local rows")
 81:   lrow = row - rstart;
 82:   MatGetRow(mat->A,lrow,nz,(const PetscInt **)idx,(const PetscScalar **)v);
 83:   return(0);
 84: }

 88: PetscErrorCode MatRestoreRow_MPIDense(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
 89: {

 93:   if (idx) {PetscFree(*idx);}
 94:   if (v) {PetscFree(*v);}
 95:   return(0);
 96: }

101: PetscErrorCode  MatGetDiagonalBlock_MPIDense(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *B)
102: {
103:   Mat_MPIDense   *mdn = (Mat_MPIDense*)A->data;
105:   PetscInt       m = A->rmap.n,rstart = A->rmap.rstart;
106:   PetscScalar    *array;
107:   MPI_Comm       comm;

110:   if (A->rmap.N != A->cmap.N) SETERRQ(PETSC_ERR_SUP,"Only square matrices supported.");

112:   /* The reuse aspect is not implemented efficiently */
113:   if (reuse) { MatDestroy(*B);}

115:   PetscObjectGetComm((PetscObject)(mdn->A),&comm);
116:   MatGetArray(mdn->A,&array);
117:   MatCreate(comm,B);
118:   MatSetSizes(*B,m,m,m,m);
119:   MatSetType(*B,mdn->A->type_name);
120:   MatSeqDenseSetPreallocation(*B,array+m*rstart);
121:   MatRestoreArray(mdn->A,&array);
122:   MatAssemblyBegin(*B,MAT_FINAL_ASSEMBLY);
123:   MatAssemblyEnd(*B,MAT_FINAL_ASSEMBLY);
124: 
125:   *iscopy = PETSC_TRUE;
126:   return(0);
127: }

132: PetscErrorCode MatSetValues_MPIDense(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
133: {
134:   Mat_MPIDense   *A = (Mat_MPIDense*)mat->data;
136:   PetscInt       i,j,rstart = mat->rmap.rstart,rend = mat->rmap.rend,row;
137:   PetscTruth     roworiented = A->roworiented;

140:   for (i=0; i<m; i++) {
141:     if (idxm[i] < 0) continue;
142:     if (idxm[i] >= mat->rmap.N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
143:     if (idxm[i] >= rstart && idxm[i] < rend) {
144:       row = idxm[i] - rstart;
145:       if (roworiented) {
146:         MatSetValues(A->A,1,&row,n,idxn,v+i*n,addv);
147:       } else {
148:         for (j=0; j<n; j++) {
149:           if (idxn[j] < 0) continue;
150:           if (idxn[j] >= mat->cmap.N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
151:           MatSetValues(A->A,1,&row,1,&idxn[j],v+i+j*m,addv);
152:         }
153:       }
154:     } else {
155:       if (!A->donotstash) {
156:         if (roworiented) {
157:           MatStashValuesRow_Private(&mat->stash,idxm[i],n,idxn,v+i*n);
158:         } else {
159:           MatStashValuesCol_Private(&mat->stash,idxm[i],n,idxn,v+i,m);
160:         }
161:       }
162:     }
163:   }
164:   return(0);
165: }

169: PetscErrorCode MatGetValues_MPIDense(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
170: {
171:   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
173:   PetscInt       i,j,rstart = mat->rmap.rstart,rend = mat->rmap.rend,row;

176:   for (i=0; i<m; i++) {
177:     if (idxm[i] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative row");
178:     if (idxm[i] >= mat->rmap.N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
179:     if (idxm[i] >= rstart && idxm[i] < rend) {
180:       row = idxm[i] - rstart;
181:       for (j=0; j<n; j++) {
182:         if (idxn[j] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative column");
183:         if (idxn[j] >= mat->cmap.N) {
184:           SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
185:         }
186:         MatGetValues(mdn->A,1,&row,1,&idxn[j],v+i*n+j);
187:       }
188:     } else {
189:       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
190:     }
191:   }
192:   return(0);
193: }

197: PetscErrorCode MatGetArray_MPIDense(Mat A,PetscScalar *array[])
198: {
199:   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;

203:   MatGetArray(a->A,array);
204:   return(0);
205: }

209: static PetscErrorCode MatGetSubMatrix_MPIDense(Mat A,IS isrow,IS iscol,PetscInt cs,MatReuse scall,Mat *B)
210: {
211:   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data,*newmatd;
212:   Mat_SeqDense   *lmat = (Mat_SeqDense*)mat->A->data;
214:   PetscInt       i,j,*irow,*icol,rstart,rend,nrows,ncols,nlrows,nlcols;
215:   PetscScalar    *av,*bv,*v = lmat->v;
216:   Mat            newmat;

219:   ISGetIndices(isrow,&irow);
220:   ISGetIndices(iscol,&icol);
221:   ISGetLocalSize(isrow,&nrows);
222:   ISGetLocalSize(iscol,&ncols);

224:   /* No parallel redistribution is currently supported! Should really check each index set
225:      to confirm that it is OK.  ... Currently supports only a submatrix with the same partitioning as the
226:      original matrix! */

228:   MatGetLocalSize(A,&nlrows,&nlcols);
229:   MatGetOwnershipRange(A,&rstart,&rend);
230: 
231:   /* Check submatrix call */
232:   if (scall == MAT_REUSE_MATRIX) {
233:     /* SETERRQ(PETSC_ERR_ARG_SIZ,"Reused submatrix wrong size"); */
234:     /* Really need to test row and column sizes! */
235:     newmat = *B;
236:   } else {
237:     /* Create and fill new matrix */
238:     MatCreate(A->comm,&newmat);
239:     MatSetSizes(newmat,nrows,cs,PETSC_DECIDE,ncols);
240:     MatSetType(newmat,A->type_name);
241:     MatMPIDenseSetPreallocation(newmat,PETSC_NULL);
242:   }

244:   /* Now extract the data pointers and do the copy, column at a time */
245:   newmatd = (Mat_MPIDense*)newmat->data;
246:   bv      = ((Mat_SeqDense *)newmatd->A->data)->v;
247: 
248:   for (i=0; i<ncols; i++) {
249:     av = v + nlrows*icol[i];
250:     for (j=0; j<nrows; j++) {
251:       *bv++ = av[irow[j] - rstart];
252:     }
253:   }

255:   /* Assemble the matrices so that the correct flags are set */
256:   MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);
257:   MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);

259:   /* Free work space */
260:   ISRestoreIndices(isrow,&irow);
261:   ISRestoreIndices(iscol,&icol);
262:   *B = newmat;
263:   return(0);
264: }

268: PetscErrorCode MatRestoreArray_MPIDense(Mat A,PetscScalar *array[])
269: {
271:   return(0);
272: }

276: PetscErrorCode MatAssemblyBegin_MPIDense(Mat mat,MatAssemblyType mode)
277: {
278:   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
279:   MPI_Comm       comm = mat->comm;
281:   PetscInt       nstash,reallocs;
282:   InsertMode     addv;

285:   /* make sure all processors are in either INSERT_VALUES or ADD_VALUES mode */
286:   MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,comm);
287:   if (addv == (ADD_VALUES|INSERT_VALUES)) {
288:     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Cannot mix adds/inserts on different procs");
289:   }
290:   mat->insertmode = addv; /* in case this processor had no cache */

292:   MatStashScatterBegin_Private(&mat->stash,mat->rmap.range);
293:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
294:   PetscInfo2(mdn->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
295:   return(0);
296: }

300: PetscErrorCode MatAssemblyEnd_MPIDense(Mat mat,MatAssemblyType mode)
301: {
302:   Mat_MPIDense    *mdn=(Mat_MPIDense*)mat->data;
303:   PetscErrorCode  ierr;
304:   PetscInt        i,*row,*col,flg,j,rstart,ncols;
305:   PetscMPIInt     n;
306:   PetscScalar     *val;
307:   InsertMode      addv=mat->insertmode;

310:   /*  wait on receives */
311:   while (1) {
312:     MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
313:     if (!flg) break;
314: 
315:     for (i=0; i<n;) {
316:       /* Now identify the consecutive vals belonging to the same row */
317:       for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
318:       if (j < n) ncols = j-i;
319:       else       ncols = n-i;
320:       /* Now assemble all these values with a single function call */
321:       MatSetValues_MPIDense(mat,1,row+i,ncols,col+i,val+i,addv);
322:       i = j;
323:     }
324:   }
325:   MatStashScatterEnd_Private(&mat->stash);
326: 
327:   MatAssemblyBegin(mdn->A,mode);
328:   MatAssemblyEnd(mdn->A,mode);

330:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
331:     MatSetUpMultiply_MPIDense(mat);
332:   }
333:   return(0);
334: }

338: PetscErrorCode MatZeroEntries_MPIDense(Mat A)
339: {
341:   Mat_MPIDense   *l = (Mat_MPIDense*)A->data;

344:   MatZeroEntries(l->A);
345:   return(0);
346: }

348: /* the code does not do the diagonal entries correctly unless the 
349:    matrix is square and the column and row ownerships are identical.
350:    This is a BUG. The only way to fix it seems to be to access 
351:    mdn->A and mdn->B directly and not through the MatZeroRows() 
352:    routine. 
353: */
356: PetscErrorCode MatZeroRows_MPIDense(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag)
357: {
358:   Mat_MPIDense   *l = (Mat_MPIDense*)A->data;
360:   PetscInt       i,*owners = A->rmap.range;
361:   PetscInt       *nprocs,j,idx,nsends;
362:   PetscInt       nmax,*svalues,*starts,*owner,nrecvs;
363:   PetscInt       *rvalues,tag = A->tag,count,base,slen,*source;
364:   PetscInt       *lens,*lrows,*values;
365:   PetscMPIInt    n,imdex,rank = l->rank,size = l->size;
366:   MPI_Comm       comm = A->comm;
367:   MPI_Request    *send_waits,*recv_waits;
368:   MPI_Status     recv_status,*send_status;
369:   PetscTruth     found;

372:   /*  first count number of contributors to each processor */
373:   PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
374:   PetscMemzero(nprocs,2*size*sizeof(PetscInt));
375:   PetscMalloc((N+1)*sizeof(PetscInt),&owner); /* see note*/
376:   for (i=0; i<N; i++) {
377:     idx = rows[i];
378:     found = PETSC_FALSE;
379:     for (j=0; j<size; j++) {
380:       if (idx >= owners[j] && idx < owners[j+1]) {
381:         nprocs[2*j]++; nprocs[2*j+1] = 1; owner[i] = j; found = PETSC_TRUE; break;
382:       }
383:     }
384:     if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
385:   }
386:   nsends = 0;  for (i=0; i<size; i++) { nsends += nprocs[2*i+1];}

388:   /* inform other processors of the number of messages and the max length */
389:   PetscMaxSum(comm,nprocs,&nmax,&nrecvs);

391:   /* post receives:   */
392:   PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);
393:   PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
394:   for (i=0; i<nrecvs; i++) {
395:     MPI_Irecv(rvalues+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);
396:   }

398:   /* do sends:
399:       1) starts[i] gives the starting index in svalues for stuff going to 
400:          the ith processor
401:   */
402:   PetscMalloc((N+1)*sizeof(PetscInt),&svalues);
403:   PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
404:   PetscMalloc((size+1)*sizeof(PetscInt),&starts);
405:   starts[0]  = 0;
406:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
407:   for (i=0; i<N; i++) {
408:     svalues[starts[owner[i]]++] = rows[i];
409:   }

411:   starts[0] = 0;
412:   for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
413:   count = 0;
414:   for (i=0; i<size; i++) {
415:     if (nprocs[2*i+1]) {
416:       MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);
417:     }
418:   }
419:   PetscFree(starts);

421:   base = owners[rank];

423:   /*  wait on receives */
424:   PetscMalloc(2*(nrecvs+1)*sizeof(PetscInt),&lens);
425:   source = lens + nrecvs;
426:   count  = nrecvs; slen = 0;
427:   while (count) {
428:     MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
429:     /* unpack receives into our local space */
430:     MPI_Get_count(&recv_status,MPIU_INT,&n);
431:     source[imdex]  = recv_status.MPI_SOURCE;
432:     lens[imdex]    = n;
433:     slen += n;
434:     count--;
435:   }
436:   PetscFree(recv_waits);
437: 
438:   /* move the received row indices into lrows */
439:   PetscMalloc((slen+1)*sizeof(PetscInt),&lrows);
440:   count = 0;
441:   for (i=0; i<nrecvs; i++) {
442:     values = rvalues + i*nmax;
443:     for (j=0; j<lens[i]; j++) {
444:       lrows[count++] = values[j] - base;
445:     }
446:   }
447:   PetscFree(rvalues);
448:   PetscFree(lens);
449:   PetscFree(owner);
450:   PetscFree(nprocs);
451: 
452:   /* actually zap the local rows */
453:   MatZeroRows(l->A,slen,lrows,diag);
454:   PetscFree(lrows);

456:   /* wait on sends */
457:   if (nsends) {
458:     PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
459:     MPI_Waitall(nsends,send_waits,send_status);
460:     PetscFree(send_status);
461:   }
462:   PetscFree(send_waits);
463:   PetscFree(svalues);

465:   return(0);
466: }

470: PetscErrorCode MatMult_MPIDense(Mat mat,Vec xx,Vec yy)
471: {
472:   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;

476:   VecScatterBegin(xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD,mdn->Mvctx);
477:   VecScatterEnd(xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD,mdn->Mvctx);
478:   MatMult_SeqDense(mdn->A,mdn->lvec,yy);
479:   return(0);
480: }

484: PetscErrorCode MatMultAdd_MPIDense(Mat mat,Vec xx,Vec yy,Vec zz)
485: {
486:   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;

490:   VecScatterBegin(xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD,mdn->Mvctx);
491:   VecScatterEnd(xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD,mdn->Mvctx);
492:   MatMultAdd_SeqDense(mdn->A,mdn->lvec,yy,zz);
493:   return(0);
494: }

498: PetscErrorCode MatMultTranspose_MPIDense(Mat A,Vec xx,Vec yy)
499: {
500:   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
502:   PetscScalar    zero = 0.0;

505:   VecSet(yy,zero);
506:   MatMultTranspose_SeqDense(a->A,xx,a->lvec);
507:   VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
508:   VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
509:   return(0);
510: }

514: PetscErrorCode MatMultTransposeAdd_MPIDense(Mat A,Vec xx,Vec yy,Vec zz)
515: {
516:   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;

520:   VecCopy(yy,zz);
521:   MatMultTranspose_SeqDense(a->A,xx,a->lvec);
522:   VecScatterBegin(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
523:   VecScatterEnd(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
524:   return(0);
525: }

529: PetscErrorCode MatGetDiagonal_MPIDense(Mat A,Vec v)
530: {
531:   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
532:   Mat_SeqDense   *aloc = (Mat_SeqDense*)a->A->data;
534:   PetscInt       len,i,n,m = A->rmap.n,radd;
535:   PetscScalar    *x,zero = 0.0;
536: 
538:   VecSet(v,zero);
539:   VecGetArray(v,&x);
540:   VecGetSize(v,&n);
541:   if (n != A->rmap.N) SETERRQ(PETSC_ERR_ARG_SIZ,"Nonconforming mat and vec");
542:   len  = PetscMin(a->A->rmap.n,a->A->cmap.n);
543:   radd = A->rmap.rstart*m;
544:   for (i=0; i<len; i++) {
545:     x[i] = aloc->v[radd + i*m + i];
546:   }
547:   VecRestoreArray(v,&x);
548:   return(0);
549: }

553: PetscErrorCode MatDestroy_MPIDense(Mat mat)
554: {
555:   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;


560: #if defined(PETSC_USE_LOG)
561:   PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap.N,mat->cmap.N);
562: #endif
563:   MatStashDestroy_Private(&mat->stash);
564:   MatDestroy(mdn->A);
565:   if (mdn->lvec)   {VecDestroy(mdn->lvec);}
566:   if (mdn->Mvctx)  {VecScatterDestroy(mdn->Mvctx);}
567:   if (mdn->factor) {
568:     PetscFree(mdn->factor->temp);
569:     PetscFree(mdn->factor->tag);
570:     PetscFree(mdn->factor->pivots);
571:     PetscFree(mdn->factor);
572:   }
573:   PetscFree(mdn);
574:   PetscObjectChangeTypeName((PetscObject)mat,0);
575:   PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);
576:   PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMPIDenseSetPreallocation_C","",PETSC_NULL);
577:   PetscObjectComposeFunction((PetscObject)mat,"MatMatMult_mpiaij_mpidense_C","",PETSC_NULL);
578:   PetscObjectComposeFunction((PetscObject)mat,"MatMatMultSymbolic_mpiaij_mpidense_C","",PETSC_NULL);
579:   PetscObjectComposeFunction((PetscObject)mat,"MatMatMultNumeric_mpiaij_mpidense_C","",PETSC_NULL);
580:   return(0);
581: }

585: static PetscErrorCode MatView_MPIDense_Binary(Mat mat,PetscViewer viewer)
586: {
587:   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;

591:   if (mdn->size == 1) {
592:     MatView(mdn->A,viewer);
593:   }
594:   else SETERRQ(PETSC_ERR_SUP,"Only uniprocessor output supported");
595:   return(0);
596: }

600: static PetscErrorCode MatView_MPIDense_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
601: {
602:   Mat_MPIDense      *mdn = (Mat_MPIDense*)mat->data;
603:   PetscErrorCode    ierr;
604:   PetscMPIInt       size = mdn->size,rank = mdn->rank;
605:   PetscViewerType   vtype;
606:   PetscTruth        iascii,isdraw;
607:   PetscViewer       sviewer;
608:   PetscViewerFormat format;

611:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
612:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
613:   if (iascii) {
614:     PetscViewerGetType(viewer,&vtype);
615:     PetscViewerGetFormat(viewer,&format);
616:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
617:       MatInfo info;
618:       MatGetInfo(mat,MAT_LOCAL,&info);
619:       PetscViewerASCIISynchronizedPrintf(viewer,"  [%d] local rows %D nz %D nz alloced %D mem %D \n",rank,mat->rmap.n,
620:                    (PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);
621:       PetscViewerFlush(viewer);
622:       VecScatterView(mdn->Mvctx,viewer);
623:       return(0);
624:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
625:       return(0);
626:     }
627:   } else if (isdraw) {
628:     PetscDraw  draw;
629:     PetscTruth isnull;

631:     PetscViewerDrawGetDraw(viewer,0,&draw);
632:     PetscDrawIsNull(draw,&isnull);
633:     if (isnull) return(0);
634:   }

636:   if (size == 1) {
637:     MatView(mdn->A,viewer);
638:   } else {
639:     /* assemble the entire matrix onto the first processor */
640:     Mat         A;
641:     PetscInt    M = mat->rmap.N,N = mat->cmap.N,m,row,i,nz;
642:     PetscInt    *cols;
643:     PetscScalar *vals;

645:     MatCreate(mat->comm,&A);
646:     if (!rank) {
647:       MatSetSizes(A,M,N,M,N);
648:     } else {
649:       MatSetSizes(A,0,0,M,N);
650:     }
651:     /* Since this is a temporary matrix, MATMPIDENSE instead of A->type_name here is probably acceptable. */
652:     MatSetType(A,MATMPIDENSE);
653:     MatMPIDenseSetPreallocation(A,PETSC_NULL);
654:     PetscLogObjectParent(mat,A);

656:     /* Copy the matrix ... This isn't the most efficient means,
657:        but it's quick for now */
658:     A->insertmode = INSERT_VALUES;
659:     row = mat->rmap.rstart; m = mdn->A->rmap.n;
660:     for (i=0; i<m; i++) {
661:       MatGetRow_MPIDense(mat,row,&nz,&cols,&vals);
662:       MatSetValues_MPIDense(A,1,&row,nz,cols,vals,INSERT_VALUES);
663:       MatRestoreRow_MPIDense(mat,row,&nz,&cols,&vals);
664:       row++;
665:     }

667:     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
668:     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
669:     PetscViewerGetSingleton(viewer,&sviewer);
670:     if (!rank) {
671:       MatView(((Mat_MPIDense*)(A->data))->A,sviewer);
672:     }
673:     PetscViewerRestoreSingleton(viewer,&sviewer);
674:     PetscViewerFlush(viewer);
675:     MatDestroy(A);
676:   }
677:   return(0);
678: }

682: PetscErrorCode MatView_MPIDense(Mat mat,PetscViewer viewer)
683: {
685:   PetscTruth     iascii,isbinary,isdraw,issocket;
686: 
688: 
689:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
690:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
691:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);
692:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);

694:   if (iascii || issocket || isdraw) {
695:     MatView_MPIDense_ASCIIorDraworSocket(mat,viewer);
696:   } else if (isbinary) {
697:     MatView_MPIDense_Binary(mat,viewer);
698:   } else {
699:     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported by MPI dense matrix",((PetscObject)viewer)->type_name);
700:   }
701:   return(0);
702: }

706: PetscErrorCode MatGetInfo_MPIDense(Mat A,MatInfoType flag,MatInfo *info)
707: {
708:   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data;
709:   Mat            mdn = mat->A;
711:   PetscReal      isend[5],irecv[5];

714:   info->rows_global    = (double)A->rmap.N;
715:   info->columns_global = (double)A->cmap.N;
716:   info->rows_local     = (double)A->rmap.n;
717:   info->columns_local  = (double)A->cmap.N;
718:   info->block_size     = 1.0;
719:   MatGetInfo(mdn,MAT_LOCAL,info);
720:   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
721:   isend[3] = info->memory;  isend[4] = info->mallocs;
722:   if (flag == MAT_LOCAL) {
723:     info->nz_used      = isend[0];
724:     info->nz_allocated = isend[1];
725:     info->nz_unneeded  = isend[2];
726:     info->memory       = isend[3];
727:     info->mallocs      = isend[4];
728:   } else if (flag == MAT_GLOBAL_MAX) {
729:     MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,A->comm);
730:     info->nz_used      = irecv[0];
731:     info->nz_allocated = irecv[1];
732:     info->nz_unneeded  = irecv[2];
733:     info->memory       = irecv[3];
734:     info->mallocs      = irecv[4];
735:   } else if (flag == MAT_GLOBAL_SUM) {
736:     MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,A->comm);
737:     info->nz_used      = irecv[0];
738:     info->nz_allocated = irecv[1];
739:     info->nz_unneeded  = irecv[2];
740:     info->memory       = irecv[3];
741:     info->mallocs      = irecv[4];
742:   }
743:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
744:   info->fill_ratio_needed = 0;
745:   info->factor_mallocs    = 0;
746:   return(0);
747: }

751: PetscErrorCode MatSetOption_MPIDense(Mat A,MatOption op)
752: {
753:   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;

757:   switch (op) {
758:   case MAT_NO_NEW_NONZERO_LOCATIONS:
759:   case MAT_YES_NEW_NONZERO_LOCATIONS:
760:   case MAT_NEW_NONZERO_LOCATION_ERR:
761:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
762:   case MAT_COLUMNS_SORTED:
763:   case MAT_COLUMNS_UNSORTED:
764:     MatSetOption(a->A,op);
765:     break;
766:   case MAT_ROW_ORIENTED:
767:     a->roworiented = PETSC_TRUE;
768:     MatSetOption(a->A,op);
769:     break;
770:   case MAT_ROWS_SORTED:
771:   case MAT_ROWS_UNSORTED:
772:   case MAT_YES_NEW_DIAGONALS:
773:   case MAT_USE_HASH_TABLE:
774:     PetscInfo1(A,"Option %d ignored\n",op);
775:     break;
776:   case MAT_COLUMN_ORIENTED:
777:     a->roworiented = PETSC_FALSE;
778:     MatSetOption(a->A,op);
779:     break;
780:   case MAT_IGNORE_OFF_PROC_ENTRIES:
781:     a->donotstash = PETSC_TRUE;
782:     break;
783:   case MAT_NO_NEW_DIAGONALS:
784:     SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
785:   case MAT_SYMMETRIC:
786:   case MAT_STRUCTURALLY_SYMMETRIC:
787:   case MAT_NOT_SYMMETRIC:
788:   case MAT_NOT_STRUCTURALLY_SYMMETRIC:
789:   case MAT_HERMITIAN:
790:   case MAT_NOT_HERMITIAN:
791:   case MAT_SYMMETRY_ETERNAL:
792:   case MAT_NOT_SYMMETRY_ETERNAL:
793:     break;
794:   default:
795:     SETERRQ1(PETSC_ERR_SUP,"unknown option %d",op);
796:   }
797:   return(0);
798: }


803: PetscErrorCode MatDiagonalScale_MPIDense(Mat A,Vec ll,Vec rr)
804: {
805:   Mat_MPIDense   *mdn = (Mat_MPIDense*)A->data;
806:   Mat_SeqDense   *mat = (Mat_SeqDense*)mdn->A->data;
807:   PetscScalar    *l,*r,x,*v;
809:   PetscInt       i,j,s2a,s3a,s2,s3,m=mdn->A->rmap.n,n=mdn->A->cmap.n;

812:   MatGetLocalSize(A,&s2,&s3);
813:   if (ll) {
814:     VecGetLocalSize(ll,&s2a);
815:     if (s2a != s2) SETERRQ2(PETSC_ERR_ARG_SIZ,"Left scaling vector non-conforming local size, %d != %d.", s2a, s2);
816:     VecGetArray(ll,&l);
817:     for (i=0; i<m; i++) {
818:       x = l[i];
819:       v = mat->v + i;
820:       for (j=0; j<n; j++) { (*v) *= x; v+= m;}
821:     }
822:     VecRestoreArray(ll,&l);
823:     PetscLogFlops(n*m);
824:   }
825:   if (rr) {
826:     VecGetLocalSize(rr,&s3a);
827:     if (s3a != s3) SETERRQ2(PETSC_ERR_ARG_SIZ,"Right scaling vec non-conforming local size, %d != %d.", s3a, s3);
828:     VecScatterBegin(rr,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD,mdn->Mvctx);
829:     VecScatterEnd(rr,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD,mdn->Mvctx);
830:     VecGetArray(mdn->lvec,&r);
831:     for (i=0; i<n; i++) {
832:       x = r[i];
833:       v = mat->v + i*m;
834:       for (j=0; j<m; j++) { (*v++) *= x;}
835:     }
836:     VecRestoreArray(mdn->lvec,&r);
837:     PetscLogFlops(n*m);
838:   }
839:   return(0);
840: }

844: PetscErrorCode MatNorm_MPIDense(Mat A,NormType type,PetscReal *nrm)
845: {
846:   Mat_MPIDense   *mdn = (Mat_MPIDense*)A->data;
847:   Mat_SeqDense   *mat = (Mat_SeqDense*)mdn->A->data;
849:   PetscInt       i,j;
850:   PetscReal      sum = 0.0;
851:   PetscScalar    *v = mat->v;

854:   if (mdn->size == 1) {
855:      MatNorm(mdn->A,type,nrm);
856:   } else {
857:     if (type == NORM_FROBENIUS) {
858:       for (i=0; i<mdn->A->cmap.n*mdn->A->rmap.n; i++) {
859: #if defined(PETSC_USE_COMPLEX)
860:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
861: #else
862:         sum += (*v)*(*v); v++;
863: #endif
864:       }
865:       MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPI_SUM,A->comm);
866:       *nrm = sqrt(*nrm);
867:       PetscLogFlops(2*mdn->A->cmap.n*mdn->A->rmap.n);
868:     } else if (type == NORM_1) {
869:       PetscReal *tmp,*tmp2;
870:       PetscMalloc(2*A->cmap.N*sizeof(PetscReal),&tmp);
871:       tmp2 = tmp + A->cmap.N;
872:       PetscMemzero(tmp,2*A->cmap.N*sizeof(PetscReal));
873:       *nrm = 0.0;
874:       v = mat->v;
875:       for (j=0; j<mdn->A->cmap.n; j++) {
876:         for (i=0; i<mdn->A->rmap.n; i++) {
877:           tmp[j] += PetscAbsScalar(*v);  v++;
878:         }
879:       }
880:       MPI_Allreduce(tmp,tmp2,A->cmap.N,MPIU_REAL,MPI_SUM,A->comm);
881:       for (j=0; j<A->cmap.N; j++) {
882:         if (tmp2[j] > *nrm) *nrm = tmp2[j];
883:       }
884:       PetscFree(tmp);
885:       PetscLogFlops(A->cmap.n*A->rmap.n);
886:     } else if (type == NORM_INFINITY) { /* max row norm */
887:       PetscReal ntemp;
888:       MatNorm(mdn->A,type,&ntemp);
889:       MPI_Allreduce(&ntemp,nrm,1,MPIU_REAL,MPI_MAX,A->comm);
890:     } else {
891:       SETERRQ(PETSC_ERR_SUP,"No support for two norm");
892:     }
893:   }
894:   return(0);
895: }

899: PetscErrorCode MatTranspose_MPIDense(Mat A,Mat *matout)
900: {
901:   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
902:   Mat_SeqDense   *Aloc = (Mat_SeqDense*)a->A->data;
903:   Mat            B;
904:   PetscInt       M = A->rmap.N,N = A->cmap.N,m,n,*rwork,rstart = A->rmap.rstart;
906:   PetscInt       j,i;
907:   PetscScalar    *v;

910:   if (!matout && M != N) {
911:     SETERRQ(PETSC_ERR_SUP,"Supports square matrix only in-place");
912:   }
913:   MatCreate(A->comm,&B);
914:   MatSetSizes(B,PETSC_DECIDE,PETSC_DECIDE,N,M);
915:   MatSetType(B,A->type_name);
916:   MatMPIDenseSetPreallocation(B,PETSC_NULL);

918:   m = a->A->rmap.n; n = a->A->cmap.n; v = Aloc->v;
919:   PetscMalloc(n*sizeof(PetscInt),&rwork);
920:   for (j=0; j<n; j++) {
921:     for (i=0; i<m; i++) rwork[i] = rstart + i;
922:     MatSetValues(B,1,&j,m,rwork,v,INSERT_VALUES);
923:     v   += m;
924:   }
925:   PetscFree(rwork);
926:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
927:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
928:   if (matout) {
929:     *matout = B;
930:   } else {
931:     MatHeaderCopy(A,B);
932:   }
933:   return(0);
934: }

 936:  #include "petscblaslapack.h"
939: PetscErrorCode MatScale_MPIDense(Mat inA,PetscScalar alpha)
940: {
941:   Mat_MPIDense   *A = (Mat_MPIDense*)inA->data;
942:   Mat_SeqDense   *a = (Mat_SeqDense*)A->A->data;
943:   PetscScalar    oalpha = alpha;
944:   PetscBLASInt   one = 1,nz = (PetscBLASInt)inA->rmap.n*inA->cmap.N;

948:   BLASscal_(&nz,&oalpha,a->v,&one);
949:   PetscLogFlops(nz);
950:   return(0);
951: }

953: static PetscErrorCode MatDuplicate_MPIDense(Mat,MatDuplicateOption,Mat *);

957: PetscErrorCode MatSetUpPreallocation_MPIDense(Mat A)
958: {

962:    MatMPIDenseSetPreallocation(A,0);
963:   return(0);
964: }

968: PetscErrorCode MatMatMultSymbolic_MPIDense_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
969: {
971:   PetscInt       m=A->rmap.n,n=B->cmap.n;
972:   Mat            Cmat;

975:   if (A->cmap.n != B->rmap.n) SETERRQ2(PETSC_ERR_ARG_SIZ,"A->cmap.n %d != B->rmap.n %d\n",A->cmap.n,B->rmap.n);
976:   MatCreate(B->comm,&Cmat);
977:   MatSetSizes(Cmat,m,n,A->rmap.N,B->cmap.N);
978:   MatSetType(Cmat,MATMPIDENSE);
979:   MatMPIDenseSetPreallocation(Cmat,PETSC_NULL);
980:   MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);
981:   MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);
982:   *C = Cmat;
983:   return(0);
984: }

986: /* -------------------------------------------------------------------*/
987: static struct _MatOps MatOps_Values = {MatSetValues_MPIDense,
988:        MatGetRow_MPIDense,
989:        MatRestoreRow_MPIDense,
990:        MatMult_MPIDense,
991: /* 4*/ MatMultAdd_MPIDense,
992:        MatMultTranspose_MPIDense,
993:        MatMultTransposeAdd_MPIDense,
994:        0,
995:        0,
996:        0,
997: /*10*/ 0,
998:        0,
999:        0,
1000:        0,
1001:        MatTranspose_MPIDense,
1002: /*15*/ MatGetInfo_MPIDense,
1003:        MatEqual_MPIDense,
1004:        MatGetDiagonal_MPIDense,
1005:        MatDiagonalScale_MPIDense,
1006:        MatNorm_MPIDense,
1007: /*20*/ MatAssemblyBegin_MPIDense,
1008:        MatAssemblyEnd_MPIDense,
1009:        0,
1010:        MatSetOption_MPIDense,
1011:        MatZeroEntries_MPIDense,
1012: /*25*/ MatZeroRows_MPIDense,
1013:        MatLUFactorSymbolic_MPIDense,
1014:        0,
1015:        MatCholeskyFactorSymbolic_MPIDense,
1016:        0,
1017: /*30*/ MatSetUpPreallocation_MPIDense,
1018:        0,
1019:        0,
1020:        MatGetArray_MPIDense,
1021:        MatRestoreArray_MPIDense,
1022: /*35*/ MatDuplicate_MPIDense,
1023:        0,
1024:        0,
1025:        0,
1026:        0,
1027: /*40*/ 0,
1028:        MatGetSubMatrices_MPIDense,
1029:        0,
1030:        MatGetValues_MPIDense,
1031:        0,
1032: /*45*/ 0,
1033:        MatScale_MPIDense,
1034:        0,
1035:        0,
1036:        0,
1037: /*50*/ 0,
1038:        0,
1039:        0,
1040:        0,
1041:        0,
1042: /*55*/ 0,
1043:        0,
1044:        0,
1045:        0,
1046:        0,
1047: /*60*/ MatGetSubMatrix_MPIDense,
1048:        MatDestroy_MPIDense,
1049:        MatView_MPIDense,
1050:        0,
1051:        0,
1052: /*65*/ 0,
1053:        0,
1054:        0,
1055:        0,
1056:        0,
1057: /*70*/ 0,
1058:        0,
1059:        0,
1060:        0,
1061:        0,
1062: /*75*/ 0,
1063:        0,
1064:        0,
1065:        0,
1066:        0,
1067: /*80*/ 0,
1068:        0,
1069:        0,
1070:        0,
1071: /*84*/ MatLoad_MPIDense,
1072:        0,
1073:        0,
1074:        0,
1075:        0,
1076:        0,
1077: /*90*/ 0,
1078:        MatMatMultSymbolic_MPIDense_MPIDense,
1079:        0,
1080:        0,
1081:        0,
1082: /*95*/ 0,
1083:        0,
1084:        0,
1085:        0};

1090: PetscErrorCode  MatMPIDenseSetPreallocation_MPIDense(Mat mat,PetscScalar *data)
1091: {
1092:   Mat_MPIDense   *a;

1096:   mat->preallocated = PETSC_TRUE;
1097:   /* Note:  For now, when data is specified above, this assumes the user correctly
1098:    allocates the local dense storage space.  We should add error checking. */

1100:   a    = (Mat_MPIDense*)mat->data;
1101:   MatCreate(PETSC_COMM_SELF,&a->A);
1102:   MatSetSizes(a->A,mat->rmap.n,mat->cmap.N,mat->rmap.n,mat->cmap.N);
1103:   MatSetType(a->A,MATSEQDENSE);
1104:   MatSeqDenseSetPreallocation(a->A,data);
1105:   PetscLogObjectParent(mat,a->A);
1106:   return(0);
1107: }

1110: /*MC
1111:    MATMPIDENSE - MATMPIDENSE = "mpidense" - A matrix type to be used for distributed dense matrices.

1113:    Options Database Keys:
1114: . -mat_type mpidense - sets the matrix type to "mpidense" during a call to MatSetFromOptions()

1116:   Level: beginner

1118: .seealso: MatCreateMPIDense
1119: M*/
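/*
   A brief sketch of selecting this type from the options database, assuming global
   sizes M and N are already defined and the program is run with -mat_type mpidense;
   CHKERRQ error checking is omitted.

     Mat A;

     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
     MatSetFromOptions(A);
*/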

1124: PetscErrorCode  MatCreate_MPIDense(Mat mat)
1125: {
1126:   Mat_MPIDense   *a;

1130:   PetscNew(Mat_MPIDense,&a);
1131:   mat->data         = (void*)a;
1132:   PetscMemcpy(mat->ops,&MatOps_Values,sizeof(struct _MatOps));
1133:   mat->factor       = 0;
1134:   mat->mapping      = 0;

1136:   a->factor       = 0;
1137:   mat->insertmode = NOT_SET_VALUES;
1138:   MPI_Comm_rank(mat->comm,&a->rank);
1139:   MPI_Comm_size(mat->comm,&a->size);

1141:   mat->rmap.bs = mat->cmap.bs = 1;
1142:   PetscMapInitialize(mat->comm,&mat->rmap);
1143:   PetscMapInitialize(mat->comm,&mat->cmap);
1144:   a->nvec = mat->cmap.n;

1146:   /* build cache for off-processor entries */
1147:   a->donotstash = PETSC_FALSE;
1148:   MatStashCreate_Private(mat->comm,1,&mat->stash);

1150:   /* stuff used for matrix vector multiply */
1151:   a->lvec        = 0;
1152:   a->Mvctx       = 0;
1153:   a->roworiented = PETSC_TRUE;

1155:   PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatGetDiagonalBlock_C",
1156:                                      "MatGetDiagonalBlock_MPIDense",
1157:                                      MatGetDiagonalBlock_MPIDense);
1158:   PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMPIDenseSetPreallocation_C",
1159:                                      "MatMPIDenseSetPreallocation_MPIDense",
1160:                                      MatMPIDenseSetPreallocation_MPIDense);
1161:   PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMatMult_mpiaij_mpidense_C",
1162:                                      "MatMatMult_MPIAIJ_MPIDense",
1163:                                       MatMatMult_MPIAIJ_MPIDense);
1164:   PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMatMultSymbolic_mpiaij_mpidense_C",
1165:                                      "MatMatMultSymbolic_MPIAIJ_MPIDense",
1166:                                       MatMatMultSymbolic_MPIAIJ_MPIDense);
1167:   PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMatMultNumeric_mpiaij_mpidense_C",
1168:                                      "MatMatMultNumeric_MPIAIJ_MPIDense",
1169:                                       MatMatMultNumeric_MPIAIJ_MPIDense);
1170:   PetscObjectChangeTypeName((PetscObject)mat,MATMPIDENSE);
1171:   return(0);
1172: }

1175: /*MC
1176:    MATDENSE - MATDENSE = "dense" - A matrix type to be used for dense matrices.

1178:    This matrix type is identical to MATSEQDENSE when constructed with a single process communicator,
1179:    and MATMPIDENSE otherwise.

1181:    Options Database Keys:
1182: . -mat_type dense - sets the matrix type to "dense" during a call to MatSetFromOptions()

1184:   Level: beginner

1186: .seealso: MatCreateMPIDense,MATSEQDENSE,MATMPIDENSE
1187: M*/
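/*
   A short sketch of requesting the generic "dense" type explicitly, assuming global
   sizes M and N are already defined; on a one-process communicator the type resolves
   to MATSEQDENSE and otherwise to MATMPIDENSE (see MatCreate_Dense() below).
   CHKERRQ error checking is omitted.

     Mat A;

     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
     MatSetType(A,MATDENSE);
*/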

1192: PetscErrorCode  MatCreate_Dense(Mat A)
1193: {
1195:   PetscMPIInt    size;

1198:   MPI_Comm_size(A->comm,&size);
1199:   if (size == 1) {
1200:     MatSetType(A,MATSEQDENSE);
1201:   } else {
1202:     MatSetType(A,MATMPIDENSE);
1203:   }
1204:   return(0);
1205: }

1210: /*@C
1211:    MatMPIDenseSetPreallocation - Sets the array used to store the matrix entries

1213:    Not collective

1215:    Input Parameters:
1216: +  A - the matrix
1217: -  data - optional location of matrix data.  Set data=PETSC_NULL for PETSc
1218:    to control all matrix memory allocation.

1220:    Notes:
1221:    The dense format is fully compatible with standard Fortran 77
1222:    storage by columns.

1224:    The data input variable is intended primarily for Fortran programmers
1225:    who wish to allocate their own matrix memory space.  Most users should
1226:    set data=PETSC_NULL.

1228:    Level: intermediate

1230: .keywords: matrix,dense, parallel

1232: .seealso: MatCreate(), MatCreateSeqDense(), MatSetValues()
1233: @*/
1234: PetscErrorCode  MatMPIDenseSetPreallocation(Mat mat,PetscScalar *data)
1235: {
1236:   PetscErrorCode ierr,(*f)(Mat,PetscScalar *);

1239:   PetscObjectQueryFunction((PetscObject)mat,"MatMPIDenseSetPreallocation_C",(void (**)(void))&f);
1240:   if (f) {
1241:     (*f)(mat,data);
1242:   }
1243:   return(0);
1244: }
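/*
   A minimal sketch of the usual preallocation sequence, assuming local sizes m,n and
   global sizes M,N have already been chosen; passing PETSC_NULL lets PETSc allocate
   the local dense storage itself. CHKERRQ error checking is omitted.

     Mat A;

     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,m,n,M,N);
     MatSetType(A,MATMPIDENSE);
     MatMPIDenseSetPreallocation(A,PETSC_NULL);
*/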

1248: /*@C
1249:    MatCreateMPIDense - Creates a parallel matrix in dense format.

1251:    Collective on MPI_Comm

1253:    Input Parameters:
1254: +  comm - MPI communicator
1255: .  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
1256: .  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
1257: .  M - number of global rows (or PETSC_DECIDE to have it calculated if m is given)
1258: .  N - number of global columns (or PETSC_DECIDE to have it calculated if n is given)
1259: -  data - optional location of matrix data.  Set data=PETSC_NULL (PETSC_NULL_SCALAR for Fortran users) for PETSc
1260:    to control all matrix memory allocation.

1262:    Output Parameter:
1263: .  A - the matrix

1265:    Notes:
1266:    The dense format is fully compatible with standard Fortran 77
1267:    storage by columns.

1269:    The data input variable is intended primarily for Fortran programmers
1270:    who wish to allocate their own matrix memory space.  Most users should
1271:    set data=PETSC_NULL (PETSC_NULL_SCALAR for Fortran users).

1273:    The user MUST specify either the local or global matrix dimensions
1274:    (possibly both).

1276:    Level: intermediate

1278: .keywords: matrix,dense, parallel

1280: .seealso: MatCreate(), MatCreateSeqDense(), MatSetValues()
1281: @*/
1282: PetscErrorCode  MatCreateMPIDense(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscScalar *data,Mat *A)
1283: {
1285:   PetscMPIInt    size;

1288:   MatCreate(comm,A);
1289:   MatSetSizes(*A,m,n,M,N);
1290:   MPI_Comm_size(comm,&size);
1291:   if (size > 1) {
1292:     MatSetType(*A,MATMPIDENSE);
1293:     MatMPIDenseSetPreallocation(*A,data);
1294:   } else {
1295:     MatSetType(*A,MATSEQDENSE);
1296:     MatSeqDenseSetPreallocation(*A,data);
1297:   }
1298:   return(0);
1299: }
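/*
   A minimal sketch of creating and filling a parallel dense matrix with
   MatCreateMPIDense(), assuming a 10 x 10 global matrix with PETSc-managed storage;
   each process sets only its own rows, and CHKERRQ error checking is omitted.

     Mat         A;
     PetscInt    i,j,rstart,rend;
     PetscScalar v;

     MatCreateMPIDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,10,10,PETSC_NULL,&A);
     MatGetOwnershipRange(A,&rstart,&rend);
     for (i=rstart; i<rend; i++) {
       for (j=0; j<10; j++) {
         v = (PetscScalar)(i + j);
         MatSetValues(A,1,&i,1,&j,&v,INSERT_VALUES);
       }
     }
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
     MatDestroy(A);
*/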

1303: static PetscErrorCode MatDuplicate_MPIDense(Mat A,MatDuplicateOption cpvalues,Mat *newmat)
1304: {
1305:   Mat            mat;
1306:   Mat_MPIDense   *a,*oldmat = (Mat_MPIDense*)A->data;

1310:   *newmat       = 0;
1311:   MatCreate(A->comm,&mat);
1312:   MatSetSizes(mat,A->rmap.n,A->cmap.n,A->rmap.N,A->cmap.N);
1313:   MatSetType(mat,A->type_name);
1314:   a                 = (Mat_MPIDense*)mat->data;
1315:   PetscMemcpy(mat->ops,A->ops,sizeof(struct _MatOps));
1316:   mat->factor       = A->factor;
1317:   mat->assembled    = PETSC_TRUE;
1318:   mat->preallocated = PETSC_TRUE;

1320:   mat->rmap.rstart     = A->rmap.rstart;
1321:   mat->rmap.rend       = A->rmap.rend;
1322:   a->size              = oldmat->size;
1323:   a->rank              = oldmat->rank;
1324:   mat->insertmode      = NOT_SET_VALUES;
1325:   a->nvec              = oldmat->nvec;
1326:   a->donotstash        = oldmat->donotstash;
1327: 
1328:   PetscMemcpy(mat->rmap.range,A->rmap.range,(a->size+1)*sizeof(PetscInt));
1329:   PetscMemcpy(mat->cmap.range,A->cmap.range,(a->size+1)*sizeof(PetscInt));
1330:   MatStashCreate_Private(A->comm,1,&mat->stash);

1332:   MatSetUpMultiply_MPIDense(mat);
1333:   MatDuplicate(oldmat->A,cpvalues,&a->A);
1334:   PetscLogObjectParent(mat,a->A);
1335:   *newmat = mat;
1336:   return(0);
1337: }

1339:  #include "petscsys.h"

1343: PetscErrorCode MatLoad_MPIDense_DenseInFile(MPI_Comm comm,PetscInt fd,PetscInt M,PetscInt N, MatType type,Mat *newmat)
1344: {
1346:   PetscMPIInt    rank,size;
1347:   PetscInt       *rowners,i,m,nz,j;
1348:   PetscScalar    *array,*vals,*vals_ptr;
1349:   MPI_Status     status;

1352:   MPI_Comm_rank(comm,&rank);
1353:   MPI_Comm_size(comm,&size);

1355:   /* determine ownership of all rows */
1356:   m          = M/size + ((M % size) > rank);
1357:   PetscMalloc((size+2)*sizeof(PetscInt),&rowners);
1358:   MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
1359:   rowners[0] = 0;
1360:   for (i=2; i<=size; i++) {
1361:     rowners[i] += rowners[i-1];
1362:   }

1364:   MatCreate(comm,newmat);
1365:   MatSetSizes(*newmat,m,PETSC_DECIDE,M,N);
1366:   MatSetType(*newmat,type);
1367:   MatMPIDenseSetPreallocation(*newmat,PETSC_NULL);
1368:   MatGetArray(*newmat,&array);

1370:   if (!rank) {
1371:     PetscMalloc(m*N*sizeof(PetscScalar),&vals);

1373:     /* read in my part of the matrix numerical values  */
1374:     PetscBinaryRead(fd,vals,m*N,PETSC_SCALAR);
1375: 
1376:     /* insert into the matrix row by row (this is why we cannot read directly into array) */
1377:     vals_ptr = vals;
1378:     for (i=0; i<m; i++) {
1379:       for (j=0; j<N; j++) {
1380:         array[i + j*m] = *vals_ptr++;
1381:       }
1382:     }

1384:     /* read in the other processors' parts and ship them out */
1385:     for (i=1; i<size; i++) {
1386:       nz   = (rowners[i+1] - rowners[i])*N;
1387:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
1388:       MPI_Send(vals,nz,MPIU_SCALAR,i,(*newmat)->tag,comm);
1389:     }
1390:   } else {
1391:     /* receive numeric values */
1392:     PetscMalloc(m*N*sizeof(PetscScalar),&vals);

1394:     /* receive message of values*/
1395:     MPI_Recv(vals,m*N,MPIU_SCALAR,0,(*newmat)->tag,comm,&status);

1397:     /* insert into the matrix row by row (this is why we cannot read directly into array) */
1398:     vals_ptr = vals;
1399:     for (i=0; i<m; i++) {
1400:       for (j=0; j<N; j++) {
1401:         array[i + j*m] = *vals_ptr++;
1402:       }
1403:     }
1404:   }
1405:   PetscFree(rowners);
1406:   PetscFree(vals);
1407:   MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);
1408:   MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);
1409:   return(0);
1410: }

1414: PetscErrorCode MatLoad_MPIDense(PetscViewer viewer, MatType type,Mat *newmat)
1415: {
1416:   Mat            A;
1417:   PetscScalar    *vals,*svals;
1418:   MPI_Comm       comm = ((PetscObject)viewer)->comm;
1419:   MPI_Status     status;
1420:   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag,*rowners,*sndcounts,m,maxnz;
1421:   PetscInt       header[4],*rowlengths = 0,M,N,*cols;
1422:   PetscInt       *ourlens,*procsnz = 0,*offlens,jj,*mycols,*smycols;
1423:   PetscInt       i,nz,j,rstart,rend;
1424:   int            fd;

1428:   MPI_Comm_size(comm,&size);
1429:   MPI_Comm_rank(comm,&rank);
1430:   if (!rank) {
1431:     PetscViewerBinaryGetDescriptor(viewer,&fd);
1432:     PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
1433:     if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
1434:   }

1436:   MPI_Bcast(header+1,3,MPIU_INT,0,comm);
1437:   M = header[1]; N = header[2]; nz = header[3];

1439:   /*
1440:        Handle the case where the matrix is stored on disk as a dense matrix
1441:   */
1442:   if (nz == MATRIX_BINARY_FORMAT_DENSE) {
1443:     MatLoad_MPIDense_DenseInFile(comm,fd,M,N,type,newmat);
1444:     return(0);
1445:   }

1447:   /* determine ownership of all rows */
1448:   m          = M/size + ((M % size) > rank);
1449:   PetscMalloc((size+2)*sizeof(PetscInt),&rowners);
1450:   MPI_Allgather(&m,1,MPI_INT,rowners+1,1,MPI_INT,comm);
1451:   rowners[0] = 0;
1452:   for (i=2; i<=size; i++) {
1453:     rowners[i] += rowners[i-1];
1454:   }
1455:   rstart = rowners[rank];
1456:   rend   = rowners[rank+1];

1458:   /* distribute row lengths to all processors */
1459:   PetscMalloc(2*(rend-rstart+1)*sizeof(PetscInt),&ourlens);
1460:   offlens = ourlens + (rend-rstart);
1461:   if (!rank) {
1462:     PetscMalloc(M*sizeof(PetscInt),&rowlengths);
1463:     PetscBinaryRead(fd,rowlengths,M,PETSC_INT);
1464:     PetscMalloc(size*sizeof(PetscMPIInt),&sndcounts);
1465:     for (i=0; i<size; i++) sndcounts[i] = rowners[i+1] - rowners[i];
1466:     MPI_Scatterv(rowlengths,sndcounts,rowners,MPIU_INT,ourlens,rend-rstart,MPIU_INT,0,comm);
1467:     PetscFree(sndcounts);
1468:   } else {
1469:     MPI_Scatterv(0,0,0,MPIU_INT,ourlens,rend-rstart,MPIU_INT,0,comm);
1470:   }

1472:   if (!rank) {
1473:     /* calculate the number of nonzeros on each processor */
1474:     PetscMalloc(size*sizeof(PetscInt),&procsnz);
1475:     PetscMemzero(procsnz,size*sizeof(PetscInt));
1476:     for (i=0; i<size; i++) {
1477:       for (j=rowners[i]; j< rowners[i+1]; j++) {
1478:         procsnz[i] += rowlengths[j];
1479:       }
1480:     }
1481:     PetscFree(rowlengths);

1483:     /* determine max buffer needed and allocate it */
1484:     maxnz = 0;
1485:     for (i=0; i<size; i++) {
1486:       maxnz = PetscMax(maxnz,procsnz[i]);
1487:     }
1488:     PetscMalloc(maxnz*sizeof(PetscInt),&cols);

1490:     /* read in my part of the matrix column indices  */
1491:     nz = procsnz[0];
1492:     PetscMalloc(nz*sizeof(PetscInt),&mycols);
1493:     PetscBinaryRead(fd,mycols,nz,PETSC_INT);

1495:     /* read in everyone else's part and ship it off */
1496:     for (i=1; i<size; i++) {
1497:       nz   = procsnz[i];
1498:       PetscBinaryRead(fd,cols,nz,PETSC_INT);
1499:       MPI_Send(cols,nz,MPIU_INT,i,tag,comm);
1500:     }
1501:     PetscFree(cols);
1502:   } else {
1503:     /* determine buffer space needed for message */
1504:     nz = 0;
1505:     for (i=0; i<m; i++) {
1506:       nz += ourlens[i];
1507:     }
1508:     PetscMalloc((nz+1)*sizeof(PetscInt),&mycols);

1510:     /* receive message of column indices*/
1511:     MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);
1512:     MPI_Get_count(&status,MPIU_INT,&maxnz);
1513:     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
1514:   }

1516:   /* loop over local rows, determining the number of off-diagonal entries */
1517:   PetscMemzero(offlens,m*sizeof(PetscInt));
1518:   jj = 0;
1519:   for (i=0; i<m; i++) {
1520:     for (j=0; j<ourlens[i]; j++) {
1521:       if (mycols[jj] < rstart || mycols[jj] >= rend) offlens[i]++;
1522:       jj++;
1523:     }
1524:   }

1526:   /* create our matrix */
1527:   for (i=0; i<m; i++) {
1528:     ourlens[i] -= offlens[i];
1529:   }
1530:   MatCreate(comm,newmat);
1531:   MatSetSizes(*newmat,m,PETSC_DECIDE,M,N);
1532:   MatSetType(*newmat,type);
1533:   MatMPIDenseSetPreallocation(*newmat,PETSC_NULL);
1534:   A = *newmat;
1535:   for (i=0; i<m; i++) {
1536:     ourlens[i] += offlens[i];
1537:   }

1539:   if (!rank) {
1540:     PetscMalloc(maxnz*sizeof(PetscScalar),&vals);

1542:     /* read in my part of the matrix numerical values  */
1543:     nz = procsnz[0];
1544:     PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
1545: 
1546:     /* insert into matrix */
1547:     jj      = rstart;
1548:     smycols = mycols;
1549:     svals   = vals;
1550:     for (i=0; i<m; i++) {
1551:       MatSetValues(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
1552:       smycols += ourlens[i];
1553:       svals   += ourlens[i];
1554:       jj++;
1555:     }

1557:     /* read in the other processors' parts and ship them out */
1558:     for (i=1; i<size; i++) {
1559:       nz   = procsnz[i];
1560:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
1561:       MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);
1562:     }
1563:     PetscFree(procsnz);
1564:   } else {
1565:     /* receive numeric values */
1566:     PetscMalloc((nz+1)*sizeof(PetscScalar),&vals);

1568:     /* receive message of values*/
1569:     MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);
1570:     MPI_Get_count(&status,MPIU_SCALAR,&maxnz);
1571:     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");

1573:     /* insert into matrix */
1574:     jj      = rstart;
1575:     smycols = mycols;
1576:     svals   = vals;
1577:     for (i=0; i<m; i++) {
1578:       MatSetValues(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
1579:       smycols += ourlens[i];
1580:       svals   += ourlens[i];
1581:       jj++;
1582:     }
1583:   }
1584:   PetscFree(ourlens);
1585:   PetscFree(vals);
1586:   PetscFree(mycols);
1587:   PetscFree(rowners);

1589:   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
1590:   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
1591:   return(0);
1592: }

1596: PetscErrorCode MatEqual_MPIDense(Mat A,Mat B,PetscTruth *flag)
1597: {
1598:   Mat_MPIDense   *matB = (Mat_MPIDense*)B->data,*matA = (Mat_MPIDense*)A->data;
1599:   Mat            a,b;
1600:   PetscTruth     flg;

1604:   a = matA->A;
1605:   b = matB->A;
1606:   MatEqual(a,b,&flg);
1607:   return(0);
1608: }