Actual source code: mpibdiag.c
1: #define PETSCMAT_DLL
3: /*
4: The basic matrix operations for the Block diagonal parallel
5: matrices.
6: */
7: #include "src/mat/impls/bdiag/mpi/mpibdiag.h"
11: PetscErrorCode MatSetValues_MPIBDiag(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
12: {
13: Mat_MPIBDiag *mbd = (Mat_MPIBDiag*)mat->data;
15: PetscInt i,j,row,rstart = mat->rmap.rstart,rend = mat->rmap.rend;
16: PetscTruth roworiented = mbd->roworiented;
19: for (i=0; i<m; i++) {
20: if (idxm[i] < 0) continue;
21: if (idxm[i] >= mat->rmap.N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
22: if (idxm[i] >= rstart && idxm[i] < rend) {
23: row = idxm[i] - rstart;
24: for (j=0; j<n; j++) {
25: if (idxn[j] < 0) continue;
26: if (idxn[j] >= mat->cmap.N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
27: if (roworiented) {
28: MatSetValues(mbd->A,1,&row,1,&idxn[j],v+i*n+j,addv);
29: } else {
30: MatSetValues(mbd->A,1,&row,1,&idxn[j],v+i+j*m,addv);
31: }
32: }
33: } else {
34: if (!mbd->donotstash) {
35: if (roworiented) {
36: MatStashValuesRow_Private(&mat->stash,idxm[i],n,idxn,v+i*n);
37: } else {
38: MatStashValuesCol_Private(&mat->stash,idxm[i],n,idxn,v+i,m);
39: }
40: }
41: }
42: }
43: return(0);
44: }
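A minimal caller-side sketch of how this routine is reached (not part of the original source): locally owned rows are inserted directly into mbd->A, while rows owned by another process are stashed until assembly. Here mat is assumed to be an already created and preallocated MATMPIBDIAG matrix; the row, column, and value choices are purely illustrative.

   PetscInt       rstart,rend,M,N,row,col = 0;
   PetscScalar    v = 1.0;
   PetscErrorCode ierr;

   ierr = MatGetSize(mat,&M,&N);CHKERRQ(ierr);
   ierr = MatGetOwnershipRange(mat,&rstart,&rend);CHKERRQ(ierr);
   row  = rstart;          /* locally owned: inserted straight into the local block */
   ierr = MatSetValues(mat,1,&row,1,&col,&v,INSERT_VALUES);CHKERRQ(ierr);
   row  = rend % M;        /* owned by another process (unless uniprocessor): stashed */
   ierr = MatSetValues(mat,1,&row,1,&col,&v,INSERT_VALUES);CHKERRQ(ierr);
   ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
   ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);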
48: PetscErrorCode MatGetValues_MPIBDiag(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
49: {
50: Mat_MPIBDiag *mbd = (Mat_MPIBDiag*)mat->data;
52: PetscInt i,j,row,rstart = mat->rmap.rstart,rend = mat->rmap.rend;
55: for (i=0; i<m; i++) {
56: if (idxm[i] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative row");
57: if (idxm[i] >= mat->rmap.N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
58: if (idxm[i] >= rstart && idxm[i] < rend) {
59: row = idxm[i] - rstart;
60: for (j=0; j<n; j++) {
61: if (idxn[j] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative column");
62: if (idxn[j] >= mat->cmap.N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
63: MatGetValues(mbd->A,1,&row,1,&idxn[j],v+i*n+j);
64: }
65: } else {
66: SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
67: }
68: }
69: return(0);
70: }
74: PetscErrorCode MatAssemblyBegin_MPIBDiag(Mat mat,MatAssemblyType mode)
75: {
76: MPI_Comm comm = mat->comm;
78: PetscInt nstash,reallocs;
79: InsertMode addv;
82: MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,comm);
83: if (addv == (ADD_VALUES|INSERT_VALUES)) {
84: SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Cannot mix adds/inserts on different procs");
85: }
86: mat->insertmode = addv; /* in case this processor had no cache */
87: MatStashScatterBegin_Private(&mat->stash,mat->rmap.range);
88: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
89: PetscInfo2(0,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
90: return(0);
91: }
95: PetscErrorCode MatAssemblyEnd_MPIBDiag(Mat mat,MatAssemblyType mode)
96: {
97: Mat_MPIBDiag *mbd = (Mat_MPIBDiag*)mat->data;
98: Mat_SeqBDiag *mlocal;
100: PetscMPIInt n;
101: PetscInt i,*row,*col;
102: PetscInt *tmp1,*tmp2,len,ict,Mblock,Nblock,flg,j,rstart,ncols;
103: PetscScalar *val;
104: InsertMode addv = mat->insertmode;
108: while (1) {
109: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
110: if (!flg) break;
111:
112: for (i=0; i<n;) {
113: /* Now identify the consecutive vals belonging to the same row */
114: for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
115: if (j < n) ncols = j-i;
116: else ncols = n-i;
117: /* Now assemble all these values with a single function call */
118: MatSetValues_MPIBDiag(mat,1,row+i,ncols,col+i,val+i,addv);
119: i = j;
120: }
121: }
122: MatStashScatterEnd_Private(&mat->stash);
124: MatAssemblyBegin(mbd->A,mode);
125: MatAssemblyEnd(mbd->A,mode);
127: /* Fix main diagonal location and determine global diagonals */
128: mlocal = (Mat_SeqBDiag*)mbd->A->data;
129: Mblock = mat->rmap.N/mat->rmap.bs; Nblock = mat->cmap.N/mat->rmap.bs;
130: len = Mblock + Nblock + 1; /* add 1 to prevent 0 malloc */
131: PetscMalloc(2*len*sizeof(PetscInt),&tmp1);
132: tmp2 = tmp1 + len;
133: PetscMemzero(tmp1,2*len*sizeof(PetscInt));
134: mlocal->mainbd = -1;
135: for (i=0; i<mlocal->nd; i++) {
136: if (mlocal->diag[i] + mbd->brstart == 0) mlocal->mainbd = i;
137: tmp1[mlocal->diag[i] + mbd->brstart + Mblock] = 1;
138: }
139: MPI_Allreduce(tmp1,tmp2,len,MPIU_INT,MPI_SUM,mat->comm);
140: ict = 0;
141: for (i=0; i<len; i++) {
142: if (tmp2[i]) {
143: mbd->gdiag[ict] = i - Mblock;
144: ict++;
145: }
146: }
147: mbd->gnd = ict;
148: PetscFree(tmp1);
150: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
151: MatSetUpMultiply_MPIBDiag(mat);
152: }
153: return(0);
154: }
158: PetscErrorCode MatZeroEntries_MPIBDiag(Mat A)
159: {
160: Mat_MPIBDiag *l = (Mat_MPIBDiag*)A->data;
164: MatZeroEntries(l->A);
165: return(0);
166: }
168: /* again this uses the same basic strategy as in the assembly and
169: scatter create routines; we should try to do it systematically
170: if we can figure out the proper level of generality. */
172: /* the code does not do the diagonal entries correctly unless the
173: matrix is square and the column and row ownerships are identical.
174: This is a BUG. The only way to fix it seems to be to access
175: aij->A and aij->B directly and not through the MatZeroRows()
176: routine.
177: */
181: PetscErrorCode MatZeroRows_MPIBDiag(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag)
182: {
183: Mat_MPIBDiag *l = (Mat_MPIBDiag*)A->data;
185: PetscMPIInt n,imdex,size = l->size,rank = l->rank,tag = A->tag;
186: PetscInt i,*owners = A->rmap.range;
187: PetscInt *nprocs,j,idx,nsends;
188: PetscInt nmax,*svalues,*starts,*owner,nrecvs;
189: PetscInt *rvalues,count,base,slen,*source;
190: PetscInt *lens,*lrows,*values;
191: MPI_Comm comm = A->comm;
192: MPI_Request *send_waits,*recv_waits;
193: MPI_Status recv_status,*send_status;
194: PetscTruth found;
197: /* first count number of contributors to each processor */
198: PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
199: PetscMemzero(nprocs,2*size*sizeof(PetscInt));
200: PetscMalloc((N+1)*sizeof(PetscInt),&owner); /* see note*/
201: for (i=0; i<N; i++) {
202: idx = rows[i];
203: found = PETSC_FALSE;
204: for (j=0; j<size; j++) {
205: if (idx >= owners[j] && idx < owners[j+1]) {
206: nprocs[2*j]++; nprocs[2*j+1] = 1; owner[i] = j; found = PETSC_TRUE; break;
207: }
208: }
209: if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"row out of range");
210: }
211: nsends = 0; for (i=0; i<size; i++) {nsends += nprocs[2*i+1];}
213: /* inform other processors of number of messages and max length*/
214: PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
216: /* post receives: */
217: PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);
218: PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
219: for (i=0; i<nrecvs; i++) {
220: MPI_Irecv(rvalues+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);
221: }
223: /* do sends:
224: 1) starts[i] gives the starting index in svalues for stuff going to
225: the ith processor
226: */
227: PetscMalloc((N+1)*sizeof(PetscInt),&svalues);
228: PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
229: PetscMalloc((size+1)*sizeof(PetscInt),&starts);
230: starts[0] = 0;
231: for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
232: for (i=0; i<N; i++) {
233: svalues[starts[owner[i]]++] = rows[i];
234: }
236: starts[0] = 0;
237: for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
238: count = 0;
239: for (i=0; i<size; i++) {
240: if (nprocs[2*i+1]) {
241: MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);
242: }
243: }
244: PetscFree(starts);
246: base = owners[rank];
248: /* wait on receives */
249: PetscMalloc(2*(nrecvs+1)*sizeof(PetscInt),&lens);
250: source = lens + nrecvs;
251: count = nrecvs;
252: slen = 0;
253: while (count) {
254: MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
255: /* unpack receives into our local space */
256: MPI_Get_count(&recv_status,MPIU_INT,&n);
257: source[imdex] = recv_status.MPI_SOURCE;
258: lens[imdex] = n;
259: slen += n;
260: count--;
261: }
262: PetscFree(recv_waits);
263:
264: /* move the data into the send scatter */
265: PetscMalloc((slen+1)*sizeof(PetscInt),&lrows);
266: count = 0;
267: for (i=0; i<nrecvs; i++) {
268: values = rvalues + i*nmax;
269: for (j=0; j<lens[i]; j++) {
270: lrows[count++] = values[j] - base;
271: }
272: }
273: PetscFree(rvalues);
274: PetscFree(lens);
275: PetscFree(owner);
276: PetscFree(nprocs);
277:
278: /* actually zap the local rows */
279: MatZeroRows(l->A,slen,lrows,diag);
280: PetscFree(lrows);
282: /* wait on sends */
283: if (nsends) {
284: PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
285: MPI_Waitall(nsends,send_waits,send_status);
286: PetscFree(send_status);
287: }
288: PetscFree(send_waits);
289: PetscFree(svalues);
291: return(0);
292: }
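A brief caller-side sketch (illustrative, not from the original source) of the public MatZeroRows() call that dispatches to this routine; A is assumed to be an assembled MATMPIBDIAG matrix with at least two local rows. As the comment above warns, the diagonal replacement is only correct when the matrix is square and the row and column ownerships match.

   PetscInt       rstart,rend,rows[2];
   PetscScalar    diag = 1.0;
   PetscErrorCode ierr;

   ierr    = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
   rows[0] = rstart;            /* rows listed here may belong to any process */
   rows[1] = rend - 1;
   ierr    = MatZeroRows(A,2,rows,diag);CHKERRQ(ierr);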
296: PetscErrorCode MatMult_MPIBDiag(Mat mat,Vec xx,Vec yy)
297: {
298: Mat_MPIBDiag *mbd = (Mat_MPIBDiag*)mat->data;
302: VecScatterBegin(xx,mbd->lvec,INSERT_VALUES,SCATTER_FORWARD,mbd->Mvctx);
303: VecScatterEnd(xx,mbd->lvec,INSERT_VALUES,SCATTER_FORWARD,mbd->Mvctx);
304: (*mbd->A->ops->mult)(mbd->A,mbd->lvec,yy);
305: return(0);
306: }
310: PetscErrorCode MatMultAdd_MPIBDiag(Mat mat,Vec xx,Vec yy,Vec zz)
311: {
312: Mat_MPIBDiag *mbd = (Mat_MPIBDiag*)mat->data;
316: VecScatterBegin(xx,mbd->lvec,INSERT_VALUES,SCATTER_FORWARD,mbd->Mvctx);
317: VecScatterEnd(xx,mbd->lvec,INSERT_VALUES,SCATTER_FORWARD,mbd->Mvctx);
318: (*mbd->A->ops->multadd)(mbd->A,mbd->lvec,yy,zz);
319: return(0);
320: }
324: PetscErrorCode MatMultTranspose_MPIBDiag(Mat A,Vec xx,Vec yy)
325: {
326: Mat_MPIBDiag *a = (Mat_MPIBDiag*)A->data;
328: PetscScalar zero = 0.0;
331: VecSet(yy,zero);
332: (*a->A->ops->multtranspose)(a->A,xx,a->lvec);
333: VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
334: VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
335: return(0);
336: }
340: PetscErrorCode MatMultTransposeAdd_MPIBDiag(Mat A,Vec xx,Vec yy,Vec zz)
341: {
342: Mat_MPIBDiag *a = (Mat_MPIBDiag*)A->data;
346: VecCopy(yy,zz);
347: (*a->A->ops->multtranspose)(a->A,xx,a->lvec);
348: VecScatterBegin(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
349: VecScatterEnd(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
350: return(0);
351: }
355: PetscErrorCode MatGetInfo_MPIBDiag(Mat matin,MatInfoType flag,MatInfo *info)
356: {
357: Mat_MPIBDiag *mat = (Mat_MPIBDiag*)matin->data;
359: PetscReal isend[5],irecv[5];
362: info->block_size = (PetscReal)mat->A->rmap.bs;
363: MatGetInfo(mat->A,MAT_LOCAL,info);
364: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
365: isend[3] = info->memory; isend[4] = info->mallocs;
366: if (flag == MAT_LOCAL) {
367: info->nz_used = isend[0];
368: info->nz_allocated = isend[1];
369: info->nz_unneeded = isend[2];
370: info->memory = isend[3];
371: info->mallocs = isend[4];
372: } else if (flag == MAT_GLOBAL_MAX) {
373: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,matin->comm);
374: info->nz_used = irecv[0];
375: info->nz_allocated = irecv[1];
376: info->nz_unneeded = irecv[2];
377: info->memory = irecv[3];
378: info->mallocs = irecv[4];
379: } else if (flag == MAT_GLOBAL_SUM) {
380: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,matin->comm);
381: info->nz_used = irecv[0];
382: info->nz_allocated = irecv[1];
383: info->nz_unneeded = irecv[2];
384: info->memory = irecv[3];
385: info->mallocs = irecv[4];
386: }
387: info->rows_global = (double)matin->rmap.N;
388: info->columns_global = (double)matin->cmap.N;
389: info->rows_local = (double)matin->rmap.n;
390: info->columns_local = (double)matin->cmap.N;
391: return(0);
392: }
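A short usage sketch (an illustration under stated assumptions, not from the original source), where A is an assembled MATMPIBDIAG matrix; MAT_LOCAL and MAT_GLOBAL_MAX follow the same pattern.

   MatInfo        info;
   PetscErrorCode ierr;

   ierr = MatGetInfo(A,MAT_GLOBAL_SUM,&info);CHKERRQ(ierr);
   ierr = PetscPrintf(PETSC_COMM_WORLD,"nonzeros used %g, mallocs %g\n",info.nz_used,info.mallocs);CHKERRQ(ierr);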
396: PetscErrorCode MatGetDiagonal_MPIBDiag(Mat mat,Vec v)
397: {
399: Mat_MPIBDiag *A = (Mat_MPIBDiag*)mat->data;
402: MatGetDiagonal(A->A,v);
403: return(0);
404: }
408: PetscErrorCode MatDestroy_MPIBDiag(Mat mat)
409: {
410: Mat_MPIBDiag *mbd = (Mat_MPIBDiag*)mat->data;
412: #if defined(PETSC_USE_LOG)
413: Mat_SeqBDiag *ms = (Mat_SeqBDiag*)mbd->A->data;
416: PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D, BSize=%D, NDiag=%D",mat->rmap.N,mat->cmap.N,mat->rmap.bs,ms->nd);
417: #else
419: #endif
420: MatStashDestroy_Private(&mat->stash);
421: PetscFree(mbd->gdiag);
422: MatDestroy(mbd->A);
423: if (mbd->lvec) {VecDestroy(mbd->lvec);}
424: if (mbd->Mvctx) {VecScatterDestroy(mbd->Mvctx);}
425: PetscFree(mbd);
427: PetscObjectChangeTypeName((PetscObject)mat,0);
428: PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);
429: PetscObjectComposeFunction((PetscObject)mat,"MatMPIBDiagSetPreallocation_C","",PETSC_NULL);
430: return(0);
431: }
436: static PetscErrorCode MatView_MPIBDiag_Binary(Mat mat,PetscViewer viewer)
437: {
438: Mat_MPIBDiag *mbd = (Mat_MPIBDiag*)mat->data;
442: if (mbd->size == 1) {
443: MatView(mbd->A,viewer);
444: } else SETERRQ(PETSC_ERR_SUP,"Only uniprocessor output supported");
445: return(0);
446: }
450: static PetscErrorCode MatView_MPIBDiag_ASCIIorDraw(Mat mat,PetscViewer viewer)
451: {
452: Mat_MPIBDiag *mbd = (Mat_MPIBDiag*)mat->data;
453: PetscErrorCode ierr;
454: PetscMPIInt size = mbd->size,rank = mbd->rank;
455: PetscInt i;
456: PetscTruth iascii,isdraw;
457: PetscViewer sviewer;
458: PetscViewerFormat format;
461: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
462: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
463: if (iascii) {
464: PetscViewerGetFormat(viewer,&format);
465: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
466: PetscInt nline = PetscMin(10,mbd->gnd),k,nk,np;
467: PetscViewerASCIIPrintf(viewer," block size=%D, total number of diagonals=%D\n",mat->rmap.bs,mbd->gnd);
468: nk = (mbd->gnd-1)/nline + 1;
469: for (k=0; k<nk; k++) {
470: PetscViewerASCIIPrintf(viewer," global diag numbers:");
471: np = PetscMin(nline,mbd->gnd - nline*k);
472: for (i=0; i<np; i++) {
473: PetscViewerASCIIPrintf(viewer," %D",mbd->gdiag[i+nline*k]);
474: }
475: PetscViewerASCIIPrintf(viewer,"\n");
476: }
477: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
478: MatInfo info;
479: MPI_Comm_rank(mat->comm,&rank);
480: MatGetInfo(mat,MAT_LOCAL,&info);
481: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] local rows %D nz %D nz alloced %D mem %D \n",rank,mat->rmap.n,
482: (PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);
483: PetscViewerFlush(viewer);
484: VecScatterView(mbd->Mvctx,viewer);
485: }
486: return(0);
487: }
488: }
490: if (isdraw) {
491: PetscDraw draw;
492: PetscTruth isnull;
493: PetscViewerDrawGetDraw(viewer,0,&draw);
494: PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
495: }
497: if (size == 1) {
498: MatView(mbd->A,viewer);
499: } else {
500: /* assemble the entire matrix onto first processor. */
501: Mat A;
502: PetscInt M = mat->rmap.N,N = mat->cmap.N,m,row,nz,*cols;
503: PetscScalar *vals;
505: /* Here we are constructing a temporary matrix, so we will explicitly set the type to MPIBDiag */
506: MatCreate(mat->comm,&A);
507: if (!rank) {
508: MatSetSizes(A,M,N,M,N);
509: MatSetType(A,MATMPIBDIAG);
510: MatMPIBDiagSetPreallocation(A,mbd->gnd,mbd->A->rmap.bs,mbd->gdiag,PETSC_NULL);
511: } else {
512: MatSetSizes(A,0,0,M,N);
513: MatSetType(A,MATMPIBDIAG);
514: MatMPIBDiagSetPreallocation(A,0,mbd->A->rmap.bs,PETSC_NULL,PETSC_NULL);
515: }
516: PetscLogObjectParent(mat,A);
518: /* Copy the matrix ... This isn't the most efficient means,
519: but it's quick for now */
520: row = mat->rmap.rstart;
521: m = mbd->A->rmap.N;
522: for (i=0; i<m; i++) {
523: MatGetRow_MPIBDiag(mat,row,&nz,&cols,&vals);
524: MatSetValues(A,1,&row,nz,cols,vals,INSERT_VALUES);
525: MatRestoreRow_MPIBDiag(mat,row,&nz,&cols,&vals);
526: row++;
527: }
528: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
529: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
530: PetscViewerGetSingleton(viewer,&sviewer);
531: if (!rank) {
532: MatView(((Mat_MPIBDiag*)(A->data))->A,sviewer);
533: }
534: PetscViewerRestoreSingleton(viewer,&sviewer);
535: PetscViewerFlush(viewer);
536: MatDestroy(A);
537: }
538: return(0);
539: }
543: PetscErrorCode MatView_MPIBDiag(Mat mat,PetscViewer viewer)
544: {
546: PetscTruth iascii,isdraw,isbinary;
549: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
550: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
551: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
552: if (iascii || isdraw) {
553: MatView_MPIBDiag_ASCIIorDraw(mat,viewer);
554: } else if (isbinary) {
555: MatView_MPIBDiag_Binary(mat,viewer);
556: } else {
557: SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported by MPIBdiag matrices",((PetscObject)viewer)->type_name);
558: }
559: return(0);
560: }
564: PetscErrorCode MatSetOption_MPIBDiag(Mat A,MatOption op)
565: {
566: Mat_MPIBDiag *mbd = (Mat_MPIBDiag*)A->data;
569: switch (op) {
570: case MAT_NO_NEW_NONZERO_LOCATIONS:
571: case MAT_YES_NEW_NONZERO_LOCATIONS:
572: case MAT_NEW_NONZERO_LOCATION_ERR:
573: case MAT_NEW_NONZERO_ALLOCATION_ERR:
574: case MAT_NO_NEW_DIAGONALS:
575: case MAT_YES_NEW_DIAGONALS:
576: MatSetOption(mbd->A,op);
577: break;
578: case MAT_ROW_ORIENTED:
579: mbd->roworiented = PETSC_TRUE;
580: MatSetOption(mbd->A,op);
581: break;
582: case MAT_COLUMN_ORIENTED:
583: mbd->roworiented = PETSC_FALSE;
584: MatSetOption(mbd->A,op);
585: break;
586: case MAT_IGNORE_OFF_PROC_ENTRIES:
587: mbd->donotstash = PETSC_TRUE;
588: break;
589: case MAT_ROWS_SORTED:
590: case MAT_ROWS_UNSORTED:
591: case MAT_COLUMNS_SORTED:
592: case MAT_COLUMNS_UNSORTED:
593: PetscInfo1(A,"Option %d ignored\n",op);
594: break;
595: case MAT_SYMMETRIC:
596: case MAT_STRUCTURALLY_SYMMETRIC:
597: case MAT_NOT_SYMMETRIC:
598: case MAT_NOT_STRUCTURALLY_SYMMETRIC:
599: case MAT_HERMITIAN:
600: case MAT_NOT_HERMITIAN:
601: case MAT_SYMMETRY_ETERNAL:
602: case MAT_NOT_SYMMETRY_ETERNAL:
603: break;
604: default:
605: SETERRQ1(PETSC_ERR_SUP,"unknown option %d",op);
606: }
607: return(0);
608: }
613: PetscErrorCode MatGetRow_MPIBDiag(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
614: {
615: Mat_MPIBDiag *mat = (Mat_MPIBDiag*)matin->data;
617: PetscInt lrow;
620: if (row < matin->rmap.rstart || row >= matin->rmap.rend) SETERRQ(PETSC_ERR_SUP,"only for local rows")
621: lrow = row - matin->rmap.rstart;
622: MatGetRow_SeqBDiag(mat->A,lrow,nz,idx,v);
623: return(0);
624: }
628: PetscErrorCode MatRestoreRow_MPIBDiag(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
629: {
630: Mat_MPIBDiag *mat = (Mat_MPIBDiag*)matin->data;
632: PetscInt lrow;
635: lrow = row - matin->rmap.rstart;
636: MatRestoreRow_SeqBDiag(mat->A,lrow,nz,idx,v);
637: return(0);
638: }
643: PetscErrorCode MatNorm_MPIBDiag(Mat A,NormType type,PetscReal *nrm)
644: {
645: Mat_MPIBDiag *mbd = (Mat_MPIBDiag*)A->data;
646: Mat_SeqBDiag *a = (Mat_SeqBDiag*)mbd->A->data;
647: PetscReal sum = 0.0;
649: PetscInt d,i,nd = a->nd,bs = A->rmap.bs,len;
650: PetscScalar *dv;
653: if (type == NORM_FROBENIUS) {
654: for (d=0; d<nd; d++) {
655: dv = a->diagv[d];
656: len = a->bdlen[d]*bs*bs;
657: for (i=0; i<len; i++) {
658: #if defined(PETSC_USE_COMPLEX)
659: sum += PetscRealPart(PetscConj(dv[i])*dv[i]);
660: #else
661: sum += dv[i]*dv[i];
662: #endif
663: }
664: }
665: MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPI_SUM,A->comm);
666: *nrm = sqrt(*nrm);
667: PetscLogFlops(2*A->rmap.n*A->rmap.N);
668: } else if (type == NORM_1) { /* max column norm */
669: PetscReal *tmp,*tmp2;
670: PetscInt j;
671: PetscMalloc((mbd->A->cmap.n+1)*sizeof(PetscReal),&tmp);
672: PetscMalloc((mbd->A->cmap.n+1)*sizeof(PetscReal),&tmp2);
673: MatNorm_SeqBDiag_Columns(mbd->A,tmp,mbd->A->cmap.n);
674: *nrm = 0.0;
675: MPI_Allreduce(tmp,tmp2,mbd->A->cmap.n,MPIU_REAL,MPI_SUM,A->comm);
676: for (j=0; j<mbd->A->cmap.n; j++) {
677: if (tmp2[j] > *nrm) *nrm = tmp2[j];
678: }
679: PetscFree(tmp);
680: PetscFree(tmp2);
681: } else if (type == NORM_INFINITY) { /* max row norm */
682: PetscReal normtemp;
683: MatNorm(mbd->A,type,&normtemp);
684: MPI_Allreduce(&normtemp,nrm,1,MPIU_REAL,MPI_MAX,A->comm);
685: }
686: return(0);
687: }
691: PetscErrorCode MatScale_MPIBDiag(Mat A,PetscScalar alpha)
692: {
694: Mat_MPIBDiag *a = (Mat_MPIBDiag*)A->data;
697: MatScale_SeqBDiag(a->A,alpha);
698: return(0);
699: }
703: PetscErrorCode MatSetUpPreallocation_MPIBDiag(Mat A)
704: {
708: MatMPIBDiagSetPreallocation(A,PETSC_DEFAULT,PETSC_DEFAULT,0,0);
709: return(0);
710: }
712: /* -------------------------------------------------------------------*/
714: static struct _MatOps MatOps_Values = {MatSetValues_MPIBDiag,
715: MatGetRow_MPIBDiag,
716: MatRestoreRow_MPIBDiag,
717: MatMult_MPIBDiag,
718: /* 4*/ MatMultAdd_MPIBDiag,
719: MatMultTranspose_MPIBDiag,
720: MatMultTransposeAdd_MPIBDiag,
721: 0,
722: 0,
723: 0,
724: /*10*/ 0,
725: 0,
726: 0,
727: 0,
728: 0,
729: /*15*/ MatGetInfo_MPIBDiag,
730: 0,
731: MatGetDiagonal_MPIBDiag,
732: 0,
733: MatNorm_MPIBDiag,
734: /*20*/ MatAssemblyBegin_MPIBDiag,
735: MatAssemblyEnd_MPIBDiag,
736: 0,
737: MatSetOption_MPIBDiag,
738: MatZeroEntries_MPIBDiag,
739: /*25*/ MatZeroRows_MPIBDiag,
740: 0,
741: 0,
742: 0,
743: 0,
744: /*30*/ MatSetUpPreallocation_MPIBDiag,
745: 0,
746: 0,
747: 0,
748: 0,
749: /*35*/ 0,
750: 0,
751: 0,
752: 0,
753: 0,
754: /*40*/ 0,
755: 0,
756: 0,
757: MatGetValues_MPIBDiag,
758: 0,
759: /*45*/ 0,
760: MatScale_MPIBDiag,
761: 0,
762: 0,
763: 0,
764: /*50*/ 0,
765: 0,
766: 0,
767: 0,
768: 0,
769: /*55*/ 0,
770: 0,
771: 0,
772: 0,
773: 0,
774: /*60*/ 0,
775: MatDestroy_MPIBDiag,
776: MatView_MPIBDiag,
777: 0,
778: 0,
779: /*65*/ 0,
780: 0,
781: 0,
782: 0,
783: 0,
784: /*70*/ 0,
785: 0,
786: 0,
787: 0,
788: 0,
789: /*75*/ 0,
790: 0,
791: 0,
792: 0,
793: 0,
794: /*80*/ 0,
795: 0,
796: 0,
797: 0,
798: MatLoad_MPIBDiag,
799: /*85*/ 0,
800: 0,
801: 0,
802: 0,
803: 0,
804: /*90*/ 0,
805: 0,
806: 0,
807: 0,
808: 0,
809: /*95*/ 0,
810: 0,
811: 0,
812: 0};
817: PetscErrorCode MatGetDiagonalBlock_MPIBDiag(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
818: {
819: Mat_MPIBDiag *matin = (Mat_MPIBDiag *)A->data;
821: PetscInt lrows,lcols,rstart,rend;
822: IS localc,localr;
825: MatGetLocalSize(A,&lrows,&lcols);
826: MatGetOwnershipRange(A,&rstart,&rend);
827: ISCreateStride(PETSC_COMM_SELF,lrows,rstart,1,&localc);
828: ISCreateStride(PETSC_COMM_SELF,lrows,0,1,&localr);
829: MatGetSubMatrix(matin->A,localr,localc,PETSC_DECIDE,reuse,a);
830: ISDestroy(localr);
831: ISDestroy(localc);
833: *iscopy = PETSC_TRUE;
834: return(0);
835: }
841: PetscErrorCode MatMPIBDiagSetPreallocation_MPIBDiag(Mat B,PetscInt nd,PetscInt bs,PetscInt *diag,PetscScalar **diagv)
842: {
843: Mat_MPIBDiag *b;
845: PetscInt i,k,*ldiag,len,nd2;
846: PetscScalar **ldiagv = 0;
847: PetscTruth flg2;
850: B->preallocated = PETSC_TRUE;
851: if (bs == PETSC_DEFAULT) bs = 1;
852: if (nd == PETSC_DEFAULT) nd = 0;
853: PetscOptionsGetInt(PETSC_NULL,"-mat_block_size",&bs,PETSC_NULL);
854: PetscOptionsGetInt(PETSC_NULL,"-mat_bdiag_ndiag",&nd,PETSC_NULL);
855: PetscOptionsHasName(PETSC_NULL,"-mat_bdiag_diags",&flg2);
856: if (nd && !diag) {
857: PetscMalloc(nd*sizeof(PetscInt),&diag);
858: nd2 = nd;
859: PetscOptionsGetIntArray(PETSC_NULL,"-mat_bdiag_dvals",diag,&nd2,PETSC_NULL);
860: if (nd2 != nd) {
861: SETERRQ(PETSC_ERR_ARG_INCOMP,"Incompatible number of diags and diagonal vals");
862: }
863: } else if (flg2) {
864: SETERRQ(PETSC_ERR_ARG_WRONG,"Must specify number of diagonals with -mat_bdiag_ndiag");
865: }
867: if (bs <= 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Blocksize must be positive");
869: B->rmap.bs = B->cmap.bs = bs;
871: PetscMapInitialize(B->comm,&B->rmap);
872: PetscMapInitialize(B->comm,&B->cmap);
874: if ((B->cmap.N%bs)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size - bad column number");
875: if ((B->rmap.n%bs)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size - bad local row number");
876: if ((B->rmap.N%bs)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size - bad global row number");
879: b = (Mat_MPIBDiag*)B->data;
880: b->gnd = nd;
881: b->brstart = (B->rmap.rstart)/bs;
882: b->brend = (B->rmap.rend)/bs;
885: /* Determine local diagonals; for now, assume global rows = global cols */
886: /* These are sorted in MatCreateSeqBDiag */
887: PetscMalloc((nd+1)*sizeof(PetscInt),&ldiag);
888: len = B->rmap.N/bs + B->cmap.N/bs + 1;
889: PetscMalloc(len*sizeof(PetscInt),&b->gdiag);
890: k = 0;
891: if (diagv) {
892: PetscMalloc((nd+1)*sizeof(PetscScalar*),&ldiagv);
893: }
894: for (i=0; i<nd; i++) {
895: b->gdiag[i] = diag[i];
896: if (diag[i] > 0) { /* lower triangular */
897: if (diag[i] < b->brend) {
898: ldiag[k] = diag[i] - b->brstart;
899: if (diagv) ldiagv[k] = diagv[i];
900: k++;
901: }
902: } else { /* upper triangular */
903: if (B->rmap.N/bs - diag[i] > B->cmap.N/bs) {
904: if (B->rmap.N/bs + diag[i] > b->brstart) {
905: ldiag[k] = diag[i] - b->brstart;
906: if (diagv) ldiagv[k] = diagv[i];
907: k++;
908: }
909: } else {
910: if (B->rmap.N/bs > b->brstart) {
911: ldiag[k] = diag[i] - b->brstart;
912: if (diagv) ldiagv[k] = diagv[i];
913: k++;
914: }
915: }
916: }
917: }
919: /* Form local matrix */
920: MatCreate(PETSC_COMM_SELF,&b->A);
921: MatSetSizes(b->A,B->rmap.n,B->cmap.N,B->rmap.n,B->cmap.N);
922: MatSetType(b->A,MATSEQBDIAG);
923: MatSeqBDiagSetPreallocation(b->A,k,bs,ldiag,ldiagv);
924: PetscLogObjectParent(B,b->A);
925: PetscFree(ldiag);
926: PetscFree(ldiagv);
928: return(0);
929: }
932: /*MC
933: MATMPIBDIAG - MATMPIBDIAG = "mpibdiag" - A matrix type to be used for distributed block diagonal matrices.
935: Options Database Keys:
936: . -mat_type mpibdiag - sets the matrix type to "mpibdiag" during a call to MatSetFromOptions()
938: Level: beginner
940: .seealso: MatCreateMPIBDiag
941: M*/
946: PetscErrorCode MatCreate_MPIBDiag(Mat B)
947: {
948: Mat_MPIBDiag *b;
952: PetscNew(Mat_MPIBDiag,&b);
953: B->data = (void*)b;
954: PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
955: B->factor = 0;
956: B->mapping = 0;
958: B->insertmode = NOT_SET_VALUES;
959: MPI_Comm_rank(B->comm,&b->rank);
960: MPI_Comm_size(B->comm,&b->size);
962: /* build cache for off array entries formed */
963: MatStashCreate_Private(B->comm,1,&B->stash);
964: b->donotstash = PETSC_FALSE;
966: /* stuff used for matrix-vector multiply */
967: b->lvec = 0;
968: b->Mvctx = 0;
970: /* used for MatSetValues() input */
971: b->roworiented = PETSC_TRUE;
973: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
974: "MatGetDiagonalBlock_MPIBDiag",
975: MatGetDiagonalBlock_MPIBDiag);
976: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIBDiagSetPreallocation_C",
977: "MatMPIBDiagSetPreallocation_MPIBDiag",
978: MatMPIBDiagSetPreallocation_MPIBDiag);
979: PetscObjectChangeTypeName((PetscObject)B,MATMPIBDIAG);
980: return(0);
981: }
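A minimal sketch of selecting this type from the options database, assuming a standard PETSc program; the sizes are placeholders and nothing here is from the original source.

   Mat            A;
   PetscErrorCode ierr;

   ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
   ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,128,128);CHKERRQ(ierr);
   ierr = MatSetFromOptions(A);CHKERRQ(ierr);   /* honors -mat_type mpibdiag */

Running the program with -mat_type mpibdiag then dispatches to MatCreate_MPIBDiag() above.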
984: /*MC
985: MATBDIAG - MATBDIAG = "bdiag" - A matrix type to be used for block diagonal matrices.
987: This matrix type is identical to MATSEQBDIAG when constructed with a single process communicator,
988: and MATMPIBDIAG otherwise.
990: Options Database Keys:
991: . -mat_type bdiag - sets the matrix type to "bdiag" during a call to MatSetFromOptions()
993: Level: beginner
995: .seealso: MatCreateMPIBDiag,MATSEQBDIAG,MATMPIBDIAG
996: M*/
1001: PetscErrorCode MatCreate_BDiag(Mat A)
1002: {
1004: PetscMPIInt size;
1007: MPI_Comm_size(A->comm,&size);
1008: if (size == 1) {
1009: MatSetType(A,MATSEQBDIAG);
1010: } else {
1011: MatSetType(A,MATMPIBDIAG);
1012: }
1013: return(0);
1014: }
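A hedged sketch of requesting the generic type directly (placeholder sizes, not from the original source); MatCreate_BDiag() above then resolves it by communicator size.

   Mat            A;
   PetscErrorCode ierr;

   ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
   ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,128,128);CHKERRQ(ierr);
   ierr = MatSetType(A,MATBDIAG);CHKERRQ(ierr);   /* MATSEQBDIAG on one process, MATMPIBDIAG otherwise */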
1019: /*@C
1020: MatMPIBDiagSetPreallocation - Sets preallocation information for a sparse parallel matrix in block diagonal format.
1022: Collective on Mat
1024: Input Parameters:
1025: + A - the matrix
1026: . nd - number of block diagonals (global) (optional)
1027: . bs - each element of a diagonal is a bs x bs dense matrix
1028: . diag - optional array of block diagonal numbers (length nd).
1029: For a matrix element A[i,j], where i=row and j=column, the
1030: diagonal number is
1031: $ diag = i/bs - j/bs (integer division)
1032: Set diag=PETSC_NULL on input for PETSc to dynamically allocate memory as
1033: needed (expensive).
1034: - diagv - pointer to actual diagonals (in same order as diag array),
1035: if allocated by user. Otherwise, set diagv=PETSC_NULL on input for PETSc
1036: to control memory allocation.
1039: Options Database Keys:
1040: . -mat_block_size <bs> - Sets blocksize
1041: . -mat_bdiag_diags <s1,s2,s3,...> - Sets diagonal numbers
1043: Notes:
1044: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1045: then it must be used on all processors that share the object for that argument.
1047: The parallel matrix is partitioned across the processors by rows, where
1048: each local rectangular matrix is stored in the uniprocessor block
1049: diagonal format. See the users manual for further details.
1051: The user MUST specify either the local or global numbers of rows
1052: (possibly both).
1054: The case bs=1 (conventional diagonal storage) is implemented as
1055: a special case.
1057: Fortran Notes:
1058: Fortran programmers cannot set diagv; this variable is ignored.
1060: Level: intermediate
1062: .keywords: matrix, block, diagonal, parallel, sparse
1064: .seealso: MatCreate(), MatCreateSeqBDiag(), MatSetValues()
1065: @*/
1066: PetscErrorCode MatMPIBDiagSetPreallocation(Mat B,PetscInt nd,PetscInt bs,const PetscInt diag[],PetscScalar *diagv[])
1067: {
1068: PetscErrorCode ierr,(*f)(Mat,PetscInt,PetscInt,const PetscInt[],PetscScalar*[]);
1071: PetscObjectQueryFunction((PetscObject)B,"MatMPIBDiagSetPreallocation_C",(void (**)(void))&f);
1072: if (f) {
1073: (*f)(B,nd,bs,diag,diagv);
1074: }
1075: return(0);
1076: }
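A usage sketch for the routine documented above (the matrix size, block size, and diagonal numbers are illustrative assumptions): a 100 x 100 matrix with bs = 1 and the three diagonals -1, 0, 1, i.e. tridiagonal storage.

   Mat            A;
   PetscInt       diags[3] = {-1,0,1};
   PetscErrorCode ierr;

   ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
   ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);CHKERRQ(ierr);
   ierr = MatSetType(A,MATMPIBDIAG);CHKERRQ(ierr);
   ierr = MatMPIBDiagSetPreallocation(A,3,1,diags,PETSC_NULL);CHKERRQ(ierr);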
1080: /*@C
1081: MatCreateMPIBDiag - Creates a sparse parallel matrix in MPIBDiag format.
1083: Collective on MPI_Comm
1085: Input Parameters:
1086: + comm - MPI communicator
1087: . m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
1088: . M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
1089: . N - number of columns (local and global)
1090: . nd - number of block diagonals (global) (optional)
1091: . bs - each element of a diagonal is a bs x bs dense matrix
1092: . diag - optional array of block diagonal numbers (length nd).
1093: For a matrix element A[i,j], where i=row and j=column, the
1094: diagonal number is
1095: $ diag = i/bs - j/bs (integer division)
1096: Set diag=PETSC_NULL on input for PETSc to dynamically allocate memory as
1097: needed (expensive).
1098: - diagv - pointer to actual diagonals (in same order as diag array),
1099: if allocated by user. Otherwise, set diagv=PETSC_NULL on input for PETSc
1100: to control memory allocation.
1102: Output Parameter:
1103: . A - the matrix
1105: Options Database Keys:
1106: . -mat_block_size <bs> - Sets blocksize
1107: . -mat_bdiag_diags <s1,s2,s3,...> - Sets diagonal numbers
1109: Notes:
1110: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1111: then it must be used on all processors that share the object for that argument.
1113: The parallel matrix is partitioned across the processors by rows, where
1114: each local rectangular matrix is stored in the uniprocessor block
1115: diagonal format. See the users manual for further details.
1117: The user MUST specify either the local or global numbers of rows
1118: (possibly both).
1120: The case bs=1 (conventional diagonal storage) is implemented as
1121: a special case.
1123: Fortran Notes:
1124: Fortran programmers cannot set diagv; this variable is ignored.
1126: Level: intermediate
1128: .keywords: matrix, block, diagonal, parallel, sparse
1130: .seealso: MatCreate(), MatCreateSeqBDiag(), MatSetValues()
1131: @*/
1132: PetscErrorCode MatCreateMPIBDiag(MPI_Comm comm,PetscInt m,PetscInt M,PetscInt N,PetscInt nd,PetscInt bs,const PetscInt diag[],PetscScalar *diagv[],Mat *A)
1133: {
1135: PetscMPIInt size;
1138: MatCreate(comm,A);
1139: MatSetSizes(*A,m,m,M,N);
1140: MPI_Comm_size(comm,&size);
1141: if (size > 1) {
1142: MatSetType(*A,MATMPIBDIAG);
1143: MatMPIBDiagSetPreallocation(*A,nd,bs,diag,diagv);
1144: } else {
1145: MatSetType(*A,MATSEQBDIAG);
1146: MatSeqBDiagSetPreallocation(*A,nd,bs,diag,diagv);
1147: }
1148: return(0);
1149: }
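The convenience routine above collapses the earlier preallocation sketch into a single call; the sizes and diagonal numbers here are again illustrative, not from the original source.

   Mat            A;
   PetscInt       diags[3] = {-1,0,1};
   PetscErrorCode ierr;

   ierr = MatCreateMPIBDiag(PETSC_COMM_WORLD,PETSC_DECIDE,100,100,3,1,diags,PETSC_NULL,&A);CHKERRQ(ierr);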
1153: /*@C
1154: MatBDiagGetData - Gets the data for the block diagonal matrix format.
1155: For the parallel case, this returns information for the local submatrix.
1157: Input Parameters:
1158: . mat - the matrix, stored in block diagonal format.
1160: Not Collective
1162: Output Parameters:
1163: + nd - number of block diagonals
1166: . bs - each element of a diagonal is a bs x bs dense matrix
1167: . bdlen - array of total block lengths of block diagonals
1168: . diag - array of block diagonal numbers (length nd).
1169: For a matrix element A[i,j], where i=row and j=column, the
1170: diagonal number is
1171: $ diag = i/bs - j/bs (integer division)
1174: - diagv - pointer to the actual diagonals (in same order as the diag array)
1176: Level: advanced
1178: Notes:
1179: See the users manual for further details regarding this storage format.
1181: .keywords: matrix, block, diagonal, get, data
1183: .seealso: MatCreateSeqBDiag(), MatCreateMPIBDiag()
1184: @*/
1185: PetscErrorCode MatBDiagGetData(Mat mat,PetscInt *nd,PetscInt *bs,PetscInt *diag[],PetscInt *bdlen[],PetscScalar ***diagv)
1186: {
1187: Mat_MPIBDiag *pdmat;
1188: Mat_SeqBDiag *dmat = 0;
1189: PetscTruth isseq,ismpi;
1194: PetscTypeCompare((PetscObject)mat,MATSEQBDIAG,&isseq);
1195: PetscTypeCompare((PetscObject)mat,MATMPIBDIAG,&ismpi);
1196: if (isseq) {
1197: dmat = (Mat_SeqBDiag*)mat->data;
1198: } else if (ismpi) {
1199: pdmat = (Mat_MPIBDiag*)mat->data;
1200: dmat = (Mat_SeqBDiag*)pdmat->A->data;
1201: } else SETERRQ(PETSC_ERR_SUP,"Valid only for MATSEQBDIAG and MATMPIBDIAG formats");
1202: *nd = dmat->nd;
1203: *bs = mat->rmap.bs;
1204: *diag = dmat->diag;
1205: *bdlen = dmat->bdlen;
1206: *diagv = dmat->diagv;
1207: return(0);
1208: }
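A short query sketch, assuming A is an assembled MATSEQBDIAG or MATMPIBDIAG matrix (the printed fields are just an illustration):

   PetscInt       nd,bs,*diag,*bdlen;
   PetscScalar    **diagv;
   PetscErrorCode ierr;

   ierr = MatBDiagGetData(A,&nd,&bs,&diag,&bdlen,&diagv);CHKERRQ(ierr);
   ierr = PetscPrintf(PETSC_COMM_SELF,"local diagonals %D, block size %D\n",nd,bs);CHKERRQ(ierr);

The returned arrays point directly at the local matrix data, so they should not be freed by the caller.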
1210: #include "petscsys.h"
1214: PetscErrorCode MatLoad_MPIBDiag(PetscViewer viewer, MatType type,Mat *newmat)
1215: {
1216: Mat A;
1217: PetscScalar *vals,*svals;
1218: MPI_Comm comm = ((PetscObject)viewer)->comm;
1219: MPI_Status status;
1221: int fd;
1222: PetscMPIInt tag = ((PetscObject)viewer)->tag,rank,size,*sndcounts = 0,*rowners,maxnz,mm;
1223: PetscInt bs,i,nz,j,rstart,rend,*cols;
1224: PetscInt header[4],*rowlengths = 0,M,N,m,Mbs;
1225: PetscInt *ourlens,*procsnz = 0,jj,*mycols,*smycols;
1226: PetscInt extra_rows;
1229: MPI_Comm_size(comm,&size);
1230: MPI_Comm_rank(comm,&rank);
1231: if (!rank) {
1232: PetscViewerBinaryGetDescriptor(viewer,&fd);
1233: PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
1234: if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
1235: if (header[3] < 0) {
1236: SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPIBDiag");
1237: }
1238: }
1239: MPI_Bcast(header+1,3,MPIU_INT,0,comm);
1240: M = header[1]; N = header[2];
1242: bs = 1; /* uses a block size of 1 by default; */
1243: PetscOptionsGetInt(PETSC_NULL,"-matload_block_size",&bs,PETSC_NULL);
1245: /*
1246: This code adds extra rows to make sure the number of rows is
1247: divisible by the blocksize
1248: */
1249: Mbs = M/bs;
1250: extra_rows = bs - M + bs*(Mbs);
1251: if (extra_rows == bs) extra_rows = 0;
1252: else Mbs++;
1253: if (extra_rows && !rank) {
1254: PetscInfo(0,"Padding loaded matrix to match blocksize\n");
1255: }
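/* Illustrative arithmetic (not from the original source): with M = 10 and
   bs = 4, Mbs = 10/4 = 2 and extra_rows = 4 - 10 + 4*2 = 2; since this is
   nonzero and != bs, Mbs is bumped to 3 and two padded rows (given a unit
   diagonal later in this routine) are appended, for 12 rows in all. */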
1257: /* determine ownership of all rows */
1258: m = bs*(Mbs/size + ((Mbs % size) > rank));
1259: PetscMalloc((size+2)*sizeof(PetscInt),&rowners);
1260: mm = (PetscMPIInt)m;
1261: MPI_Allgather(&mm,1,MPI_INT,rowners+1,1,MPI_INT,comm);
1262: rowners[0] = 0;
1263: for (i=2; i<=size; i++) {
1264: rowners[i] += rowners[i-1];
1265: }
1266: rstart = rowners[rank];
1267: rend = rowners[rank+1];
1269: /* distribute row lengths to all processors */
1270: PetscMalloc((rend-rstart)*sizeof(PetscInt),&ourlens);
1271: if (!rank) {
1272: PetscMalloc((M+extra_rows)*sizeof(PetscInt),&rowlengths);
1273: PetscBinaryRead(fd,rowlengths,M,PETSC_INT);
1274: for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
1275: PetscMalloc(size*sizeof(PetscMPIInt),&sndcounts);
1276: for (i=0; i<size; i++) sndcounts[i] = rowners[i+1] - rowners[i];
1277: MPI_Scatterv(rowlengths,sndcounts,rowners,MPIU_INT,ourlens,rend-rstart,MPIU_INT,0,comm);
1278: PetscFree(sndcounts);
1279: } else {
1280: MPI_Scatterv(0,0,0,MPIU_INT,ourlens,rend-rstart,MPIU_INT,0,comm);
1281: }
1283: if (!rank) {
1284: /* calculate the number of nonzeros on each processor */
1285: PetscMalloc(size*sizeof(PetscInt),&procsnz);
1286: PetscMemzero(procsnz,size*sizeof(PetscInt));
1287: for (i=0; i<size; i++) {
1288: for (j=rowners[i]; j<rowners[i+1]; j++) {
1289: procsnz[i] += rowlengths[j];
1290: }
1291: }
1292: PetscFree(rowlengths);
1294: /* determine max buffer needed and allocate it */
1295: maxnz = 0;
1296: for (i=0; i<size; i++) {
1297: maxnz = PetscMax(maxnz,procsnz[i]);
1298: }
1299: PetscMalloc(maxnz*sizeof(PetscInt),&cols);
1301: /* read in my part of the matrix column indices */
1302: nz = procsnz[0];
1303: PetscMalloc(nz*sizeof(PetscInt),&mycols);
1304: if (size == 1) nz -= extra_rows;
1305: PetscBinaryRead(fd,mycols,nz,PETSC_INT);
1306: if (size == 1) for (i=0; i<extra_rows; i++) { mycols[nz+i] = M+i; }
1308: /* read in everyone else's part and ship it off */
1309: for (i=1; i<size-1; i++) {
1310: nz = procsnz[i];
1311: PetscBinaryRead(fd,cols,nz,PETSC_INT);
1312: MPI_Send(cols,nz,MPIU_INT,i,tag,comm);
1313: }
1314: /* read in the stuff for the last proc */
1315: if (size != 1) {
1316: nz = procsnz[size-1] - extra_rows; /* the extra rows are not on the disk */
1317: PetscBinaryRead(fd,cols,nz,PETSC_INT);
1318: for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
1319: MPI_Send(cols,nz+extra_rows,MPIU_INT,size-1,tag,comm);
1320: }
1321: PetscFree(cols);
1322: } else {
1323: /* determine buffer space needed for message */
1324: nz = 0;
1325: for (i=0; i<m; i++) {
1326: nz += ourlens[i];
1327: }
1328: PetscMalloc(nz*sizeof(PetscInt),&mycols);
1330: /* receive message of column indices*/
1331: MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);
1332: MPI_Get_count(&status,MPIU_INT,&maxnz);
1333: if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
1334: }
1336: MatCreate(comm,newmat);
1337: MatSetSizes(*newmat,m,m,M+extra_rows,N+extra_rows);
1338: MatSetType(*newmat,type);
1339: MatMPIBDiagSetPreallocation(*newmat,0,bs,PETSC_NULL,PETSC_NULL);
1340: A = *newmat;
1342: if (!rank) {
1343: PetscMalloc(maxnz*sizeof(PetscScalar),&vals);
1345: /* read in my part of the matrix numerical values */
1346: nz = procsnz[0];
1347: if (size == 1) nz -= extra_rows;
1348: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
1349: if (size == 1) for (i=0; i<extra_rows; i++) { vals[nz+i] = 1.0; }
1351: /* insert into matrix */
1352: jj = rstart;
1353: smycols = mycols;
1354: svals = vals;
1355: for (i=0; i<m; i++) {
1356: MatSetValues(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
1357: smycols += ourlens[i];
1358: svals += ourlens[i];
1359: jj++;
1360: }
1362: /* read in other processors (except the last one) and ship out */
1363: for (i=1; i<size-1; i++) {
1364: nz = procsnz[i];
1365: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
1366: MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);
1367: }
1368: /* the last proc */
1369: if (size != 1){
1370: nz = procsnz[i] - extra_rows;
1371: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
1372: for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
1373: MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,A->tag,comm);
1374: }
1375: PetscFree(procsnz);
1376: } else {
1377: /* receive numeric values */
1378: PetscMalloc(nz*sizeof(PetscScalar),&vals);
1380: /* receive message of values*/
1381: MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);
1382: MPI_Get_count(&status,MPIU_SCALAR,&maxnz);
1383: if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
1385: /* insert into matrix */
1386: jj = rstart;
1387: smycols = mycols;
1388: svals = vals;
1389: for (i=0; i<m; i++) {
1390: MatSetValues(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
1391: smycols += ourlens[i];
1392: svals += ourlens[i];
1393: jj++;
1394: }
1395: }
1396: PetscFree(ourlens);
1397: PetscFree(vals);
1398: PetscFree(mycols);
1399: PetscFree(rowners);
1401: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
1402: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
1403: return(0);
1404: }