Actual source code: mpimatmatmult.c
1: #define PETSCMAT_DLL
3: /*
4: Defines matrix-matrix product routines for pairs of MPIAIJ matrices
5: C = A * B
6: */
7: #include "src/mat/impls/aij/seq/aij.h"
8: #include "src/mat/utils/freespace.h"
9: #include "src/mat/impls/aij/mpi/mpiaij.h"
10: #include "petscbt.h"
11: #include "src/mat/impls/dense/mpi/mpidense.h"
15: PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill, Mat *C)
16: {
20: if (scall == MAT_INITIAL_MATRIX){
21: MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);/* numeric product is computed as well */
22: } else if (scall == MAT_REUSE_MATRIX){
23: MatMatMultNumeric_MPIAIJ_MPIAIJ(A,B,*C);
24: } else {
25: SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
26: }
27: return(0);
28: }
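/*
   Illustrative usage sketch (not part of mpimatmatmult.c): a minimal driver showing how a
   caller reaches the routine above through the public MatMatMult() interface.  It assumes a
   PETSc 2.3-era build; error checking with ierr/CHKERRQ is elided for brevity, and the matrix
   size and fill estimate are arbitrary.  The block is guarded by #if 0 so it does not become
   part of the library source.
*/
#if 0
#include "petscmat.h"

int main(int argc,char **argv)
{
  Mat         A,B,C;
  PetscInt    i,rstart,rend,N = 10;
  PetscScalar one = 1.0;

  PetscInitialize(&argc,&argv,PETSC_NULL,PETSC_NULL);

  /* A and B: N x N parallel AIJ identity matrices */
  MatCreateMPIAIJ(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,N,N,1,PETSC_NULL,0,PETSC_NULL,&A);
  MatGetOwnershipRange(A,&rstart,&rend);
  for (i=rstart; i<rend; i++) {
    MatSetValues(A,1,&i,1,&i,&one,INSERT_VALUES);
  }
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  MatDuplicate(A,MAT_COPY_VALUES,&B);

  /* first call: symbolic and numeric product, C is created */
  MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);
  /* later calls with unchanged nonzero structure reuse the data attached to C */
  MatMatMult(A,B,MAT_REUSE_MATRIX,2.0,&C);

  MatDestroy(A); MatDestroy(B); MatDestroy(C);
  PetscFinalize();
  return 0;
}
#endif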
32: PetscErrorCode PetscObjectContainerDestroy_Mat_MatMatMultMPI(void *ptr)
33: {
34: PetscErrorCode ierr;
35: Mat_MatMatMultMPI *mult=(Mat_MatMatMultMPI*)ptr;
38: PetscFree(mult->startsj);
39: PetscFree(mult->bufa);
40: if (mult->isrowa){ISDestroy(mult->isrowa);}
41: if (mult->isrowb){ISDestroy(mult->isrowb);}
42: if (mult->iscolb){ISDestroy(mult->iscolb);}
43: if (mult->C_seq){MatDestroy(mult->C_seq);}
44: if (mult->A_loc){MatDestroy(mult->A_loc); }
45: if (mult->B_seq){MatDestroy(mult->B_seq);}
46: if (mult->B_loc){MatDestroy(mult->B_loc);}
47: if (mult->B_oth){MatDestroy(mult->B_oth);}
48: PetscFree(mult->abi);
49: PetscFree(mult->abj);
50: PetscFree(mult);
51: return(0);
52: }
54: EXTERN PetscErrorCode MatDestroy_AIJ(Mat);
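/*
   MatDestroy_MPIAIJ_MatMatMult - destroy routine installed on the product C by the symbolic
   phase below.  It retrieves the attached Mat_MatMatMultMPI context, restores C's original
   destroy routine, detaches the container from C, calls the original destroy, and finally
   destroys the container, which frees the cached sequential data through
   PetscObjectContainerDestroy_Mat_MatMatMultMPI().
*/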
57: PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
58: {
59: PetscErrorCode ierr;
60: PetscObjectContainer container;
61: Mat_MatMatMultMPI *mult=PETSC_NULL;
64: PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);
65: if (container) {
66: PetscObjectContainerGetPointer(container,(void **)&mult);
67: } else {
68: SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
69: }
70: A->ops->destroy = mult->MatDestroy;
71: PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);
72: (*A->ops->destroy)(A);
73: PetscObjectContainerDestroy(container);
74: return(0);
75: }
79: PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M) {
80: PetscErrorCode ierr;
81: Mat_MatMatMultMPI *mult;
82: PetscObjectContainer container;
85: PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);
86: if (container) {
87: PetscObjectContainerGetPointer(container,(void **)&mult);
88: } else {
89: SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
90: }
91: (*mult->MatDuplicate)(A,op,M);
92: (*M)->ops->destroy = mult->MatDestroy; /* =MatDestroy_MPIAIJ, *M doesn't duplicate A's container! */
93: (*M)->ops->duplicate = mult->MatDuplicate; /* =MatDuplicate_MPIAIJ */
94: return(0);
95: }
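/*
   MatMatMultSymbolic_MPIAIJ_MPIAIJ - computes C = A*B for two MPIAIJ matrices by
     1) gathering into the sequential matrix B_seq the rows of B that correspond to nonzero
        columns of the local part of A,
     2) condensing the locally owned rows of A into the sequential matrix A_loc,
     3) forming the sequential product C_seq = A_loc*B_seq, and
     4) merging the C_seq pieces from all processes into the parallel matrix C.
   The intermediate data is kept in a Mat_MatMatMultMPI context attached to C so that
   MatMatMultNumeric_MPIAIJ_MPIAIJ() can reuse it.
*/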
99: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
100: {
101: PetscErrorCode ierr;
102: PetscInt start,end;
103: Mat_MatMatMultMPI *mult;
104: PetscObjectContainer container;
105:
107: if (A->cmap.rstart != B->rmap.rstart || A->cmap.rend != B->rmap.rend){
108: SETERRQ4(PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap.rstart,A->cmap.rend,B->rmap.rstart,B->rmap.rend);
109: }
110: PetscNew(Mat_MatMatMultMPI,&mult);
112: /* create a seq matrix B_seq = submatrix of B consisting of the rows of B that correspond to nonzero columns of A */
113: MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&mult->isrowb,&mult->iscolb,&mult->brstart,&mult->B_seq);
115: /* create a seq matrix A_seq = submatrix of A by taking all local rows of A */
116: start = A->rmap.rstart; end = A->rmap.rend;
117: ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&mult->isrowa);
118: MatGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,&mult->isrowa,&mult->isrowb,&mult->A_loc);
120: /* compute C_seq = A_seq * B_seq */
121: MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_INITIAL_MATRIX,fill,&mult->C_seq);
123: /* create the mpi matrix C by concatenating C_seq */
124: PetscObjectReference((PetscObject)mult->C_seq); /* prevent C_seq from being destroyed by MatMerge() */
125: MatMerge(A->comm,mult->C_seq,B->cmap.n,MAT_INITIAL_MATRIX,C);
126:
127: /* attach the supporting struct to C for reuse of symbolic C */
128: PetscObjectContainerCreate(PETSC_COMM_SELF,&container);
129: PetscObjectContainerSetPointer(container,mult);
130: PetscObjectCompose((PetscObject)(*C),"Mat_MatMatMultMPI",(PetscObject)container);
131: PetscObjectContainerSetUserDestroy(container,PetscObjectContainerDestroy_Mat_MatMatMultMPI);
132: mult->MatDestroy = (*C)->ops->destroy;
133: mult->MatDuplicate = (*C)->ops->duplicate;
135: (*C)->ops->destroy = MatDestroy_MPIAIJ_MatMatMult;
136: (*C)->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult;
137: return(0);
138: }
140: /* This routine is called ONLY in the case of reusing previously computed symbolic C */
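/*
   The index sets and sequential matrices created by the symbolic phase are refreshed in place
   with MAT_REUSE_MATRIX, so A and B are assumed to retain the nonzero structure they had when
   the symbolic product was computed; only their numerical values may have changed.
*/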
143: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat B,Mat C)
144: {
145: PetscErrorCode ierr;
146: Mat *seq;
147: Mat_MatMatMultMPI *mult;
148: PetscObjectContainer container;
151: PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject *)&container);
152: if (container) {
153: PetscObjectContainerGetPointer(container,(void **)&mult);
154: } else {
155: SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
156: }
158: seq = &mult->B_seq;
159: MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);
160: mult->B_seq = *seq;
161:
162: seq = &mult->A_loc;
163: MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);
164: mult->A_loc = *seq;
166: MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_REUSE_MATRIX,0.0,&mult->C_seq);
168: PetscObjectReference((PetscObject)mult->C_seq);
169: MatMerge(A->comm,mult->C_seq,B->cmap.n,MAT_REUSE_MATRIX,&C);
170: return(0);
171: }
175: PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
176: {
180: if (scall == MAT_INITIAL_MATRIX){
181: MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);
182: }
183: MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);
184: return(0);
185: }
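/*
   Context attached to the dense product C (under the key "workB") by the symbolic phase:
     workB            - sequential dense matrix holding the off-process rows of B needed for
                        the local product
     rvalues, svalues - receive and send buffers for those rows (B->cmap.N values per row)
     rwaits, swaits   - MPI request arrays for the nonblocking receives and sends
*/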
187: typedef struct {
188: Mat workB;
189: PetscScalar *rvalues,*svalues;
190: MPI_Request *rwaits,*swaits;
191: } MPIAIJ_MPIDense;
193: PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
194: {
195: MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx;
196: PetscErrorCode ierr;
199: if (contents->workB) {MatDestroy(contents->workB);}
200: PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);
201: PetscFree(contents);
202: return(0);
203: }
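/*
   MatMatMultSymbolic_MPIAIJ_MPIDense - creates the parallel dense product C with
   MatMatMultSymbolic_MPIDense_MPIDense() and attaches an MPIAIJ_MPIDense context to it.
   workB is given one row for every off-diagonal column of A (aij->B->cmap.n rows) and
   B->cmap.N columns; the communication buffers and request arrays are sized from the
   send/receive pattern of A's Mvctx VecScatter.
*/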
207: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
208: {
209: PetscErrorCode ierr;
210: Mat_MPIAIJ *aij = (Mat_MPIAIJ*) A->data;
211: PetscInt nz = aij->B->cmap.n;
212: PetscObjectContainer cont;
213: MPIAIJ_MPIDense *contents;
214: VecScatter ctx = aij->Mvctx;
215: VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
216: VecScatter_MPI_General *to = ( VecScatter_MPI_General*) ctx->todata;
219: MatMatMultSymbolic_MPIDense_MPIDense(A,B,0.0,C);
221: PetscObjectContainerCreate(A->comm,&cont);
222: PetscNew(MPIAIJ_MPIDense,&contents);
223: PetscObjectContainerSetPointer(cont,contents);
224: PetscObjectContainerSetUserDestroy(cont,MPIAIJ_MPIDenseDestroy);
226: /* Create the work matrix used to store the off-process rows of B needed for the local product */
227: MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap.N,PETSC_NULL,&contents->workB);
229: /* Create work arrays needed */
230: PetscMalloc4(B->cmap.N*from->starts[from->n],PetscScalar,&contents->rvalues,
231: B->cmap.N*to->starts[to->n],PetscScalar,&contents->svalues,
232: from->n,MPI_Request,&contents->rwaits,
233: to->n,MPI_Request,&contents->swaits);
235: PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)cont);
236: PetscObjectContainerDestroy(cont);
237: return(0);
238: }
240: /*
241: Performs an efficient scatter on the rows of B needed by this process; this is
242: a modification of the VecScatterBegin_() routines.
243: */
244: PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
245: {
246: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
247: PetscErrorCode ierr;
248: PetscScalar *b,*w,*svalues,*rvalues;
249: VecScatter ctx = aij->Mvctx;
250: VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
251: VecScatter_MPI_General *to = ( VecScatter_MPI_General*) ctx->todata;
252: PetscInt i,j,k;
253: PetscMPIInt *sindices,*sstarts,*sprocs,*rindices,*rstarts,*rprocs,nrecvs;
254: MPI_Request *swaits,*rwaits;
255: MPI_Comm comm = A->comm;
256: PetscMPIInt tag = ctx->tag,ncols = B->cmap.N, nrows = aij->B->cmap.n,imdex,nrowsB = B->rmap.n;
257: MPI_Status status;
258: MPIAIJ_MPIDense *contents;
259: PetscObjectContainer cont;
260: Mat workB;
263: PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&cont);
264: PetscObjectContainerGetPointer(cont,(void**)&contents);
266: workB = *outworkB = contents->workB;
267: if (nrows != workB->rmap.n) SETERRQ2(PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap.n,nrows);
268: sindices = to->indices;
269: sstarts = to->starts;
270: sprocs = to->procs;
271: swaits = contents->swaits;
272: svalues = contents->svalues;
274: rindices = from->indices;
275: rstarts = from->starts;
276: rprocs = from->procs;
277: rwaits = contents->rwaits;
278: rvalues = contents->rvalues;
280: MatGetArray(B,&b);
281: MatGetArray(workB,&w);
283: for (i=0; i<from->n; i++) {
284: MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
285: }
287: for (i=0; i<to->n; i++) {
288: /* pack a message at a time */
289: CHKMEMQ;
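      /* b is the column-major array of the local rows of B (leading dimension nrowsB);
         row sindices[sstarts[i]+j] of each of the ncols columns is packed contiguously
         into svalues */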
290: for (j=0; j<sstarts[i+1]-sstarts[i]; j++){
291: for (k=0; k<ncols; k++) {
292: svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
293: }
294: }
295: CHKMEMQ;
296: MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
297: }
299: nrecvs = from->n;
300: while (nrecvs) {
301: MPI_Waitany(from->n,rwaits,&imdex,&status);
302: nrecvs--;
303: /* unpack a message at a time */
304: CHKMEMQ;
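    /* w is the column-major array of workB (leading dimension nrows = aij->B->cmap.n);
       each received row is copied into row rindices[rstarts[imdex]+j] of workB */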
305: for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++){
306: for (k=0; k<ncols; k++) {
307: w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
308: }
309: }
310: CHKMEMQ;
311: }
312: if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}
314: MatRestoreArray(B,&b);
315: MatRestoreArray(workB,&w);
316: MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);
317: MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);
318: return(0);
319: }
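/*
   MatMatMultNumeric_MPIAIJ_MPIDense - forms the local rows of C as

       C_local = A_diag * B_local + A_off * workB

   where A_diag (aij->A) and A_off (aij->B) are the diagonal and off-diagonal blocks of the
   MPIAIJ matrix A, B_local is the locally owned part of the dense matrix B, and workB holds
   the off-process rows of B gathered by MatMPIDenseScatter().
*/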
324: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
325: {
326: PetscErrorCode ierr;
327: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
328: Mat_MPIDense *bdense = (Mat_MPIDense*)B->data;
329: Mat_MPIDense *cdense = (Mat_MPIDense*)C->data;
330: Mat workB;
334: /* diagonal block of A times all local rows of B */
335: MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);
337: /* get the off-process parts of B needed to complete the product */
338: MatMPIDenseScatter(A,B,C,&workB);
340: /* off-diagonal block of A times nonlocal rows of B */
341: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);
342: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
343: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
344: return(0);
345: }