Actual source code: mpi.h
/*
   This is a special set of bindings for uni-processor use of MPI by the PETSc library.

   NOT ALL THE MPI CALLS ARE IMPLEMENTED CORRECTLY! Only those needed by PETSc are.
   For example, this implementation
   * does not implement send to self;
   * does not implement attributes correctly.
*/
/*
   The following information is a response to one of the petsc-maint questions
   regarding MPIUNI.

   MPIUNI was developed with the aim of getting PETSc compiled and usable in the
   absence of MPI. That is why the individual functions are not documented. The
   development strategy was to make just enough changes so that the PETSc source
   compiles without errors and runs in uni-processor mode.

   Most PETSc objects have separate sequential and parallel implementations. For
   example, there are two sparse matrix storage formats - SeqAIJ and MPIAIJ. Some
   MPI routines are used in the Seq part, but most of them are used in the MPI
   part, and the send/receive calls are found mostly in the MPI part.

   When MPIUNI is used, only the Seq versions of the PETSc objects are used, even
   though the MPI variants of the objects are compiled. Since there are no
   send/receive calls in the Seq variants, PETSc works fine with MPIUNI in
   sequential mode.

   The reason some send/receive functions are defined to abort() is to detect
   sections of code that use send/receive functions and get executed in
   sequential mode (which should not happen in the case of PETSc).

   One of the goals of MPIUNI is to avoid the function-call overhead of a regular
   MPI implementation. If this were not the case, we could just as well have used
   a regular implementation of MPI, as they are available on almost all machines.
   Hence most of the functions are implemented as macros. An additional benefit
   of MPIUNI is that we were able to use PETSc on machines where installing a
   proper implementation of MPI was painful (e.g. Windows NT).

   A proper implementation of send/receive would involve writing a function for
   each of them. Inside each of these functions, we would have to check whether
   the send is to self or the receive is from self, and then do the buffering
   accordingly (until the receive is called) - or, if a nonblocking receive is
   called, do a copy, etc. Handling the buffering might be complicated enough
   that one might as well use a proper implementation of MPI. This is the reason
   send to self is not implemented in MPIUNI, and never will be.
*/
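/*
   Illustrative sketch (not part of this header): under MPIUNI a standard
   single-process MPI code still compiles and behaves as one would expect on
   one rank. The names argc, argv, size, and rank below are ordinary user
   variables, not MPIUNI symbols.

       MPI_Init(&argc,&argv);                 - consumes its arguments, returns MPI_SUCCESS
       MPI_Comm_size(MPI_COMM_WORLD,&size);   - sets size to 1
       MPI_Comm_rank(MPI_COMM_WORLD,&rank);   - sets rank to 0
       MPI_Barrier(MPI_COMM_WORLD);           - no-op, returns MPI_SUCCESS
       MPI_Finalize();                        - handled by Petsc_MPI_Finalize in mpi.c

   Any code path that actually attempts point-to-point communication
   (MPI_Recv, MPI_Isend, ...) calls MPI_Abort() instead, flagging it as a bug
   when run under MPIUNI.
*/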
/* Include guard; the final #endif at the end of this file closes it */
#if !defined(__MPIUNI_H)
#define __MPIUNI_H

/* Required by abort() in mpi.c and for win64 */
#include "petscconf.h"

#if defined(__cplusplus)
extern "C" {
#endif
/* We require an integer type large enough to hold a pointer */
#if !defined(MPIUNI_INTPTR)
#define MPIUNI_INTPTR long
#endif

#define _petsc_mpi_uni
/*
   MPIUNI_TMP is used in the macros below only to stop various C/C++ compilers
   from generating warning messages about unused variables while compiling PETSc.
*/
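/* Scratch variable the macros below assign otherwise-unused arguments into
   (presumably defined once in mpi.c, alongside the Petsc_MPI_* routines). */
extern void *MPIUNI_TMP;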
#define MPI_COMM_WORLD       1
#define MPI_COMM_SELF        MPI_COMM_WORLD
#define MPI_COMM_NULL        0
#define MPI_SUCCESS          0
#define MPI_IDENT            0
#define MPI_CONGRUENT        0
#define MPI_SIMILAR          0
#define MPI_UNEQUAL          3
#define MPI_ANY_SOURCE     (-2)
#define MPI_KEYVAL_INVALID   0
#define MPI_ERR_UNKNOWN     18
#define MPI_ERR_INTERN      21
#define MPI_ERR_OTHER        1
#define MPI_TAG_UB           0
#define MPI_ERRORS_RETURN    0

/* External types */
typedef int    MPI_Comm;
typedef void   *MPI_Request;
typedef void   *MPI_Group;
typedef struct {int MPI_TAG,MPI_SOURCE,MPI_ERROR;} MPI_Status;
typedef char   *MPI_Errhandler;
typedef int    MPI_Fint;
/* In order to handle datatypes, we make them into "sizeof(raw-type)";
   this allows us to do the MPIUNI_Memcpy's easily */
#define MPI_Datatype        int
#define MPI_FLOAT           sizeof(float)
#define MPI_DOUBLE          sizeof(double)
#define MPI_LONG_DOUBLE     sizeof(long double)
#define MPI_CHAR            sizeof(char)
#define MPI_BYTE            sizeof(char)
#define MPI_INT             sizeof(int)
#define MPI_LONG            sizeof(long)
#define MPI_LONG_LONG_INT   sizeof(long long)
#define MPI_SHORT           sizeof(short)
#define MPI_UNSIGNED_SHORT  sizeof(unsigned short)
#define MPI_UNSIGNED        sizeof(unsigned)
#define MPI_UNSIGNED_CHAR   sizeof(unsigned char)
#define MPI_UNSIGNED_LONG   sizeof(unsigned long)
#define MPI_COMPLEX         (2*sizeof(float))
#define MPI_DOUBLE_COMPLEX  (2*sizeof(double))
#define MPI_FLOAT_INT       (sizeof(float) + sizeof(int))
#define MPI_DOUBLE_INT      (sizeof(double) + sizeof(int))
#define MPI_LONG_INT        (sizeof(long) + sizeof(int))
#define MPI_SHORT_INT       (sizeof(short) + sizeof(int))
#define MPI_2INT            (2*sizeof(int))
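/*
   Because a "datatype" here is simply its size in bytes, the communication
   macros below can turn (count, datatype) pairs directly into byte counts for
   MPIUNI_Memcpy. A small sketch of the arithmetic (the numeric values assume a
   typical LP64 system and are given only as an illustration):

       MPI_DOUBLE           -> sizeof(double)               = 8 bytes
       MPI_DOUBLE_INT       -> sizeof(double) + sizeof(int) = 12 bytes
       10 items of MPI_2INT -> 10 * 2*sizeof(int)           = 80 bytes copied
*/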
#define MPI_REQUEST_NULL    ((MPI_Request)0)
#define MPI_GROUP_NULL      ((MPI_Group)0)
typedef int MPI_Op;

#define MPI_SUM             0
#define MPI_ANY_TAG       (-1)
#define MPI_DATATYPE_NULL   0

#define MPI_MAX_ERROR_STRING 2056

/*
   Prototypes of some functions which are implemented in mpi.c
*/
typedef int   (MPI_Copy_function)(MPI_Comm,int,void *,void *,void *,int *);
typedef int   (MPI_Delete_function)(MPI_Comm,int,void *,void *);
typedef void  (MPI_User_function)(void *,void *,int *,MPI_Datatype *);
/*
   So that the PETSc MPIUNI can be used together with another package that has
   its own MPIUni, the following function names are mapped to unique PETSc
   names. These functions are defined in mpi.c.
*/
#define MPI_Abort         Petsc_MPI_Abort
#define MPI_Attr_get      Petsc_MPI_Attr_get
#define MPI_Keyval_free   Petsc_MPI_Keyval_free
#define MPI_Attr_put      Petsc_MPI_Attr_put
#define MPI_Attr_delete   Petsc_MPI_Attr_delete
#define MPI_Keyval_create Petsc_MPI_Keyval_create
#define MPI_Comm_free     Petsc_MPI_Comm_free
#define MPI_Initialized   Petsc_MPI_Initialized
#define MPI_Comm_dup      Petsc_MPI_Comm_dup
#define MPI_Finalize      Petsc_MPI_Finalize
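/*
   Example of the remapping (illustrative only; ierr and newcomm are user
   variables): after preprocessing, the call

       ierr = MPI_Comm_dup(MPI_COMM_WORLD,&newcomm);

   becomes

       ierr = Petsc_MPI_Comm_dup(1,&newcomm);

   so the symbol actually linked is Petsc_MPI_Comm_dup from mpi.c, which cannot
   clash with an MPI_Comm_dup provided by another package's MPIUni.
*/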
#define MPI_Aint int
/*
   Routines we have replaced with macros that do nothing.
   Some return error codes, others return success.
*/

#define MPI_Comm_f2c(comm) (MPI_Comm)(comm)
#define MPI_Comm_c2f(comm) (MPI_Fint)(comm)

#define MPI_Init(argc,argv) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (argc),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (argv),\
       MPI_SUCCESS)
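/*
   Pattern used by most macros below (sketch of what the preprocessor produces):
   each argument is "used" by assigning it, cast through MPIUNI_INTPTR, to the
   scratch variable MPIUNI_TMP, and the comma expression then evaluates to a
   result code. For instance MPI_Init(&argc,&argv) expands to

       (MPIUNI_TMP = (void*)(MPIUNI_INTPTR)(&argc),
        MPIUNI_TMP = (void*)(MPIUNI_INTPTR)(&argv),
        MPI_SUCCESS)

   which silences unused-argument warnings at essentially zero run-time cost.
   Macros whose real counterparts must never run in sequential code end in
   MPI_Abort(MPI_COMM_WORLD,0) instead of MPI_SUCCESS.
*/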
#define MPI_Send(buf,count,datatype,dest,tag,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_SUCCESS)
#define MPI_Recv(buf,count,datatype,source,tag,comm,status) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Get_count(status,datatype,count) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Bsend(buf,count,datatype,dest,tag,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_SUCCESS)
#define MPI_Ssend(buf,count,datatype,dest,tag,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_SUCCESS)
#define MPI_Rsend(buf,count,datatype,dest,tag,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_SUCCESS)
#define MPI_Buffer_attach(buffer,size) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
       MPI_SUCCESS)
#define MPI_Buffer_detach(buffer,size) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
       MPI_SUCCESS)
#define MPI_Ibsend(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Issend(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Irsend(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Irecv(buf,count,datatype,source,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Isend(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Wait(request,status) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
       MPI_SUCCESS)
#define MPI_Test(request,flag,status) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
       *(flag) = 0,\
       MPI_SUCCESS)
#define MPI_Request_free(request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Waitany(a,b,c,d) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),\
       MPI_SUCCESS)
#define MPI_Testany(a,b,c,d,e) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (e),\
       MPI_SUCCESS)
#define MPI_Waitall(count,array_of_requests,array_of_statuses) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
       MPI_SUCCESS)
#define MPI_Testall(count,array_of_requests,flag,array_of_statuses) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (flag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
       MPI_SUCCESS)
#define MPI_Waitsome(incount,array_of_requests,outcount,\
                     array_of_indices,array_of_statuses) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (incount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (outcount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_indices),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
       MPI_SUCCESS)
#define MPI_Comm_group(comm,group) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
       MPI_SUCCESS)
#define MPI_Group_incl(group,n,ranks,newgroup) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (n),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (ranks),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newgroup),\
       MPI_SUCCESS)
#define MPI_Testsome(incount,array_of_requests,outcount,\
                     array_of_indices,array_of_statuses) MPI_SUCCESS
#define MPI_Iprobe(source,tag,comm,flag,status) (*(flag)=0,MPI_SUCCESS)
#define MPI_Probe(source,tag,comm,status) MPI_SUCCESS
#define MPI_Cancel(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
#define MPI_Test_cancelled(status,flag) (*(flag)=0,MPI_SUCCESS)
#define MPI_Send_init(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Bsend_init(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Ssend_init(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Rsend_init(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Recv_init(buf,count,datatype,source,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Start(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
#define MPI_Startall(count,array_of_requests) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
       MPI_SUCCESS)
#define MPI_Op_create(function,commute,op) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (function),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (commute),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
       MPI_SUCCESS)
#define MPI_Op_free(op) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
       MPI_SUCCESS)
/* Need to determine sizeof "sendtype" */
#define MPI_Sendrecv(sendbuf,sendcount,sendtype,\
                     dest,sendtag,recvbuf,recvcount,\
                     recvtype,source,recvtag,\
                     comm,status) \
      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*(sendtype))
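/*
   On one process MPI_Sendrecv degenerates into a local copy: the "datatype" is
   already a byte size, so the macro copies (sendcount)*(sendtype) bytes from
   sendbuf to recvbuf. Hypothetical example (x, y, and status are user variables):

       double x[4],y[4];
       MPI_Sendrecv(x,4,MPI_DOUBLE,0,0,y,4,MPI_DOUBLE,0,0,MPI_COMM_WORLD,&status);
       - copies 4*sizeof(double) bytes from x into y; the remaining arguments are ignored

   Note the receive-side arguments are not checked against the send side.
*/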
#define MPI_Sendrecv_replace(buf,count,datatype,dest,sendtag,\
                             source,recvtag,comm,status) MPI_SUCCESS
#define MPI_Type_contiguous(count,oldtype,newtype) \
      (*(newtype) = (count)*(oldtype),MPI_SUCCESS)
#define MPI_Type_vector(count,blocklength,stride,oldtype,newtype) MPI_SUCCESS
#define MPI_Type_hvector(count,blocklength,stride,oldtype,newtype) MPI_SUCCESS
#define MPI_Type_indexed(count,array_of_blocklengths,\
                         array_of_displacements,oldtype,\
                         newtype) MPI_SUCCESS
#define MPI_Type_hindexed(count,array_of_blocklengths,\
                          array_of_displacements,oldtype,\
                          newtype) MPI_SUCCESS
#define MPI_Type_struct(count,array_of_blocklengths,\
                        array_of_displacements,\
                        array_of_types,newtype) MPI_SUCCESS
#define MPI_Address(location,address) \
      (*(address) = (MPIUNI_INTPTR)(char *)(location),MPI_SUCCESS)
#define MPI_Type_extent(datatype,extent) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Type_size(datatype,size) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Type_lb(datatype,displacement) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Type_ub(datatype,displacement) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Type_commit(datatype) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPI_SUCCESS)
#define MPI_Type_free(datatype) MPI_SUCCESS
#define MPI_Get_elements(status,datatype,count) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Pack(inbuf,incount,datatype,outbuf,\
                 outsize,position,comm) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Unpack(inbuf,insize,position,outbuf,\
                   outcount,datatype,comm) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Pack_size(incount,datatype,comm,size) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Barrier(comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_SUCCESS)
#define MPI_Bcast(buffer,count,datatype,root,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_SUCCESS)
#define MPI_Gather(sendbuf,sendcount,sendtype,\
                   recvbuf,recvcount,recvtype,\
                   root,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*(sendtype)),\
       MPI_SUCCESS)
#define MPI_Gatherv(sendbuf,sendcount,sendtype,\
                    recvbuf,recvcounts,displs,\
                    recvtype,root,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*(sendtype)),\
       MPI_SUCCESS)
#define MPI_Scatter(sendbuf,sendcount,sendtype,\
                    recvbuf,recvcount,recvtype,\
                    root,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendbuf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvbuf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Scatterv(sendbuf,sendcounts,displs,\
                     sendtype,recvbuf,recvcount,\
                     recvtype,root,comm) \
      (MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*(recvtype)),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcounts),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_SUCCESS)
#define MPI_Allgather(sendbuf,sendcount,sendtype,\
                      recvbuf,recvcount,recvtype,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*(sendtype)),\
       MPI_SUCCESS)
#define MPI_Allgatherv(sendbuf,sendcount,sendtype,\
                       recvbuf,recvcounts,displs,recvtype,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*(sendtype)),\
       MPI_SUCCESS)
#define MPI_Alltoall(sendbuf,sendcount,sendtype,\
                     recvbuf,recvcount,recvtype,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*(sendtype)),\
       MPI_SUCCESS)
#define MPI_Alltoallv(sendbuf,sendcounts,sdispls,\
                      sendtype,recvbuf,recvcounts,\
                      rdispls,recvtype,comm) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Alltoallw(sendbuf,sendcounts,sdispls,\
                      sendtypes,recvbuf,recvcounts,\
                      rdispls,recvtypes,comm) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Reduce(sendbuf,recvbuf,count,\
                   datatype,op,root,comm) \
      (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype)),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
#define MPI_Allreduce(sendbuf,recvbuf,count,datatype,op,comm) \
      (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype)),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
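/*
   Reduction sketch: with a single process every reduction is the identity, so
   the op argument (e.g. MPI_SUM) is ignored and the result is just a copy of
   the input. Illustrative call (a, b, and n are user variables):

       MPI_Allreduce(a,b,n,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
       - copies n*sizeof(int) bytes from a to b and returns MPI_SUCCESS

   MPI_Reduce and MPI_Scan behave the same way; MPI_Reduce_scatter is one of the
   calls that aborts instead.
*/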
#define MPI_Scan(sendbuf,recvbuf,count,datatype,op,comm) \
      (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype)),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
#define MPI_Reduce_scatter(sendbuf,recvbuf,recvcounts,\
                           datatype,op,comm) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Group_size(group,size) (*(size)=1,MPI_SUCCESS)
#define MPI_Group_rank(group,rank) (*(rank)=0,MPI_SUCCESS)
#define MPI_Group_translate_ranks(group1,n,ranks1,\
                                  group2,ranks2) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Group_compare(group1,group2,result) \
      (*(result)=1,MPI_SUCCESS)
#define MPI_Group_union(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_intersection(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_difference(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_excl(group,n,ranks,newgroup) MPI_SUCCESS
#define MPI_Group_range_incl(group,n,ranges,newgroup) MPI_SUCCESS
#define MPI_Group_range_excl(group,n,ranges,newgroup) MPI_SUCCESS
#define MPI_Group_free(group) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
       MPI_SUCCESS)
#define MPI_Comm_size(comm,size) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       *(size)=1,\
       MPI_SUCCESS)
#define MPI_Comm_rank(comm,rank) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       *(rank)=0,\
       MPI_SUCCESS)
#define MPI_Comm_compare(comm1,comm2,result) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm1),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm2),\
       *(result)=MPI_IDENT,\
       MPI_SUCCESS)
#define MPI_Comm_create(comm,group,newcomm) \
      (*(newcomm) = (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
       MPI_SUCCESS)
#define MPI_Comm_split(comm,color,key,newcomm) MPI_SUCCESS
#define MPI_Comm_test_inter(comm,flag) (*(flag)=1,MPI_SUCCESS)
#define MPI_Comm_remote_size(comm,size) (*(size)=1,MPI_SUCCESS)
#define MPI_Comm_remote_group(comm,group) MPI_SUCCESS
#define MPI_Intercomm_create(local_comm,local_leader,peer_comm,\
                             remote_leader,tag,newintercomm) MPI_SUCCESS
#define MPI_Intercomm_merge(intercomm,high,newintracomm) MPI_SUCCESS

#define MPI_Topo_test(comm,status) MPI_SUCCESS
#define MPI_Cart_create(comm_old,ndims,dims,periods,\
                        reorder,comm_cart) MPI_SUCCESS
#define MPI_Dims_create(nnodes,ndims,dims) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_create(comm,a,b,c,d,e) MPI_SUCCESS
#define MPI_Graphdims_get(comm,nnodes,nedges) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_get(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cartdim_get(comm,ndims) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_get(comm,maxdims,dims,periods,coords) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_rank(comm,coords,rank) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_coords(comm,rank,maxdims,coords) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_neighbors_count(comm,rank,nneighbors) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_neighbors(comm,rank,maxneighbors,neighbors) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_shift(comm,direction,disp,rank_source,rank_dest) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_sub(comm,remain_dims,newcomm) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_map(comm,ndims,dims,periods,newrank) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_map(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Get_processor_name(name,result_len) \
      (MPIUNI_Memcpy(name,"localhost",10*sizeof(char)),*(result_len) = 9,MPI_SUCCESS)
#define MPI_Errhandler_create(function,errhandler) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (errhandler),\
       MPI_SUCCESS)
#define MPI_Errhandler_set(comm,errhandler) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (errhandler),\
       MPI_SUCCESS)
#define MPI_Errhandler_get(comm,errhandler) MPI_SUCCESS
#define MPI_Errhandler_free(errhandler) MPI_SUCCESS
#define MPI_Error_string(errorcode,string,result_len) MPI_SUCCESS
#define MPI_Error_class(errorcode,errorclass) MPI_SUCCESS
#define MPI_Wtick() 1.0
#define MPI_Wtime() 0.0
#define MPI_Pcontrol(level) MPI_SUCCESS

#define MPI_NULL_COPY_FN   0
#define MPI_NULL_DELETE_FN 0

#if defined(__cplusplus)
}
#endif
#endif