Actual source code: mpi.h

  1: /*
  2:    This is a special set of bindings for uni-processor use of MPI by the PETSc library.
  3:  
  4:    NOT ALL THE MPI CALLS ARE IMPLEMENTED CORRECTLY! Only those needed in PETSc.

  6:    For example,
  7:    * Does not implement send to self.
  8:    * Does not implement attributes correctly.
  9: */

 11: /*
 12:   The following info is a response to one of the petsc-maint questions
 13:   regarding MPIUNI.

 15:   MPIUNI was developed with the aim of getting PETSc compiled and
 16:   usable in the absence of a full MPI implementation. With it, we
 17:   were able to provide PETSc on Windows and Windows64 even before any
 18:   MPI implementation was available on these platforms [or with certain
 19:   compilers, like Borland, that do not have a usable MPI
 20:   implementation].

 22:   However, providing a sequential, standards-compliant MPI
 23:   implementation is *not* the goal of MPIUNI. The development strategy
 24:   was to make just enough changes so that PETSc sources and examples
 25:   compile without errors and run in uni-processor mode. This is
 26:   why the individual functions are not documented.

 28:   PETSc usage of MPIUNI is primarily from C. However, a minimal Fortran
 29:   interface is also provided - to get PETSc Fortran examples with a
 30:   few MPI calls working.

 32:   One of the optimizations in MPIUNI is to avoid function call
 33:   overhead when possible. Hence most of the C functions are
 34:   implemented as macros. The function calls cannot be avoided
 35:   with Fortran usage, however.

 37:   Most PETSc objects have both sequential and parallel
 38:   implementations, which are separate. For example, there are two
 39:   sparse matrix storage formats - SeqAIJ and MPIAIJ. Some MPI
 40:   routines are used in the Seq part, but most of them are used in the
 41:   MPI part. The send/receive calls are found mostly in the MPI
 42:   part.

 44:   When MPIUNI is used, only the Seq versions of the PETSc objects are
 45:   used, even though the MPI variants of the objects are compiled. Since
 46:   there are no send/receive calls in the Seq variants, PETSc works fine
 47:   with MPIUNI in sequential mode.

 49:   The reason some send/receive functions are defined to abort() is to
 50:   detect sections of code that use send/receive functions and get
 51:   executed in sequential mode (which shouldn't happen in the case of
 52:   PETSc).

 54:   A proper implementation of send/receive would involve writing a
 55:   function for each of them. Inside each of these functions we would
 56:   have to check whether the send is to self or the receive is from self,
 57:   and then do the buffering accordingly (until the receive is called) -
 58:   or, if a nonblocking receive is called, do a copy, etc. Handling
 59:   the buffering aspects would be complicated enough that, in that
 60:   case, a proper implementation of MPI might as well be used. This is
 61:   the reason send-to-self is not implemented in MPIUNI, and never
 62:   will be.
 63:   
 64:   Proper implementations of MPI [e.g. MPICH & OpenMPI] are
 65:   available for most machines. When these packages are available, it is
 66:   generally preferable to use one of them instead of MPIUNI - even if
 67:   the user is using PETSc sequentially.

 69:     - MPIUNI does not support all MPI functions [or functionality].
 70:     Hence it might not work with external packages or user code that
 71:     has MPI calls in it.

 73:     - MPIUNI is not a standards-compliant implementation for np=1.
 74:     For example, if the user code has a send/recv to self, it will
 75:     abort. [There are similar issues with a number of other pieces of
 76:     MPI functionality.] MPICH & OpenMPI, however, are correct
 77:     implementations of the MPI standard for np=1.

 79:     - When user code uses multiple MPI-based packages that have their
 80:     own *internal* stubs equivalent to MPIUNI, these multiple
 81:     implementations of MPI for np=1 invariably conflict with each other
 82:     in sequential mode. The correct thing to do is to make all such
 83:     packages use the *same* MPI implementation for np=1. MPICH/OpenMPI
 84:     satisfy this requirement correctly [and hence are the correct choice].

 86:     - Using MPICH/OpenMPI sequentially should have minimal
 87:     disadvantages. [For example, these binaries can be run without
 88:     mpirun/mpiexec as ./executable, without requiring any extra
 89:     configuration for ssh/rsh/daemons etc.] This should not be a
 90:     reason to avoid these packages for sequential use.

 92:     Instructions for building standalone MPIUNI [e.g. Linux with gcc/gfortran]:
 93:     - extract include/mpiuni/mpi.h,mpif.f, src/sys/mpiuni/mpi.c from PETSc
 94:     - remove the reference to petscconf.h from mpi.h
 95:     - gcc -c mpi.c -DPETSC_HAVE_STDLIB_H -DPETSC_HAVE_FORTRAN_UNDERSCORE
 96:     - ar cr libmpiuni.a mpi.o   [a minimal test program is sketched just after this comment]

 98: */
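A minimal test of such a standalone build might look like the following sketch (the
file name test.c and the exact compile line are illustrative assumptions, not part
of PETSc):

    #include <stdio.h>
    #include "mpi.h"                          /* the MPIUNI mpi.h extracted above */

    int main(int argc,char **argv)
    {
      int rank,size;
      MPI_Init(&argc,&argv);
      MPI_Comm_rank(MPI_COMM_WORLD,&rank);    /* macro below: always sets rank = 0 */
      MPI_Comm_size(MPI_COMM_WORLD,&size);    /* macro below: always sets size = 1 */
      printf("Hello from rank %d of %d\n",rank,size);
      MPI_Finalize();
      return 0;
    }

Compiled and linked as, say, gcc test.c libmpiuni.a -o test, this runs as a plain
./test with no mpirun/mpiexec.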


103: /* Required by abort() in mpi.c & for win64 */
104: #include "petscconf.h"

106: #if defined(__cplusplus)
107: extern "C" {
108: #endif

110: /* requires an integer type large enough to hold a pointer */
111: #if !defined(MPIUNI_INTPTR)
112: #define MPIUNI_INTPTR long
113: #endif

115: /*

117:     MPIUNI_TMP is used in the macros below only to stop various C/C++ compilers
118: from generating warning messages about unused variables while compiling PETSc.
119: */
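For example, with these macros a call such as

    ierr = MPI_Barrier(comm);

expands (see the MPI_Barrier macro below) to the comma expression

    ierr = (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm), MPI_SUCCESS);

so the otherwise-unused argument is consumed through MPIUNI_TMP, no warning is
generated, and the call evaluates to MPI_SUCCESS with no function-call overhead.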

122: #define MPI_COMM_WORLD       1
123: #define MPI_COMM_SELF        MPI_COMM_WORLD
124: #define MPI_COMM_NULL        0
125: #define MPI_SUCCESS          0
126: #define MPI_IDENT            0
127: #define MPI_CONGRUENT        0
128: #define MPI_SIMILAR          0
129: #define MPI_UNEQUAL          3
130: #define MPI_ANY_SOURCE     (-2)
131: #define MPI_KEYVAL_INVALID   0
132: #define MPI_ERR_UNKNOWN     18
133: #define MPI_ERR_INTERN      21
134: #define MPI_ERR_OTHER        1
135: #define MPI_TAG_UB           0
136: #define MPI_ERRORS_RETURN    0

138: /* External types */
139: typedef int    MPI_Comm;
140: typedef void   *MPI_Request;
141: typedef void   *MPI_Group;
142: typedef struct {int MPI_TAG,MPI_SOURCE,MPI_ERROR;} MPI_Status;
143: typedef char   *MPI_Errhandler;
144: typedef int    MPI_Fint;
145: typedef int    MPI_File;
146: typedef int    MPI_Info;
147: typedef int    MPI_Offset;


151: /* In order to handle datatypes, we define each one as "sizeof(raw-type)"; the byte count of a
152:     buffer is then simply (count)*(datatype), making the MPIUNI_Memcpy's easy (example below) */
153: #define MPI_Datatype        int
154: #define MPI_FLOAT           sizeof(float)
155: #define MPI_DOUBLE          sizeof(double)
156: #define MPI_LONG_DOUBLE     sizeof(long double)
157: #define MPI_CHAR            sizeof(char)
158: #define MPI_BYTE            sizeof(char)
159: #define MPI_INT             sizeof(int)
160: #define MPI_LONG            sizeof(long)
161: #define MPI_LONG_LONG_INT   sizeof(long long)
162: #define MPI_SHORT           sizeof(short)
163: #define MPI_UNSIGNED_SHORT  sizeof(unsigned short)
164: #define MPI_UNSIGNED        sizeof(unsigned)
165: #define MPI_UNSIGNED_CHAR   sizeof(unsigned char)
166: #define MPI_UNSIGNED_LONG   sizeof(unsigned long)
167: #define MPI_COMPLEX         (2*sizeof(float))
168: #define MPI_DOUBLE_COMPLEX  (2*sizeof(double))
169: #define MPI_FLOAT_INT       (sizeof(float) + sizeof(int))
170: #define MPI_DOUBLE_INT      (sizeof(double) + sizeof(int))
171: #define MPI_LONG_INT        (sizeof(long) + sizeof(int))
172: #define MPI_SHORT_INT       (sizeof(short) + sizeof(int))
173: #define MPI_2INT            (2* sizeof(int))
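As a concrete example of this encoding (variable names are illustrative), the call

    double a[3],b[3];
    MPI_Allreduce(a,b,3,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);

with MPI_DOUBLE defined as sizeof(double) reduces, via the MPI_Allreduce macro below,
to MPIUNI_Memcpy(b,a,3*sizeof(double)): the send buffer is simply copied into the
receive buffer on the single process.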

175: #define MPI_REQUEST_NULL    ((MPI_Request)0)
176: #define MPI_GROUP_NULL      ((MPI_Group)0)
177: #define MPI_INFO_NULL       ((MPI_Info)0)
178: #define MPI_BOTTOM          (void *)0
179: typedef int MPI_Op;

181: #define MPI_MODE_RDONLY   0
182: #define MPI_MODE_WRONLY   0
183: #define MPI_MODE_CREATE   0

185: #define MPI_SUM           0
186: #define MPI_MAX           0
187: #define MPI_MIN           0
188: #define MPI_ANY_TAG     (-1)
189: #define MPI_DATATYPE_NULL 0
190: #define MPI_PACKED        0
191: #define MPI_MAX_ERROR_STRING 2056
192: #define MPI_STATUS_IGNORE (MPI_Status *)1
193: #define MPI_ORDER_FORTRAN        57
194: #define MPI_IN_PLACE      ((void *) -1)

196: /*
197:   Prototypes of some functions which are implemented in mpi.c
198: */
199: typedef int   (MPI_Copy_function)(MPI_Comm,int,void *,void *,void *,int *);
200: typedef int   (MPI_Delete_function)(MPI_Comm,int,void *,void *);
201: typedef void  (MPI_User_function)(void*, void *, int *, MPI_Datatype *);

203: /*
204:   So that the PETSc MPIUNI can be used with another package that has its
205:   own MPIUNI-like stubs, the following function names can be mapped to unique
206:   PETSc names. Those functions are defined in mpi.c.
207: */
208: #if defined(MPIUNI_AVOID_MPI_NAMESPACE)
209: #define MPI_Abort         Petsc_MPI_Abort
210: #define MPI_Attr_get      Petsc_MPI_Attr_get
211: #define MPI_Keyval_free   Petsc_MPI_Keyval_free
212: #define MPI_Attr_put      Petsc_MPI_Attr_put
213: #define MPI_Attr_delete   Petsc_MPI_Attr_delete
214: #define MPI_Keyval_create Petsc_MPI_Keyval_create
215: #define MPI_Comm_free     Petsc_MPI_Comm_free
216: #define MPI_Comm_dup      Petsc_MPI_Comm_dup
217: #define MPI_Comm_create   Petsc_MPI_Comm_create
218: #define MPI_Init          Petsc_MPI_Init
219: #define MPI_Finalize      Petsc_MPI_Finalize
220: #define MPI_Initialized   Petsc_MPI_Initialized
221: #define MPI_Finalized     Petsc_MPI_Finalized
222: #endif
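As a usage sketch (the compile-line flag placement is an assumption), building with
-DMPIUNI_AVOID_MPI_NAMESPACE means a call written in the source as

    MPI_Init(&argc,&argv);

is preprocessed into Petsc_MPI_Init(&argc,&argv); MPIUNI then neither defines nor
references the plain MPI_* symbols for these routines, so it cannot collide at link
time with another package's own np=1 MPI stubs.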



239: #define MPI_Aint MPIUNI_INTPTR
240: /*
241:     Routines we have replaced with macros that do nothing;
242:     some return error codes, others return success.
243: */

245: #define MPI_Comm_f2c(comm) (MPI_Comm)(comm)
246: #define MPI_Comm_c2f(comm) (MPI_Fint)(comm)

248: #define MPI_Send(buf,count,datatype,dest,tag,comm)  \
249:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
250:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
251:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
252:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
253:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
254:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
255:       MPI_Abort(MPI_COMM_WORLD,0))
256: #define MPI_Recv(buf,count,datatype,source,tag,comm,status) \
257:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
258:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
259:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
260:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
261:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
262:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
263:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
264:       MPI_Abort(MPI_COMM_WORLD,0))
265: #define MPI_Get_count(status, datatype,count) \
266:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
267:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
268:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
269:       MPI_Abort(MPI_COMM_WORLD,0))
270: #define MPI_Bsend(buf,count,datatype,dest,tag,comm)  \
271:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
272:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
273:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
274:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
275:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
276:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
277:       MPI_Abort(MPI_COMM_WORLD,0))
278: #define MPI_Ssend(buf,count, datatype,dest,tag,comm) \
279:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
280:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
281:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
282:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
283:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
284:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
285:       MPI_Abort(MPI_COMM_WORLD,0))
286: #define MPI_Rsend(buf,count, datatype,dest,tag,comm) \
287:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
288:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
289:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
290:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
291:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
292:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
293:       MPI_Abort(MPI_COMM_WORLD,0))
294: #define MPI_Buffer_attach(buffer,size) \
295:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
296:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
297:       MPI_SUCCESS)
298: #define MPI_Buffer_detach(buffer,size)\
299:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
300:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
301:       MPI_SUCCESS)
302: #define MPI_Ibsend(buf,count, datatype,dest,tag,comm,request) \
303:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
304:        MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
305:        MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
306:        MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
307:        MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
308:        MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
309:        MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
310:        MPI_Abort(MPI_COMM_WORLD,0))
311: #define MPI_Issend(buf,count, datatype,dest,tag,comm,request) \
312:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
313:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
314:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
315:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
316:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
317:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
318:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
319:       MPI_Abort(MPI_COMM_WORLD,0))
320: #define MPI_Irsend(buf,count, datatype,dest,tag,comm,request) \
321:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
322:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
323:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
324:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
325:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
326:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
327:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
328:       MPI_Abort(MPI_COMM_WORLD,0))
329: #define MPI_Irecv(buf,count, datatype,source,tag,comm,request) \
330:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
331:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
332:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
333:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
334:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
335:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
336:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
337:       MPI_Abort(MPI_COMM_WORLD,0))
338: #define MPI_Isend(buf,count, datatype,dest,tag,comm,request) \
339:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
340:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
341:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
342:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
343:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
344:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
345:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
346:       MPI_Abort(MPI_COMM_WORLD,0))
347: #define MPI_Wait(request,status) \
348:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
349:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
350:       MPI_SUCCESS)
351: #define MPI_Test(request,flag,status) \
352:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
353:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
354:       *(flag) = 0, \
355:       MPI_SUCCESS)
356: #define MPI_Request_free(request) \
357:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
358:       MPI_SUCCESS)
359: #define MPI_Waitany(a,b,c,d) \
360:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
361:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
362:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
363:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),\
364:       MPI_SUCCESS)
365: #define MPI_Testany(a,b,c,d,e) \
366:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
367:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
368:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
369:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),\
370:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (e),\
371:       MPI_SUCCESS)
372: #define MPI_Waitall(count,array_of_requests,array_of_statuses) \
373:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
374:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
375:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
376:       MPI_SUCCESS)
377: #define MPI_Testall(count,array_of_requests,flag,array_of_statuses) \
378:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
379:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
380:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (flag),\
381:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
382:       MPI_SUCCESS)
383: #define MPI_Waitsome(incount,array_of_requests,outcount,\
384:                      array_of_indices,array_of_statuses) \
385:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (incount),\
386:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
387:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (outcount),\
388:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_indices),\
389:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
390:       MPI_SUCCESS)
391: #define MPI_Comm_group(comm,group) \
392:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
393:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
394:       MPI_SUCCESS)
395: #define MPI_Group_incl(group,n,ranks,newgroup) \
396:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
397:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (n),\
398:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (ranks),\
399:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newgroup),\
400:       MPI_SUCCESS)
401: #define MPI_Testsome(incount,array_of_requests,outcount,\
402:                      array_of_indices,array_of_statuses) MPI_SUCCESS
403: #define MPI_Iprobe(source,tag,comm,flag,status) (*(flag)=0, MPI_SUCCESS)
404: #define MPI_Probe(source,tag,comm,status) MPI_SUCCESS
405: #define MPI_Cancel(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
406: #define MPI_Test_cancelled(status,flag) (*(flag)=0,MPI_SUCCESS)
407: #define MPI_Send_init(buf,count, datatype,dest,tag,comm,request) \
408:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
409:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
410:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
411:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
412:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
413:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
414:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
415:      MPI_SUCCESS)
416: #define MPI_Bsend_init(buf,count, datatype,dest,tag,comm,request) \
417:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
418:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
419:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
420:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
421:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
422:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
423:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
424:      MPI_SUCCESS)
425: #define MPI_Ssend_init(buf,count, datatype,dest,tag,comm,request) \
426:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
427:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
428:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
429:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
430:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
431:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
432:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
433:      MPI_SUCCESS)
443: #define MPI_Rsend_init(buf,count, datatype,dest,tag,comm,request) \
444:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
445:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
446:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
447:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
448:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
449:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
450:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
451:      MPI_SUCCESS)
452: #define MPI_Recv_init(buf,count, datatype,source,tag,comm,request) \
453:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
454:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
455:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
456:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
457:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
458:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
459:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
460:      MPI_SUCCESS)
461: #define MPI_Start(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
462: #define MPI_Startall(count,array_of_requests) \
463:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
464:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
465:      MPI_SUCCESS)
466: #define MPI_Op_create(function,commute,op) \
467:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (function),\
468:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (commute),\
469:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
470:      MPI_SUCCESS)
471: #define MPI_Op_free(op) \
472:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
473:      MPI_SUCCESS)
474:      /* Need to determine sizeof "sendtype" */
475: #define MPI_Sendrecv(sendbuf,sendcount, sendtype,\
476:      dest,sendtag,recvbuf,recvcount,\
477:      recvtype,source,recvtag,\
478:      comm,status) \
479:      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount) * (sendtype))
480: #define MPI_Sendrecv_replace(buf,count, datatype,dest,sendtag,\
481:      source,recvtag,comm,status) MPI_SUCCESS
482: #define MPI_Type_contiguous(count, oldtype,newtype) \
483:      (*(newtype) = (count)*(oldtype),MPI_SUCCESS)
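Because datatypes are byte counts, the derived-type constructors compose naturally;
for instance (an illustrative sketch):

    MPI_Datatype three_ints;
    MPI_Type_contiguous(3,MPI_INT,&three_ints);  /* sets three_ints = 3*sizeof(int) */
    MPI_Type_commit(&three_ints);                /* no-op, evaluates to MPI_SUCCESS */

and three_ints can then be used with the MPIUNI_Memcpy-based collectives just like a
built-in datatype.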
484: #define MPI_Type_vector(count,blocklength,stride,oldtype, newtype) MPI_SUCCESS
485: #define MPI_Type_hvector(count,blocklength,stride,oldtype, newtype) MPI_SUCCESS
486: #define MPI_Type_indexed(count,array_of_blocklengths,\
487:      array_of_displacements, oldtype,\
488:      newtype) MPI_SUCCESS
489: #define MPI_Type_hindexed(count,array_of_blocklengths,\
490:      array_of_displacements, oldtype,\
491:      newtype) MPI_SUCCESS
492: #define MPI_Type_struct(count,array_of_blocklengths,\
493:      array_of_displacements,\
494:      array_of_types, newtype) MPI_SUCCESS
495: #define MPI_Address(location,address) \
496:      (*(address) = (MPIUNI_INTPTR)(char *)(location),MPI_SUCCESS)
497: #define MPI_Type_extent(datatype,extent) \
498:      MPI_Abort(MPI_COMM_WORLD,0)
499: #define MPI_Type_size(datatype,size) \
500:      MPI_Abort(MPI_COMM_WORLD,0)
501: #define MPI_Type_lb(datatype,displacement) \
502:      MPI_Abort(MPI_COMM_WORLD,0)
503: #define MPI_Type_ub(datatype,displacement) \
504:      MPI_Abort(MPI_COMM_WORLD,0)
505: #define MPI_Type_commit(datatype) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
506:      MPI_SUCCESS)
507: #define MPI_Type_free(datatype) MPI_SUCCESS
508: #define MPI_Get_elements(status, datatype,count) \
509:      MPI_Abort(MPI_COMM_WORLD,0)
510: #define MPI_Pack(inbuf,incount, datatype,outbuf,\
511:      outsize,position, comm) \
512:      MPI_Abort(MPI_COMM_WORLD,0)
513: #define MPI_Unpack(inbuf,insize,position,outbuf,\
514:      outcount, datatype,comm) \
515:      MPI_Abort(MPI_COMM_WORLD,0)
516: #define MPI_Pack_size(incount, datatype,comm,size) \
517:      MPI_Abort(MPI_COMM_WORLD,0)
518: #define MPI_Barrier(comm) \
519:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
520:      MPI_SUCCESS)
521: #define MPI_Bcast(buffer,count,datatype,root,comm) \
522:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
523:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
524:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
525:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
526:      MPI_SUCCESS)
527: #define MPI_Gather(sendbuf,sendcount, sendtype,\
528:      recvbuf,recvcount, recvtype,\
529:      root,comm) \
530:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
531:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
532:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
533:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
534:      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)* (sendtype)),\
535:      MPI_SUCCESS)
536: #define MPI_Gatherv(sendbuf,sendcount, sendtype,\
537:      recvbuf,recvcounts,displs,\
538:      recvtype,root,comm) \
539:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
540:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
541:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
542:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
543:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
544:      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)* (sendtype)),\
545:      MPI_SUCCESS)
546: #define MPI_Scatter(sendbuf,sendcount, sendtype,\
547:      recvbuf,recvcount, recvtype,\
548:      root,comm) \
549:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendbuf),\
550:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcount),\
551:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
552:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvbuf),\
553:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
554:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
555:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
556:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_Abort(MPI_COMM_WORLD,0))
557: #define MPI_Scatterv(sendbuf,sendcounts,displs,\
558:      sendtype, recvbuf,recvcount,\
559:      recvtype,root,comm) \
560:      (MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*(recvtype)),\
561:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
562:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
563:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcounts),\
564:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
565:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
566:      MPI_SUCCESS)
567: #define MPI_Allgather(sendbuf,sendcount, sendtype,\
568:      recvbuf,recvcount, recvtype,comm) \
569:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
570:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
571:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
572:      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)* (sendtype)),\
573:      MPI_SUCCESS)
574: #define MPI_Allgatherv(sendbuf,sendcount, sendtype,\
575:      recvbuf,recvcounts,displs,recvtype,comm) \
576:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
577:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
578:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
579:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
580:      (sendbuf != MPI_IN_PLACE) ?  MPIUNI_Memcpy((recvbuf),(sendbuf),(sendcount)* (sendtype)) : 0, \
581:      MPI_SUCCESS)
582: #define MPI_Alltoall(sendbuf,sendcount, sendtype,\
583:      recvbuf,recvcount, recvtype,comm) \
584:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
585:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
586:       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
587:       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)* (sendtype)),\
588:       MPI_SUCCESS)
589: #define MPI_Alltoallv(sendbuf,sendcounts,sdispls,\
590:      sendtype, recvbuf,recvcounts,\
591:      rdispls, recvtype,comm) MPI_Abort(MPI_COMM_WORLD,0)
592: #define MPI_Alltoallw(sendbuf,sendcounts,sdispls,\
593:      sendtypes, recvbuf,recvcounts,\
594:      rdispls, recvtypes,comm) MPI_Abort(MPI_COMM_WORLD,0)
595: #define MPI_Reduce(sendbuf, recvbuf,count,\
596:      datatype,op,root,comm) \
597:      (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype)),\
598:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
599: #define MPI_Allreduce(sendbuf, recvbuf,count,datatype,op,comm) \
600:      (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype)),\
601:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
602: #define MPI_Scan(sendbuf, recvbuf,count,datatype,op,comm) \
603:      (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype)),\
604:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
605: #define MPI_Reduce_scatter(sendbuf, recvbuf,recvcounts,\
606:      datatype,op,comm) \
607:      MPI_Abort(MPI_COMM_WORLD,0)
608: #define MPI_Group_size(group,size) (*(size)=1,MPI_SUCCESS)
609: #define MPI_Group_rank(group,rank) (*(rank)=0,MPI_SUCCESS)
610: #define MPI_Group_translate_ranks(group1,n,ranks1,\
611:      group2,ranks2) MPI_Abort(MPI_COMM_WORLD,0)
612: #define MPI_Group_compare(group1,group2,result) \
613:      (*(result)=1,MPI_SUCCESS)
614: #define MPI_Group_union(group1,group2,newgroup) MPI_SUCCESS
615: #define MPI_Group_intersection(group1,group2,newgroup) MPI_SUCCESS
616: #define MPI_Group_difference(group1,group2,newgroup) MPI_SUCCESS
617: #define MPI_Group_excl(group,n,ranks,newgroup) MPI_SUCCESS
618: #define MPI_Group_range_incl(group,n,ranges,newgroup) MPI_SUCCESS
619: #define MPI_Group_range_excl(group,n,ranges,newgroup) MPI_SUCCESS
620: #define MPI_Group_free(group) \
621:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
622:      MPI_SUCCESS)
623: #define MPI_Comm_size(comm,size) \
624:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
625:      *(size)=1,\
626:      MPI_SUCCESS)
627: #define MPI_Comm_rank(comm,rank) \
628:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
629:      *(rank)=0,\
630:      MPI_SUCCESS)
631: #define MPI_Comm_compare(comm1,comm2,result) \
632:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm1),\
633:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm2),\
634:      *(result)=MPI_IDENT,\
635:      MPI_SUCCESS)
636: #define MPI_Comm_split(comm,color,key,newcomm) MPI_SUCCESS
637: #define MPI_Comm_test_inter(comm,flag) (*(flag)=1,MPI_SUCCESS)
638: #define MPI_Comm_remote_size(comm,size) (*(size)=1,MPI_SUCCESS)
639: #define MPI_Comm_remote_group(comm,group) MPI_SUCCESS
640: #define MPI_Intercomm_create(local_comm,local_leader,peer_comm,\
641:      remote_leader,tag,newintercomm) MPI_SUCCESS
642: #define MPI_Intercomm_merge(intercomm,high,newintracomm) MPI_SUCCESS

644: #define MPI_Topo_test(comm,status) MPI_SUCCESS
645: #define MPI_Cart_create(comm_old,ndims,dims,periods,\
646:      reorder,comm_cart) MPI_SUCCESS
647: #define MPI_Dims_create(nnodes,ndims,dims) MPI_Abort(MPI_COMM_WORLD,0)
648: #define MPI_Graph_create(comm,a,b,c,d,e) MPI_SUCCESS
649: #define MPI_Graphdims_get(comm,nnodes,nedges) MPI_Abort(MPI_COMM_WORLD,0)
650: #define MPI_Graph_get(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
651: #define MPI_Cartdim_get(comm,ndims) MPI_Abort(MPI_COMM_WORLD,0)
652: #define MPI_Cart_get(comm,maxdims,dims,periods,coords) \
653:      MPI_Abort(MPI_COMM_WORLD,0)
654: #define MPI_Cart_rank(comm,coords,rank) MPI_Abort(MPI_COMM_WORLD,0)
655: #define MPI_Cart_coords(comm,rank,maxdims,coords) \
656:      MPI_Abort(MPI_COMM_WORLD,0)
657: #define MPI_Graph_neighbors_count(comm,rank,nneighbors) \
658:      MPI_Abort(MPI_COMM_WORLD,0)
659: #define MPI_Graph_neighbors(comm,rank,maxneighbors,neighbors) \
660:      MPI_Abort(MPI_COMM_WORLD,0)
661: #define MPI_Cart_shift(comm,direction,disp,rank_source,rank_dest) \
662:      MPI_Abort(MPI_COMM_WORLD,0)
663: #define MPI_Cart_sub(comm,remain_dims,newcomm) MPI_Abort(MPI_COMM_WORLD,0)
664: #define MPI_Cart_map(comm,ndims,dims,periods,newrank) MPI_Abort(MPI_COMM_WORLD,0)
665: #define MPI_Graph_map(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
666: #define MPI_Get_processor_name(name,result_len) \
667:      (MPIUNI_Memcpy(name,"localhost",10*sizeof(char)),*(result_len) = 9,MPI_SUCCESS)
668: #define MPI_Errhandler_create(function,errhandler) \
669:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (errhandler),\
670:      MPI_SUCCESS)
671: #define MPI_Errhandler_set(comm,errhandler) \
672:      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
673:      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (errhandler),\
674:      MPI_SUCCESS)
675: #define MPI_Errhandler_get(comm,errhandler) MPI_SUCCESS
676: #define MPI_Errhandler_free(errhandler) MPI_SUCCESS
677: #define MPI_Error_string(errorcode,string,result_len) MPI_SUCCESS
678: #define MPI_Error_class(errorcode,errorclass) MPI_SUCCESS
679: #define MPI_Wtick() 1.0
680: #define MPI_Wtime() 0.0
681: #define MPI_Pcontrol(level) MPI_SUCCESS

683: #define MPI_NULL_COPY_FN   0
684: #define MPI_NULL_DELETE_FN 0

686:   /* MPI-IO additions */

688: #define MPI_File_open(comm,filename,amode,info,mpi_fh) \
689:   (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),  \
690:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (filename), \
691:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (amode), \
692:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (info), \
693:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh), \
694:    MPI_Abort(MPI_COMM_WORLD,0))

696: #define MPI_File_close(mpi_fh) \
697:   (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),  \
698:    MPI_Abort(MPI_COMM_WORLD,0))

700: #define MPI_File_set_view(mpi_fh,disp,etype,filetype,datarep,info) \
701:   (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),  \
702:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (disp), \
703:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (etype), \
704:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (filetype), \
705:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datarep), \
706:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (info), \
707:    MPI_Abort(MPI_COMM_WORLD,0))

709: #define MPI_Type_get_extent(datatype,lb,extent) \
710:   (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),      \
711:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (lb),     \
712:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (extent), \
713:    MPI_Abort(MPI_COMM_WORLD,0))

715: #define MPI_File_write_all(mpi_fh,buf,count,datatype,status) \
716:   (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),             \
717:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf), \
718:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count), \
719:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype), \
720:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status), \
721:    MPI_Abort(MPI_COMM_WORLD,0))

723: #define MPI_File_read_all(mpi_fh,buf,count,datatype,status) \
724:   (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),            \
725:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf), \
726:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count), \
727:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype), \
728:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status), \
729:    MPI_Abort(MPI_COMM_WORLD,0))

731:   /* called from PetscInitialize() - so return success */
732: #define MPI_Register_datarep(name,read_conv_fn,write_conv_fn,extent_fn,state) \
733:   (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (name),                          \
734:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (read_conv_fn), \
735:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (write_conv_fn), \
736:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (extent_fn), \
737:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (state), \
738:    MPI_SUCCESS)

740: #define MPI_Type_create_subarray(ndims,array_of_sizes,array_of_subsizes,array_of_starts,order,oldtype,newtype) \
741:   (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (ndims),                         \
742:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_sizes), \
743:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_subsizes), \
744:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_starts), \
745:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (order), \
746:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (oldtype), \
747:    MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newtype), \
748:    MPI_Abort(MPI_COMM_WORLD,0))

750: #if defined(__cplusplus)
751: }
752: #endif
753: #endif