Actual source code: Partitioner.hh
#ifndef included_ALE_Partitioner_hh
#define included_ALE_Partitioner_hh

#include <petscvec.h>

/* Chaco does not have an include file */
extern "C" {
  extern int interface(int nvtxs, int *start, int *adjacency, int *vwgts,
                       float *ewgts, float *x, float *y, float *z, char *outassignname,
                       char *outfilename, short *assignment, int architecture, int ndims_tot,
                       int mesh_dims[3], double *goal, int global_method, int local_method,
                       int rqi_flag, int vmax, int ndims, double eigtol, long seed);

  extern int FREE_GRAPH;
}
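
// Predicate for OverlapType::coneContains(): matches overlap cone points whose
// first component identifies the given graph (0 = serial side, 1 = parallel side).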
template<typename OverlapType>
struct findNeighbor {
  int graph;

  findNeighbor(int graph) : graph(graph) {};
  bool operator()(const typename OverlapType::traits::source_type& p, const typename OverlapType::traits::target_type& t) const {
    return p.first == graph;
  };
};
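
// Functor applied to each overlap cone point via coneApply(): for the current
// neighbor it accumulates the number of indices to buy and sell on the serial
// (A) and parallel (B) sides, and advances the running offsets used later to
// size the cone buffers. foundA/foundB record whether each side participated.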
template<typename OverlapType, typename FieldType>
struct calcNeighborSize {
  ALE::Obj<FieldType> serialSifter;
  ALE::Obj<typename FieldType::order_type::baseSequence> serialPatches;
  int *BuySizesA;
  int *SellSizesA;
  int& offsetA;
  int  foundA;
  ALE::Obj<FieldType> parallelSifter;
  ALE::Obj<typename FieldType::order_type::baseSequence> parallelPatches;
  int *BuySizesB;
  int *SellSizesB;
  int& offsetB;
  int  foundB;

  calcNeighborSize(ALE::Obj<FieldType> serialSifter, ALE::Obj<typename FieldType::order_type::baseSequence> serialPatches, int *BuySizesA, int *SellSizesA, int& offsetA,
                   ALE::Obj<FieldType> parallelSifter, ALE::Obj<typename FieldType::order_type::baseSequence> parallelPatches, int *BuySizesB, int *SellSizesB, int& offsetB) :
    serialSifter(serialSifter), serialPatches(serialPatches), BuySizesA(BuySizesA), SellSizesA(SellSizesA), offsetA(offsetA),
    parallelSifter(parallelSifter), parallelPatches(parallelPatches), BuySizesB(BuySizesB), SellSizesB(SellSizesB), offsetB(offsetB) {foundA = 0; foundB = 0;};
  void operator()(const typename OverlapType::traits::source_type& p, const typename OverlapType::traits::target_type& t) {
    if (p.first == 0) {
      const ALE::Obj<typename FieldType::order_type::supportSequence>& patches = serialSifter->__getOrder()->support(p.second);

      for(typename FieldType::order_type::supportSequence::iterator sp_iter = patches->begin(); sp_iter != patches->end(); ++sp_iter) {
        // Assume the same index sizes
        int idxSize = serialSifter->getIndex(*sp_iter, p.second).index;

        BuySizesA[0]  += idxSize;
        SellSizesA[0] += idxSize;
        offsetA       += idxSize;
        foundA = 1;
      }
    } else {
      const ALE::Obj<typename FieldType::order_type::supportSequence>& patches = parallelSifter->__getOrder()->support(p.second);

      for(typename FieldType::order_type::supportSequence::iterator pp_iter = patches->begin(); pp_iter != patches->end(); ++pp_iter) {
        // Assume the same index sizes
        int idxSize = parallelSifter->getIndex(*pp_iter, p.second).index;

        BuySizesB[0]  += idxSize;
        SellSizesB[0] += idxSize;
        offsetB       += idxSize;
        foundB = 1;
      }
    }
  };
};
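
// Companion to calcNeighborSize: a second pass over the same cone points that
// writes the actual storage indices (patch offset + index range) to sell on
// each side into SellConesA and SellConesB.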
template<typename OverlapType, typename FieldType>
struct fillNeighborCones {
  ALE::Obj<FieldType> serialSifter;
  ALE::Obj<typename FieldType::order_type::baseSequence> serialPatches;
  std::map<typename FieldType::patch_type,int>& serialOffsets;
  int *SellConesA;
  int& offsetA;
  ALE::Obj<FieldType> parallelSifter;
  ALE::Obj<typename FieldType::order_type::baseSequence> parallelPatches;
  std::map<typename FieldType::patch_type,int>& parallelOffsets;
  int *SellConesB;
  int& offsetB;

  fillNeighborCones(ALE::Obj<FieldType> serialSifter, ALE::Obj<typename FieldType::order_type::baseSequence> serialPatches, std::map<typename FieldType::patch_type,int>& serialOffsets, int *SellConesA, int& offsetA,
                    ALE::Obj<FieldType> parallelSifter, ALE::Obj<typename FieldType::order_type::baseSequence> parallelPatches, std::map<typename FieldType::patch_type,int>& parallelOffsets, int *SellConesB, int& offsetB) :
    serialSifter(serialSifter), serialPatches(serialPatches), serialOffsets(serialOffsets), SellConesA(SellConesA), offsetA(offsetA),
    parallelSifter(parallelSifter), parallelPatches(parallelPatches), parallelOffsets(parallelOffsets), SellConesB(SellConesB), offsetB(offsetB) {};
  void operator()(const typename OverlapType::traits::source_type& p, const typename OverlapType::traits::target_type& t) {
    if (p.first == 0) {
      const ALE::Obj<typename FieldType::order_type::supportSequence>& patches = serialSifter->__getOrder()->support(p.second);

      for(typename FieldType::order_type::supportSequence::iterator sp_iter = patches->begin(); sp_iter != patches->end(); ++sp_iter) {
        const typename FieldType::index_type& idx = serialSifter->getIndex(*sp_iter, p.second);

        if (serialSifter->debug) {
          std::ostringstream txt;

          txt << "["<<serialSifter->commRank()<<"]Packing A patch " << *sp_iter << " point " << p.second << " index " << idx << "(" << serialOffsets[*sp_iter] << ") for " << t << std::endl;
          PetscErrorCode ierr = PetscSynchronizedPrintf(serialSifter->comm(), txt.str().c_str());ALE::CHKERROR(ierr, "Error in PetscSynchronizedPrintf");
        }
        for(int i = serialOffsets[*sp_iter]+idx.prefix; i < serialOffsets[*sp_iter]+idx.prefix+idx.index; ++i) {
          SellConesA[offsetA++] = i;
        }
      }
    } else {
      const ALE::Obj<typename FieldType::order_type::supportSequence>& patches = parallelSifter->__getOrder()->support(p.second);

      for(typename FieldType::order_type::supportSequence::iterator pp_iter = patches->begin(); pp_iter != patches->end(); ++pp_iter) {
        const typename FieldType::index_type& idx = parallelSifter->getIndex(*pp_iter, p.second);

        if (parallelSifter->debug) {
          std::ostringstream txt;

          txt << "["<<parallelSifter->commRank()<<"]Packing B patch " << *pp_iter << " point " << p.second << " index " << idx << "(" << parallelOffsets[*pp_iter] << ") for " << t << std::endl;
          PetscErrorCode ierr = PetscSynchronizedPrintf(parallelSifter->comm(), txt.str().c_str());ALE::CHKERROR(ierr, "Error in PetscSynchronizedPrintf");
        }
        for(int i = parallelOffsets[*pp_iter]+idx.prefix; i < parallelOffsets[*pp_iter]+idx.prefix+idx.index; ++i) {
          SellConesB[offsetB++] = i;
        }
      }
    }
  };
};
namespace ALE {
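  // Distributer moves a sifter between process layouts: distribute() performs
  // a cone completion to ship partition cones to their new owners, strips the
  // partition points, and then a support completion to rebuild the local
  // topology; createMappingStoP() converts the resulting overlap into a PETSc
  // VecScatter from the serial storage to the parallel storage.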
  template<typename Sifter_>
  class Distributer {
  public:
    typedef Sifter_ sifter_type;
    typedef RightSequenceDuplicator<ConeArraySequence<typename sifter_type::traits::arrow_type> > fuser;
    typedef ParConeDelta<sifter_type, fuser,
                         typename sifter_type::template rebind<typename fuser::fusion_source_type,
                                                               typename fuser::fusion_target_type,
                                                               typename fuser::fusion_color_type,
                                                               ::boost::multi_index::composite_key_compare<std::less<typename fuser::fusion_source_type>,
                                                                                                           std::less<typename fuser::fusion_color_type>,
                                                                                                           std::less<typename fuser::fusion_target_type> >,
                                                               typename sifter_type::traits::cap_container_type::template rebind<typename fuser::fusion_source_type, typename sifter_type::traits::sourceRec_type::template rebind<typename fuser::fusion_source_type>::type>::type,
                                                               typename sifter_type::traits::base_container_type::template rebind<typename fuser::fusion_target_type, typename sifter_type::traits::targetRec_type::template rebind<typename fuser::fusion_target_type>::type>::type
                         >::type> coneDelta_type;
    typedef ParSupportDelta<sifter_type, fuser,
                            typename sifter_type::template rebind<typename fuser::fusion_source_type,
                                                                  typename fuser::fusion_target_type,
                                                                  typename fuser::fusion_color_type,
                                                                  ::boost::multi_index::composite_key_compare<std::less<typename fuser::fusion_source_type>,
                                                                                                              std::less<typename fuser::fusion_color_type>,
                                                                                                              std::less<typename fuser::fusion_target_type> >,
                                                                  typename sifter_type::traits::cap_container_type::template rebind<typename fuser::fusion_source_type, typename sifter_type::traits::sourceRec_type::template rebind<typename fuser::fusion_source_type>::type>::type,
                                                                  typename sifter_type::traits::base_container_type::template rebind<typename fuser::fusion_target_type, typename sifter_type::traits::targetRec_type::template rebind<typename fuser::fusion_target_type>::type>::type
                            >::type> supportDelta_type;
    typedef typename supportDelta_type::bioverlap_type overlap_type;
  public:
    static Obj<overlap_type> distribute(Obj<sifter_type> oldSifter, Obj<sifter_type> newSifter, bool restrict = true) {
      ALE_LOG_EVENT_BEGIN;
      // Construct a Delta object and a base overlap object
      coneDelta_type::setDebug(oldSifter->debug);
      Obj<typename coneDelta_type::bioverlap_type> overlap = coneDelta_type::overlap(oldSifter, newSifter);
      // Cone complete to move the partitions to the other processors
      Obj<typename coneDelta_type::fusion_type> fusion = coneDelta_type::fusion(oldSifter, newSifter, overlap);
      // Merge in the completion
      newSifter->add(fusion);
      if (oldSifter->debug) {
        overlap->view("Initial overlap");
        fusion->view("Initial fusion");
        newSifter->view("After merging initial fusion");
      }
      // Remove partition points
      for(int p = 0; p < oldSifter->commSize(); ++p) {
        oldSifter->removeBasePoint(typename sifter_type::traits::target_type(-p));
        newSifter->removeBasePoint(typename sifter_type::traits::target_type(-p));
      }
      // Support complete to build the local topology
      supportDelta_type::setDebug(oldSifter->debug);
      Obj<typename supportDelta_type::bioverlap_type> overlap2 = supportDelta_type::overlap(oldSifter, newSifter);
      Obj<typename supportDelta_type::fusion_type>    fusion2  = supportDelta_type::fusion(oldSifter, newSifter, overlap2);
      newSifter->add(fusion2, true && restrict);
      if (oldSifter->debug) {
        overlap2->view("Second overlap");
        fusion2->view("Second fusion");
        newSifter->view("After merging second fusion");
      }
      ALE_LOG_EVENT_END;
      return overlap2;
    };
    // ERROR: This currently only works for a single patch
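    // The construction proceeds in five logged stages: (I) lay out serial and
    // parallel storage and wrap it in Vecs, (II) count and record the
    // neighboring ranks on the serial (A) and parallel (B) sides, (III) size
    // the buy/sell lists with calcNeighborSize, (IV) fill the sell cones with
    // fillNeighborCones, and (V) assemble the VecScatter, optionally moving
    // the data immediately when doExchange is set.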
    template<typename FieldType, typename OverlapType>
    static VecScatter createMappingStoP(Obj<FieldType> serialSifter, Obj<FieldType> parallelSifter, Obj<OverlapType> overlap, bool doExchange = false) {
      ALE_LOG_EVENT_BEGIN;
      {
        ALE::LogEvent event = ALE::LogEventRegister("createMappingI");
        ALE::LogEventBegin(event);
      }
      VecScatter scatter;
      Obj<typename OverlapType::traits::baseSequence> neighbors = overlap->base();
      MPI_Comm comm  = serialSifter->comm();
      int      debug = serialSifter->debug;
      Vec      serialVec, parallelVec;
      PetscErrorCode ierr;

      if (serialSifter->debug && !serialSifter->commRank()) {PetscSynchronizedPrintf(serialSifter->comm(), "Creating mapping\n");}
      // Use an MPI vector for the serial data since it has no overlap
      if (serialSifter->debug && !serialSifter->commRank()) {PetscSynchronizedPrintf(serialSifter->comm(), " Creating serial indices\n");}
      if (serialSifter->debug) {
        serialSifter->view("SerialSifter");
        overlap->view("Partition Overlap");
      }
      // We may restrict to the first patch, since they are allocated in order
      Obj<typename FieldType::order_type::baseSequence> serialPatches   = serialSifter->getPatches();
      Obj<typename FieldType::order_type::baseSequence> parallelPatches = parallelSifter->getPatches();

      std::map<typename FieldType::patch_type,int> serialOffsets;
      int serialSize = 0;

      for(typename FieldType::order_type::baseSequence::iterator p_iter = serialPatches->begin(); p_iter != serialPatches->end(); ++p_iter) {
        serialOffsets[*p_iter] = serialSize;
        serialSize += serialSifter->getSize(*p_iter);
      }
      ierr = VecCreateMPIWithArray(serialSifter->comm(), serialSize, PETSC_DETERMINE, serialSifter->restrict(*serialPatches->begin(), false), &serialVec);CHKERROR(ierr, "Error in VecCreate");
      // Use individual serial vectors for each of the parallel domains
      if (serialSifter->debug && !serialSifter->commRank()) {PetscSynchronizedPrintf(serialSifter->comm(), " Creating parallel indices\n");}
      std::map<typename FieldType::patch_type,int> parallelOffsets;
      int parallelSize = 0;

      for(typename FieldType::order_type::baseSequence::iterator p_iter = parallelPatches->begin(); p_iter != parallelPatches->end(); ++p_iter) {
        parallelOffsets[*p_iter] = parallelSize;
        parallelSize += parallelSifter->getSize(*p_iter);
      }
      ierr = VecCreateSeqWithArray(PETSC_COMM_SELF, parallelSize, parallelSifter->restrict(*parallelPatches->begin(), false), &parallelVec);CHKERROR(ierr, "Error in VecCreate");
      int NeighborCountA = 0, NeighborCountB = 0;
      for(typename OverlapType::traits::baseSequence::iterator neighbor = neighbors->begin(); neighbor != neighbors->end(); ++neighbor) {
        if (overlap->coneContains(*neighbor, findNeighbor<OverlapType>(0))) NeighborCountA++;
        if (overlap->coneContains(*neighbor, findNeighbor<OverlapType>(1))) NeighborCountB++;
      }
      {
        ALE::LogEvent event = ALE::LogEventRegister("createMappingI");
        ALE::LogEventEnd(event);
      }
      {
        ALE::LogEvent event = ALE::LogEventRegister("createMappingII");
        ALE::LogEventBegin(event);
      }
      int *NeighborsA, *NeighborsB; // Neighbor processes
      int *SellSizesA, *BuySizesA;  // Sizes of the A cones to transmit and B cones to receive
      int *SellSizesB, *BuySizesB;  // Sizes of the B cones to transmit and A cones to receive
      int *SellConesA = PETSC_NULL;
      int *SellConesB = PETSC_NULL;
      int  nA, nB, offsetA, offsetB;
      ierr = PetscMalloc2(NeighborCountA,int,&NeighborsA,NeighborCountB,int,&NeighborsB);CHKERROR(ierr, "Error in PetscMalloc");
      ierr = PetscMalloc2(NeighborCountA,int,&SellSizesA,NeighborCountA,int,&BuySizesA);CHKERROR(ierr, "Error in PetscMalloc");
      ierr = PetscMalloc2(NeighborCountB,int,&SellSizesB,NeighborCountB,int,&BuySizesB);CHKERROR(ierr, "Error in PetscMalloc");

      nA = 0;
      nB = 0;
      for(typename OverlapType::traits::baseSequence::iterator neighbor = neighbors->begin(); neighbor != neighbors->end(); ++neighbor) {
        if (overlap->coneContains(*neighbor, findNeighbor<OverlapType>(0))) {
          NeighborsA[nA] = *neighbor;
          BuySizesA[nA]  = 0;
          SellSizesA[nA] = 0;
          nA++;
        }
        if (overlap->coneContains(*neighbor, findNeighbor<OverlapType>(1))) {
          NeighborsB[nB] = *neighbor;
          BuySizesB[nB]  = 0;
          SellSizesB[nB] = 0;
          nB++;
        }
      }
      if ((nA != NeighborCountA) || (nB != NeighborCountB)) {
        throw ALE::Exception("Invalid neighbor count");
      }
      {
        ALE::LogEvent event = ALE::LogEventRegister("createMappingII");
        ALE::LogEventEnd(event);
      }
      {
        ALE::LogEvent event = ALE::LogEventRegister("createMappingIII");
        ALE::LogEventBegin(event);
      }
      nA = 0;
      offsetA = 0;
      nB = 0;
      offsetB = 0;
      for(typename OverlapType::traits::baseSequence::iterator neighbor = neighbors->begin(); neighbor != neighbors->end(); ++neighbor) {
        calcNeighborSize<OverlapType, FieldType> processor(serialSifter, serialPatches, &BuySizesA[nA], &SellSizesA[nA], offsetA,
                                                           parallelSifter, parallelPatches, &BuySizesB[nB], &SellSizesB[nB], offsetB);

        overlap->coneApply(*neighbor, processor);
        if (processor.foundA) nA++;
        if (processor.foundB) nB++;
      }
      {
        ALE::LogEvent event = ALE::LogEventRegister("createMappingIII");
        ALE::LogEventEnd(event);
      }
      {
        ALE::LogEvent event = ALE::LogEventRegister("createMappingIV");
        ALE::LogEventBegin(event);
      }
      ierr = PetscMalloc2(offsetA,int,&SellConesA,offsetB,int,&SellConesB);CHKERROR(ierr, "Error in PetscMalloc");
      offsetA = 0;
      offsetB = 0;
      for(typename OverlapType::traits::baseSequence::iterator neighbor = neighbors->begin(); neighbor != neighbors->end(); ++neighbor) {
        fillNeighborCones<OverlapType, FieldType> processor(serialSifter, serialPatches, serialOffsets, SellConesA, offsetA,
                                                            parallelSifter, parallelPatches, parallelOffsets, SellConesB, offsetB);

        overlap->coneApply(*neighbor, processor);
      }
      if (debug) {
        ierr = PetscSynchronizedFlush(comm);CHKERROR(ierr, "Error in PetscSynchronizedFlush");
      }
      {
        ALE::LogEvent event = ALE::LogEventRegister("createMappingIV");
        ALE::LogEventEnd(event);
      }
      {
        ALE::LogEvent event = ALE::LogEventRegister("createMappingV");
        ALE::LogEventBegin(event);
      }
      ierr = VecScatterCreateEmpty(comm, &scatter);CHKERROR(ierr, "Error in VecScatterCreate");
      scatter->from_n = serialSize;
      scatter->to_n   = parallelSize;
      ierr = VecScatterCreateLocal_PtoS(NeighborCountA, SellSizesA, NeighborsA, SellConesA, NeighborCountB, SellSizesB, NeighborsB, SellConesB, 1, scatter);CHKERROR(ierr, "Error in VecScatterCreate");

      if (doExchange) {
        if (serialSifter->debug && !serialSifter->commRank()) {PetscSynchronizedPrintf(serialSifter->comm(), " Exchanging data\n");}
        ierr = VecScatterBegin(serialVec, parallelVec, INSERT_VALUES, SCATTER_FORWARD, scatter);CHKERROR(ierr, "Error in VecScatter");
        ierr = VecScatterEnd(serialVec, parallelVec, INSERT_VALUES, SCATTER_FORWARD, scatter);CHKERROR(ierr, "Error in VecScatter");
      }

      ierr = VecDestroy(serialVec);CHKERROR(ierr, "Error in VecDestroy");
      ierr = VecDestroy(parallelVec);CHKERROR(ierr, "Error in VecDestroy");
      {
        ALE::LogEvent event = ALE::LogEventRegister("createMappingV");
        ALE::LogEventEnd(event);
      }
      ALE_LOG_EVENT_END;
      return scatter;
    };
  };
  template<typename Mesh_> class MeshPartitioner {
  public:
    typedef Mesh_ mesh_type;
    typedef typename mesh_type::sieve_type sieve_type;
    typedef typename mesh_type::field_type::order_type sifter_type;
  private:
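    // Induce a partition computed on rank 0 onto the sieve: each element's
    // closure is added to the cone of partition point -p, where p is the rank
    // assigned to that element, and every process registers its own partition
    // point in the new sieve so the subsequent completion can route the data.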
    static void partition_Induced(short assignment[], Obj<mesh_type> oldMesh, Obj<sieve_type> oldSieve, Obj<sieve_type> newSieve) {
      Obj<typename sieve_type::traits::heightSequence> elements = oldSieve->heightStratum(0);
      Obj<typename mesh_type::bundle_type> elementBundle = oldMesh->getBundle(oldSieve->depth());
      typename mesh_type::patch_type patch;

      for(typename sieve_type::traits::heightSequence::iterator e_iter = elements->begin(); e_iter != elements->end(); ++e_iter) {
        if (*e_iter >= 0) {
          oldSieve->addCone(oldSieve->closure(*e_iter), typename mesh_type::point_type(-assignment[elementBundle->getIndex(patch, *e_iter).prefix]));
        }
      }
      typename mesh_type::point_type partitionPoint(-newSieve->commRank());

      newSieve->addBasePoint(partitionPoint);
      if (oldSieve->debug) {
        oldSieve->view("Partition of old sieve");
        newSieve->view("Partition of new sieve");
      }
    };
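    // The same induction for a sifter (e.g. a field order): only closure
    // points actually present in the sifter's cap are attached to the
    // partition point, since the sifter need not cover the whole topology.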
    static void partition_Induced(short assignment[], Obj<mesh_type> oldMesh, Obj<sifter_type> oldSifter, Obj<sifter_type> newSifter) {
      Obj<typename mesh_type::sieve_type> oldSieve = oldMesh->getTopology();
      Obj<typename sieve_type::traits::heightSequence> elements = oldSieve->heightStratum(0);
      Obj<typename mesh_type::bundle_type> elementBundle = oldMesh->getBundle(oldSieve->depth());
      Obj<typename sifter_type::traits::capSequence> cap = oldSifter->cap();
      typename mesh_type::patch_type patch;

      for(typename sieve_type::traits::heightSequence::iterator e_iter = elements->begin(); e_iter != elements->end(); ++e_iter) {
        if (*e_iter >= 0) {
          Obj<typename sieve_type::coneSet> closure = oldSieve->closure(*e_iter);
          typename mesh_type::point_type partitionPoint(-assignment[elementBundle->getIndex(patch, *e_iter).prefix]);

          for(typename sieve_type::coneSet::iterator c_iter = closure->begin(); c_iter != closure->end(); ++c_iter) {
            if (cap->contains(*c_iter)) {
              oldSifter->addCone(*c_iter, partitionPoint);
            }
          }
        }
      }
      typename mesh_type::point_type partitionPoint(-1, newSifter->commRank());

      newSifter->addBasePoint(partitionPoint);
      if (oldSifter->debug) {
        oldSifter->view("Partition of old sifter");
        newSifter->view("Partition of new sifter");
      }
    };
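    // Trivial partition: rank 0 assigns the leaves to processes in contiguous
    // blocks of nearly equal size; rank p gets leaves
    //   [ (n/s)*p + min(n%s, p), (n/s)*(p+1) + min(n%s, p+1) )
    // so, for example, n = 10 leaves on s = 3 processes gives blocks of
    // 4, 3, and 3. The assignment is then spread by partition_Induced().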
    static short *partition_Simple(Obj<mesh_type> oldMesh, Obj<sieve_type> oldSieve, Obj<sieve_type> newSieve) {
      typedef typename sieve_type::traits::target_type point_type;
      Obj<typename mesh_type::bundle_type> elementBundle = oldMesh->getBundle(oldSieve->depth());
      typename mesh_type::patch_type patch;
      short *assignment = NULL;

      ALE_LOG_EVENT_BEGIN;
      if (oldSieve->commRank() == 0) {
        int numLeaves = oldSieve->leaves()->size();
        int size      = oldSieve->commSize();

        assignment = new short[numLeaves];
        for(int p = 0; p < size; p++) {
          for(int l = (numLeaves/size)*p + PetscMin(numLeaves%size, p); l < (numLeaves/size)*(p+1) + PetscMin(numLeaves%size, p+1); l++) {
            assignment[elementBundle->getIndex(patch, point_type(l)).prefix] = p;
          }
        }
      }
      partition_Induced(assignment, oldMesh, oldSieve, newSieve);
      ALE_LOG_EVENT_END;
      return assignment;
    };
#ifdef PETSC_HAVE_CHACO
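    // Partition with Chaco: rank 0 builds the dual graph of the mesh (cells as
    // graph vertices, shared faces as edges) and hands it to Chaco's
    // interface() with global_method = 1, which in Chaco's numbering selects
    // the multilevel Kernighan-Lin scheme. Chaco reports to stdout, so stdout
    // is temporarily redirected to a pipe where unistd.h is available.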
    static short *partition_Chaco(Obj<mesh_type> oldMesh, Obj<sieve_type> oldSieve, const Obj<sieve_type> newSieve) {
      ALE_LOG_EVENT_BEGIN;
      typename mesh_type::patch_type patch;
      PetscErrorCode ierr;

      int size = oldSieve->commSize();
      short *assignment = NULL; /* set number of each vtx (length n) */
      int *offsets = NULL;

      Obj<typename sieve_type::traits::heightSequence> elements = oldSieve->heightStratum(0);
      Obj<typename sieve_type::traits::heightSequence> faces = oldSieve->heightStratum(1);
      Obj<typename mesh_type::bundle_type> vertexBundle = oldMesh->getBundle(0);
      Obj<typename mesh_type::bundle_type> elementBundle = oldMesh->getBundle(oldSieve->depth());
      if (oldSieve->commRank() == 0) {
        /* arguments for Chaco library */
        FREE_GRAPH = 0;                        /* Do not let Chaco free my memory */
        int nvtxs;                             /* number of vertices in full graph */
        int *start;                            /* start of edge list for each vertex */
        int *adjacency;                        /* = adj -> j; edge list data */
        int *vwgts = NULL;                     /* weights for all vertices */
        float *ewgts = NULL;                   /* weights for all edges */
        float *x = NULL, *y = NULL, *z = NULL; /* coordinates for inertial method */
        char *outassignname = NULL;            /* name of assignment output file */
        char *outfilename = NULL;              /* output file name */
        int architecture = 1;                  /* 0 => hypercube, d => d-dimensional mesh */
        int ndims_tot = 0;                     /* total number of cube dimensions to divide */
        int mesh_dims[3];                      /* dimensions of mesh of processors */
        double *goal = NULL;                   /* desired set sizes for each set */
        int global_method = 1;                 /* global partitioning algorithm */
        int local_method = 1;                  /* local partitioning algorithm */
        int rqi_flag = 0;                      /* should I use RQI/Symmlq eigensolver? */
        int vmax = 200;                        /* how many vertices to coarsen down to? */
        int ndims = 1;                         /* number of eigenvectors (2^d sets) */
        double eigtol = 0.001;                 /* tolerance on eigenvectors */
        long seed = 123636512;                 /* for random graph mutations */

        nvtxs = oldSieve->heightStratum(0)->size();
        start = new int[nvtxs+1];
        offsets = new int[nvtxs];
        mesh_dims[0] = size; mesh_dims[1] = 1; mesh_dims[2] = 1;
        ierr = PetscMemzero(start, (nvtxs+1) * sizeof(int));CHKERROR(ierr, "Error in PetscMemzero");
        if (oldSieve->depth() == oldMesh->getDimension()) {
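          // Fully interpolated mesh: every interior face has exactly two cells
          // in its support, and each such face yields one dual edge. The first
          // pass counts edges per cell, the second fills the adjacency (Chaco
          // uses 1-based vertex numbering, hence the +1).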
          for(typename sieve_type::traits::heightSequence::iterator f_iter = faces->begin(); f_iter != faces->end(); ++f_iter) {
            const Obj<typename sieve_type::supportSequence>& cells = oldSieve->support(*f_iter);

            if (cells->size() == 2) {
              start[elementBundle->getIndex(patch, *cells->begin()).prefix+1]++;
              start[elementBundle->getIndex(patch, *(++cells->begin())).prefix+1]++;
            }
          }
          for(int v = 1; v <= nvtxs; v++) {
            offsets[v-1] = start[v-1];
            start[v]    += start[v-1];
          }
          adjacency = new int[start[nvtxs]];
          for(typename sieve_type::traits::heightSequence::iterator f_iter = faces->begin(); f_iter != faces->end(); ++f_iter) {
            const Obj<typename sieve_type::supportSequence>& cells = oldSieve->support(*f_iter);

            if (cells->size() == 2) {
              int cellA = elementBundle->getIndex(patch, *cells->begin()).prefix;
              int cellB = elementBundle->getIndex(patch, *(++cells->begin())).prefix;

              adjacency[offsets[cellA]++] = cellB+1;
              adjacency[offsets[cellB]++] = cellA+1;
            }
          }
        } else if (oldSieve->depth() == 1) {
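          // Only cells and vertices are present, so faces cannot be traversed
          // directly: two cells are dual neighbors when they meet in a full
          // face's worth of vertices (dim for simplices, 2 for quads, 4 for
          // hexes).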
          int dim = oldMesh->getDimension();
          int corners = oldSieve->cone(*elements->begin())->size();
          int faceVertices = -1;
          std::set<int> *adj = new std::set<int>[nvtxs];

          if (corners == dim+1) {
            faceVertices = dim;
          } else if ((dim == 2) && (corners == 4)) {
            faceVertices = 2;
          } else if ((dim == 3) && (corners == 8)) {
            faceVertices = 4;
          } else {
            throw ALE::Exception("Could not determine number of face vertices");
          }
          for(typename sieve_type::traits::heightSequence::iterator e_iter = elements->begin(); e_iter != elements->end(); ++e_iter) {
            const Obj<typename sieve_type::traits::coneSequence>& vertices = oldSieve->cone(*e_iter);
            typename sieve_type::traits::coneSequence::iterator vEnd = vertices->end();

            for(typename sieve_type::traits::coneSequence::iterator v_iter = vertices->begin(); v_iter != vEnd; ++v_iter) {
              const Obj<typename sieve_type::traits::supportSequence>& neighbors = oldSieve->support(*v_iter);
              typename sieve_type::traits::supportSequence::iterator nEnd = neighbors->end();

              for(typename sieve_type::traits::supportSequence::iterator n_iter = neighbors->begin(); n_iter != nEnd; ++n_iter) {
                if (*e_iter == *n_iter) continue;
                if ((int) oldSieve->meet(*e_iter, *n_iter)->size() == faceVertices) {
                  adj[elementBundle->getIndex(patch, *e_iter).prefix].insert(elementBundle->getIndex(patch, *n_iter).prefix);
                }
              }
            }
          }
          start[0] = 0;
          for(int v = 1; v <= nvtxs; v++) {
            start[v] = adj[v-1].size() + start[v-1];
          }
          adjacency = new int[start[nvtxs]];
          int offset = 0;
          for(int v = 0; v < nvtxs; v++) {
            for(typename std::set<int>::iterator n_iter = adj[v].begin(); n_iter != adj[v].end(); ++n_iter) {
              adjacency[offset++] = *n_iter+1;
            }
          }
          delete [] adj;
          if (offset != start[nvtxs]) {
            std::ostringstream msg;

            msg << "ERROR: Number of neighbors " << offset << " does not match the offset array " << start[nvtxs];
            throw ALE::Exception(msg.str().c_str());
          }
        } else {
          throw ALE::Exception("Cannot construct dual for incompletely interpolated sieve");
        }
        assignment = new short int[nvtxs];
        ierr = PetscMemzero(assignment, nvtxs * sizeof(short));CHKERROR(ierr, "Error in PetscMemzero");
        /* redirect output to buffer: chaco -> msgLog */
#ifdef PETSC_HAVE_UNISTD_H
        char *msgLog;
        int fd_stdout, fd_pipe[2], count;

        fd_stdout = dup(1);
        pipe(fd_pipe);
        close(1);
        dup2(fd_pipe[1], 1);
        msgLog = new char[16284];
#endif

        interface(nvtxs, start, adjacency, vwgts, ewgts, x, y, z,
                  outassignname, outfilename, assignment, architecture, ndims_tot,
                  mesh_dims, goal, global_method, local_method, rqi_flag, vmax, ndims,
                  eigtol, seed);
#ifdef PETSC_HAVE_UNISTD_H
        int SIZE_LOG = 10000;

        fflush(stdout);
        count = read(fd_pipe[0], msgLog, (SIZE_LOG - 1) * sizeof(char));
        if (count < 0) count = 0;
        msgLog[count] = 0;
        close(1);
        dup2(fd_stdout, 1);
        close(fd_stdout);
        close(fd_pipe[0]);
        close(fd_pipe[1]);
        if (oldMesh->debug) {
          std::cout << msgLog << std::endl;
        }
        delete [] msgLog;
#endif
        delete [] adjacency;
        delete [] start;
        delete [] offsets;
      }
      partition_Induced(assignment, oldMesh, oldSieve, newSieve);
      ALE_LOG_EVENT_END;
      return assignment;
    };
#endif
  public:
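    // Usage sketch (illustrative only; assumes an ALE mesh type with the
    // interface used below, e.g. a hypothetical ALE::Mesh):
    //
    //   ALE::Obj<ALE::Mesh> serialMesh   = ...; // full mesh held on rank 0
    //   ALE::Obj<ALE::Mesh> parallelMesh = ...; // empty mesh on each rank
    //   ALE::MeshPartitioner<ALE::Mesh>::partition(serialMesh, parallelMesh);
    //   // parallelMesh now carries this process's piece of the topology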
    static void partition(const Obj<mesh_type> serialMesh, const Obj<mesh_type> parallelMesh) {
      typedef typename mesh_type::field_type::order_type order_type;
      Obj<sieve_type> serialTopology   = serialMesh->getTopology();
      Obj<sieve_type> parallelTopology = parallelMesh->getTopology();
      Obj<typename mesh_type::field_type> serialBoundary   = serialMesh->getBoundary();
      Obj<typename mesh_type::field_type> parallelBoundary = parallelMesh->getBoundary();
      short *assignment = NULL;
      bool   useSimple  = true;
      // bool hasBd = (serialBoundary->getPatches()->size() > 0);

      parallelTopology->setStratification(false);
#ifdef PETSC_HAVE_CHACO
      if (serialMesh->getDimension() > 1) {
        assignment = partition_Chaco(serialMesh, serialTopology, parallelTopology);
        useSimple = false;
      }
#endif
      if (useSimple) {
        assignment = partition_Simple(serialMesh, serialTopology, parallelTopology);
      }
      // if (hasBd) {
      //   partition_Induced(assignment, serialMesh, serialBoundary->__getOrder(), parallelBoundary->__getOrder());
      // }
      // Obj<std::set<std::string> > fieldNames = serialMesh->getFields();

      // for(typename std::set<std::string>::iterator f_iter = fieldNames->begin(); f_iter != fieldNames->end(); ++f_iter) {
      //   partition_Induced(assignment, serialMesh, serialMesh->getField(*f_iter)->__getOrder(), parallelMesh->getField(*f_iter)->__getOrder());
      // }
      delete [] assignment;

      // Obj<typename Distributer<sieve_type>::overlap_type> partitionOverlap = Distributer<sieve_type>::distribute(serialTopology, parallelTopology);
      parallelTopology->stratify();
      parallelTopology->setStratification(true);

      // if (hasBd) {
      //   Distributer<order_type>::distribute(serialBoundary->__getOrder(), parallelBoundary->__getOrder(), false);
      //   parallelBoundary->reorderPatches();
      //   parallelBoundary->allocatePatches();
      //   parallelBoundary->createGlobalOrder();

      //   VecScatter scatter = Distributer<order_type>::createMappingStoP(serialBoundary, parallelBoundary, partitionOverlap, true);
      //   PetscErrorCode VecScatterDestroy(scatter);CHKERROR(ierr, "Error in VecScatterDestroy");
      // }
      // for(typename std::set<std::string>::iterator f_iter = fieldNames->begin(); f_iter != fieldNames->end(); ++f_iter) {
      //   {
      //     ALE::LogStage stage = ALE::LogStageRegister((*f_iter).c_str());
      //     ALE::LogStagePush(stage);
      //   }
      //   Obj<typename mesh_type::field_type> serialField   = serialMesh->getField(*f_iter);
      //   Obj<typename mesh_type::field_type> parallelField = parallelMesh->getField(*f_iter);

      //   if (serialMesh->debug) {
      //     std::string msg = "Serial field ";
      //     msg += *f_iter;
      //     serialField->view(msg.c_str());
      //   }
      //   Distributer<order_type>::distribute(serialField->__getOrder(), parallelField->__getOrder(), false);
      //   if (parallelMesh->debug) {
      //     std::string msg = "Serial order ";
      //     msg += *f_iter;
      //     serialField->__getOrder()->view(msg.c_str());
      //     msg = "Parallel order ";
      //     msg += *f_iter;
      //     parallelField->__getOrder()->view(msg.c_str());
      //   }
      //   parallelField->reorderPatches();
      //   parallelField->allocatePatches();
      //   parallelField->createGlobalOrder();

      //   VecScatter scatter = Distributer<order_type>::createMappingStoP(serialField, parallelField, partitionOverlap, true);
      //   PetscErrorCode VecScatterDestroy(scatter);CHKERROR(ierr, "Error in VecScatterDestroy");
      //   if (parallelMesh->debug) {
      //     std::string msg = "Parallel field ";
      //     msg += *f_iter;
      //     parallelField->view(msg.c_str());
      //   }
      //   {
      //     ALE::LogStage stage = ALE::LogStageRegister((*f_iter).c_str());
      //     ALE::LogStagePop(stage);
      //   }
      // }
    };
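    // Inverse of partition(): collect the distributed topology onto a single
    // process by attaching every point to the partition point -1; the actual
    // redistribution calls below are currently commented out.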
    static void unify(const Obj<mesh_type> parallelMesh, const Obj<mesh_type> serialMesh) {
      typedef typename mesh_type::field_type::order_type order_type;
      Obj<sieve_type> parallelTopology = parallelMesh->getTopology();
      Obj<sieve_type> serialTopology   = serialMesh->getTopology();
      Obj<typename mesh_type::field_type> parallelBoundary = parallelMesh->getBoundary();
      Obj<typename mesh_type::field_type> serialBoundary   = serialMesh->getBoundary();
      // bool hasBd = (parallelBoundary->getPatches()->size() > 0);
      typename mesh_type::point_type partitionPoint(-1);

      parallelTopology->addCone(parallelTopology->cap(), partitionPoint);
      parallelTopology->addCone(parallelTopology->base(), partitionPoint);
      parallelTopology->removeCapPoint(partitionPoint);
      // parallelBoundary->__getOrder()->addCone(parallelBoundary->__getOrder()->cap(), partitionPoint);
      // if (serialTopology->commRank() == 0) {
      //   serialTopology->addBasePoint(partitionPoint);
      //   serialBoundary->__getOrder()->addBasePoint(partitionPoint);
      // }
      // Distributer<sieve_type>::distribute(parallelTopology, serialTopology);
      // if (hasBd) {
      //   Distributer<order_type>::distribute(parallelBoundary->__getOrder(), serialBoundary->__getOrder(), false);
      //   serialBoundary->allocatePatches();
      // }
    };
  };
}
#endif