NAMD
RefineTorusLB.C
1 /*****************************************************************************
2  * $Source: /home/cvs/namd/cvsroot/namd2/src/RefineTorusLB.C,v $
3  * $Author: jim $
4  * $Date: 2013/08/30 18:18:20 $
5  * $Revision: 1.39 $
6  *****************************************************************************/
7 
15 #include "RefineTorusLB.h"
16 #define EXPAND_INNER_BRICK 2
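  // EXPAND_INNER_BRICK: number of hops by which the inner topology brick could be
  // widened in each direction; it is referenced only from the disabled (#if 0)
  // expansion block inside newRefine() below.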
17 
18 RefineTorusLB::RefineTorusLB(computeInfo *cs, patchInfo *pas, processorInfo *pes, int ncs,
19  int npas, int npes, int flag) : Rebalancer(cs, pas, pes, ncs, npas, npes)
20 {
21  if(flag==1) {
22  strategyName = "RefineTorusLB";
23  strategy();
24  // CREATE THE SPANNING TREE IN THE LOAD BALANCER
25 #if 0
26  if(proxySendSpanning || proxyRecvSpanning) {
27  for(int i=0; i<4; i++) {
28  decrSTLoad();
31  incrSTLoad();
32  // for(int i=0; i<P; i++)
33  // delete [] processors[i].proxyUsage;
34  InitProxyUsage();
35  binaryRefine();
36  printLoads();
38  }
39  }
40 #endif
41  }
42 }
43 
44 RefineTorusLB::~RefineTorusLB() { }
45 
46 void RefineTorusLB::strategy() {
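  // First pass: hand every compute back to the processor it ran on previously
  // (oldProcessor), restricted to this balancer's processor group, then let
  // binaryRefine() shift work off overloaded processors.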
47  int index, realPe;
48 
49  const int beginGroup = processors[0].Id;
50  const int endGroup = beginGroup + P;
51 #define INGROUP(PROC) ((PROC) >= beginGroup && (PROC) < endGroup)
52 
53  firstAssignInRefine = 1;
54  for(int i=0; i<numComputes; i++){
55  // HYBRID check if processor is in local group
56  realPe = computes[i].oldProcessor;
57  if INGROUP(realPe) {
58  index = realPe - processors[0].Id;
59  assign((computeInfo *) &(computes[i]), (processorInfo *) &(processors[index]));
60  }
61  }
62  firstAssignInRefine = 0;
63 
64  printLoads(2);
65  binaryRefine();
66  printLoads(3);
67 }
68 
69 void RefineTorusLB::binaryRefine() {
70  // compute the max and average load
72  double max = computeMax();
73 
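  // Search for the smallest overload factor at which newRefine() succeeds. The
  // smallest factor tried is start = 1.01 + P/numComputes (for example, P = 512
  // and numComputes = 20000 give start = 1.0356, i.e. roughly 3.6% imbalance
  // allowed on the first attempt); candidate factors grow in steps of 0.01 up to
  // roughly the current max/average load ratio.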
74  double step = 0.01, start = 1.01 + ((double)P)/((double)numComputes);
75  double dCurLoad = max/averageLoad;
76  int curLoad;
77  int minLoad = 0;
78  int maxLoad = (int)((dCurLoad - start)/step + 1);
79  double dMinLoad = minLoad * step + start;
80  double dMaxLoad = maxLoad * step + start;
81 
82  // check the two limits of the search: start and dMaxLoad
83  int done=0;
84  overLoad = dMinLoad;
85  if(newRefine())
86  done = 1;
87  else {
88  overLoad = dMaxLoad;
89  if(!newRefine()) {
90  CkPrintf("Error: Could not refine at max overload\n");
91  done = 1;
92  }
93  }
94 
95  // do a binary search between start and dMaxLoad until we succeed
96  while(!done) {
97  if(maxLoad - minLoad <= 1)
98  done = 1;
99  else {
100  curLoad = (maxLoad + minLoad)/2;
101  overLoad = curLoad * step + start;
102  if(newRefine())
103  maxLoad = curLoad;
104  else
105  minLoad = curLoad;
106  }
107  }
108 }
109 
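  // newRefine() attempts one refinement pass at the current overLoad factor. It
  // returns 1 if every overloaded processor can be brought below
  // overLoad * averageLoad by moving computes, and 0 as soon as some donor is
  // left with no acceptable acceptor.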
110 int RefineTorusLB::newRefine() {
111  int done = 1;
112  maxHeap *heavyPes = new maxHeap(P);
113  IRSet *lightPes = new IRSet();
114  processorInfo *donor, *p, *bestP;
115  computeInfo *c;
116  Iterator nextC, nextP;
117  pcpair good;
118  double thresholdLoad = overLoad * averageLoad;
119  int index, realPe;
120 
121  const int beginGroup = processors[0].Id;
122  const int endGroup = beginGroup + P;
123 
124  // create a heap and set of heavy and light pes respectively
125  for(int i=0; i<P; i++) {
126  if (processors[i].load > thresholdLoad)
127  heavyPes->insert((InfoRecord *) &(processors[i]));
128  else
129  lightPes->insert((InfoRecord *) &(processors[i]));
130  }
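  // Donors are drawn from the heavy heap until it is empty; the light set holds
  // the candidate acceptors and both are updated as loads change below.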
131 
132 #if LDB_DEBUG
133  iout << "\n Before Refinement Summary\n" << endi;
134  printSummary();
135 #endif
136 
137  pcpair pcpairarray[12];
138 
139  for(int j=0; j<6; j++) {
140  bestPe[j] = &pcpairarray[j]; // new pcpair();
141  goodPe[j] = &pcpairarray[j+6]; // new pcpair();
142  }
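  // bestPe[]/goodPe[] hold the best compute-processor pair found for each of the
  // six (patches, proxies) combinations ranked in selectPes(). bestPe entries stay
  // under the overload threshold and, when USE_TOPOMAP is set, inside the torus
  // brick spanned by the two home patches; goodPe entries (USE_TOPOMAP only) are
  // acceptable pairs that fall outside that brick.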
143 
144  while(1) {
145  while(donor = (processorInfo*)heavyPes->deleteMax())
146  if(donor->computeSet.hasElements())
147  break;
148 
149  if(!donor) break;
150 
151  for(int j=0; j<6; j++) {
152  bestPe[j]->reset();
153  goodPe[j]->reset();
154  }
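  // Walk the donor's computes and, for each one, consider as acceptors the
  // processors that already host either home patch or a proxy of either patch.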
155 
156  nextC.id = 0;
157  c = (computeInfo *)donor->computeSet.iterator((Iterator *)&nextC);
158 
159  while(c) {
160  // Look at pes which have the compute's patches
161 
162  // HYBRID check if processor is in local group
163 #define SELECT_REALPE(X) if INGROUP((X)) { \
164  selectPes(&processors[(X) - beginGroup], c); \
165  }
166 
167  int realPe1 = patches[c->patch1].processor;
168  SELECT_REALPE(realPe1)
169 
170  int realPe2 = patches[c->patch2].processor;
171  if ( realPe2 != realPe1 ) {
172  SELECT_REALPE(realPe2)
173  }
174 
175  // Try the processors which have the patches' proxies
176  p = (processorInfo *)(patches[c->patch1].proxiesOn.iterator((Iterator *)&nextP));
177  while(p) { // patch 1
178  if INGROUP(p->Id) selectPes(p, c);
179  p = (processorInfo *)(patches[c->patch1].proxiesOn.next((Iterator *)&nextP));
180  }
181 
182  p = (processorInfo *)(patches[c->patch2].proxiesOn.iterator((Iterator *)&nextP));
183  while(p) { //patch 2
184  if INGROUP(p->Id) selectPes(p, c);
185  p = (processorInfo *)(patches[c->patch2].proxiesOn.next((Iterator *)&nextP));
186  }
187 
188  nextC.id++;
189  c = (computeInfo *) donor->computeSet.next((Iterator *)&nextC);
190  } // end of compute loop
191 
192 #define REASSIGN(GRID) if (GRID->c) { deAssign(GRID->c, donor); \
193  assign(GRID->c, GRID->p); bestP = GRID->p; }
194 
195  bestP = 0;
196  // see if we have found a compute processor pair
197  REASSIGN(bestPe[5])
198 #if USE_TOPOMAP
199  else REASSIGN(goodPe[5])
200 #endif
201  else REASSIGN(bestPe[4])
202 #if USE_TOPOMAP
203  else REASSIGN(goodPe[4])
204 #endif
205  else REASSIGN(bestPe[3])
206 #if USE_TOPOMAP
207  else REASSIGN(goodPe[3])
208 #endif
209  else REASSIGN(bestPe[1])
210 #if USE_TOPOMAP
211  else REASSIGN(goodPe[1])
212 #endif
213  else REASSIGN(bestPe[2])
214 #if USE_TOPOMAP
215  else REASSIGN(goodPe[2])
216 #endif
217  else REASSIGN(bestPe[0])
218 #if USE_TOPOMAP
219  else REASSIGN(goodPe[0])
220 #endif
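  // The candidate pairs are tried from most to least desirable: both of the
  // compute's patches already covered on the acceptor (5, 4, 3), then a single
  // home patch (1), a single proxy (2), and finally neither (0). A successful
  // REASSIGN moves the compute and records the acceptor in bestP.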
221 
222  // Try all pes on the nodes of the home patches
223  if ( ! bestP && CmiNumNodes() > 1 ) { // else not useful
224  double minLoad = overLoad * averageLoad;
225  good.c = 0; good.p = 0;
226  nextC.id = 0;
227  c = (computeInfo *)donor->computeSet.iterator((Iterator *)&nextC);
228  while(c) {
229  int realPe1 = patches[c->patch1].processor;
230  int realNode1 = CmiNodeOf(realPe1);
231  int nodeSize = CmiNodeSize(realNode1);
232  if ( nodeSize > 1 ) { // else did it already
233  int firstpe = CmiNodeFirst(realNode1);
234  for ( int rpe = firstpe; rpe < firstpe+nodeSize; ++rpe ) {
235  if INGROUP(rpe) {
236  p = &processors[rpe - beginGroup];
237  if ( p->available && ( p->load + c->load < minLoad ) ) {
238  minLoad = p->load + c->load;
239  good.c = c;
240  good.p = p;
241  }
242  }
243  }
244  }
245  int realPe2 = patches[c->patch2].processor;
246  if ( realPe2 != realPe1 ) {
247  int realNode2 = CmiNodeOf(realPe2);
248  if ( realNode2 != realNode1 ) { // else did it already
249  nodeSize = CmiNodeSize(realNode2);
250  if ( nodeSize > 1 ) {
251  int firstpe = CmiNodeFirst(realNode2);
252  for ( int rpe = firstpe; rpe < firstpe+nodeSize; ++rpe ) {
253  if INGROUP(rpe) {
254  p = &processors[rpe - beginGroup];
255  if ( p->available && ( p->load + c->load < minLoad ) ) {
256  minLoad = p->load + c->load;
257  good.c = c;
258  good.p = p;
259  }
260  }
261  }
262  }
263  }
264  }
265  nextC.id++;
266  c = (computeInfo *) donor->computeSet.next((Iterator *)&nextC);
267  } // end of compute loop
268 
269  REASSIGN((&good))
270  }
271 
272  // Try all pes on the physical nodes of the home patches
273  if ( ! bestP && ( CmiNumPhysicalNodes() > 1 ) &&
274  ( CmiNumPhysicalNodes() < CmiNumNodes() ) ) { // else not useful
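  // Same as the node-level search above, but over all PEs of the physical node
  // (host); this adds candidates only when more than one Charm++ node runs per
  // physical node.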
275  double minLoad = overLoad * averageLoad;
276  good.c = 0; good.p = 0;
277  nextC.id = 0;
278  c = (computeInfo *)donor->computeSet.iterator((Iterator *)&nextC);
279  while(c) {
280  int realPe1 = patches[c->patch1].processor;
281  int realNode1 = CmiPhysicalNodeID(realPe1);
282  int *rpelist;
283  int nodeSize;
284  CmiGetPesOnPhysicalNode(realNode1, &rpelist, &nodeSize);
285  if ( nodeSize > 1 ) { // else did it already
286  for ( int ipe = 0; ipe < nodeSize; ++ipe ) {
287  int rpe = rpelist[ipe];
288  if INGROUP(rpe) {
289  p = &processors[rpe - beginGroup];
290  if ( p->available && ( p->load + c->load < minLoad ) ) {
291  minLoad = p->load + c->load;
292  good.c = c;
293  good.p = p;
294  }
295  }
296  }
297  }
298  int realPe2 = patches[c->patch2].processor;
299  if ( realPe2 != realPe1 ) {
300  int realNode2 = CmiPhysicalNodeID(realPe2);
301  if ( realNode2 != realNode1 ) { // else did it already
302  CmiGetPesOnPhysicalNode(realNode2, &rpelist, &nodeSize);
303  if ( nodeSize > 1 ) { // else did it already
304  for ( int ipe = 0; ipe < nodeSize; ++ipe ) {
305  int rpe = rpelist[ipe];
306  if INGROUP(rpe) {
307  p = &processors[rpe - beginGroup];
308  if ( p->available && ( p->load + c->load < minLoad ) ) {
309  minLoad = p->load + c->load;
310  good.c = c;
311  good.p = p;
312  }
313  }
314  }
315  }
316  }
317  }
318  nextC.id++;
319  c = (computeInfo *) donor->computeSet.next((Iterator *)&nextC);
320  } // end of compute loop
321 
322  REASSIGN((&good))
323  }
324 
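  // If a move was made, update the donor's and the acceptor's heavy/light
  // membership and go on to the next donor.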
325  if(bestP) {
326  if(bestP->load > averageLoad) {
327  // CkPrintf("Acceptor %d became heavy%f %f\n", bestP->Id, bestP->load, overLoad*averageLoad);
328  lightPes->remove(bestP);
329  } else {
330  // CkPrintf("Acceptor %d still light %f %f\n", bestP->Id, bestP->load, overLoad*averageLoad);
331  }
332  if(donor->load > overLoad*averageLoad) {
333  // CkPrintf("Donor %d still heavy %f %f\n", donor->Id, donor->load, overLoad*averageLoad);
334  heavyPes->insert((InfoRecord *) donor);
335  }
336  else {
337  // CkPrintf("Donor %d became light %f %f\n", donor->Id, donor->load, overLoad*averageLoad);
338  lightPes->insert((InfoRecord *) donor);
339  }
340 
341  continue;
342  }
343  //else
344  //CkPrintf("1st try failed\n");
345 
346  int found = 0;
347 #if USE_TOPOMAP
348  // if this fails, look at the inner brick
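  // The inner brick is the smallest torus-aligned box containing both home-patch
  // processors: brickDim() yields, for each dimension, the (possibly wrapped)
  // interval [xm, xM] etc. covering the two coordinates.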
349  int p1, p2, pe, x1, x2, xm, xM, y1, y2, ym, yM, z1, z2, zm, zM, t1, t2;
350  int dimNX, dimNY, dimNZ, dimNT;
351  double minLoad;
352 
353  good.c = 0; good.p = 0;
354  minLoad = overLoad*averageLoad;
355  nextC.id = 0;
356  c = (computeInfo *)donor->computeSet.iterator((Iterator *)&nextC);
357 
358  while(c) {
359  p1 = patches[c->patch1].processor;
360  p2 = patches[c->patch2].processor;
361 
362  tmgr.rankToCoordinates(p1, x1, y1, z1, t1);
363  tmgr.rankToCoordinates(p2, x2, y2, z2, t2);
364  dimNX = tmgr.getDimNX();
365  dimNY = tmgr.getDimNY();
366  dimNZ = tmgr.getDimNZ();
367  dimNT = tmgr.getDimNT();
368 
369  brickDim(x1, x2, dimNX, xm, xM);
370  brickDim(y1, y2, dimNY, ym, yM);
371  brickDim(z1, z2, dimNZ, zm, zM);
372 
373  // to expand the inner brick by some hops
374 #if 0
375  if(xm>=EXPAND_INNER_BRICK) xm=xm-EXPAND_INNER_BRICK; else xm=0;
376  if(ym>=EXPAND_INNER_BRICK) ym=ym-EXPAND_INNER_BRICK; else ym=0;
377  if(zm>=EXPAND_INNER_BRICK) zm=zm-EXPAND_INNER_BRICK; else zm=0;
378 
379  xM=xM+EXPAND_INNER_BRICK;
380  yM=yM+EXPAND_INNER_BRICK;
381  zM=zM+EXPAND_INNER_BRICK;
382 #endif
383 
384  // first go over the processors inside the brick and choose the least
385  for(int i=xm; i<=xM; i++)
386  for(int j=ym; j<=yM; j++)
387  for(int k=zm; k<=zM; k++)
388  for(int l=0; l<dimNT; l++)
389  {
390  pe = tmgr.coordinatesToRank(i%dimNX, j%dimNY, k%dimNZ, l);
391  if ( ! INGROUP(pe) ) continue;
392  p = &processors[pe - beginGroup];
393  if(c->load + p->load < minLoad) {
394  minLoad = c->load + p->load;
395  good.c = c;
396  good.p = p;
397  }
398  }
399  nextC.id++;
400  c = (computeInfo *) donor->computeSet.next((Iterator *)&nextC);
401  }
402 
403  if(good.c) {
404  found = 1;
405  //CkPrintf("2nd try succeeded\n");
406  }
407  else {
408  found = 0;
409  //CkPrintf("2nd try failed\n");
410  }
411 
412  // if that also fails, look at the outer brick
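  // The outer brick (everything outside the inner box) is scanned in three slabs:
  // x planes beyond the brick, then the extra y planes, then the extra z planes.
  // Unlike the inner-brick pass, the first placement that fits under the overload
  // threshold is taken rather than the overall best.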
413  minLoad = overLoad * averageLoad;
414  if(found==0) {
415  good.c = 0; good.p = 0;
416  p = 0;
417 
418  nextC.id = 0;
419  c = (computeInfo *)donor->computeSet.iterator((Iterator *)&nextC);
420  while(c) {
421  p1 = patches[c->patch1].processor;
422  p2 = patches[c->patch2].processor;
423 
424  tmgr.rankToCoordinates(p1, x1, y1, z1, t1);
425  tmgr.rankToCoordinates(p2, x2, y2, z2, t2);
426  dimNX = tmgr.getDimNX();
427  dimNY = tmgr.getDimNY();
428  dimNZ = tmgr.getDimNZ();
429  dimNT = tmgr.getDimNT();
430 
431  brickDim(x1, x2, dimNX, xm, xM);
432  brickDim(y1, y2, dimNY, ym, yM);
433  brickDim(z1, z2, dimNZ, zm, zM);
434 
435  for(int i=xM+1; i<xm+dimNX; i++)
436  for(int j=0; j<dimNY; j++)
437  for(int k=0; k<dimNZ; k++)
438  for(int l=0; l<dimNT; l++)
439  {
440  pe = tmgr.coordinatesToRank(i%dimNX, j%dimNY, k%dimNZ, l);
441  if ( ! INGROUP(pe) ) continue;
442  p = &processors[pe - beginGroup];
443  if(c->load + p->load < minLoad) {
444  good.c = c;
445  good.p = p;
446  found = 1; break;
447  }
448  }
449 
450  if(found==1)
451  break;
452  else {
453  for(int j=yM+1; j<ym+dimNY; j++)
454  for(int i=xm; i<=xM; i++)
455  for(int k=0; k<dimNZ; k++)
456  for(int l=0; l<dimNT; l++)
457  {
458  pe = tmgr.coordinatesToRank(i%dimNX, j%dimNY, k%dimNZ, l);
459  if ( ! INGROUP(pe) ) continue;
460  p = &processors[pe - beginGroup];
461  if(c->load + p->load < minLoad) {
462  good.c = c;
463  good.p = p;
464  found = 1; break;
465  }
466  }
467  }
468 
469  if(found==1)
470  break;
471  else {
472  for(int k=zM+1; k<zm+dimNZ; k++)
473  for(int i=xm; i<=xM; i++)
474  for(int j=ym; j<=yM; j++)
475  for(int l=0; l<dimNT; l++)
476  {
477  pe = tmgr.coordinatesToRank(i%dimNX, j%dimNY, k%dimNZ, l);
478  if ( ! INGROUP(pe) ) continue;
479  p = &processors[pe - beginGroup];
480  if(c->load + p->load < minLoad) {
481  good.c = c;
482  good.p = p;
483  found = 1; break;
484  }
485  }
486  }
487 
488  if(found==1) break;
489 
490  nextC.id++;
491  c = (computeInfo *) donor->computeSet.next((Iterator *)&nextC);
492  }
493  }
494 
495  if(found == 1) {
496  deAssign(good.c, donor);
497  assign(good.c, good.p);
498  if (good.p->load > averageLoad) lightPes->remove(good.p);
499  if (donor->load > overLoad*averageLoad)
500  heavyPes->insert((InfoRecord *) donor);
501  else
502  lightPes->insert((InfoRecord *) donor);
503  continue;
504  }
505 
506 #endif /* USE_TOPOMAP */
507 
508  // Last resort: offer every compute on the donor to every remaining light processor.
509  p = (processorInfo *)lightPes->iterator((Iterator *) &nextP);
510  if(found == 0) {
511  while (p)
512  {
513  nextC.id = 0;
514  c = (computeInfo *)donor->computeSet.iterator((Iterator *)&nextC);
515  while (c)
516  {
517  selectPes(p, c);
518  nextC.id++;
519  c = (computeInfo *) donor->computeSet.next((Iterator *)&nextC);
520  }
521  p = (processorInfo *)lightPes->next((Iterator *) &nextP);
522  }
523 
524  bestP = 0;
525  REASSIGN(bestPe[5])
526 #if USE_TOPOMAP
527  else REASSIGN(goodPe[5])
528 #endif
529  else REASSIGN(bestPe[4])
530 #if USE_TOPOMAP
531  else REASSIGN(goodPe[4])
532 #endif
533  else REASSIGN(bestPe[3])
534 #if USE_TOPOMAP
535  else REASSIGN(goodPe[3])
536 #endif
537  else REASSIGN(bestPe[1])
538 #if USE_TOPOMAP
539  else REASSIGN(goodPe[1])
540 #endif
541  else REASSIGN(bestPe[2])
542 #if USE_TOPOMAP
543  else REASSIGN(goodPe[2])
544 #endif
545  else REASSIGN(bestPe[0])
546 #if USE_TOPOMAP
547  else REASSIGN(goodPe[0])
548 #endif
549  }
550 
551  if(bestP) {
552  if(bestP->load > averageLoad) lightPes->remove(bestP);
553  if(donor->load > overLoad*averageLoad)
554  heavyPes->insert((InfoRecord *) donor);
555  else
556  lightPes->insert((InfoRecord *) donor);
557  continue;
558  }
559  else {
560  done = 0;
561  break;
562  }
563 
564  } // end of while loop
565 
566 #if LDB_DEBUG
567  iout << "After Refinement Summary\n" << endi;
568  printSummary();
569 #endif
570 
571  delete heavyPes;
572  delete lightPes;
573 
574  return done;
575 }
576 
577 void RefineTorusLB::selectPes(processorInfo *p, computeInfo *c) {
578  if (p->available == false)
579  return;
580 
581  // find the position in bestPe/goodPe to place this pair
582  // HP HP HP HP HP HP
583  // 02 11 20 01 10 00
584  // 5 4 3 2 1 0
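  // i.e. index = T(numPatches + numProxies) + numProxies with T(n) = n(n+1)/2, so
  // (patches, proxies) = (0,2)->5, (1,1)->4, (2,0)->3, (0,1)->2, (1,0)->1, (0,0)->0,
  // matching the table above.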
585  int numPatches, numProxies, /* badForComm, */ index;
586  numAvailable(c, p, &numPatches, &numProxies, 0 /* &badForComm */);
587  int numEither = numPatches + numProxies;
588  index = (numEither*(numEither+1))/2 + numProxies;
589 
590 #if USE_TOPOMAP
591  int x, y, z, t;
592  int p1, p2, pe, x1, x2, xm, xM, y1, y2, ym, yM, z1, z2, zm, zM, t1, t2;
593  int dimNX, dimNY, dimNZ, dimNT;
594  double minLoad;
595  p1 = patches[c->patch1].processor;
596  p2 = patches[c->patch2].processor;
597 
598  tmgr.rankToCoordinates(p1, x1, y1, z1, t1);
599  tmgr.rankToCoordinates(p2, x2, y2, z2, t2);
600  dimNX = tmgr.getDimNX();
601  dimNY = tmgr.getDimNY();
602  dimNZ = tmgr.getDimNZ();
603  dimNT = tmgr.getDimNT();
604 
605  brickDim(x1, x2, dimNX, xm, xM);
606  brickDim(y1, y2, dimNY, ym, yM);
607  brickDim(z1, z2, dimNZ, zm, zM);
608 #endif
609 
610  if (p->load + c->load < overLoad * averageLoad) {
611 #if USE_TOPOMAP
612  tmgr.rankToCoordinates(p->Id, x, y, z, t);
613  int wB = withinBrick(x, y, z, xm, xM, dimNX, ym, yM, dimNY, zm, zM, dimNZ);
614  if (wB) {
615 #endif
616  pcpair* &oldp = bestPe[index];
617 
618  if (!(oldp->p) || ((p->load + c->load) < (oldp->p->load + oldp->c->load))) {
619  oldp->p = p;
620  oldp->c = c;
621  }
622 #if USE_TOPOMAP
623  } else {
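  // Outside the brick: remember the pair in goodPe[]; replace the current entry
  // if the new pair is more than 0.4 lighter, or lighter at all and fewer torus
  // hops from the two home patches.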
624  pcpair* &oldp = goodPe[index];
625  double loadDiff = 0.0;
626 
627  if (!(oldp->p)) {
628  oldp->p = p;
629  oldp->c = c;
630  } else {
631  loadDiff = oldp->p->load + oldp->c->load - p->load - c->load;
632  if ( (loadDiff > 0.4) || (loadDiff > 0.0 && (tmgr.getHopsBetweenRanks(p->Id, p1) + tmgr.getHopsBetweenRanks(p->Id, p2) < tmgr.getHopsBetweenRanks((oldp->p)->Id, p1) + tmgr.getHopsBetweenRanks((oldp->p)->Id, p2))) ) {
633  oldp->p = p;
634  oldp->c = c;
635  }
636  }
637  }
638 #endif
639  }
640 }
641 