OptPmeMgr Class Reference

Public Member Functions

 OptPmeMgr ()
 ~OptPmeMgr ()
void initialize (CkQdMsg *)
void initialize_pencils (CkQdMsg *)
void activate_pencils (CkQdMsg *)
void recvArrays (CProxy_OptPmeXPencil, CProxy_OptPmeYPencil, CProxy_OptPmeZPencil)
void recvUngrid (OptPmeGridMsg *)
void ungridCalc (OptPmeDummyMsg *)
void ungridCalc_subcompute (OptPmeSubComputeMsg *)
void ungridCalc_subcompute_done (OptPmeSubComputeMsg *)
void doWorkOnPeer (OptPmeSubComputeMsg *)
void recvEvir (CkReductionMsg *msg)
void setCompute (OptPmeCompute *c)

Friends

class OptPmeCompute

Detailed Description

OptPmeMgr is the Charm++ group that drives NAMD's optimized particle-mesh Ewald (PME) code path: it creates and initializes the x-, y-, and z-pencil FFT chare arrays, activates the pencils that overlap local patches, routes grid and ungrid messages between patches and pencils, parallelizes force interpolation across node-local PEs, and submits the resulting energy and virial to the reduction system.

Definition at line 56 of file OptPme.C.


Constructor & Destructor Documentation

OptPmeMgr::OptPmeMgr (  ) 

Definition at line 227 of file OptPme.C.

References fftw_plan_lock.

00227                      : pmeProxy(thisgroup), 
00228                                  pmeProxyDir(thisgroup), pmeCompute(0) {
00229 
00230   CkpvAccess(BOCclass_group).computePmeMgr = thisgroup;
00231 
00232   myKSpace = 0;
00233   ungrid_count = 0;
00234   peersAllocated = 0;
00235 
00236 #ifdef NAMD_FFTW
00237   if ( CmiMyRank() == 0 ) {
00238     fftw_plan_lock = CmiCreateLock();
00239   }
00240 #endif    
00241 }
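
The rank-0 guard above exists because FFTW's planner is not thread-safe, so a single lock is created per node and shared by every PE on it. A minimal sketch of how such a lock would guard later plan creation, assuming the FFTW-2 style API implied by NAMD_FFTW (the planning call is illustrative, not the one in OptPme.C):

    // Sketch: serialize FFTW plan creation across PEs on a node.
    // fftw_plan_lock is the CmiNodeLock created in the constructor above;
    // n stands in for the 1-D FFT length actually used by OptPme.
    CmiLock(fftw_plan_lock);
    fftw_plan p = fftw_create_plan(n, FFTW_FORWARD,
                                   FFTW_ESTIMATE | FFTW_IN_PLACE);
    CmiUnlock(fftw_plan_lock);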

OptPmeMgr::~OptPmeMgr (  ) 

Definition at line 447 of file OptPme.C.

00447                       {
00448   delete myKSpace;
00449 }


Member Function Documentation

void OptPmeMgr::activate_pencils ( CkQdMsg *   ) 

Definition at line 442 of file OptPme.C.

00442                                              {
00443   if ( CkMyPe() == 0 ) zPencil.dummyRecvGrid(CkMyPe(),1);
00444 }

void OptPmeMgr::doWorkOnPeer ( OptPmeSubComputeMsg *  msg ) 

Definition at line 553 of file OptPme.C.

References OptPmeSubComputeMsg::compute, and OptPmeCompute::doWorkOnPeer().

00553                                                      {
00554   OptPmeCompute *compute = (OptPmeCompute *) msg->compute;
00555   compute->doWorkOnPeer();
00556   //  delete msg; //saved in compute
00557 }

void OptPmeMgr::initialize ( CkQdMsg *   ) 

Definition at line 248 of file OptPme.C.

References OptPmePencilInitMsgData::cb_energy, OptPmePencilInitMsgData::constant_pressure, endi(), OptPmePencilInitMsgData::grid, iINFO(), initializePmeGrid(), iout, MANY_TO_MANY_START, many_to_many_start, NAMD_die(), PatchMap::numPatches(), ReductionMgr::Object(), PatchMap::Object(), Node::Object(), ComputeHomePatches::patchMap, pencilPMEProcessors, OptPmePencilInitMsgData::pmeProxy, REDUCTIONS_BASIC, Node::simParameters, simParams, ReductionMgr::willSubmit(), x, OptPmePencilInitMsgData::xBlocks, PmeGrid::xBlocks, OptPmePencilInitMsgData::xPencil, y, OptPmePencilInitMsgData::yBlocks, PmeGrid::yBlocks, OptPmePencilInitMsgData::yPencil, z, OptPmePencilInitMsgData::zBlocks, PmeGrid::zBlocks, and OptPmePencilInitMsgData::zPencil.

00248                                        {
00249     delete msg;
00250 
00251     _iter = 0;
00252 
00253     handle = CmiDirect_manytomany_allocate_handle ();
00254 
00255     SimParameters *simParams = Node::Object()->simParameters;
00256     PatchMap *patchMap = PatchMap::Object();
00257     
00258     initializePmeGrid (simParams, myGrid);    
00259 
00260     if (simParams->langevinPistonOn || simParams->berendsenPressureOn)  
00261       constant_pressure = true;
00262     else
00263       constant_pressure = false;      
00264 
00265     bool useManyToMany = simParams->useManyToMany;
00266     //Many-to-many requires that patches and pmepencils are all on different processors
00267     //int npes = patchMap->numPatches() + 
00268     //         myGrid.xBlocks *  myGrid.yBlocks + 
00269     //         myGrid.zBlocks *  myGrid.xBlocks +
00270     //         myGrid.yBlocks *  myGrid.zBlocks;
00271     
00272     int npes = patchMap->numPatches();
00273     if (npes < myGrid.xBlocks *  myGrid.yBlocks)
00274       npes = myGrid.xBlocks *  myGrid.yBlocks;
00275     if (npes <  myGrid.zBlocks *  myGrid.xBlocks)
00276       npes = myGrid.zBlocks *  myGrid.xBlocks;
00277     if (npes < myGrid.yBlocks *  myGrid.zBlocks)
00278       npes = myGrid.yBlocks *  myGrid.zBlocks;
00279     
00280    if (npes >= CkNumPes()) {
00281       if (CkMyPe() == 0)
00282         printf ("Warning : Not enough processors for the many-to-many optimization \n");      
00283       useManyToMany = false;
00284     }
00285     
00286     if (useManyToMany)  {
00287       if (CkMyPe() == 0)
00288         printf ("Enabling the Many-to-many optimization\n");
00289       //defaults to max integer
00290       many_to_many_start = MANY_TO_MANY_START;
00291     }
00292 
00293     if (CkMyRank() == 0) { //create the pencil pme processor map
00294       pencilPMEProcessors = new char [CkNumPes()];
00295       memset (pencilPMEProcessors, 0, sizeof(char) * CkNumPes());
00296     }
00297 
00298     if ( CkMyPe() == 0) {
00299       iout << iINFO << "PME using " << myGrid.xBlocks << " x " <<
00300         myGrid.yBlocks << " x " << myGrid.zBlocks <<
00301         " pencil grid for FFT and reciprocal sum.\n" << endi;
00302       
00303       CProxy_OptPmePencilMapZ   mapz;      
00304       CProxy_OptPmePencilMapY   mapy;
00305       CProxy_OptPmePencilMapX   mapx;
00306       
00307       mapz = CProxy_OptPmePencilMapZ::ckNew(myGrid.xBlocks, myGrid.yBlocks, myGrid.zBlocks);      
00308       mapy = CProxy_OptPmePencilMapY::ckNew(myGrid.xBlocks, myGrid.yBlocks, myGrid.zBlocks);
00309       mapx = CProxy_OptPmePencilMapX::ckNew(myGrid.xBlocks, myGrid.yBlocks, myGrid.zBlocks);
00310       
00311       CkArrayOptions optsz;
00312       optsz.setMap (mapz);
00313       CkArrayOptions optsy;
00314       optsy.setMap (mapy);
00315       CkArrayOptions optsx;
00316       optsx.setMap (mapx);
00317       
00318       zPencil = CProxy_OptPmeZPencil::ckNew(optsz);  
00319       yPencil = CProxy_OptPmeYPencil::ckNew(optsy);  
00320       xPencil = CProxy_OptPmeXPencil::ckNew(optsx);  
00321       
00322       int x,y,z;
00323       for (x = 0; x < myGrid.xBlocks; ++x)
00324         for (y = 0; y < myGrid.yBlocks; ++y ) {
00325           zPencil(x,y,0).insert();
00326         }
00327       zPencil.doneInserting();
00328       
00329       for (z = 0; z < myGrid.zBlocks; ++z )
00330         for (x = 0; x < myGrid.xBlocks; ++x ) {
00331           yPencil(x,0,z).insert();
00332         }
00333       yPencil.doneInserting();
00334       
00335       for (y = 0; y < myGrid.yBlocks; ++y )     
00336         for (z = 0; z < myGrid.zBlocks; ++z ) {
00337           xPencil(0,y,z).insert();
00338         }
00339       xPencil.doneInserting();      
00340       
00341       pmeProxy.recvArrays(xPencil,yPencil,zPencil);
00342       OptPmePencilInitMsgData msgdata;
00343       msgdata.grid = myGrid;
00344       msgdata.xBlocks = myGrid.xBlocks;
00345       msgdata.yBlocks = myGrid.yBlocks;
00346       msgdata.zBlocks = myGrid.zBlocks;
00347       msgdata.xPencil = xPencil;
00348       msgdata.yPencil = yPencil;
00349       msgdata.zPencil = zPencil;
00350       msgdata.constant_pressure = constant_pressure;
00351 
00352       CkCallback cb (CkIndex_OptPmeMgr::recvEvir(NULL), thisProxy[0]);
00353       msgdata.cb_energy = cb;
00354 
00355       msgdata.pmeProxy = pmeProxyDir;
00356       xPencil.init(new OptPmePencilInitMsg(msgdata));
00357       yPencil.init(new OptPmePencilInitMsg(msgdata));
00358       zPencil.init(new OptPmePencilInitMsg(msgdata));
00359      
00360 #if 0 
00361       reduction = ReductionMgr::Object()->willSubmit(REDUCTIONS_BASIC);
00362 #endif
00363 
00364 #ifndef NAMD_FFTW
00365       NAMD_die("Sorry, FFTW must be compiled in to use PME.");
00366 #endif
00367     }
00368 
00369 }
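
The chain of comparisons near the top of initialize() is simply a maximum over four object counts: the many-to-many optimization requires every patch and every pencil to sit on its own processor, so it is disabled unless the largest count still fits under CkNumPes(). An equivalent compact form (a sketch, not the code in OptPme.C):

    #include <algorithm>

    // Sketch: largest of the four object counts that must each map to
    // distinct PEs for the many-to-many optimization to be usable.
    int npes = std::max({ patchMap->numPatches(),
                          myGrid.xBlocks * myGrid.yBlocks,     // z pencils
                          myGrid.zBlocks * myGrid.xBlocks,     // y pencils
                          myGrid.yBlocks * myGrid.zBlocks });  // x pencils
    bool useManyToMany = simParams->useManyToMany && npes < CkNumPes();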

void OptPmeMgr::initialize_pencils ( CkQdMsg *   ) 

Definition at line 371 of file OptPme.C.

References Lattice::a(), Lattice::a_r(), Lattice::b(), Lattice::b_r(), PmeGrid::block1, PmeGrid::block2, j, PmeGrid::K1, PmeGrid::K2, PatchMap::max_a(), PatchMap::max_b(), PatchMap::min_a(), PatchMap::min_b(), PatchMap::node(), PatchMap::numPatches(), numPatches, PatchMap::numPatchesOnNode(), PatchMap::Object(), Node::Object(), PmeGrid::order, ComputeHomePatches::patchMap, Node::simParameters, simParams, Vector::unit(), PmeGrid::xBlocks, and PmeGrid::yBlocks.

00371                                                {
00372   delete msg;
00373 
00374   SimParameters *simParams = Node::Object()->simParameters;
00375 
00376   PatchMap *patchMap = PatchMap::Object();
00377   Lattice lattice = simParams->lattice;
00378   BigReal sysdima = lattice.a_r().unit() * lattice.a();
00379   BigReal sysdimb = lattice.b_r().unit() * lattice.b();
00380   BigReal cutoff = simParams->cutoff;
00381   BigReal patchdim = simParams->patchDimension;
00382   int numPatches = patchMap->numPatches();
00383 
00384   //fprintf(stderr, "Node %d PE %d trying to allocate %d bytes\n", CmiMyNode(), CmiMyPe(), myGrid.xBlocks*myGrid.yBlocks);
00385 
00386   char *pencilActive = new char[myGrid.xBlocks*myGrid.yBlocks];
00387   for ( int i=0; i<myGrid.xBlocks; ++i ) {
00388     for ( int j=0; j<myGrid.yBlocks; ++j ) {
00389       pencilActive[i*myGrid.yBlocks+j] = 0;
00390     }
00391   }
00392 
00393   //Right now we only support one patch per processor
00394   assert (patchMap->numPatchesOnNode(CkMyPe()) <= 1);
00395   for ( int pid=0; pid < numPatches; ++pid ) {
00396     int pnode = patchMap->node(pid);
00397     if ( pnode != CkMyPe() ) continue;
00398 
00399     BigReal minx = patchMap->min_a(pid);
00400     BigReal maxx = patchMap->max_a(pid);
00401     BigReal margina = 0.5 * ( patchdim - cutoff ) / sysdima;
00402     // min1 (max1) is smallest (largest) grid line for this patch
00403     int min1 = ((int) floor(myGrid.K1 * (minx - margina))) - myGrid.order + 1;
00404     int max1 = ((int) floor(myGrid.K1 * (maxx + margina)));
00405 
00406     BigReal miny = patchMap->min_b(pid);
00407     BigReal maxy = patchMap->max_b(pid);
00408     BigReal marginb = 0.5 * ( patchdim - cutoff ) / sysdimb;
00409     // min2 (max2) is smallest (largest) grid line for this patch
00410     int min2 = ((int) floor(myGrid.K2 * (miny - marginb))) - myGrid.order + 1;
00411     int max2 = ((int) floor(myGrid.K2 * (maxy + marginb)));
00412 
00413     for ( int i=min1; i<=max1; ++i ) {
00414       int ix = i;
00415       while ( ix >= myGrid.K1 ) ix -= myGrid.K1;
00416       while ( ix < 0 ) ix += myGrid.K1;
00417       for ( int j=min2; j<=max2; ++j ) {
00418         int jy = j;
00419         while ( jy >= myGrid.K2 ) jy -= myGrid.K2;
00420         while ( jy < 0 ) jy += myGrid.K2;
00421         pencilActive[(ix / myGrid.block1)*myGrid.yBlocks + (jy / myGrid.block2)] = 1;
00422       }
00423     }
00424   }
00425 
00426   numPencilsActive = 0;
00427   for ( int i=0; i<myGrid.xBlocks; ++i ) {
00428     for ( int j=0; j<myGrid.yBlocks; ++j ) {
00429       if ( pencilActive[i*myGrid.yBlocks+j] ) {
00430         ++numPencilsActive;
00431 
00432         zPencil(i,j,0).dummyRecvGrid(CkMyPe(),0);
00433       }
00434     }
00435   }
00436 
00437   ungrid_count = numPencilsActive;
00438   delete [] pencilActive;  
00439 }
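
The margins above widen each patch by half the difference between the patch dimension and the cutoff (converted to fractional coordinates), and the extra order - 1 grid lines below min1/min2 cover the B-spline charge-spreading stencil. Once a touched grid line pair (i, j) is wrapped into [0, K1) x [0, K2), the owning z-pencil follows from the block sizes, as in the marking statement inside the loop above; restated as a sketch:

    // Sketch: each z-pencil owns a block1 x block2 column of (x, y) grid
    // lines, so wrapped grid point (ix, jy) belongs to pencil
    // (ix / block1, jy / block2), stored row-major over yBlocks columns.
    int px = ix / myGrid.block1;   // pencil x index, 0 .. xBlocks-1
    int py = jy / myGrid.block2;   // pencil y index, 0 .. yBlocks-1
    pencilActive[px * myGrid.yBlocks + py] = 1;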

void OptPmeMgr::recvArrays ( CProxy_OptPmeXPencil  x,
CProxy_OptPmeYPencil  y,
CProxy_OptPmeZPencil  z 
)

Definition at line 244 of file OptPme.C.

00244                                                                                                  {
00245   xPencil = x;  yPencil = y;  zPencil = z;
00246 }

void OptPmeMgr::recvEvir ( CkReductionMsg *  msg  ) 

Definition at line 559 of file OptPme.C.

References SubmitReduction::item(), Node::Object(), REDUCTION_ELECT_ENERGY_SLOW, Node::simParameters, simParams, and SubmitReduction::submit().

00559                                              {
00560 
00561   assert (CkMyPe() == 0);
00562 
00563   double *data = (double *) msg->getData();
00564   assert (msg->getSize() == 7 * sizeof(double));
00565 
00566   //printf ("[%d]: Received Evir\n", CkMyPe());
00567 
00568   double scale = 1.;
00569   reduction->item(REDUCTION_ELECT_ENERGY_SLOW) += data[0] * scale;
00570   reduction->item(REDUCTION_VIRIAL_SLOW_XX) += data[1] * scale;
00571   reduction->item(REDUCTION_VIRIAL_SLOW_XY) += data[2] * scale;
00572   reduction->item(REDUCTION_VIRIAL_SLOW_XZ) += data[3] * scale;
00573   reduction->item(REDUCTION_VIRIAL_SLOW_YX) += data[2] * scale;
00574   reduction->item(REDUCTION_VIRIAL_SLOW_YY) += data[4] * scale;
00575   reduction->item(REDUCTION_VIRIAL_SLOW_YZ) += data[5] * scale;
00576   reduction->item(REDUCTION_VIRIAL_SLOW_ZX) += data[3] * scale;
00577   reduction->item(REDUCTION_VIRIAL_SLOW_ZY) += data[5] * scale;
00578   reduction->item(REDUCTION_VIRIAL_SLOW_ZZ) += data[6] * scale;   
00579 
00580   delete msg;
00581 
00582   SimParameters *simParams = Node::Object()->simParameters;
00583   int fef = simParams->fullElectFrequency;
00584   for (int i = 0; i < fef; i++) {
00585     reduction->submit();
00586   }
00587 }
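
The seven doubles carry the slow electrostatic energy followed by the six unique entries of the symmetric slow virial, which the code scatters into all nine tensor slots; the final loop submits the reduction fullElectFrequency times because PME runs only once per fullElectFrequency steps while the reduction system expects one submission per step. The layout implied by the accesses above:

    // data[0]                     slow electrostatic energy
    // data[1]  data[2]  data[3]      [ xx  xy  xz ]
    // data[2]  data[4]  data[5]  =   [ yx  yy  yz ]   (symmetric virial)
    // data[3]  data[5]  data[6]      [ zx  zy  zz ]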

void OptPmeMgr::recvUngrid ( OptPmeGridMsg *  msg ) 

Definition at line 451 of file OptPme.C.

References OptPmeCompute::copyPencils(), NAMD_bug(), and ungridCalc().

00451                                              {
00452   if ( ungrid_count == 0 ) {
00453     NAMD_bug("Message order failure in OptPmeMgr::recvUngrid\n");
00454   }
00455     
00456   pmeCompute->copyPencils(msg);
00457   delete msg;
00458   --ungrid_count;
00459 
00460   if ( ungrid_count == 0 ) {
00461     //CkPrintf("recvUngrid on Pe(%d)\n",CkMyPe());
00462     ungridCalc(NULL);
00463   }
00464 }

void OptPmeMgr::setCompute ( OptPmeCompute *  c  )  [inline]

Definition at line 74 of file OptPme.C.

References OptPmeCompute::setMgr().

00074 { pmeCompute = c; c->setMgr(this); }

void OptPmeMgr::ungridCalc ( OptPmeDummyMsg *   ) 

Definition at line 466 of file OptPme.C.

References OptPmeSubComputeMsg::compute, OptPmeSubComputeMsg::dest, OptPmeSubComputeMsg::end, OptPmeCompute::getNumLocalAtoms(), PatchMap::numPatchesOnNode(), PatchMap::Object(), ComputeHomePatches::patchMap, PRIORITY_SIZE, OptPmeSubComputeMsg::src_pe, OptPmeSubComputeMsg::start, SUBCOMPUTE_NPAR, OptPmeCompute::ungridForces_compute(), OptPmeCompute::ungridForces_finalize(), and OptPmeCompute::ungridForces_init().

Referenced by recvUngrid().

00466                                                {    
00467   pmeCompute->ungridForces_init();
00468   if ( CmiMyNodeSize() >= SUBCOMPUTE_NPAR ) {
00469     int npar = SUBCOMPUTE_NPAR;
00470     OptPmeSubComputeMsg *smsg = NULL;
00471 
00472     if (!peersAllocated) {
00473       peersAllocated = 1;
00474       int next_rank = CmiMyRank();   
00475       PatchMap *patchMap = PatchMap::Object();          
00476 
00477       for (int i = 1; i < npar; ++i) {      
00478         smsg = new (PRIORITY_SIZE) OptPmeSubComputeMsg;
00479         subcompute_msgs[i] = smsg;
00480         smsg->src_pe  = CkMyPe();
00481         smsg->compute = pmeCompute;
00482 
00483         next_rank ++;
00484         if (next_rank >= CmiMyNodeSize())
00485           next_rank = 0;
00486         int n = 0;
00487         int nr = next_rank;
00488         while(n < CmiMyNodeSize() &&
00489               patchMap->numPatchesOnNode(CmiNodeFirst(CmiMyNode())+nr) > 0)
00490         {
00491           nr ++;
00492           if (nr >= CmiMyNodeSize())
00493             nr = 0;
00494           n++;
00495         }
00496         if (n < CmiMyNodeSize()) 
00497           next_rank = nr;  //we are successful, so save this rank
00498         
00499         smsg->dest = next_rank;
00500       }
00501 
00502       //Local subcompute msg
00503       smsg = new (PRIORITY_SIZE) OptPmeSubComputeMsg;
00504       subcompute_msgs[0] = smsg;
00505       smsg->src_pe  = CkMyPe();
00506       smsg->compute = pmeCompute;
00507       smsg->dest    = CmiMyRank();
00508     }
00509 
00510     int start  = 0;
00511     int nlocal = pmeCompute->getNumLocalAtoms();
00512     //CmiAssert (npar <= nlocal);
00513     if (nlocal < npar)
00514       npar = nlocal;
00515     if (npar == 0)
00516       npar = 1;
00517     int n_per_iter = nlocal / npar;
00518     //We don't handle the case where there are very few atoms
00519     subcompute_count = npar;
00520 
00521     for (int i = 0; i < npar; ++i) {      
00522       smsg = subcompute_msgs[i];
00523       smsg->start   = start;
00524       smsg->end     = start + n_per_iter;
00525       start += n_per_iter;
00526       if (i == npar - 1)
00527         smsg->end = nlocal;
00528       pmeProxy[CmiNodeFirst(CmiMyNode())+smsg->dest].ungridCalc_subcompute(smsg);
00529     }    
00530   }
00531   else {
00532     pmeCompute->ungridForces_compute(0, 0);
00533     pmeCompute->ungridForces_finalize();
00534     ungrid_count = numPencilsActive; 
00535   }
00536 }
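
The parallel branch clamps npar into the range [1, nlocal] and hands each subcompute a contiguous slice of the local atoms, with the last slice absorbing the division remainder. The per-subcompute range, restated as a sketch:

    // Sketch: atom range assigned to subcompute i, for 0 <= i < npar.
    int n_per_iter = nlocal / npar;
    int start_i = i * n_per_iter;
    int end_i   = (i == npar - 1) ? nlocal : start_i + n_per_iter;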

void OptPmeMgr::ungridCalc_subcompute ( OptPmeSubComputeMsg *  msg ) 

Definition at line 538 of file OptPme.C.

References OptPmeSubComputeMsg::compute, OptPmeSubComputeMsg::end, OptPmeSubComputeMsg::src_pe, OptPmeSubComputeMsg::start, and OptPmeCompute::ungridForces_compute().

00538                                                              { 
00539   OptPmeCompute *compute = (OptPmeCompute *) msg->compute;
00540   compute->ungridForces_compute(msg->start, msg->end);
00541   pmeProxy[msg->src_pe].ungridCalc_subcompute_done(msg);
00542 }

void OptPmeMgr::ungridCalc_subcompute_done ( OptPmeSubComputeMsg *  msg ) 

Definition at line 544 of file OptPme.C.

References OptPmeCompute::ungridForces_finalize().

00544                                                                   {  
00545   subcompute_count --;
00546   //delete msg; //message pointers saved
00547   if (subcompute_count == 0) {
00548     pmeCompute->ungridForces_finalize();
00549     ungrid_count = numPencilsActive; 
00550   }
00551 }


Friends And Related Function Documentation

friend class OptPmeCompute [friend]

Definition at line 58 of file OptPme.C.


The documentation for this class was generated from the following file:

OptPme.C

Generated on Thu Nov 23 01:17:19 2017 for NAMD by doxygen 1.4.7