ComputeMgr Class Reference

#include <ComputeMgr.h>


Public Member Functions

 ComputeMgr ()
 ~ComputeMgr ()
void createComputes (ComputeMap *map)
void updateComputes (int, CkGroupID)
void updateComputes2 (CkQdMsg *)
void updateComputes3 ()
void splitComputes ()
void splitComputes2 (CkQdMsg *)
void updateLocalComputes ()
void updateLocalComputes2 (CkQdMsg *)
void updateLocalComputes3 ()
void updateLocalComputes4 (CkQdMsg *)
void updateLocalComputes5 ()
void doneUpdateLocalComputes ()
void sendComputeGlobalConfig (ComputeGlobalConfigMsg *)
void recvComputeGlobalConfig (ComputeGlobalConfigMsg *)
void sendComputeGlobalData (ComputeGlobalDataMsg *)
void recvComputeGlobalData (ComputeGlobalDataMsg *)
void sendComputeGlobalResults (ComputeGlobalResultsMsg *)
void recvComputeGlobalResults (ComputeGlobalResultsMsg *)
void enableComputeGlobalResults ()
void sendComputeDPMEData (ComputeDPMEDataMsg *)
void recvComputeDPMEData (ComputeDPMEDataMsg *)
void sendComputeDPMEResults (ComputeDPMEResultsMsg *, int)
void recvComputeDPMEResults (ComputeDPMEResultsMsg *)
void sendComputeEwaldData (ComputeEwaldMsg *)
void recvComputeEwaldData (ComputeEwaldMsg *)
void sendComputeEwaldResults (ComputeEwaldMsg *)
void recvComputeEwaldResults (ComputeEwaldMsg *)
void recvComputeConsForceMsg (ComputeConsForceMsg *)
void sendYieldDevice (int pe)
void recvYieldDevice (int pe)
void sendBuildCudaExclusions ()
void recvBuildCudaExclusions ()
void sendBuildCudaForceTable ()
void recvBuildCudaForceTable ()
void sendBuildMICForceTable ()
void recvBuildMICForceTable ()
void sendCreateNonbondedCUDASlave (int, int)
void recvCreateNonbondedCUDASlave (NonbondedCUDASlaveMsg *)
void sendNonbondedCUDASlaveReady (int, int, int, int)
void recvNonbondedCUDASlaveReady (int, int, int)
void sendNonbondedCUDASlaveSkip (ComputeNonbondedCUDA *c, int)
void recvNonbondedCUDASlaveSkip (NonbondedCUDASkipMsg *)
void sendNonbondedCUDASlaveEnqueue (ComputeNonbondedCUDA *c, int, int, int, int)
void sendNonbondedCUDASlaveEnqueuePatch (ComputeNonbondedCUDA *c, int, int, int, int, FinishWorkMsg *)
void sendAssignPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvAssignPatchesOnPe (CudaComputeNonbondedMsg *msg)
void sendSkipPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvSkipPatchesOnPe (CudaComputeNonbondedMsg *msg)
void sendFinishPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvFinishPatchesOnPe (CudaComputeNonbondedMsg *msg)
void sendFinishPatchOnPe (int pe, CudaComputeNonbonded *c, int i)
void recvFinishPatchOnPe (CudaComputeNonbondedMsg *msg)
void sendOpenBoxesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvOpenBoxesOnPe (CudaComputeNonbondedMsg *msg)
void sendFinishReductions (int pe, CudaComputeNonbonded *c)
void recvFinishReductions (CudaComputeNonbondedMsg *msg)
void sendMessageEnqueueWork (int pe, CudaComputeNonbonded *c)
void recvMessageEnqueueWork (CudaComputeNonbondedMsg *msg)
void sendLaunchWork (int pe, CudaComputeNonbonded *c)
void recvLaunchWork (CudaComputeNonbondedMsg *msg)
void sendUnregisterBoxesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvUnregisterBoxesOnPe (CudaComputeNonbondedMsg *msg)
void sendCreateNonbondedMICSlave (int, int)
void recvCreateNonbondedMICSlave (NonbondedMICSlaveMsg *)
void sendNonbondedMICSlaveReady (int, int, int, int)
void recvNonbondedMICSlaveReady (int, int, int)
void sendNonbondedMICSlaveSkip (ComputeNonbondedMIC *c, int)
void recvNonbondedMICSlaveSkip (NonbondedMICSkipMsg *)
void sendNonbondedMICSlaveEnqueue (ComputeNonbondedMIC *c, int, int, int, int)
void sendMICPEData (int, int)
void recvMICPEData (int, int)
int isMICProcessor (int)

Public Attributes

ComputeGlobal * computeGlobalObject
ResizeArray< ComputeGlobalResultsMsg * > computeGlobalResultsMsgs
int computeGlobalResultsMsgSeq
int computeGlobalResultsMsgMasterSeq


Detailed Description

Definition at line 57 of file ComputeMgr.h.


Constructor & Destructor Documentation

ComputeMgr::ComputeMgr (  ) 

Definition at line 110 of file ComputeMgr.C.

References computeGlobalObject, computeGlobalResultsMsgMasterSeq, computeGlobalResultsMsgSeq, and NAMD_die().

00111 {
00112     CkpvAccess(BOCclass_group).computeMgr = thisgroup;
00113     computeGlobalObject = 0;
00114     computeGlobalResultsMsgSeq = -1;
00115     computeGlobalResultsMsgMasterSeq = -1;
00116     computeDPMEObject = 0;
00117     computeEwaldObject = 0;
00118     computeNonbondedCUDAObject = 0;
00119     computeNonbondedMICObject = 0;
00120     computeNonbondedWorkArrays = new ComputeNonbondedWorkArrays;
00121     skipSplitting = 0;
00122 
00123     #if defined(NAMD_MIC)
00124       // Create the micPEData flag array (1 bit per PE) and initially set each PE as "not driving
00125       //   a MIC card" (unset).  PEs that are driving MIC card will identify themselves during startup.
00126       int numPEs = CkNumPes();
00127       int numInts = ((numPEs + (sizeof(int)*8-1)) & (~(sizeof(int)*8-1))) / (sizeof(int)*8);  // Round up to sizeof(int) then divide by the size of an int
00128       micPEData = new int[numInts];
00129       if (micPEData == NULL) { NAMD_die("Unable to allocate memory for micPEData"); }
00130       memset(micPEData, 0, sizeof(int) * numInts);
00131     #else
00132       micPEData = NULL;
00133     #endif
00134 }
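
The constructor's NAMD_MIC branch sizes micPEData as a packed bit array with one flag bit per PE; the masking expression rounds the PE count up to a whole number of ints before dividing. A minimal standalone sketch of the same sizing and bit indexing (plain C++; numPEs is just a stand-in for CkNumPes(), and the names are illustrative):

    #include <cstdio>
    #include <cstring>

    int main() {
        const int numPEs = 70;                 // stand-in for CkNumPes()
        const int bitsPerInt = sizeof(int) * 8;
        // Round numPEs up to a multiple of bitsPerInt, then convert bits to ints.
        const int numInts = ((numPEs + (bitsPerInt - 1)) & ~(bitsPerInt - 1)) / bitsPerInt;
        int *peData = new int[numInts];        // plays the role of micPEData
        std::memset(peData, 0, sizeof(int) * numInts);

        // Mark PE 37 as driving a device (the set path of recvMICPEData).
        int pe = 37;
        peData[pe / bitsPerInt] |= (0x01 << (pe % bitsPerInt));

        // Test the flag (the read path of isMICProcessor).
        std::printf("PE %d flag = %d\n", pe,
                    (peData[pe / bitsPerInt] >> (pe % bitsPerInt)) & 0x01);

        delete [] peData;
        return 0;
    }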

ComputeMgr::~ComputeMgr (  ) 

Definition at line 136 of file ComputeMgr.C.

00137 {
00138     delete computeNonbondedWorkArrays;
00139 }


Member Function Documentation

void ComputeMgr::createComputes ( ComputeMap *  map  ) 

Definition at line 1013 of file ComputeMgr.C.

References GlobalMasterServer::addClient(), ComputeNonbondedMIC::assignPatches(), ComputeNonbondedCUDA::assignPatches(), CudaComputeNonbonded::assignPatches(), computeAnglesType, computeBondsType, computeCrosstermsType, ComputeMap::computeData, computeDihedralsType, computeExclsType, computeGlobalObject, computeImpropersType, computeMgr, computeNonbondedCUDA2Type, computeNonbondedCUDAType, computeNonbondedMICType, computeNonbondedPairType, computeNonbondedSelfType, computeSelfAnglesType, computeSelfBondsType, computeSelfCrosstermsType, computeSelfDihedralsType, computeSelfExclsType, computeSelfImpropersType, DebugM, DeviceCUDA::device_shared_with_pe(), deviceCUDA, getCudaComputeNonbonded(), DeviceCUDA::getMasterPe(), CudaComputeNonbonded::initialize(), j, mic_device_pe(), mic_device_shared_with_pe(), Node::molecule, Node::myid(), NAMD_die(), ComputeMap::nComputes, Molecule::numAtoms, PatchMap::Object(), Node::Object(), recvComputeGlobalConfig(), sendComputeGlobalConfig(), Node::simParameters, simParams, and ComputeMap::type().

Referenced by Node::startup().

01014 {
01015 // #ifdef NAMD_CUDA
01016 //     int ComputePmeCUDACounter = 0;
01017 // #endif
01018     Node *node = Node::Object();
01019     SimParameters *simParams = node->simParameters;
01020     int myNode = node->myid();
01021 
01022     if ( simParams->globalForcesOn && !myNode )
01023     {
01024         DebugM(4,"Mgr running on Node "<<CkMyPe()<<"\n");
01025         /* create a master server to allow multiple masters */
01026         masterServerObject = new GlobalMasterServer(this,
01027                 PatchMap::Object()->numNodesWithPatches());
01028 
01029         /* create the individual global masters */
01030         // masterServerObject->addClient(new GlobalMasterTest());
01031         if (simParams->tclForcesOn)
01032             masterServerObject->addClient(new GlobalMasterTcl());
01033         if (simParams->IMDon && ! (simParams->IMDignore || simParams->IMDignoreForces) )
01034             masterServerObject->addClient(new GlobalMasterIMD());
01035 
01036         if (simParams->SMDOn)
01037             masterServerObject->addClient(
01038                 new GlobalMasterSMD(simParams->SMDk, simParams->SMDk2,
01039                                     simParams->SMDVel,
01040                                     simParams->SMDDir, simParams->SMDOutputFreq,
01041                                     simParams->firstTimestep, simParams->SMDFile,
01042                                     node->molecule->numAtoms)
01043             );
01044             
01045         if (simParams->symmetryOn && 
01046           (simParams->firstTimestep < simParams->symmetryLastStep || 
01047           simParams->symmetryLastStep == -1))
01048             masterServerObject->addClient(new GlobalMasterSymmetry());    
01049         if (simParams->TMDOn)
01050             masterServerObject->addClient(new GlobalMasterTMD());
01051         if (simParams->miscForcesOn)
01052             masterServerObject->addClient(new GlobalMasterMisc());
01053         if ( simParams->freeEnergyOn )
01054             masterServerObject->addClient(new GlobalMasterFreeEnergy());
01055                 if ( simParams->colvarsOn )
01056                         masterServerObject->addClient(new GlobalMasterColvars());
01057 
01058     }
01059 
01060     if ( !myNode && simParams->IMDon && (simParams->IMDignore || simParams->IMDignoreForces) ) {
01061       // GlobalMasterIMD constructor saves pointer to node->IMDOutput object
01062       new GlobalMasterIMD();
01063     }
01064 
01065 #ifdef NAMD_CUDA
01066     bool deviceIsMine = ( deviceCUDA->getMasterPe() == CkMyPe() );
01067 #ifdef BONDED_CUDA
01068     // Place bonded forces on Pe different from non-bonded forces
01069     int bondedMasterPe = deviceCUDA->getMasterPe();
01070     // for (int i=0;i < deviceCUDA->getNumPesSharingDevice();i++) {
01071     //   int pe = deviceCUDA->getPesSharingDevice(i);
01072     //   if (pe != deviceCUDA->getMasterPe()) {
01073     //     bondedMasterPe = pe;
01074     //   }
01075     // }
01076     bool deviceIsMineBonded = (CkMyPe() == bondedMasterPe);
01077 #endif
01078 #endif
01079 
01080     #ifdef NAMD_MIC
01081       bool deviceIsMine = ( mic_device_pe() == CkMyPe() );
01082     #endif
01083 
01084     for (int i=0; i < map->nComputes; i++)
01085     {
01086         if ( ! ( i % 100 ) )
01087         {
01088         }
01089 
01090 #if defined(NAMD_CUDA) || defined(NAMD_MIC)
01091         switch ( map->type(i) )
01092         {
01093 #ifdef NAMD_CUDA
01094           // case computePmeCUDAType:
01095           //   // Only create single ComputePmeCUDA object per Pe
01096           //  if ( map->computeData[i].node != myNode ) continue;
01097           //  if (ComputePmeCUDACounter > 0) continue;
01098           //  ComputePmeCUDACounter++;
01099           //  break;
01100           case computeNonbondedSelfType:
01101             if ( ! deviceIsMine ) continue;
01102             if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01103           break;
01104 
01105           case computeNonbondedPairType:
01106             if ( ! deviceIsMine ) continue;
01107             if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01108           break;
01109 
01110 #ifdef BONDED_CUDA
01111           case computeSelfBondsType:
01112           case computeBondsType:
01113             if (simParams->bondedCUDA & 1) {
01114               if ( ! deviceIsMineBonded ) continue;
01115               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01116             } else {
01117               if ( map->computeData[i].node != myNode ) continue;
01118             }
01119           break;
01120 
01121           case computeSelfAnglesType:
01122           case computeAnglesType:
01123             if (simParams->bondedCUDA & 2) {
01124               if ( ! deviceIsMineBonded ) continue;
01125               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01126             } else {
01127               if ( map->computeData[i].node != myNode ) continue;
01128             }
01129           break;
01130 
01131           case computeSelfDihedralsType:
01132           case computeDihedralsType:
01133             if (simParams->bondedCUDA & 4) {
01134               if ( ! deviceIsMineBonded ) continue;
01135               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01136             } else {
01137               if ( map->computeData[i].node != myNode ) continue;
01138             }
01139           break;
01140 
01141           case computeSelfImpropersType:
01142           case computeImpropersType:
01143             if (simParams->bondedCUDA & 8) {
01144               if ( ! deviceIsMineBonded ) continue;
01145               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01146             } else {
01147               if ( map->computeData[i].node != myNode ) continue;
01148             }
01149           break;
01150 
01151           case computeSelfExclsType:
01152           case computeExclsType:
01153             if (simParams->bondedCUDA & 16) {
01154               if ( ! deviceIsMineBonded ) continue;
01155               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01156             } else {
01157               if ( map->computeData[i].node != myNode ) continue;
01158             }
01159           break;
01160 
01161           case computeSelfCrosstermsType:
01162           case computeCrosstermsType:
01163             if (simParams->bondedCUDA & 32) {
01164               if ( ! deviceIsMineBonded ) continue;
01165               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01166             } else {
01167               if ( map->computeData[i].node != myNode ) continue;
01168             }
01169           break;
01170 
01171           case computeBondedCUDAType:
01172             if ( ! deviceIsMineBonded ) continue;
01173             if ( map->computeData[i].node != myNode ) continue;
01174           break;
01175 #endif
01176 
01177 #endif
01178 #ifdef NAMD_MIC
01179 
01180           case computeNonbondedSelfType:
01181             if (map->directToDevice(i) != 0) { // If should be directed to the device...
01182               if ( ! deviceIsMine ) continue;
01183               if ( ! mic_device_shared_with_pe(map->computeData[i].node) ) continue;
01184             } else { // ... otherwise, direct to host...
01185               if (map->computeData[i].node != myNode) { continue; }
01186             }
01187             break;
01188 
01189           case computeNonbondedPairType:
01190             if (map->directToDevice(i)) { // If should be directed to the device...
01191               if ( ! deviceIsMine ) continue;
01192               if ( ! mic_device_shared_with_pe(map->computeData[i].node) ) continue;
01193             } else { // ... otherwise, direct to host...
01194               if (map->computeData[i].node != myNode) { continue; }
01195             }
01196             break;
01197 
01198 #endif
01199           case computeNonbondedCUDAType:
01200 #ifdef NAMD_CUDA
01201           case computeNonbondedCUDA2Type:
01202 // #ifdef BONDED_CUDA
01203 //           case computeBondedCUDAType:
01204 // #endif
01205 #endif
01206           case computeNonbondedMICType:
01207             if ( ! deviceIsMine ) continue;
01208           default:
01209             if ( map->computeData[i].node != myNode ) continue;
01210         }
01211 #else // defined(NAMD_CUDA) || defined(NAMD_MIC)
01212         if ( map->computeData[i].node != myNode ) continue;
01213 #endif
01214         DebugM(1,"Compute " << i << '\n');
01215         DebugM(1,"  node = " << map->computeData[i].node << '\n');
01216         DebugM(1,"  type = " << map->computeData[i].type << '\n');
01217         DebugM(1,"  numPids = " << map->computeData[i].numPids << '\n');
01218 //         DebugM(1,"  numPidsAllocated = " << map->computeData[i].numPidsAllocated << '\n');
01219         for (int j=0; j < map->computeData[i].numPids; j++)
01220         {
01221             DebugM(1,"  pid " << map->computeData[i].pids[j].pid << '\n');
01222             if (!((j+1) % 6))
01223                 DebugM(1,'\n');
01224         }
01225         DebugM(1,"\n---------------------------------------");
01226         DebugM(1,"---------------------------------------\n");
01227 
01228         createCompute(i, map);
01229 
01230     }
01231 
01232 #ifdef NAMD_CUDA
01233     if (simParams->useCUDA2) {
01234       if (deviceIsMine) {
01235         getCudaComputeNonbonded()->assignPatches(this);
01236         getCudaComputeNonbonded()->initialize();
01237       }
01238     } else {
01239       if ( computeNonbondedCUDAObject ) {
01240         computeNonbondedCUDAObject->assignPatches();
01241       }      
01242     }
01243 #ifdef BONDED_CUDA
01244     if (simParams->bondedCUDA) {
01245       if (deviceIsMineBonded) {
01246         getComputeBondedCUDA()->initialize();
01247       }
01248     }
01249 #endif
01250 #endif
01251 #ifdef NAMD_MIC
01252     if ( computeNonbondedMICObject ) {
01253       computeNonbondedMICObject->assignPatches();
01254     }
01255 #endif
01256 
01257 }
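
In the listing above, simParams->bondedCUDA acts as a bit mask selecting which bonded term types are routed to the bonded device master instead of being created on their mapped host PE: bit 1 for bonds, 2 for angles, 4 for dihedrals, 8 for impropers, 16 for exclusions, 32 for crossterms. A small illustrative sketch of that routing test, with a hypothetical offloadMask standing in for simParams->bondedCUDA:

    #include <cstdio>

    enum BondedKind { Bonds = 1, Angles = 2, Dihedrals = 4,
                      Impropers = 8, Exclusions = 16, Crossterms = 32 };

    // True if computes of this kind go to the bonded device master.
    bool offloadToDevice(int offloadMask, BondedKind kind) {
        return (offloadMask & kind) != 0;
    }

    int main() {
        int offloadMask = Bonds | Angles | Dihedrals;   // hypothetical setting
        std::printf("angles offloaded:     %d\n", offloadToDevice(offloadMask, Angles));
        std::printf("crossterms offloaded: %d\n", offloadToDevice(offloadMask, Crossterms));
        return 0;
    }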

void ComputeMgr::doneUpdateLocalComputes (  ) 

Definition at line 347 of file ComputeMgr.C.

References DebugM.

00348 {
00349 
00350 //  if (!--updateComputesCount) {
00351     DebugM(4, "doneUpdateLocalComputes on Pe("<<CkMyPe()<<")\n");
00352     void *msg = CkAllocMsg(0,0,0);
00353     CkSendMsgBranch(updateComputesReturnEP,msg,0,updateComputesReturnChareID);
00354 //  }
00355 }

void ComputeMgr::enableComputeGlobalResults (  ) 

Definition at line 1297 of file ComputeMgr.C.

References computeGlobalResultsMsgs, computeGlobalResultsMsgSeq, ResizeArray< Elem >::del(), recvComputeGlobalResults(), and ResizeArray< Elem >::size().

Referenced by ComputeGlobal::doWork().

01298 {
01299     ++computeGlobalResultsMsgSeq;
01300     for ( int i=0; i<computeGlobalResultsMsgs.size(); ++i ) {
01301       if ( computeGlobalResultsMsgs[i]->seq == computeGlobalResultsMsgSeq ) {
01302         ComputeGlobalResultsMsg *msg = computeGlobalResultsMsgs[i];
01303         computeGlobalResultsMsgs.del(i);
01304         recvComputeGlobalResults(msg);
01305         break;
01306       }
01307     }
01308 }
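
enableComputeGlobalResults advances the expected sequence number and replays at most one buffered results message whose seq now matches; recvComputeGlobalResults (below) buffers messages that arrive ahead of the current sequence. A minimal sketch of that buffering discipline, with a std::vector standing in for the ResizeArray and illustrative names throughout:

    #include <cstdio>
    #include <vector>

    struct ResultsMsg { int seq; };

    std::vector<ResultsMsg*> pending;   // plays the role of computeGlobalResultsMsgs
    int expectedSeq = -1;               // plays the role of computeGlobalResultsMsgSeq

    void deliver(ResultsMsg *m) { std::printf("delivering seq %d\n", m->seq); delete m; }

    // Mirrors recvComputeGlobalResults: consume if current, otherwise buffer.
    void receive(ResultsMsg *m) {
        if (m->seq == expectedSeq) deliver(m);
        else pending.push_back(m);
    }

    // Mirrors enableComputeGlobalResults: bump the sequence and flush a match.
    void enableNext() {
        ++expectedSeq;
        for (size_t i = 0; i < pending.size(); ++i) {
            if (pending[i]->seq == expectedSeq) {
                ResultsMsg *m = pending[i];
                pending.erase(pending.begin() + i);
                deliver(m);
                break;
            }
        }
    }

    int main() {
        receive(new ResultsMsg{1});   // arrives early: buffered
        enableNext();                 // seq 0: nothing pending matches
        receive(new ResultsMsg{0});   // matches the current seq: delivered now
        enableNext();                 // seq 1: flushes the buffered message
        return 0;
    }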

int ComputeMgr::isMICProcessor ( int   ) 

Definition at line 1884 of file ComputeMgr.C.

01884                                      {
01885   if (pe < 0 || pe >= CkNumPes() || micPEData == NULL) { return 0; }
01886   int majorIndex = pe / (sizeof(int)*8);
01887   int minorIndex = pe % (sizeof(int)*8);
01888   return ((micPEData[majorIndex] >> minorIndex) & 0x01);
01889 }

void ComputeMgr::recvAssignPatchesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1598 of file ComputeMgr.C.

References CudaComputeNonbonded::assignPatchesOnPe(), and CudaComputeNonbondedMsg::c.

01598                                                                    {
01599   msg->c->assignPatchesOnPe();
01600   delete msg;
01601 }

void ComputeMgr::recvBuildCudaExclusions (  ) 

Definition at line 1465 of file ComputeMgr.C.

References build_cuda_exclusions().

Referenced by sendBuildCudaExclusions().

01465                                          {
01466 #ifdef NAMD_CUDA
01467     build_cuda_exclusions();
01468 #endif
01469 }

void ComputeMgr::recvBuildCudaForceTable (  ) 

Definition at line 1484 of file ComputeMgr.C.

References build_cuda_force_table().

Referenced by sendBuildCudaForceTable().

01484                                          {
01485 #ifdef NAMD_CUDA
01486     build_cuda_force_table();
01487 #endif
01488 }

void ComputeMgr::recvBuildMICForceTable (  ) 

Definition at line 1503 of file ComputeMgr.C.

Referenced by sendBuildMICForceTable().

01503                                         {
01504   #ifdef NAMD_MIC
01505     build_mic_force_table();
01506   #endif
01507 }

void ComputeMgr::recvComputeConsForceMsg ( ComputeConsForceMsg *   ) 

Definition at line 1412 of file ComputeMgr.C.

References ComputeConsForceMsg::aid, Molecule::consForce, Molecule::consForceIndexes, ComputeConsForceMsg::f, Node::molecule, Molecule::numAtoms, Node::Object(), and ResizeArray< Elem >::size().

01413 {
01414     Molecule *m = Node::Object()->molecule;
01415     delete [] m->consForceIndexes;
01416     delete [] m->consForce;
01417     int n = msg->aid.size();
01418     if (n > 0)
01419     {
01420         m->consForceIndexes = new int32[m->numAtoms];
01421         m->consForce = new Vector[n];
01422         int i;
01423         for (i=0; i<m->numAtoms; i++) m->consForceIndexes[i] = -1;
01424         for (i=0; i<msg->aid.size(); i++)
01425         {
01426             m->consForceIndexes[msg->aid[i]] = i;
01427             m->consForce[i] = msg->f[i];
01428         }
01429     }
01430     else
01431     {
01432         m->consForceIndexes = NULL;
01433         m->consForce = NULL;
01434     }
01435     delete msg;
01436 }

void ComputeMgr::recvComputeDPMEData ( ComputeDPMEDataMsg *   ) 

Definition at line 1382 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

01383 {
01384     if ( computeDPMEObject )
01385     {
01386 #ifdef DPME
01387         computeDPMEObject->recvData(msg);
01388 #endif
01389     }
01390     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01391     else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
01392 }
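
This handler, like the Ewald and ComputeGlobal handlers documented below, follows one guard pattern: deliver the message to the responsible object if it lives on this PE, silently drop it on PEs that own no home patches, and abort otherwise. A compact sketch of that guard with purely illustrative types (not the NAMD API):

    #include <cstdio>
    #include <cstdlib>

    struct Msg { int payload; };
    struct DPMELikeObject {
        void recvData(Msg *m) { std::printf("consumed %d\n", m->payload); delete m; }
    };

    // Deliver, drop, or die, in the same order as the listing above.
    void routeOrDrop(DPMELikeObject *obj, Msg *msg, int numHomePatches) {
        if (obj) {
            obj->recvData(msg);
        } else if (numHomePatches == 0) {
            delete msg;                                  // nothing to do on this PE
        } else {
            std::fprintf(stderr, "object is NULL!\n");   // stand-in for NAMD_die()
            std::exit(1);
        }
    }

    int main() {
        DPMELikeObject obj;
        routeOrDrop(&obj, new Msg{7}, 3);       // delivered
        routeOrDrop(nullptr, new Msg{8}, 0);    // silently dropped
        return 0;
    }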

void ComputeMgr::recvComputeDPMEResults ( ComputeDPMEResultsMsg *   ) 

Definition at line 1400 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

01401 {
01402     if ( computeDPMEObject )
01403     {
01404 #ifdef DPME
01405         computeDPMEObject->recvResults(msg);
01406 #endif
01407     }
01408     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01409     else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
01410 }

void ComputeMgr::recvComputeEwaldData ( ComputeEwaldMsg *   ) 

Definition at line 1345 of file ComputeMgr.C.

References NAMD_die(), and ComputeEwald::recvData().

01346 {
01347     if (computeEwaldObject)
01348         computeEwaldObject->recvData(msg);
01349     else NAMD_die("ComputeMgr::computeEwaldObject in recvData is NULL!");
01350 }

void ComputeMgr::recvComputeEwaldResults ( ComputeEwaldMsg *   ) 

Definition at line 1357 of file ComputeMgr.C.

References NAMD_die(), PatchMap::Object(), and ComputeEwald::recvResults().

Referenced by sendComputeEwaldResults().

01358 {
01359     if (computeEwaldObject) {
01360         CmiEnableUrgentSend(1);
01361         computeEwaldObject->recvResults(msg);
01362         CmiEnableUrgentSend(0);
01363     }
01364     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01365     else NAMD_die("ComputeMgr::computeEwaldObject in recvResults is NULL!");
01366 }

void ComputeMgr::recvComputeGlobalConfig ( ComputeGlobalConfigMsg *   ) 

Referenced by createComputes().

void ComputeMgr::recvComputeGlobalData ( ComputeGlobalDataMsg *   ) 

Definition at line 1282 of file ComputeMgr.C.

References NAMD_die(), and GlobalMasterServer::recvData().

01283 {
01284     if (masterServerObject)  // make sure it has been initialized
01285     {
01286         masterServerObject->recvData(msg);
01287     }
01288     else NAMD_die("ComputeMgr::masterServerObject is NULL!");
01289 }

void ComputeMgr::recvComputeGlobalResults ( ComputeGlobalResultsMsg *   ) 

Definition at line 1310 of file ComputeMgr.C.

References ResizeArray< Elem >::add(), computeGlobalObject, computeGlobalResultsMsgs, computeGlobalResultsMsgSeq, NAMD_die(), PatchMap::Object(), ComputeGlobal::recvResults(), and ComputeGlobalResultsMsg::seq.

Referenced by enableComputeGlobalResults().

01311 {
01312     if ( computeGlobalObject )
01313     {
01314       if ( msg->seq == computeGlobalResultsMsgSeq ) {
01315         CmiEnableUrgentSend(1);
01316         computeGlobalObject->recvResults(msg);
01317         CmiEnableUrgentSend(0);
01318       } else {
01319         computeGlobalResultsMsgs.add(msg);
01320       }
01321     }
01322     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01323     else NAMD_die("ComputeMgr::computeGlobalObject is NULL!");
01324 }

void ComputeMgr::recvCreateNonbondedCUDASlave ( NonbondedCUDASlaveMsg *   ) 

Definition at line 1523 of file ComputeMgr.C.

References Compute::cid, NonbondedCUDASlaveMsg::index, and NonbondedCUDASlaveMsg::master.

01523                                                                         {
01524 #ifdef NAMD_CUDA
01525   new ComputeNonbondedCUDA(msg->master->cid,this,msg->master,msg->index);
01526 #endif
01527 }

void ComputeMgr::recvCreateNonbondedMICSlave ( NonbondedMICSlaveMsg *   ) 

Definition at line 1818 of file ComputeMgr.C.

References Compute::cid, NonbondedMICSlaveMsg::index, and NonbondedMICSlaveMsg::master.

01818                                                                       {
01819 #ifdef NAMD_MIC
01820   ComputeNonbondedMIC *c = new ComputeNonbondedMIC(msg->master->cid,this,msg->master,msg->index);
01821 #endif
01822 }

void ComputeMgr::recvFinishPatchesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1624 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::finishPatchesOnPe().

01624                                                                    {
01625   msg->c->finishPatchesOnPe();
01626   delete msg;
01627 }

void ComputeMgr::recvFinishPatchOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1636 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, CudaComputeNonbonded::finishPatchOnPe(), and CudaComputeNonbondedMsg::i.

01636                                                                  {
01637   msg->c->finishPatchOnPe(msg->i);
01638   delete msg;
01639 }

void ComputeMgr::recvFinishReductions ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1660 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::finishReductions().

01660                                                                   {
01661   msg->c->finishReductions();
01662   delete msg;
01663 }

void ComputeMgr::recvLaunchWork ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1682 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::launchWork().

01682                                                             {
01683   msg->c->launchWork();
01684   delete msg;
01685 }

void ComputeMgr::recvMessageEnqueueWork ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1671 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::messageEnqueueWork().

01671                                                                     {
01672   msg->c->messageEnqueueWork();
01673   delete msg;
01674 }

void ComputeMgr::recvMICPEData ( int  ,
int   
)

Definition at line 1869 of file ComputeMgr.C.

01869                                                {
01870   if (pe < 0 || pe >= CkNumPes() || micPEData == NULL) { return; }
01871   int majorIndex = pe / (sizeof(int)*8);
01872   int minorIndex = pe % (sizeof(int)*8);
01873   if (data != 0) {
01874     micPEData[majorIndex] |= (0x01 << minorIndex);
01875   } else {
01876     micPEData[majorIndex] &= ((~0x01) << minorIndex);
01877   }
01878 }

void ComputeMgr::recvNonbondedCUDASlaveReady ( int  ,
int  ,
int   
)

Definition at line 1534 of file ComputeMgr.C.

References Compute::patchReady().

01534                                                                     {
01535   for ( int i=0; i<np; ++i ) {
01536     computeNonbondedCUDAObject->patchReady(-1,ac,seq);
01537   }
01538 }

void ComputeMgr::recvNonbondedCUDASlaveSkip ( NonbondedCUDASkipMsg *   ) 

Definition at line 1551 of file ComputeMgr.C.

References NonbondedCUDASkipMsg::compute, and ComputeNonbondedCUDA::skip().

01551                                                                      {
01552 #ifdef NAMD_CUDA
01553   msg->compute->skip();
01554 #endif
01555   delete msg;
01556 }

void ComputeMgr::recvNonbondedMICSlaveReady ( int  ,
int  ,
int   
)

Definition at line 1829 of file ComputeMgr.C.

References Compute::patchReady().

01829                                                                    {
01830   for ( int i=0; i<np; ++i ) {
01831     computeNonbondedMICObject->patchReady(-1,ac,seq);
01832   }
01833 }

void ComputeMgr::recvNonbondedMICSlaveSkip ( NonbondedMICSkipMsg *   ) 

Definition at line 1846 of file ComputeMgr.C.

References NonbondedMICSkipMsg::compute, and ComputeNonbondedMIC::skip().

01846                                                                    {
01847 #ifdef NAMD_MIC
01848   msg->compute->skip();
01849 #endif
01850   delete msg;
01851 }

void ComputeMgr::recvOpenBoxesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1649 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::openBoxesOnPe().

01649                                                                {
01650   msg->c->openBoxesOnPe();
01651   delete msg;
01652 }

void ComputeMgr::recvSkipPatchesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1611 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::skipPatchesOnPe().

01611                                                                  {
01612   msg->c->skipPatchesOnPe();
01613   delete msg;
01614 }

void ComputeMgr::recvUnregisterBoxesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1695 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::unregisterBoxesOnPe().

01695                                                                      {
01696   msg->c->unregisterBoxesOnPe();
01697   delete msg;
01698 }

void ComputeMgr::recvYieldDevice ( int  pe  ) 

Definition at line 1443 of file ComputeMgr.C.

References ComputeNonbondedMIC::recvYieldDevice(), and ComputeNonbondedCUDA::recvYieldDevice().

01443                                        {
01444 #ifdef NAMD_CUDA
01445     computeNonbondedCUDAObject->recvYieldDevice(pe);
01446 #endif
01447 #ifdef NAMD_MIC
01448     computeNonbondedMICObject->recvYieldDevice(pe);
01449 #endif
01450 }

void ComputeMgr::sendAssignPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1590 of file ComputeMgr.C.

Referenced by CudaComputeNonbonded::assignPatches().

01590                                                                                    {
01591   for (int i=0;i < pes.size();i++) {
01592     CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01593     msg->c = c;
01594     thisProxy[pes[i]].recvAssignPatchesOnPe(msg);
01595   }
01596 }

void ComputeMgr::sendBuildCudaExclusions (  ) 

Definition at line 1452 of file ComputeMgr.C.

References computeMgr, and recvBuildCudaExclusions().

Referenced by Node::resendMolecule().

01452                                          {
01453     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01454     int pe = CkNodeFirst(CkMyNode());
01455     int end = pe + CkNodeSize(CkMyNode());
01456     for( ; pe != end; ++pe ) {
01457       cm[pe].recvBuildCudaExclusions();
01458     }
01459 }

void ComputeMgr::sendBuildCudaForceTable (  ) 

Definition at line 1471 of file ComputeMgr.C.

References computeMgr, and recvBuildCudaForceTable().

Referenced by send_build_cuda_force_table().

01471                                          {
01472     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01473     int pe = CkNodeFirst(CkMyNode());
01474     int end = pe + CkNodeSize(CkMyNode());
01475     for( ; pe != end; ++pe ) {
01476       cm[pe].recvBuildCudaForceTable();
01477     }
01478 }

void ComputeMgr::sendBuildMICForceTable (  ) 

Definition at line 1490 of file ComputeMgr.C.

References computeMgr, and recvBuildMICForceTable().

01490                                         {
01491   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01492   int pe = CkNodeFirst(CkMyNode());
01493   int end = pe + CkNodeSize(CkMyNode());
01494   for( ; pe != end; ++pe ) {
01495     cm[pe].recvBuildMICForceTable();
01496   }
01497 }

void ComputeMgr::sendComputeDPMEData ( ComputeDPMEDataMsg *   ) 

Definition at line 1368 of file ComputeMgr.C.

References computeMgr, NAMD_die(), and PatchMap::Object().

01369 {
01370     if ( computeDPMEObject )
01371     {
01372 #ifdef DPME
01373         int node = computeDPMEObject->getMasterNode();
01374         CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01375         cm.recvComputeDPMEData(msg,node);
01376 #endif
01377     }
01378     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01379     else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
01380 }

void ComputeMgr::sendComputeDPMEResults ( ComputeDPMEResultsMsg * ,
int   
)

Definition at line 1394 of file ComputeMgr.C.

References computeMgr.

01395 {
01396     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01397     cm[node].recvComputeDPMEResults(msg);
01398 }

void ComputeMgr::sendComputeEwaldData ( ComputeEwaldMsg *   ) 

Definition at line 1329 of file ComputeMgr.C.

References computeMgr, ComputeEwald::getMasterNode(), NAMD_die(), and PatchMap::Object().

Referenced by ComputeEwald::doWork().

01330 {
01331     if (computeEwaldObject)
01332     {
01333         int node = computeEwaldObject->getMasterNode();
01334         CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01335         cm[node].recvComputeEwaldData(msg);
01336     }
01337     else if (!PatchMap::Object()->numHomePatches())
01338     {
01339         CkPrintf("skipping message on Pe(%d)\n", CkMyPe());
01340         delete msg;
01341     }
01342     else NAMD_die("ComputeMgr::computeEwaldObject is NULL!");
01343 }

void ComputeMgr::sendComputeEwaldResults ( ComputeEwaldMsg *   ) 

Definition at line 1352 of file ComputeMgr.C.

References computeMgr, and recvComputeEwaldResults().

Referenced by ComputeEwald::recvData().

01353 {
01354     (CProxy_ComputeMgr(CkpvAccess(BOCclass_group).computeMgr)).recvComputeEwaldResults(msg);
01355 }

void ComputeMgr::sendComputeGlobalConfig ( ComputeGlobalConfigMsg *   ) 

Referenced by createComputes().

void ComputeMgr::sendComputeGlobalData ( ComputeGlobalDataMsg *   ) 

Definition at line 1276 of file ComputeMgr.C.

References computeMgr.

Referenced by ComputeGlobal::doWork().

01277 {
01278     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01279     cm[0].recvComputeGlobalData(msg);
01280 }

void ComputeMgr::sendComputeGlobalResults ( ComputeGlobalResultsMsg *   ) 

Definition at line 1291 of file ComputeMgr.C.

References computeGlobalResultsMsgMasterSeq, and ComputeGlobalResultsMsg::seq.

01292 {
01293     msg->seq = ++computeGlobalResultsMsgMasterSeq;
01294     thisProxy.recvComputeGlobalResults(msg);
01295 }

void ComputeMgr::sendCreateNonbondedCUDASlave ( int  ,
int   
)

Definition at line 1515 of file ComputeMgr.C.

References computeMgr, NonbondedCUDASlaveMsg::index, and NonbondedCUDASlaveMsg::master.

Referenced by ComputeNonbondedCUDA::assignPatches().

01515                                                                {
01516   NonbondedCUDASlaveMsg *msg = new NonbondedCUDASlaveMsg;
01517   msg->master = computeNonbondedCUDAObject;
01518   msg->index = index;
01519   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01520   cm[pe].recvCreateNonbondedCUDASlave(msg);
01521 }

void ComputeMgr::sendCreateNonbondedMICSlave ( int  ,
int   
)

Definition at line 1810 of file ComputeMgr.C.

References computeMgr, NonbondedMICSlaveMsg::index, and NonbondedMICSlaveMsg::master.

01810                                                               {
01811   NonbondedMICSlaveMsg *msg = new NonbondedMICSlaveMsg;
01812   msg->master = computeNonbondedMICObject;
01813   msg->index = index;
01814   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01815   cm[pe].recvCreateNonbondedMICSlave(msg);
01816 }

void ComputeMgr::sendFinishPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1616 of file ComputeMgr.C.

01616                                                                                    {
01617   for (int i=0;i < pes.size();i++) {
01618     CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01619     msg->c = c;
01620     thisProxy[pes[i]].recvFinishPatchesOnPe(msg);
01621   }
01622 }

void ComputeMgr::sendFinishPatchOnPe ( int  pe,
CudaComputeNonbonded *  c,
int  i 
)

Definition at line 1629 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbondedMsg::i.

01629                                                                            {
01630   CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01631   msg->c = c;
01632   msg->i = i;
01633   thisProxy[pe].recvFinishPatchOnPe(msg);
01634 }

void ComputeMgr::sendFinishReductions ( int  pe,
CudaComputeNonbonded *  c 
)

Definition at line 1654 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::skipPatchesOnPe().

01654                                                                      {
01655   CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01656   msg->c = c;
01657   thisProxy[pe].recvFinishReductions(msg);
01658 }

void ComputeMgr::sendLaunchWork ( int  pe,
CudaComputeNonbonded *  c 
)

Definition at line 1676 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::openBoxesOnPe().

01676                                                                {
01677   CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01678   msg->c = c;
01679   thisProxy[pe].recvLaunchWork(msg);
01680 }

void ComputeMgr::sendMessageEnqueueWork ( int  pe,
CudaComputeNonbonded *  c 
)

Definition at line 1665 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::noWork().

01665                                                                        {
01666   CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01667   msg->c = c;
01668   thisProxy[pe].recvMessageEnqueueWork(msg);
01669 }

void ComputeMgr::sendMICPEData ( int  ,
int   
)

Definition at line 1864 of file ComputeMgr.C.

References computeMgr.

01864                                                {
01865   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01866   cm.recvMICPEData(pe, data);
01867 }

void ComputeMgr::sendNonbondedCUDASlaveEnqueue ( ComputeNonbondedCUDA *  c,
int  ,
int  ,
int  ,
int   
)

Definition at line 1558 of file ComputeMgr.C.

References Compute::cid, LocalWorkMsg::compute, ComputeNonbondedCUDA::localHostedPatches, Compute::localWorkMsg, ComputeNonbondedCUDA::localWorkMsg2, SET_PRIORITY, ResizeArray< Elem >::size(), and Compute::type().

Referenced by ComputeNonbondedCUDA::finishWork().

01558                                                                                                          {
01559   if ( ws == 2 && c->localHostedPatches.size() == 0 ) return;
01560   LocalWorkMsg *msg = ( ws == 1 ? c->localWorkMsg : c->localWorkMsg2 );
01561   msg->compute = c;
01562   int type = c->type();
01563   int cid = c->cid;
01564   SET_PRIORITY(msg,seq,prio);
01565   CProxy_WorkDistrib wdProxy(CkpvAccess(BOCclass_group).workDistrib);
01566   wdProxy[pe].enqueueCUDA(msg);
01567 }

void ComputeMgr::sendNonbondedCUDASlaveEnqueuePatch ( ComputeNonbondedCUDA *  c,
int  ,
int  ,
int  ,
int  ,
FinishWorkMsg *  
)

Definition at line 1569 of file ComputeMgr.C.

References FinishWorkMsg::compute, FinishWorkMsg::data, and SET_PRIORITY.

Referenced by ComputeNonbondedCUDA::messageFinishPatch().

01569                                                                                                                                     {
01570   msg->compute = c;
01571   msg->data = data;
01572   SET_PRIORITY(msg,seq,prio);
01573   CProxy_WorkDistrib wdProxy(CkpvAccess(BOCclass_group).workDistrib);
01574   wdProxy[pe].finishCUDAPatch(msg);
01575 }

void ComputeMgr::sendNonbondedCUDASlaveReady ( int  ,
int  ,
int  ,
int   
)

Definition at line 1529 of file ComputeMgr.C.

References computeMgr.

Referenced by ComputeNonbondedCUDA::noWork().

01529                                                                             {
01530   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01531   cm[pe].recvNonbondedCUDASlaveReady(np,ac,seq);
01532 }

void ComputeMgr::sendNonbondedCUDASlaveSkip ( ComputeNonbondedCUDA *  c,
int   
)

Definition at line 1545 of file ComputeMgr.C.

References NonbondedCUDASkipMsg::compute.

Referenced by ComputeNonbondedCUDA::noWork().

01545                                                                            {
01546   NonbondedCUDASkipMsg *msg = new NonbondedCUDASkipMsg;
01547   msg->compute = c;
01548   thisProxy[pe].recvNonbondedCUDASlaveSkip(msg);
01549 }

void ComputeMgr::sendNonbondedMICSlaveEnqueue ( ComputeNonbondedMIC *  c,
int  ,
int  ,
int  ,
int   
)

Definition at line 1853 of file ComputeMgr.C.

References Compute::cid, LocalWorkMsg::compute, ComputeNonbondedMIC::localHostedPatches, Compute::localWorkMsg, ComputeNonbondedMIC::localWorkMsg2, SET_PRIORITY, ResizeArray< Elem >::size(), and Compute::type().

01853                                                                                                        {
01854   if ( ws == 2 && c->localHostedPatches.size() == 0 ) return;
01855   LocalWorkMsg *msg = ( ws == 1 ? c->localWorkMsg : c->localWorkMsg2 );
01856   msg->compute = c;
01857   int type = c->type();
01858   int cid = c->cid;
01859   SET_PRIORITY(msg,seq,prio);
01860   CProxy_WorkDistrib wdProxy(CkpvAccess(BOCclass_group).workDistrib);
01861   wdProxy[pe].enqueueMIC(msg);
01862 }

void ComputeMgr::sendNonbondedMICSlaveReady ( int  ,
int  ,
int  ,
int   
)

Definition at line 1824 of file ComputeMgr.C.

References computeMgr.

01824                                                                            {
01825   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01826   cm[pe].recvNonbondedMICSlaveReady(np,ac,seq);
01827 }

void ComputeMgr::sendNonbondedMICSlaveSkip ( ComputeNonbondedMIC *  c,
int   
)

Definition at line 1840 of file ComputeMgr.C.

References NonbondedMICSkipMsg::compute.

01840                                                                          {
01841   NonbondedMICSkipMsg *msg = new NonbondedMICSkipMsg;
01842   msg->compute = c;
01843   thisProxy[pe].recvNonbondedMICSlaveSkip(msg);
01844 }

void ComputeMgr::sendOpenBoxesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1641 of file ComputeMgr.C.

Referenced by CudaComputeNonbonded::doWork().

01641                                                                                {
01642   for (int i=0;i < pes.size();i++) {
01643     CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01644     msg->c = c;
01645     thisProxy[pes[i]].recvOpenBoxesOnPe(msg);
01646   }
01647 }

void ComputeMgr::sendSkipPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1603 of file ComputeMgr.C.

01603                                                                                  {
01604   for (int i=0;i < pes.size();i++) {
01605     CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01606     msg->c = c;
01607     thisProxy[pes[i]].recvSkipPatchesOnPe(msg);
01608   }
01609 }

void ComputeMgr::sendUnregisterBoxesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1687 of file ComputeMgr.C.

Referenced by CudaComputeNonbonded::~CudaComputeNonbonded().

01687                                                                                      {
01688   for (int i=0;i < pes.size();i++) {
01689     CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01690     msg->c = c;
01691     thisProxy[pes[i]].recvUnregisterBoxesOnPe(msg);
01692   }
01693 }

void ComputeMgr::sendYieldDevice ( int  pe  ) 

Definition at line 1438 of file ComputeMgr.C.

References computeMgr.

Referenced by cuda_check_local_calc(), and cuda_check_remote_calc().

01438                                        {
01439     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01440     cm[pe].recvYieldDevice(CkMyPe());
01441 }

void ComputeMgr::splitComputes (  ) 

Definition at line 174 of file ComputeMgr.C.

References ComputeMap::cloneCompute(), ComputeMap::extendPtrs(), j, ComputeMap::newNode(), ComputeMap::newNumPartitions(), ComputeMap::node(), ComputeMap::numComputes(), ComputeMap::numPartitions(), ComputeMap::Object(), ComputeMap::setNewNode(), ComputeMap::setNewNumPartitions(), and ComputeMap::setNumPartitions().

00175 {
00176   if ( ! CkMyRank() ) {
00177     ComputeMap *computeMap = ComputeMap::Object();
00178     const int nc = computeMap->numComputes();
00179 
00180     for (int i=0; i<nc; i++) {
00181       int nnp = computeMap->newNumPartitions(i);
00182       if ( nnp > 0 ) {
00183         if ( computeMap->numPartitions(i) != 1 ) {
00184           CkPrintf("Warning: unable to partition compute %d\n", i);
00185           computeMap->setNewNumPartitions(i,0);
00186           continue;
00187         }
00188         //CkPrintf("splitting compute %d by %d\n",i,nnp);
00189         computeMap->setNumPartitions(i,nnp);
00190         if (computeMap->newNode(i) == -1) {
00191           computeMap->setNewNode(i,computeMap->node(i));
00192         }
00193         for ( int j=1; j<nnp; ++j ) {
00194           int newcid = computeMap->cloneCompute(i,j);
00195           //CkPrintf("compute %d partition %d is %d\n",i,j,newcid);
00196         }
00197       }
00198     }
00199     computeMap->extendPtrs();
00200   }
00201 
00202   if (!CkMyPe())
00203   {
00204     CkStartQD(CkIndex_ComputeMgr::splitComputes2((CkQdMsg*)0), &thishandle);
00205   }
00206 }
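
splitComputes turns each requested repartition into explicit clone entries: a compute asked to split nnp ways keeps partition 0 and gains nnp-1 cloned compute IDs appended to the map, and the quiescence-detection callback then moves every PE on to updateLocalComputes. A simplified sketch of the cloning bookkeeping over a toy compute table (the struct and names are illustrative, not the ComputeMap API):

    #include <cstdio>
    #include <vector>

    struct ComputeEntry {
        int node = 0;
        int newNode = -1;
        int numPartitions = 1;
        int newNumPartitions = 0;   // >0 requests a split into that many pieces
    };

    int main() {
        std::vector<ComputeEntry> map(3);
        map[1].newNumPartitions = 3;             // ask for compute 1 to be split three ways

        const int nc = (int)map.size();
        for (int i = 0; i < nc; i++) {
            int nnp = map[i].newNumPartitions;
            if (nnp <= 0) continue;
            if (map[i].numPartitions != 1) {     // already partitioned: give up, as above
                std::printf("Warning: unable to partition compute %d\n", i);
                map[i].newNumPartitions = 0;
                continue;
            }
            map[i].numPartitions = nnp;
            if (map[i].newNode == -1) map[i].newNode = map[i].node;
            for (int j = 1; j < nnp; ++j) {      // clone partitions 1..nnp-1 as new computes
                ComputeEntry clone = map[i];
                int newcid = (int)map.size();
                map.push_back(clone);
                std::printf("compute %d partition %d is %d\n", i, j, newcid);
            }
        }
        return 0;
    }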

void ComputeMgr::splitComputes2 ( CkQdMsg *   ) 

Definition at line 208 of file ComputeMgr.C.

00209 {
00210     delete msg;
00211     CProxy_ComputeMgr(thisgroup).updateLocalComputes();
00212 }

void ComputeMgr::updateComputes ( int  ,
CkGroupID   
)

Definition at line 141 of file ComputeMgr.C.

References NAMD_bug().

Referenced by LdbCoordinator::ExecuteMigrations().

00142 {
00143     updateComputesReturnEP = ep;
00144     updateComputesReturnChareID = chareID;
00145     updateComputesCount = CkNumPes();
00146 
00147     if (CkMyPe())
00148     {
00149         NAMD_bug("updateComputes signaled on wrong Pe!");
00150     }
00151 
00152     CkStartQD(CkIndex_ComputeMgr::updateComputes2((CkQdMsg*)0),&thishandle);
00153 }

void ComputeMgr::updateComputes2 ( CkQdMsg *   ) 

Definition at line 155 of file ComputeMgr.C.

References WorkDistrib::saveComputeMapChanges().

00156 {
00157     delete msg;
00158 
00159     CProxy_WorkDistrib wd(CkpvAccess(BOCclass_group).workDistrib);
00160     WorkDistrib  *workDistrib = wd.ckLocalBranch();
00161     workDistrib->saveComputeMapChanges(CkIndex_ComputeMgr::updateComputes3(),thisgroup);
00162 }

void ComputeMgr::updateComputes3 (  ) 

Definition at line 164 of file ComputeMgr.C.

00165 {
00166     if ( skipSplitting ) {
00167       CProxy_ComputeMgr(thisgroup).updateLocalComputes();
00168     } else {
00169       CProxy_ComputeMgr(thisgroup).splitComputes();
00170       skipSplitting = 1;
00171     }
00172 }

void ComputeMgr::updateLocalComputes (  ) 

Definition at line 214 of file ComputeMgr.C.

References ResizeArray< Elem >::add(), ComputeMap::compute(), ProxyMgr::createProxy(), LdbCoordinator::Migrate(), ComputeMap::newNode(), ComputeMap::newNumPartitions(), ComputeMap::node(), ComputeMap::numComputes(), ComputeMap::numPids(), LdbCoordinator::Object(), ComputeMap::Object(), ComputeMap::pid(), ComputeMap::registerCompute(), and ResizeArray< Elem >::resize().

00215 {
00216     ComputeMap *computeMap = ComputeMap::Object();
00217     CProxy_ProxyMgr pm(CkpvAccess(BOCclass_group).proxyMgr);
00218     ProxyMgr *proxyMgr = pm.ckLocalBranch();
00219     LdbCoordinator *ldbCoordinator = LdbCoordinator::Object();
00220 
00221      computeFlag.resize(0);
00222 
00223     const int nc = computeMap->numComputes();
00224     for (int i=0; i<nc; i++) {
00225 
00226         if ( computeMap->node(i) == CkMyPe() &&
00227              computeMap->newNumPartitions(i) > 1 ) {
00228            Compute *c = computeMap->compute(i);
00229            ldbCoordinator->Migrate(c->ldObjHandle,CkMyPe());
00230            delete c;
00231            computeMap->registerCompute(i,NULL);
00232            if ( computeMap->newNode(i) == CkMyPe() ) computeFlag.add(i); 
00233         } else
00234         if (computeMap->newNode(i) == CkMyPe() && computeMap->node(i) != CkMyPe())
00235         {
00236             computeFlag.add(i);
00237             for (int n=0; n < computeMap->numPids(i); n++)
00238             {
00239                 proxyMgr->createProxy(computeMap->pid(i,n));
00240             }
00241         }
00242         else if (computeMap->node(i) == CkMyPe() &&
00243                  (computeMap->newNode(i) != -1 && computeMap->newNode(i) != CkMyPe() ))
00244         {
00245             // CkPrintf("delete compute %d on pe %d\n",i,CkMyPe());
00246             delete computeMap->compute(i);
00247             computeMap->registerCompute(i,NULL);
00248         }
00249     }
00250 
00251     if (!CkMyPe())
00252     {
00253         CkStartQD(CkIndex_ComputeMgr::updateLocalComputes2((CkQdMsg*)0), &thishandle);
00254     }
00255 }
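
The loop above makes one of three local decisions per compute: a compute on this PE that is being repartitioned is deregistered from the load balancer and destroyed, then flagged for re-creation only if its new home is still this PE; a compute newly mapped to this PE is flagged and proxies are created for its patches; a compute moving off this PE is simply destroyed. A compact decision-table sketch of that logic (plain C++, names illustrative):

    #include <cstdio>

    enum Action { RecreateHere, AdoptWithProxies, Destroy, Ignore };

    // Decide what this PE must do with a compute, given old and new mappings.
    Action localAction(int node, int newNode, int newNumPartitions, int myPe) {
        if (node == myPe && newNumPartitions > 1)
            return (newNode == myPe) ? RecreateHere : Destroy;  // rebuilt here or migrated away
        if (newNode == myPe && node != myPe)
            return AdoptWithProxies;                            // arriving: flag it, create proxies
        if (node == myPe && newNode != -1 && newNode != myPe)
            return Destroy;                                     // leaving: delete the local object
        return Ignore;
    }

    int main() {
        std::printf("%d\n", localAction(4, 4, 2, 4));   // RecreateHere
        std::printf("%d\n", localAction(2, 4, 0, 4));   // AdoptWithProxies
        std::printf("%d\n", localAction(4, 2, 0, 4));   // Destroy
        return 0;
    }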

void ComputeMgr::updateLocalComputes2 ( CkQdMsg *   ) 

Definition at line 258 of file ComputeMgr.C.

00259 {
00260     delete msg;
00261     CProxy_ComputeMgr(thisgroup).updateLocalComputes3();
00262 }

void ComputeMgr::updateLocalComputes3 (  ) 

Definition at line 265 of file ComputeMgr.C.

References ResizeArray< Elem >::clear(), ComputeMap::newNode(), ProxyMgr::nodecount, ComputeMap::numComputes(), ComputeMap::Object(), ProxyMgr::removeUnusedProxies(), ComputeMap::setNewNode(), ComputeMap::setNewNumPartitions(), ComputeMap::setNode(), and ResizeArray< Elem >::size().

00266 {
00267     ComputeMap *computeMap = ComputeMap::Object();
00268     CProxy_ProxyMgr pm(CkpvAccess(BOCclass_group).proxyMgr);
00269     ProxyMgr *proxyMgr = pm.ckLocalBranch();
00270 
00271     ProxyMgr::nodecount = 0;
00272 
00273     const int nc = computeMap->numComputes();
00274 
00275     if ( ! CkMyRank() ) {
00276       for (int i=0; i<nc; i++) {
00277         computeMap->setNewNumPartitions(i,0);
00278         if (computeMap->newNode(i) != -1) {
00279           computeMap->setNode(i,computeMap->newNode(i));
00280           computeMap->setNewNode(i,-1);
00281         }
00282       }
00283     }
00284  
00285     for(int i=0; i<computeFlag.size(); i++) createCompute(computeFlag[i], computeMap);
00286     computeFlag.clear();
00287 
00288     proxyMgr->removeUnusedProxies();
00289 
00290     if (!CkMyPe())
00291     {
00292         CkStartQD(CkIndex_ComputeMgr::updateLocalComputes4((CkQdMsg*)0), &thishandle);
00293     }
00294 }

void ComputeMgr::updateLocalComputes4 ( CkQdMsg *   ) 

Definition at line 297 of file ComputeMgr.C.

References ComputeMap::Object(), Node::Object(), ComputeMap::saveComputeMap(), Node::simParameters, and simParams.

00298 {
00299     delete msg;
00300     CProxy_ComputeMgr(thisgroup).updateLocalComputes5();
00301 
00302     // store the latest compute map
00303            SimParameters *simParams = Node::Object()->simParameters;
00304     if (simParams->storeComputeMap) {
00305       ComputeMap *computeMap = ComputeMap::Object();
00306       computeMap->saveComputeMap(simParams->computeMapFilename);
00307     }
00308 }

void ComputeMgr::updateLocalComputes5 (  ) 

Definition at line 315 of file ComputeMgr.C.

References ProxyMgr::buildProxySpanningTree2(), PatchMap::checkMap(), ComputeMap::checkMap(), ProxyMgr::Object(), PatchMap::Object(), ComputeMap::Object(), proxyRecvSpanning, proxySendSpanning, and ProxyMgr::sendSpanningTrees().

00316 {
00317     if ( ! CkMyRank() ) {
00318       ComputeMap::Object()->checkMap();
00319       PatchMap::Object()->checkMap();
00320     }
00321 
00322     // we always use the centralized building of spanning tree
00323     // distributed building of ST called in Node.C only
00324     if (proxySendSpanning || proxyRecvSpanning)
00325         ProxyMgr::Object()->buildProxySpanningTree2();
00326 
00327     // this code needs to be turned on if we want to
00328     // shift the creation of ST to the load balancer
00329 
00330 #if 0
00331     if (proxySendSpanning || proxyRecvSpanning)
00332     {
00333         if (firstphase)
00334             ProxyMgr::Object()->buildProxySpanningTree2();
00335         else
00336             if (CkMyPe() == 0)
00337                 ProxyMgr::Object()->sendSpanningTrees();
00338 
00339         firstphase = 0;
00340     }
00341 #endif
00342 
00343     if (!CkMyPe())
00344         CkStartQD(CkIndex_ComputeMgr::doneUpdateLocalComputes(), &thishandle);
00345 }


Member Data Documentation

ComputeGlobal* ComputeMgr::computeGlobalObject

Definition at line 97 of file ComputeMgr.h.

Referenced by ComputeMgr(), createComputes(), Sequencer::integrate(), Sequencer::minimize(), and recvComputeGlobalResults().

int ComputeMgr::computeGlobalResultsMsgMasterSeq

Definition at line 100 of file ComputeMgr.h.

Referenced by ComputeMgr(), and sendComputeGlobalResults().

ResizeArray<ComputeGlobalResultsMsg*> ComputeMgr::computeGlobalResultsMsgs

Definition at line 98 of file ComputeMgr.h.

Referenced by enableComputeGlobalResults(), and recvComputeGlobalResults().

int ComputeMgr::computeGlobalResultsMsgSeq

Definition at line 99 of file ComputeMgr.h.

Referenced by ComputeMgr(), enableComputeGlobalResults(), and recvComputeGlobalResults().


The documentation for this class was generated from the following files:
ComputeMgr.h
ComputeMgr.C
Generated on Sun Sep 24 01:17:16 2017 for NAMD by  doxygen 1.4.7