NAMD
ComputeMgr Class Reference

#include <ComputeMgr.h>

Inheritance diagram for ComputeMgr:

Public Member Functions

 ComputeMgr ()
 
 ~ComputeMgr ()
 
void createComputes (ComputeMap *map)
 
void updateComputes (int, CkGroupID)
 
void updateComputes2 (CkQdMsg *)
 
void updateComputes3 ()
 
void splitComputes ()
 
void splitComputes2 (CkQdMsg *)
 
void updateLocalComputes ()
 
void updateLocalComputes2 (CkQdMsg *)
 
void updateLocalComputes3 ()
 
void updateLocalComputes4 (CkQdMsg *)
 
void updateLocalComputes5 ()
 
void doneUpdateLocalComputes ()
 
void sendComputeGlobalConfig (ComputeGlobalConfigMsg *)
 
void recvComputeGlobalConfig (ComputeGlobalConfigMsg *)
 
void sendComputeGlobalData (ComputeGlobalDataMsg *)
 
void recvComputeGlobalData (ComputeGlobalDataMsg *)
 
void sendComputeGlobalResults (ComputeGlobalResultsMsg *)
 
void recvComputeGlobalResults (ComputeGlobalResultsMsg *)
 
void enableComputeGlobalResults ()
 
void sendComputeDPMEData (ComputeDPMEDataMsg *)
 
void recvComputeDPMEData (ComputeDPMEDataMsg *)
 
void sendComputeDPMEResults (ComputeDPMEResultsMsg *, int)
 
void recvComputeDPMEResults (ComputeDPMEResultsMsg *)
 
void sendComputeEwaldData (ComputeEwaldMsg *)
 
void recvComputeEwaldData (ComputeEwaldMsg *)
 
void sendComputeEwaldResults (ComputeEwaldMsg *)
 
void recvComputeEwaldResults (ComputeEwaldMsg *)
 
void recvComputeConsForceMsg (ComputeConsForceMsg *)
 
void recvCudaGlobalMasterCreateMsg (std::vector< std::string > args)
 
void recvCudaGlobalMasterRemoveMsg (std::vector< std::string > args)
 
void recvCudaGlobalMasterUpdateMsg (std::vector< std::string > args)
 
void recvCudaGlobalMasterUpdateResultMsg (std::vector< std::string > args)
 
std::string getCudaGlobalMasterUpdateResult (const std::string &client_name) const
 
void sendYieldDevice (int pe)
 
void recvYieldDevice (int pe)
 
void sendAssignPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
 
void recvAssignPatchesOnPe (CudaComputeNonbondedMsg *msg)
 
void sendSkipPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
 
void recvSkipPatchesOnPe (CudaComputeNonbondedMsg *msg)
 
void sendFinishPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
 
void recvFinishPatchesOnPe (CudaComputeNonbondedMsg *msg)
 
void sendFinishPatchOnPe (int pe, CudaComputeNonbonded *c, int i, PatchID patchID)
 
void recvFinishPatchOnPe (CudaComputeNonbondedMsg *msg)
 
void sendOpenBoxesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
 
void recvOpenBoxesOnPe (CudaComputeNonbondedMsg *msg)
 
void sendFinishReductions (int pe, CudaComputeNonbonded *c)
 
void recvFinishReductions (CudaComputeNonbondedMsg *msg)
 
void sendMessageEnqueueWork (int pe, CudaComputeNonbonded *c)
 
void recvMessageEnqueueWork (CudaComputeNonbondedMsg *msg)
 
void sendLaunchWork (int pe, CudaComputeNonbonded *c)
 
void recvLaunchWork (CudaComputeNonbondedMsg *msg)
 
void sendUnregisterBoxesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
 
void recvUnregisterBoxesOnPe (CudaComputeNonbondedMsg *msg)
 

Public Attributes

ComputeGlobal * computeGlobalObject
 
ResizeArray< ComputeGlobalResultsMsg * > computeGlobalResultsMsgs
 
int computeGlobalResultsMsgSeq
 
int computeGlobalResultsMsgMasterSeq
 
CkCallback callMeBackCB
 

Detailed Description

Definition at line 62 of file ComputeMgr.h.

Constructor & Destructor Documentation

◆ ComputeMgr()

ComputeMgr::ComputeMgr ( )

Definition at line 115 of file ComputeMgr.C.

References computeGlobalObject, computeGlobalResultsMsgMasterSeq, and computeGlobalResultsMsgSeq.

116 {
117  CkpvAccess(BOCclass_group).computeMgr = thisgroup;
118  computeGlobalObject = 0;
119  computeGlobalResultsMsgSeq = -1;
120  computeGlobalResultsMsgMasterSeq = -1;
121  computeDPMEObject = 0;
122  computeEwaldObject = 0;
123  computeNonbondedWorkArrays = new ComputeNonbondedWorkArrays;
124  skipSplitting = 0;
125  masterServerObject = NULL;
126 }

◆ ~ComputeMgr()

ComputeMgr::~ComputeMgr ( void  )

Definition at line 128 of file ComputeMgr.C.

References endi(), iINFO(), and iout.

129 {
130  delete computeNonbondedWorkArrays;
131  if (masterServerObject != NULL) delete masterServerObject;
132  for (auto& loader: CudaGlobalMasterClientDlloaders) {
133  if (loader) {
134  iout << iINFO << "Close library " << loader->LibName() << "\n" << endi;
135  loader->DLCloseLib();
136  }
137  }
138 }

Member Function Documentation

◆ createComputes()

void ComputeMgr::createComputes ( ComputeMap *  map )

Definition at line 973 of file ComputeMgr.C.

References GlobalMasterServer::addClient(), CudaComputeNonbonded::assignPatches(), computeAnglesType, computeBondsType, computeCrosstermsType, computeDihedralsType, computeExclsType, computeImpropersType, computeNonbondedCUDA2Type, computeNonbondedPairType, computeNonbondedSelfType, computeSelfAnglesType, computeSelfBondsType, computeSelfCrosstermsType, computeSelfDihedralsType, computeSelfExclsType, computeSelfImpropersType, DebugM, DeviceCUDA::device_shared_with_pe(), deviceCUDA, getCudaComputeNonbonded(), DeviceCUDA::getMasterPe(), CudaComputeNonbonded::initialize(), Node::molecule, Node::myid(), Molecule::numAtoms, PatchMap::Object(), Node::Object(), Node::simParameters, simParams, and ComputeMap::type().

Referenced by Node::startup().

974 {
975 // #ifdef NAMD_CUDA
976 // int ComputePmeCUDACounter = 0;
977 // #endif
978  Node *node = Node::Object();
979  SimParameters *simParams = node->simParameters;
980  int myNode = node->myid();
981 
982  if ( simParams->globalForcesOn && !myNode )
983  {
984  DebugM(4,"Mgr running on Node "<<CkMyPe()<<"\n");
985  /* create a master server to allow multiple masters */
986  masterServerObject = new GlobalMasterServer(this,
987  PatchMap::Object()->numNodesWithPatches());
988 
989  #ifdef NODEGROUP_FORCE_REGISTER
990  CProxy_PatchData cpdata(CkpvAccess(BOCclass_group).patchData);
991  PatchData *patchData = cpdata.ckLocalBranch();
992  patchData->master_mgr = this;
993  #endif
994 
995  /* create the individual global masters */
996  // masterServerObject->addClient(new GlobalMasterTest());
997  if (simParams->tclForcesOn)
998  masterServerObject->addClient(new GlobalMasterTcl());
999  if (simParams->IMDon && ! (simParams->IMDignore || simParams->IMDignoreForces) )
1000  masterServerObject->addClient(new GlobalMasterIMD());
1001  // SMD is implemented on GPU resident version of NAMD (NAMD3)
1002  if (simParams->SMDOn && !simParams->CUDASOAintegrateMode)
1003  masterServerObject->addClient(
1004  new GlobalMasterSMD(simParams->SMDk, simParams->SMDk2,
1005  simParams->SMDVel,
1006  simParams->SMDDir, simParams->SMDOutputFreq,
1007  simParams->firstTimestep, simParams->SMDFile,
1008  node->molecule->numAtoms)
1009  );
1010 
1011  if (simParams->symmetryOn &&
1012  (simParams->firstTimestep < simParams->symmetryLastStep ||
1013  simParams->symmetryLastStep == -1))
1014  masterServerObject->addClient(new GlobalMasterSymmetry());
1015  if (simParams->TMDOn)
1016  masterServerObject->addClient(new GlobalMasterTMD());
1017  if (simParams->miscForcesOn)
1018  masterServerObject->addClient(new GlobalMasterMisc());
1019  if ( simParams->freeEnergyOn )
1020  masterServerObject->addClient(new GlobalMasterFreeEnergy());
1021  if ( simParams->colvarsOn )
1022  masterServerObject->addClient(new GlobalMasterColvars());
1023 
1024  }
1025 
1026  if ( !myNode && simParams->IMDon && (simParams->IMDignore || simParams->IMDignoreForces) ) {
1027  // GlobalMasterIMD constructor saves pointer to node->IMDOutput object
1028  new GlobalMasterIMD();
1029  }
1030 
1031 #if defined(NAMD_CUDA) || defined(NAMD_HIP)
1032  bool deviceIsMine = ( deviceCUDA->getMasterPe() == CkMyPe() );
1033 #ifdef BONDED_CUDA
1034  // Place bonded forces on Pe different from non-bonded forces
1035  int bondedMasterPe = deviceCUDA->getMasterPe();
1036  // for (int i=0;i < deviceCUDA->getNumPesSharingDevice();i++) {
1037  // int pe = deviceCUDA->getPesSharingDevice(i);
1038  // if (pe != deviceCUDA->getMasterPe()) {
1039  // bondedMasterPe = pe;
1040  // }
1041  // }
1042  bool deviceIsMineBonded = (CkMyPe() == bondedMasterPe);
1043 #endif
1044 #endif
1045 
1046  for (int i=0; i < map->nComputes; i++)
1047  {
1048  if ( ! ( i % 100 ) )
1049  {
1050  }
1051 
1052 #if defined(NAMD_CUDA) || defined(NAMD_HIP)
1053  switch ( map->type(i) )
1054  {
1055  // case computePmeCUDAType:
1056  // // Only create single ComputePmeCUDA object per Pe
1057  // if ( map->computeData[i].node != myNode ) continue;
1058  // if (ComputePmeCUDACounter > 0) continue;
1059  // ComputePmeCUDACounter++;
1060  // break;
1061  case computeNonbondedSelfType:
1062  if ( ! deviceIsMine ) continue;
1063  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1064  break;
1065 
1066  case computeNonbondedPairType:
1067  if ( ! deviceIsMine ) continue;
1068  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1069  break;
1070 
1071 #ifdef BONDED_CUDA
1072  case computeSelfBondsType:
1073  case computeBondsType:
1074  if (simParams->bondedCUDA & 1) {
1075  if ( ! deviceIsMineBonded ) continue;
1076  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1077  } else {
1078  if ( map->computeData[i].node != myNode ) continue;
1079  }
1080  break;
1081 
1082  case computeSelfAnglesType:
1083  case computeAnglesType:
1084  if (simParams->bondedCUDA & 2) {
1085  if ( ! deviceIsMineBonded ) continue;
1086  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1087  } else {
1088  if ( map->computeData[i].node != myNode ) continue;
1089  }
1090  break;
1091 
1092  case computeSelfDihedralsType:
1093  case computeDihedralsType:
1094  if (simParams->bondedCUDA & 4) {
1095  if ( ! deviceIsMineBonded ) continue;
1096  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1097  } else {
1098  if ( map->computeData[i].node != myNode ) continue;
1099  }
1100  break;
1101 
1102  case computeSelfImpropersType:
1103  case computeImpropersType:
1104  if (simParams->bondedCUDA & 8) {
1105  if ( ! deviceIsMineBonded ) continue;
1106  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1107  } else {
1108  if ( map->computeData[i].node != myNode ) continue;
1109  }
1110  break;
1111 
1112  case computeSelfExclsType:
1113  case computeExclsType:
1114  if (simParams->bondedCUDA & 16) {
1115  if ( ! deviceIsMineBonded ) continue;
1116  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1117  } else {
1118  if ( map->computeData[i].node != myNode ) continue;
1119  }
1120  break;
1121 
1122  case computeSelfCrosstermsType:
1123  case computeCrosstermsType:
1124  if (simParams->bondedCUDA & 32) {
1125  if ( ! deviceIsMineBonded ) continue;
1126  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1127  } else {
1128  if ( map->computeData[i].node != myNode ) continue;
1129  }
1130  break;
1131 
1132  case computeBondedCUDAType:
1133  if ( ! deviceIsMineBonded ) continue;
1134  if ( map->computeData[i].node != myNode ) continue;
1135  break;
1136 #endif // BONDED_CUDA
1137 
1138  case computeNonbondedCUDA2Type:
1139  if ( ! deviceIsMine ) continue;
1140 // #ifdef BONDED_CUDA
1141 // case computeBondedCUDAType:
1142 // #endif
1143  default:
1144  if ( map->computeData[i].node != myNode ) continue;
1145  }
1146 #else // defined(NAMD_CUDA) || defined(NAMD_HIP)
1147  if ( map->computeData[i].node != myNode ) continue;
1148 #endif
1149  DebugM(1,"Compute " << i << '\n');
1150  DebugM(1," node = " << map->computeData[i].node << '\n');
1151  DebugM(1," type = " << map->computeData[i].type << '\n');
1152  DebugM(1," numPids = " << map->computeData[i].numPids << '\n');
1153 // DebugM(1," numPidsAllocated = " << map->computeData[i].numPidsAllocated << '\n');
1154  for (int j=0; j < map->computeData[i].numPids; j++)
1155  {
1156  DebugM(1," pid " << map->computeData[i].pids[j].pid << '\n');
1157  if (!((j+1) % 6))
1158  DebugM(1,'\n');
1159  }
1160  DebugM(1,"\n---------------------------------------");
1161  DebugM(1,"---------------------------------------\n");
1162 
1163  createCompute(i, map);
1164 
1165  }
1166 
1167 #if defined(NAMD_CUDA) || defined(NAMD_HIP)
1168  if (deviceIsMine) {
1169  getCudaComputeNonbonded()->assignPatches(this);
1170  getCudaComputeNonbonded()->initialize();
1171  }
1172 #ifdef BONDED_CUDA
1173  if (simParams->bondedCUDA) {
1174  if (deviceIsMineBonded) {
1175  getComputeBondedCUDA()->initialize();
1176  }
1177  }
1178 #endif
1179 #endif
1180 }
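The bondedCUDA field tested above is a bitmask selecting which bonded term types are offloaded to the GPU. A minimal decoding sketch that mirrors the bit tests in the switch statement; the helper itself is illustrative and not part of NAMD:

 // Illustrative only: decodes simParams->bondedCUDA the same way the switch
 // above does (bit 1 bonds, 2 angles, 4 dihedrals, 8 impropers,
 // 16 exclusions, 32 crossterms).
 inline bool bondedTermOnGPU(int bondedCUDA, ComputeType t)
 {
     switch (t) {
     case computeSelfBondsType:      case computeBondsType:      return (bondedCUDA & 1) != 0;
     case computeSelfAnglesType:     case computeAnglesType:     return (bondedCUDA & 2) != 0;
     case computeSelfDihedralsType:  case computeDihedralsType:  return (bondedCUDA & 4) != 0;
     case computeSelfImpropersType:  case computeImpropersType:  return (bondedCUDA & 8) != 0;
     case computeSelfExclsType:      case computeExclsType:      return (bondedCUDA & 16) != 0;
     case computeSelfCrosstermsType: case computeCrosstermsType: return (bondedCUDA & 32) != 0;
     default: return false;
     }
 }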

◆ doneUpdateLocalComputes()

void ComputeMgr::doneUpdateLocalComputes ( )

Definition at line 346 of file ComputeMgr.C.

References DebugM.

347 {
348 
349 // if (!--updateComputesCount) {
350  DebugM(4, "doneUpdateLocalComputes on Pe("<<CkMyPe()<<")\n");
351  void *msg = CkAllocMsg(0,0,0);
352  CkSendMsgBranch(updateComputesReturnEP,msg,0,updateComputesReturnChareID);
353 // }
354 }

◆ enableComputeGlobalResults()

void ComputeMgr::enableComputeGlobalResults ( )

Definition at line 1313 of file ComputeMgr.C.

References computeGlobalResultsMsgs, computeGlobalResultsMsgSeq, DebugM, ResizeArray< Elem >::del(), NAMD_EVENT_START, NAMD_EVENT_STOP, recvComputeGlobalResults(), and ResizeArray< Elem >::size().

Referenced by ComputeGlobal::doWork().

1314 {
1315  NAMD_EVENT_START(1, NamdProfileEvent::GM_ENABLE_COMP_RESULTS);
1316  computeGlobalResultsMsgSeq++;
1317  DebugM(3,"["<<CkMyPe() <<"] enableComputeGlobalResults for "<< computeGlobalResultsMsgs.size() <<" messages seq "<< computeGlobalResultsMsgSeq <<"\n");
1318  for ( int i=0; i<computeGlobalResultsMsgs.size(); ++i ) {
1319  if ( computeGlobalResultsMsgs[i]->seq == computeGlobalResultsMsgSeq ) {
1320  ComputeGlobalResultsMsg *msg = computeGlobalResultsMsgs[i];
1321  computeGlobalResultsMsgs.del(i);
1322  recvComputeGlobalResults(msg);
1323  break;
1324  }
1325  }
1326  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_ENABLE_COMP_RESULTS);
1327  DebugM(3,"["<<CkMyPe() <<"] exiting enableComputeGlobalResults for "<< computeGlobalResultsMsgs.size() <<" messages seq "<< computeGlobalResultsMsgSeq <<"\n");
1328 }
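Together with recvComputeGlobalResults() below, this implements a sequence-matched mailbox: a results message that arrives before its step is enabled is parked in computeGlobalResultsMsgs, and each call here advances computeGlobalResultsMsgSeq and delivers the parked message with the matching sequence number, if any. A minimal standalone analogue (a sketch, not NAMD source; requires <vector>):

 struct Msg { int seq; };
 struct Mailbox {
     std::vector<Msg *> parked;  // plays the role of computeGlobalResultsMsgs
     int seq = -1;               // plays the role of computeGlobalResultsMsgSeq
     void recv(Msg *m) {         // cf. recvComputeGlobalResults()
         if (m->seq == seq) deliver(m);
         else parked.push_back(m);        // arrived early: park it
     }
     void enable() {             // cf. enableComputeGlobalResults()
         ++seq;
         for (size_t i = 0; i < parked.size(); ++i) {
             if (parked[i]->seq == seq) {
                 Msg *m = parked[i];
                 parked.erase(parked.begin() + i);
                 deliver(m);
                 break;          // at most one message per sequence number
             }
         }
     }
     void deliver(Msg *m) { /* process results */ delete m; }
 };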

◆ getCudaGlobalMasterUpdateResult()

std::string ComputeMgr::getCudaGlobalMasterUpdateResult ( const std::string &  client_name) const

Definition at line 1660 of file ComputeMgr.C.

1660  {
1661  return CudaGlobalMasterClientUpdateResults.at(client_name);
1662 }

◆ recvAssignPatchesOnPe()

void ComputeMgr::recvAssignPatchesOnPe ( CudaComputeNonbondedMsg *  msg )

Definition at line 1689 of file ComputeMgr.C.

References CudaComputeNonbonded::assignPatchesOnPe(), and CudaComputeNonbondedMsg::c.

1689  {
1690  msg->c->assignPatchesOnPe();
1691  delete msg;
1692 }

◆ recvComputeConsForceMsg()

void ComputeMgr::recvComputeConsForceMsg ( ComputeConsForceMsg *  msg )

Definition at line 1450 of file ComputeMgr.C.

References ComputeConsForceMsg::aid, Molecule::consForce, Molecule::consForceIndexes, ComputeConsForceMsg::f, Node::molecule, Molecule::numAtoms, Node::Object(), and ResizeArray< Elem >::size().

1451 {
1452  Molecule *m = Node::Object()->molecule;
1453  if(CkMyRank()==0){ // there is only one molecule per process
1454  delete [] m->consForceIndexes;
1455  delete [] m->consForce;
1456  int n = msg->aid.size();
1457  if (n > 0)
1458  {
1459  m->consForceIndexes = new int32[m->numAtoms];
1460  m->consForce = new Vector[n];
1461  int i;
1462  for (i=0; i<m->numAtoms; i++) m->consForceIndexes[i] = -1;
1463  for (i=0; i<msg->aid.size(); i++)
1464  {
1465  m->consForceIndexes[msg->aid[i]] = i;
1466  m->consForce[i] = msg->f[i];
1467  }
1468  }
1469  else
1470  {
1471  m->consForceIndexes = NULL;
1472  m->consForce = NULL;
1473  }
1474  }
1475  delete msg;
1476 #ifdef NODEGROUP_FORCE_REGISTER
1477  if(CkMyPe()==0)
1478  {
1479  CProxy_PatchData cpdata(CkpvAccess(BOCclass_group).patchData);
1480  cpdata.setDeviceKernelUpdateCounter();
1481  }
1482 #endif
1483 }
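The loop above builds a sparse-to-dense mapping: consForceIndexes[atomID] holds the atom's slot in the packed consForce array, or -1 when no constant force applies. A minimal lookup sketch (an illustrative helper, not NAMD source):

 // Returns the constant force applied to atomID, or zero if none is set.
 Vector lookupConsForce(const Molecule *m, int atomID)
 {
     const int slot = m->consForceIndexes ? m->consForceIndexes[atomID] : -1;
     return (slot >= 0) ? m->consForce[slot] : Vector(0, 0, 0);
 }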

◆ recvComputeDPMEData()

void ComputeMgr::recvComputeDPMEData ( ComputeDPMEDataMsg *  msg )

Definition at line 1413 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

1414 {
1415  if ( computeDPMEObject )
1416  {
1417 #ifdef DPME
1418  computeDPMEObject->recvData(msg);
1419 #endif
1420  }
1421  else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
1422  else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
1423 }

◆ recvComputeDPMEResults()

void ComputeMgr::recvComputeDPMEResults ( ComputeDPMEResultsMsg *  msg )

Definition at line 1431 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

1432 {
1433  if ( computeDPMEObject )
1434  {
1435 #ifdef DPME
1436  computeDPMEObject->recvResults(msg);
1437 #endif
1438  }
1439  else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
1440  else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
1441 }

◆ recvComputeEwaldData()

void ComputeMgr::recvComputeEwaldData ( ComputeEwaldMsg *  msg )

Definition at line 1376 of file ComputeMgr.C.

References NAMD_die(), and ComputeEwald::recvData().

1377 {
1378  if (computeEwaldObject)
1379  computeEwaldObject->recvData(msg);
1380  else NAMD_die("ComputeMgr::computeEwaldObject in recvData is NULL!");
1381 }

◆ recvComputeEwaldResults()

void ComputeMgr::recvComputeEwaldResults ( ComputeEwaldMsg *  msg )

Definition at line 1388 of file ComputeMgr.C.

References NAMD_die(), PatchMap::Object(), and ComputeEwald::recvResults().

Referenced by sendComputeEwaldResults().

1389 {
1390  if (computeEwaldObject) {
1391  CmiEnableUrgentSend(1);
1392  computeEwaldObject->recvResults(msg);
1393  CmiEnableUrgentSend(0);
1394  }
1395  else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
1396  else NAMD_die("ComputeMgr::computeEwaldObject in recvResults is NULL!");
1397 }

◆ recvComputeGlobalConfig()

void ComputeMgr::recvComputeGlobalConfig ( ComputeGlobalConfigMsg *  )

◆ recvComputeGlobalData()

void ComputeMgr::recvComputeGlobalData ( ComputeGlobalDataMsg *  msg )

Definition at line 1263 of file ComputeMgr.C.

References DebugM, NAMD_die(), NAMD_EVENT_START, NAMD_EVENT_STOP, and GlobalMasterServer::recvData().

1264 {
1265  NAMD_EVENT_START(1, NamdProfileEvent::GM_RECV_COMP_DATA);
1266  if (masterServerObject) // make sure it has been initialized
1267  {
1268  DebugM(3, "["<<CkMyPe()<<"] recvComputeGlobalData calling recvData\n");
1269  masterServerObject->recvData(msg);
1270  }
1271  else NAMD_die("ComputeMgr::masterServerObject is NULL!");
1272  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_RECV_COMP_DATA);
1273 }

◆ recvComputeGlobalResults()

void ComputeMgr::recvComputeGlobalResults ( ComputeGlobalResultsMsg *  msg )

Definition at line 1330 of file ComputeMgr.C.

References ResizeArray< Elem >::add(), computeGlobalObject, computeGlobalResultsMsgs, computeGlobalResultsMsgSeq, DebugM, NAMD_die(), NAMD_EVENT_START, NAMD_EVENT_STOP, PatchMap::Object(), ComputeGlobal::recvResults(), and ComputeGlobalResultsMsg::seq.

Referenced by ComputeGlobal::doWork(), enableComputeGlobalResults(), and sendComputeGlobalData().

1331 {
1332  NAMD_EVENT_START(1, NamdProfileEvent::GM_RCV_COMP_RESULTS);
1333  DebugM(3,"[" << CkMyPe() << "] recvComputeGlobalResults msg->seq "<< msg->seq << " computeGlobalResultsMsgSeq " << computeGlobalResultsMsgSeq << "\n");
1334  if ( computeGlobalObject )
1335  {
1336  if ( msg->seq == computeGlobalResultsMsgSeq ) {
1337  CmiEnableUrgentSend(1);
1338 
1340  // CkPrintf("*** past recvResults on PE %d \n", CkMyPe());
1341  CmiEnableUrgentSend(0);
1342  } else {
1343  // CkPrintf("*** Adding recvComputeGlobalResults on PE %d \n", CkMyPe());
1345  }
1346  }
1347  else if ( ! (PatchMap::Object())->numHomePatches() )
1348  {
1349  // CkPrintf("*** ignoring recvComputeGlobalResults on PE %d due to no home patch\n", CkMyPe());
1350  delete msg;
1351  }
1352  else NAMD_die("ComputeMgr::computeGlobalObject is NULL!");
1353  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_RCV_COMP_RESULTS);
1354  // CkPrintf("*** exiting recvComputeGlobalResults on PE %d \n", CkMyPe());
1355 }

◆ recvCudaGlobalMasterCreateMsg()

void ComputeMgr::recvCudaGlobalMasterCreateMsg ( std::vector< std::string >  args)

Definition at line 1485 of file ComputeMgr.C.

References ComputeCUDAMgr::createCudaGlobalMaster(), DebugM, deviceCUDA, endi(), ComputeCUDAMgr::getComputeCUDAMgr(), ComputeCUDAMgr::getCudaGlobalMaster(), DeviceCUDA::getGlobalDevice(), DeviceCUDA::getIsGlobalDevice(), DeviceCUDA::getMasterPe(), iERROR(), iINFO(), iout, NAMD_die(), Node::Object(), Node::simParameters, and simParams.

1485  {
1486 #ifdef NAMD_CUDA
1487  Node *node = Node::Object();
1488  SimParameters *simParams = node->simParameters;
1489  if (simParams->CUDASOAintegrate && simParams->useCudaGlobal) {
1490 #ifdef NODEGROUP_FORCE_REGISTER
1491  if (deviceCUDA->getMasterPe() == CkMyPe()) {
1492  if (deviceCUDA->getIsGlobalDevice()) {
1493  DebugM(3, "Call recvCudaGlobalMasterCreateMsg on master PE " << CkMyPe() << ".\n");
1494  ComputeCUDAMgr *cudaMgr = ComputeCUDAMgr::getComputeCUDAMgr();
1495  cudaMgr->createCudaGlobalMaster();
1496  std::shared_ptr<CudaGlobalMasterClient> client = nullptr;
1497  const std::string library_name = args[0];
1498  // Find to see if library_name has been loaded
1499  std::shared_ptr<dlloader::DLLoader<CudaGlobalMasterClient>> loader = nullptr;
1500  for (auto it = CudaGlobalMasterClientDlloaders.begin();
1501  it != CudaGlobalMasterClientDlloaders.end(); ++it) {
1502  if ((*it)->LibName() == library_name) {
1503  loader = (*it);
1504  break;
1505  }
1506  }
1507  // Create a new loader if not found
1508  if (loader == nullptr) {
1509  loader = std::shared_ptr<dlloader::DLLoader<CudaGlobalMasterClient>>(new dlloader::DLLoader<CudaGlobalMasterClient>(library_name));
1510  }
1511  try {
1512  iout << iINFO << "Loading library " << library_name
1513  << " on PE: " << CkMyPe() << "\n" << endi;
1514  loader->DLOpenLib();
1515  client = loader->DLGetInstance();
1516  } catch (std::exception& e) {
1517  iout << iERROR << "Cannot load the shared library " << library_name << "\n" << endi;
1518  NAMD_die(e.what());
1519  }
1520  // Try to initialize the client
1521  try {
1522  client->initialize(args,
1523  deviceCUDA->getGlobalDevice(),
1524  cudaMgr->getCudaGlobalMaster()->getStream());
1525  client->subscribe(cudaMgr->getCudaGlobalMaster());
1526  iout << iINFO << "CudaGlobalMaster client \"" << client->name()
1527  << "\"" << " initialized\n" << endi;
1528  } catch (std::exception& e) {
1529  iout << iERROR << "Cannot initialize the CudaGlobalMaster client from "
1530  << library_name << "\n" << endi;
1531  NAMD_die(e.what());
1532  }
1533  CudaGlobalMasterClientDlloaders.push_back(loader);
1534  } else {
1535  DebugM(3, "Skip recvCudaGlobalMasterCreateMsg on master PE " <<
1536  CkMyPe() << " that is not scheduled for GPU-resident global master.\n");
1537  }
1538  } else {
1539  DebugM(3, "Skip recvCudaGlobalMasterCreateMsg on non-master PE " << CkMyPe() << ".\n");
1540  }
1541 #endif // NODEGROUP_FORCE_REGISTER
1542  } else {
1543  if (!(simParams->CUDASOAintegrate)) {
1544  NAMD_die("GPU-resident mode is not enabled.\n");
1545  }
1546  if (!(simParams->useCudaGlobal)) {
1547  NAMD_die("GPU-resident external forces are not enabled.\n");
1548  }
1549  }
1550  // CmiNodeBarrier();
1551 #endif
1552 }
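The args vector doubles as the request format: args[0] names the shared library to load, and the whole vector is forwarded to the new client's initialize(). A hedged sketch of issuing a create request from driver code (the library path and extra argument are hypothetical):

 // Illustrative only: broadcast a create request to the ComputeMgr group;
 // per the code above, only the master PE of the global device acts on it.
 std::vector<std::string> args;
 args.push_back("/path/to/libMyGlobalMasterClient.so"); // args[0]: library to dlopen
 args.push_back("client-specific-option");              // forwarded to initialize()
 CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
 cm.recvCudaGlobalMasterCreateMsg(args);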

◆ recvCudaGlobalMasterRemoveMsg()

void ComputeMgr::recvCudaGlobalMasterRemoveMsg ( std::vector< std::string >  args)

Definition at line 1554 of file ComputeMgr.C.

References deviceCUDA, endi(), ComputeCUDAMgr::getComputeCUDAMgr(), ComputeCUDAMgr::getCudaGlobalMaster(), DeviceCUDA::getIsGlobalDevice(), DeviceCUDA::getMasterPe(), iINFO(), iout, NAMD_die(), Node::Object(), Node::simParameters, and simParams.

1554  {
1555 #ifdef NAMD_CUDA
1556  Node *node = Node::Object();
1557  SimParameters *simParams = node->simParameters;
1558  const std::string client_name_to_remove = args[0];
1559  if (simParams->CUDASOAintegrate && simParams->useCudaGlobal) {
1560 #ifdef NODEGROUP_FORCE_REGISTER
1561  if (deviceCUDA->getMasterPe() == CkMyPe()) {
1562  if (deviceCUDA->getIsGlobalDevice()) {
1563  ComputeCUDAMgr *cudaMgr = ComputeCUDAMgr::getComputeCUDAMgr();
1564  std::shared_ptr<CudaGlobalMasterServer> gm = cudaMgr->getCudaGlobalMaster();
1565  if (gm) {
1566  std::shared_ptr<CudaGlobalMasterClient> c = nullptr;
1567  const std::vector<std::shared_ptr<CudaGlobalMasterClient>>& clients = gm->getClients();
1568  for (size_t i = 0; i < clients.size(); ++i) {
1569  if (client_name_to_remove == clients[i]->name()) {
1570  c = clients[i];
1571  break;
1572  }
1573  }
1574  if (c) {
1575  gm->removeClient(c);
1576  iout << iINFO << "CudaGlobalMasterClient \""
1577  << client_name_to_remove << "\" removed\n" << endi;
1578  } else {
1579  const std::string error = "CudaGlobalMasterClient \""
1580  + client_name_to_remove + "\" not found";
1581  NAMD_die(error.c_str());
1582  }
1583  }
1584  }
1585  }
1586 #endif // NODEGROUP_FORCE_REGISTER
1587  } else {
1588  if (!(simParams->CUDASOAintegrate)) {
1589  NAMD_die("GPU-resident mode is not enabled.\n");
1590  }
1591  if (!(simParams->useCudaGlobal)) {
1592  NAMD_die("GPU-resident external forces are not enabled.\n");
1593  }
1594  }
1595 #endif
1596 }

◆ recvCudaGlobalMasterUpdateMsg()

void ComputeMgr::recvCudaGlobalMasterUpdateMsg ( std::vector< std::string >  args)

Definition at line 1598 of file ComputeMgr.C.

References deviceCUDA, endi(), ComputeCUDAMgr::getComputeCUDAMgr(), ComputeCUDAMgr::getCudaGlobalMaster(), DeviceCUDA::getIsGlobalDevice(), DeviceCUDA::getMasterPe(), iINFO(), iout, NAMD_die(), Node::Object(), Node::simParameters, and simParams.

1598  {
1599  // XXX Should this also be for NAMD_HIP ?
1600 #ifdef NAMD_CUDA
1601  std::vector<std::string> result_args;
1602  Node *node = Node::Object();
1603  SimParameters *simParams = node->simParameters;
1604  const std::string client_name_to_update = args[0];
1605  if (simParams->CUDASOAintegrate && simParams->useCudaGlobal) {
1606 #ifdef NODEGROUP_FORCE_REGISTER
1607  if (deviceCUDA->getMasterPe() == CkMyPe()) {
1608  if (deviceCUDA->getIsGlobalDevice()) {
1609  ComputeCUDAMgr *cudaMgr = ComputeCUDAMgr::getComputeCUDAMgr();
1610  std::shared_ptr<CudaGlobalMasterServer> gm = cudaMgr->getCudaGlobalMaster();
1611  if (gm) {
1612  std::shared_ptr<CudaGlobalMasterClient> c = nullptr;
1613  const std::vector<std::shared_ptr<CudaGlobalMasterClient>>& clients = gm->getClients();
1614  for (size_t i = 0; i < clients.size(); ++i) {
1615  if (client_name_to_update == clients[i]->name()) {
1616  c = clients[i];
1617  break;
1618  }
1619  }
1620  if (c) {
1621  result_args.push_back(client_name_to_update);
1622  result_args.push_back(c->updateFromTCLCommand(args));
1623  iout << iINFO << "CudaGlobalMasterClient \""
1624  << client_name_to_update << "\" updated\n" << endi;
1625  } else {
1626  const std::string error = "CudaGlobalMasterClient \""
1627  + client_name_to_update + "\" not found";
1628  NAMD_die(error.c_str());
1629  }
1630  }
1631  }
1632  }
1633 #endif // NODEGROUP_FORCE_REGISTER
1634  } else {
1635  if (!(simParams->CUDASOAintegrate)) {
1636  NAMD_die("GPU-resident mode is not enabled.\n");
1637  }
1638  if (!(simParams->useCudaGlobal)) {
1639  NAMD_die("GPU-resident external forces are not enabled.\n");
1640  }
1641  }
1642  CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
1643  cm[0].recvCudaGlobalMasterUpdateResultMsg(result_args);
1644 #endif
1645 }

◆ recvCudaGlobalMasterUpdateResultMsg()

void ComputeMgr::recvCudaGlobalMasterUpdateResultMsg ( std::vector< std::string >  args)

Definition at line 1647 of file ComputeMgr.C.

References NAMD_bug().

1647  {
1648  if (CkMyPe() == 0) {
1649  if (!args.empty()) {
1650  CudaGlobalMasterClientUpdateResults[args[0]] = args[1];
1651  }
1652  } else {
1653  const std::string error =
1654  "recvCudaGlobalMasterUpdateResultMsg is called on " +
1655  std::to_string(CkMyPe()) + " but expected on PE 0!\n";
1656  NAMD_bug(error.c_str());
1657  }
1658 }

◆ recvFinishPatchesOnPe()

void ComputeMgr::recvFinishPatchesOnPe ( CudaComputeNonbondedMsg *  msg )

Definition at line 1716 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::finishPatchesOnPe().

1716  {
1717  msg->c->finishPatchesOnPe();
1718  delete msg;
1719 }

◆ recvFinishPatchOnPe()

void ComputeMgr::recvFinishPatchOnPe ( CudaComputeNonbondedMsg *  msg )

Definition at line 1729 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, CudaComputeNonbonded::finishPatchOnPe(), and CudaComputeNonbondedMsg::i.

1729  {
1730  msg->c->finishPatchOnPe(msg->i);
1731  delete msg;
1732 }

◆ recvFinishReductions()

void ComputeMgr::recvFinishReductions ( CudaComputeNonbondedMsg *  msg )

Definition at line 1754 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::finishReductions().

1754  {
1755  msg->c->finishReductions();
1756  delete msg;
1757 }

◆ recvLaunchWork()

void ComputeMgr::recvLaunchWork ( CudaComputeNonbondedMsg *  msg )

Definition at line 1776 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::launchWork().

1776  {
1777  msg->c->launchWork();
1778  delete msg;
1779 }

◆ recvMessageEnqueueWork()

void ComputeMgr::recvMessageEnqueueWork ( CudaComputeNonbondedMsg *  msg )

Definition at line 1765 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::messageEnqueueWork().

1765  {
1766  msg->c->messageEnqueueWork();
1767  delete msg;
1768 }

◆ recvOpenBoxesOnPe()

void ComputeMgr::recvOpenBoxesOnPe ( CudaComputeNonbondedMsg *  msg )

Definition at line 1743 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::openBoxesOnPe().

1743  {
1744  msg->c->openBoxesOnPe();
1745  delete msg;
1746 }

◆ recvSkipPatchesOnPe()

void ComputeMgr::recvSkipPatchesOnPe ( CudaComputeNonbondedMsg *  msg )

Definition at line 1702 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::skipPatchesOnPe().

1702  {
1703  msg->c->skipPatchesOnPe();
1704  delete msg;
1705 }

◆ recvUnregisterBoxesOnPe()

void ComputeMgr::recvUnregisterBoxesOnPe ( CudaComputeNonbondedMsg *  msg )

Definition at line 1789 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::unregisterBoxesOnPe().

1789  {
1790  msg->c->unregisterBoxesOnPe();
1791  delete msg;
1792 }

◆ recvYieldDevice()

void ComputeMgr::recvYieldDevice ( int  pe)

Definition at line 1669 of file ComputeMgr.C.

1669  {
1670  // XXX MIC support was only code using YieldDevice functionality
1671  // computeNonbondedMICObject->recvYieldDevice(pe);
1672 }

◆ sendAssignPatchesOnPe()

void ComputeMgr::sendAssignPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1681 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::assignPatches().

1681  {
1682  for (int i=0;i < pes.size();i++) {
1683  CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
1684  msg->c = c;
1685  thisProxy[pes[i]].recvAssignPatchesOnPe(msg);
1686  }
1687 }
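This is the first of several send/recv pairs that share one shape: the sender allocates a CudaComputeNonbondedMsg, stores the CudaComputeNonbonded pointer (valid on the target PE because delivery stays within the process), and invokes the matching recv entry method there, which runs the per-PE step and deletes the message. A schematic of the pattern, with X as a placeholder for AssignPatches, SkipPatches, OpenBoxes, and so on (not a real method name):

 // Schematic only; X is a placeholder.
 void ComputeMgr::sendXOnPe(std::vector<int> &pes, CudaComputeNonbonded *c) {
     for (int i = 0; i < pes.size(); i++) {
         CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
         msg->c = c;                        // shared-memory pointer hand-off
         thisProxy[pes[i]].recvXOnPe(msg);  // entry method on the target PE
     }
 }
 void ComputeMgr::recvXOnPe(CudaComputeNonbondedMsg *msg) {
     msg->c->xOnPe();                       // run the per-PE step
     delete msg;
 }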

◆ sendComputeDPMEData()

void ComputeMgr::sendComputeDPMEData ( ComputeDPMEDataMsg *  msg )

Definition at line 1399 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

1400 {
1401  if ( computeDPMEObject )
1402  {
1403 #ifdef DPME
1404  int node = computeDPMEObject->getMasterNode();
1405  CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
1406  cm.recvComputeDPMEData(msg,node);
1407 #endif
1408  }
1409  else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
1410  else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
1411 }

◆ sendComputeDPMEResults()

void ComputeMgr::sendComputeDPMEResults ( ComputeDPMEResultsMsg *  msg,
int  node 
)

Definition at line 1425 of file ComputeMgr.C.

1426 {
1427  CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
1428  cm[node].recvComputeDPMEResults(msg);
1429 }

◆ sendComputeEwaldData()

void ComputeMgr::sendComputeEwaldData ( ComputeEwaldMsg *  msg )

Definition at line 1360 of file ComputeMgr.C.

References ComputeEwald::getMasterNode(), NAMD_die(), and PatchMap::Object().

Referenced by ComputeEwald::doWork().

1361 {
1362  if (computeEwaldObject)
1363  {
1364  int node = computeEwaldObject->getMasterNode();
1365  CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
1366  cm[node].recvComputeEwaldData(msg);
1367  }
1368  else if (!PatchMap::Object()->numHomePatches())
1369  {
1370  // CkPrintf("skipping message on Pe(%d)\n", CkMyPe());
1371  delete msg;
1372  }
1373  else NAMD_die("ComputeMgr::computeEwaldObject is NULL!");
1374 }
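Together with recvComputeEwaldData(), sendComputeEwaldResults(), and recvComputeEwaldResults(), this forms a reduce-then-broadcast cycle. A condensed view of one iteration, summarizing the calls documented on this page:

 // contributor PEs:  sendComputeEwaldData(msg)
 //                     -> cm[masterNode].recvComputeEwaldData(msg)
 // master PE:        computeEwaldObject->recvData(msg)   // accumulate; when complete,
 //                     ComputeEwald::recvData() calls sendComputeEwaldResults(resultMsg)
 // broadcast:        recvComputeEwaldResults(resultMsg) on every PE
 //                     -> computeEwaldObject->recvResults(msg)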

◆ sendComputeEwaldResults()

void ComputeMgr::sendComputeEwaldResults ( ComputeEwaldMsg *  msg )

Definition at line 1383 of file ComputeMgr.C.

References recvComputeEwaldResults().

Referenced by ComputeEwald::recvData().

1384 {
1385  (CProxy_ComputeMgr(CkpvAccess(BOCclass_group).computeMgr)).recvComputeEwaldResults(msg);
1386 }

◆ sendComputeGlobalConfig()

void ComputeMgr::sendComputeGlobalConfig ( ComputeGlobalConfigMsg *  )

◆ sendComputeGlobalData()

void ComputeMgr::sendComputeGlobalData ( ComputeGlobalDataMsg *  msg )

Definition at line 1200 of file ComputeMgr.C.

References SimParameters::CUDASOAintegrate, DebugM, NAMD_EVENT_START, NAMD_EVENT_STOP, Node::Object(), recvComputeGlobalResults(), and Node::simParameters.

Referenced by ComputeGlobal::doWork().

1201 {
1202  NAMD_EVENT_START(1, NamdProfileEvent::GM_SEND_COMP_DATA);
1203  // CkPrintf("*** [%d] Calling sendComputeGlobalData\n", CkMyPe());
1204  #ifdef NODEGROUP_FORCE_REGISTER
1205  SimParameters *sp = Node::Object()->simParameters;
1206  if (sp->CUDASOAintegrate) {
1207  NAMD_EVENT_START(1, NamdProfileEvent::GM_NODELOCK);
1208  CProxy_PatchData cpdata(CkpvAccess(BOCclass_group).patchData);
1209  PatchData *patchData = cpdata.ckLocalBranch();
1210  CmiNodeLock &nl = patchData->nodeLock;
1211  // atomic access to GlobalMasterServer to simulate queueing
1212  if (CkMyPe() != 0)
1213  {
1214  CmiLock(nl);
1215  //CkPrintf("*** [%d] Acquired nodelock!\n", CkMyPe());
1216  patchData->master_mgr->recvComputeGlobalData(msg);
1217  CmiUnlock(nl);
1218  }
1219  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_NODELOCK);
1220  NAMD_EVENT_START(1, NamdProfileEvent::GM_BARRIER);
1221  // Barrier to make sure 0 goes last, since invocation of the clients and
1222  // message coordination has to happen on PE 0 and the last PE to call
1223  // recvComputeGlobalData will trigger all of that on itself
1224  // CmiNodeBarrier();
1225  // CkPrintf("*** sendComputeGlobalData entering barrier 1 on PE %d \n", CkMyPe());
1226  stowSuspendULT();
1227 
1228  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_BARRIER);
1229  if (CkMyPe() == 0)
1230  {
1231  CmiLock(nl);
1232  patchData->master_mgr->recvComputeGlobalData(msg);
1233  CmiUnlock(nl);
1234  }
1235  else
1236  {
1237  // All PEs other than 0 wait here while the clients run and the global
1238  // results messages are prepared and copied into their slots (happens from
1239  // sendComputeGlobalResults on PE0)
1240  // CmiNodeBarrier();
1241  // CkPrintf("before call to stow %d\n",CkMyPe());
1242  // CkPrintf("*** sendComputeGlobalData barrier 3 on PE %d \n", CkMyPe());
1243  stowSuspendULT();
1244  // CkPrintf("*** sendComputeGlobalData out barrier 3 on PE %d \n", CkMyPe());
1245  // CkPrintf("returned from call to stow %d\n",CkMyPe());
1246  }
1247  // Get the message from the slot for this PE and resume execution
1248  ComputeGlobalResultsMsg* resultsMsg = CkpvAccess(ComputeGlobalResultsMsg_instance);
1249  DebugM(3,"["<<CkMyPe()<<"] calling recvComputeGlobalResults\n");
1250  recvComputeGlobalResults(resultsMsg);
1251  } else {
1252  #endif
1253  CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
1254  DebugM(3,"["<<CkMyPe()<<"] msg to recvComputeGlobalData\n");
1255  cm[0].recvComputeGlobalData(msg);
1256  #ifdef NODEGROUP_FORCE_REGISTER
1257  }
1258  #endif
1259  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_SEND_COMP_DATA);
1260  DebugM(3,"["<<CkMyPe()<<"] done sendComputeGlobalData\n");
1261 }
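In the GPU-resident (CUDASOAintegrate) branch, the group message to PE 0 is replaced by a node-level rendezvous. A condensed ordering sketch, assuming the single-process multicore build the comments above describe:

 // PE != 0:  CmiLock(nl); recvComputeGlobalData(msg); CmiUnlock(nl);
 // all PEs:  stowSuspendULT();        // rendezvous 1: guarantees PE 0 deposits last
 // PE == 0:  CmiLock(nl); recvComputeGlobalData(msg); CmiUnlock(nl);
 //           -> GlobalMasterServer runs the clients; sendComputeGlobalResults()
 //              copies one reply into every PE's Ckpv slot, then releases the others
 // PE != 0:  stowSuspendULT();        // rendezvous 2: wait for the copies
 // all PEs:  recvComputeGlobalResults(CkpvAccess(ComputeGlobalResultsMsg_instance));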

◆ sendComputeGlobalResults()

void ComputeMgr::sendComputeGlobalResults ( ComputeGlobalResultsMsg *  msg )

Definition at line 1275 of file ComputeMgr.C.

References computeGlobalResultsMsgMasterSeq, SimParameters::CUDASOAintegrate, DebugM, NAMD_EVENT_START, NAMD_EVENT_STOP, Node::Object(), ComputeGlobalResultsMsg::seq, and Node::simParameters.

1276 {
1277  NAMD_EVENT_START(1, NamdProfileEvent::GM_SEND_COMP_RESULTS);
1278  msg->seq = ++computeGlobalResultsMsgMasterSeq;
1279  DebugM(3,"["<< CkMyPe()<< "] sendComputeGlobalResults seq "<<msg->seq<<"\n");
1280 
1281  #ifdef NODEGROUP_FORCE_REGISTER
1282  SimParameters *sp = Node::Object()->simParameters;
1283  if (sp->CUDASOAintegrate) {
1284  // Only PE 0 runs this code
1285  // Copy the message into each PE's slot (Assumes single-node with multicore build)
1286  for (int pe = 0; pe < CkMyNodeSize(); pe++) {
1287  if(CkpvAccessOther(ComputeGlobalResultsMsg_instance, pe)!=nullptr)
1288  {
1289  // make sure msg delete happens on the same PE as made the msg to
1290  // avoid unbounded memory pool growth for these unsent messages
1291  delete CkpvAccessOther(ComputeGlobalResultsMsg_instance, pe);
1292  }
1293  CkpvAccessOther(ComputeGlobalResultsMsg_instance, pe) = (ComputeGlobalResultsMsg*)CkCopyMsg((void**)&msg);
1294  }
1295  delete msg;
1296  // Now that copies are done, trigger the barrier to resume the other PEs
1297  // (most other PEs call this barrier from sendComputeGlobalData)
1298  // CkPrintf("this is where we would call awaken\n",CkMyPe());
1299  //CmiNodeBarrier();
1300  // CkPrintf("*** sendComputeGlobalResults entering barrier 2 on PE %d \n", CkMyPe());
1301  stowSuspendULT();
1302  //thisProxy.recvComputeGlobalResults(msg);
1303  } else {
1304  #endif
1305  DebugM(3,"["<< CkMyPe() << "] ComputeMgr::sendComputeGlobalResults invoking bcast recvComputeGlobalResults\n");
1306  thisProxy.recvComputeGlobalResults(msg);
1307  #ifdef NODEGROUP_FORCE_REGISTER
1308  }
1309  #endif
1310  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_SEND_COMP_RESULTS);
1311 }

◆ sendFinishPatchesOnPe()

void ComputeMgr::sendFinishPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1707 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, COMPUTE_PROXY_PRIORITY, PRIORITY_SIZE, Compute::sequence(), and SET_PRIORITY.

Referenced by CudaComputeNonbonded::finishPatches().

1707  {
1708  for (int i=0;i < pes.size();i++) {
1709  CudaComputeNonbondedMsg *msg = new (PRIORITY_SIZE) CudaComputeNonbondedMsg;
1710  SET_PRIORITY(msg, c->sequence(), COMPUTE_PROXY_PRIORITY);
1711  msg->c = c;
1712  thisProxy[pes[i]].recvFinishPatchesOnPe(msg);
1713  }
1714 }

◆ sendFinishPatchOnPe()

void ComputeMgr::sendFinishPatchOnPe ( int  pe,
CudaComputeNonbonded *  c,
int  i,
PatchID  patchID 
)

Definition at line 1721 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, COMPUTE_PROXY_PRIORITY, CudaComputeNonbondedMsg::i, PATCH_PRIORITY, PRIORITY_SIZE, Compute::sequence(), and SET_PRIORITY.

1721  {
1722  CudaComputeNonbondedMsg *msg = new (PRIORITY_SIZE) CudaComputeNonbondedMsg;
1723  SET_PRIORITY(msg, c->sequence(), PATCH_PRIORITY(patchID));
1724  msg->c = c;
1725  msg->i = i;
1726  thisProxy[pe].recvFinishPatchOnPe(msg);
1727 }

◆ sendFinishReductions()

void ComputeMgr::sendFinishReductions ( int  pe,
CudaComputeNonbonded *  c 
)

Definition at line 1748 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::skipPatchesOnPe().

1748  {
1749  CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
1750  msg->c = c;
1751  thisProxy[pe].recvFinishReductions(msg);
1752 }

◆ sendLaunchWork()

void ComputeMgr::sendLaunchWork ( int  pe,
CudaComputeNonbonded *  c 
)

Definition at line 1770 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::openBoxesOnPe().

1770  {
1771  CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
1772  msg->c = c;
1773  thisProxy[pe].recvLaunchWork(msg);
1774 }

◆ sendMessageEnqueueWork()

void ComputeMgr::sendMessageEnqueueWork ( int  pe,
CudaComputeNonbonded *  c 
)

Definition at line 1759 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::noWork().

1759  {
1760  CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
1761  msg->c = c;
1762  thisProxy[pe].recvMessageEnqueueWork(msg);
1763 }

◆ sendOpenBoxesOnPe()

void ComputeMgr::sendOpenBoxesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1734 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, PRIORITY_SIZE, PROXY_DATA_PRIORITY, Compute::sequence(), and SET_PRIORITY.

Referenced by CudaComputeNonbonded::doWork().

1734  {
1735  for (int i=0;i < pes.size();i++) {
1736  CudaComputeNonbondedMsg *msg = new (PRIORITY_SIZE) CudaComputeNonbondedMsg;
1737  SET_PRIORITY(msg, c->sequence(), PROXY_DATA_PRIORITY+1); // after bonded
1738  msg->c = c;
1739  thisProxy[pes[i]].recvOpenBoxesOnPe(msg);
1740  }
1741 }

◆ sendSkipPatchesOnPe()

void ComputeMgr::sendSkipPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1694 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

1694  {
1695  for (int i=0;i < pes.size();i++) {
1696  CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
1697  msg->c = c;
1698  thisProxy[pes[i]].recvSkipPatchesOnPe(msg);
1699  }
1700 }

◆ sendUnregisterBoxesOnPe()

void ComputeMgr::sendUnregisterBoxesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1781 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::~CudaComputeNonbonded().

1781  {
1782  for (int i=0;i < pes.size();i++) {
1783  CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
1784  msg->c = c;
1785  thisProxy[pes[i]].recvUnregisterBoxesOnPe(msg);
1786  }
1787 }

◆ sendYieldDevice()

void ComputeMgr::sendYieldDevice ( int  pe)

Definition at line 1664 of file ComputeMgr.C.

1664  {
1665  CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
1666  cm[pe].recvYieldDevice(CkMyPe());
1667 }

◆ splitComputes()

void ComputeMgr::splitComputes ( )

Definition at line 173 of file ComputeMgr.C.

References ComputeMap::cloneCompute(), ComputeMap::extendPtrs(), ComputeMap::newNode(), ComputeMap::newNumPartitions(), ComputeMap::node(), ComputeMap::numComputes(), ComputeMap::numPartitions(), ComputeMap::Object(), ComputeMap::setNewNode(), ComputeMap::setNewNumPartitions(), and ComputeMap::setNumPartitions().

174 {
175  if ( ! CkMyRank() ) {
176  ComputeMap *computeMap = ComputeMap::Object();
177  const int nc = computeMap->numComputes();
178 
179  for (int i=0; i<nc; i++) {
180  int nnp = computeMap->newNumPartitions(i);
181  if ( nnp > 0 ) {
182  if ( computeMap->numPartitions(i) != 1 ) {
183  CkPrintf("Warning: unable to partition compute %d\n", i);
184  computeMap->setNewNumPartitions(i,0);
185  continue;
186  }
187  //CkPrintf("splitting compute %d by %d\n",i,nnp);
188  computeMap->setNumPartitions(i,nnp);
189  if (computeMap->newNode(i) == -1) {
190  computeMap->setNewNode(i,computeMap->node(i));
191  }
192  for ( int j=1; j<nnp; ++j ) {
193  int newcid = computeMap->cloneCompute(i,j);
194  //CkPrintf("compute %d partition %d is %d\n",i,j,newcid);
195  }
196  }
197  }
198  computeMap->extendPtrs();
199  }
200 
201  if (!CkMyPe())
202  {
203  CkStartQD(CkIndex_ComputeMgr::splitComputes2((CkQdMsg*)0), &thishandle);
204  }
205 }

◆ splitComputes2()

void ComputeMgr::splitComputes2 ( CkQdMsg *  msg)

Definition at line 207 of file ComputeMgr.C.

208 {
209  delete msg;
210  CProxy_ComputeMgr(thisgroup).updateLocalComputes();
211 }

◆ updateComputes()

void ComputeMgr::updateComputes ( int  ep,
CkGroupID  chareID 
)

Definition at line 140 of file ComputeMgr.C.

References NAMD_bug().

Referenced by LdbCoordinator::ExecuteMigrations().

141 {
142  updateComputesReturnEP = ep;
143  updateComputesReturnChareID = chareID;
144  updateComputesCount = CkNumPes();
145 
146  if (CkMyPe())
147  {
148  NAMD_bug("updateComputes signaled on wrong Pe!");
149  }
150 
151  CkStartQD(CkIndex_ComputeMgr::updateComputes2((CkQdMsg*)0),&thishandle);
152 }
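updateComputes() is the entry point of a quiescence-detection driven pipeline: each stage is started via CkStartQD() so it runs only after all messages from the previous stage have drained. The chain, as documented on this page:

 updateComputes(ep, chareID)    // PE 0: record the return EP/chare, wait for QD
   -> updateComputes2()         // save compute map changes via WorkDistrib
   -> updateComputes3()         // split computes on the first pass, skip afterwards
   -> splitComputes() -> splitComputes2()
   -> updateLocalComputes() .. updateLocalComputes5()
                                // migrate, delete, and create computes; rebuild
                                // proxies and spanning trees
   -> doneUpdateLocalComputes() // signal the saved EP on the requesting chare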

◆ updateComputes2()

void ComputeMgr::updateComputes2 ( CkQdMsg *  msg)

Definition at line 154 of file ComputeMgr.C.

References WorkDistrib::saveComputeMapChanges().

155 {
156  delete msg;
157 
158  CProxy_WorkDistrib wd(CkpvAccess(BOCclass_group).workDistrib);
159  WorkDistrib *workDistrib = wd.ckLocalBranch();
160  workDistrib->saveComputeMapChanges(CkIndex_ComputeMgr::updateComputes3(),thisgroup);
161 }

◆ updateComputes3()

void ComputeMgr::updateComputes3 ( )

Definition at line 163 of file ComputeMgr.C.

164 {
165  if ( skipSplitting ) {
166  CProxy_ComputeMgr(thisgroup).updateLocalComputes();
167  } else {
168  CProxy_ComputeMgr(thisgroup).splitComputes();
169  skipSplitting = 1;
170  }
171 }

◆ updateLocalComputes()

void ComputeMgr::updateLocalComputes ( )

Definition at line 213 of file ComputeMgr.C.

References ResizeArray< Elem >::add(), ComputeMap::compute(), ProxyMgr::createProxy(), Compute::ldObjHandle, LdbCoordinator::Migrate(), ComputeMap::newNode(), ComputeMap::newNumPartitions(), ComputeMap::node(), ComputeMap::numComputes(), ComputeMap::numPids(), ComputeMap::Object(), LdbCoordinator::Object(), ComputeMap::pid(), ComputeMap::registerCompute(), and ResizeArray< Elem >::resize().

214 {
215  ComputeMap *computeMap = ComputeMap::Object();
216  CProxy_ProxyMgr pm(CkpvAccess(BOCclass_group).proxyMgr);
217  ProxyMgr *proxyMgr = pm.ckLocalBranch();
218  LdbCoordinator *ldbCoordinator = LdbCoordinator::Object();
219 
220  computeFlag.resize(0);
221 
222  const int nc = computeMap->numComputes();
223  for (int i=0; i<nc; i++) {
224 
225  if ( computeMap->node(i) == CkMyPe() &&
226  computeMap->newNumPartitions(i) > 1 ) {
227  Compute *c = computeMap->compute(i);
228  ldbCoordinator->Migrate(c->ldObjHandle,CkMyPe());
229  delete c;
230  computeMap->registerCompute(i,NULL);
231  if ( computeMap->newNode(i) == CkMyPe() ) computeFlag.add(i);
232  } else
233  if (computeMap->newNode(i) == CkMyPe() && computeMap->node(i) != CkMyPe())
234  {
235  computeFlag.add(i);
236  for (int n=0; n < computeMap->numPids(i); n++)
237  {
238  proxyMgr->createProxy(computeMap->pid(i,n));
239  }
240  }
241  else if (computeMap->node(i) == CkMyPe() &&
242  (computeMap->newNode(i) != -1 && computeMap->newNode(i) != CkMyPe() ))
243  {
244  // CkPrintf("delete compute %d on pe %d\n",i,CkMyPe());
245  delete computeMap->compute(i);
246  computeMap->registerCompute(i,NULL);
247  }
248  }
249 
250  if (!CkMyPe())
251  {
252  CkStartQD(CkIndex_ComputeMgr::updateLocalComputes2((CkQdMsg*)0), &thishandle);
253  }
254 }

◆ updateLocalComputes2()

void ComputeMgr::updateLocalComputes2 ( CkQdMsg *  msg)

Definition at line 257 of file ComputeMgr.C.

258 {
259  delete msg;
260  CProxy_ComputeMgr(thisgroup).updateLocalComputes3();
261 }

◆ updateLocalComputes3()

void ComputeMgr::updateLocalComputes3 ( )

Definition at line 264 of file ComputeMgr.C.

References ResizeArray< Elem >::clear(), ComputeMap::newNode(), ProxyMgr::nodecount, ComputeMap::numComputes(), ComputeMap::Object(), ProxyMgr::removeUnusedProxies(), ComputeMap::setNewNode(), ComputeMap::setNewNumPartitions(), ComputeMap::setNode(), and ResizeArray< Elem >::size().

265 {
266  ComputeMap *computeMap = ComputeMap::Object();
267  CProxy_ProxyMgr pm(CkpvAccess(BOCclass_group).proxyMgr);
268  ProxyMgr *proxyMgr = pm.ckLocalBranch();
269 
270  ProxyMgr::nodecount = 0;
271 
272  const int nc = computeMap->numComputes();
273 
274  if ( ! CkMyRank() ) {
275  for (int i=0; i<nc; i++) {
276  computeMap->setNewNumPartitions(i,0);
277  if (computeMap->newNode(i) != -1) {
278  computeMap->setNode(i,computeMap->newNode(i));
279  computeMap->setNewNode(i,-1);
280  }
281  }
282  }
283 
284  for(int i=0; i<computeFlag.size(); i++) createCompute(computeFlag[i], computeMap);
285  computeFlag.clear();
286 
287  proxyMgr->removeUnusedProxies();
288 
289  if (!CkMyPe())
290  {
291  CkStartQD(CkIndex_ComputeMgr::updateLocalComputes4((CkQdMsg*)0), &thishandle);
292  }
293 }

◆ updateLocalComputes4()

void ComputeMgr::updateLocalComputes4 ( CkQdMsg *  msg)

Definition at line 296 of file ComputeMgr.C.

References Node::Object(), ComputeMap::Object(), ComputeMap::saveComputeMap(), Node::simParameters, and simParams.

297 {
298  delete msg;
299  CProxy_ComputeMgr(thisgroup).updateLocalComputes5();
300 
301  // store the latest compute map
302  SimParameters *simParams = Node::Object()->simParameters;
303  if (simParams->storeComputeMap) {
304  ComputeMap *computeMap = ComputeMap::Object();
305  computeMap->saveComputeMap(simParams->computeMapFilename);
306  }
307 }

◆ updateLocalComputes5()

void ComputeMgr::updateLocalComputes5 ( )

Definition at line 314 of file ComputeMgr.C.

References ProxyMgr::buildProxySpanningTree2(), PatchMap::checkMap(), ComputeMap::checkMap(), PatchMap::Object(), ComputeMap::Object(), ProxyMgr::Object(), proxyRecvSpanning, proxySendSpanning, and ProxyMgr::sendSpanningTrees().

315 {
316  if ( ! CkMyRank() ) {
317  ComputeMap::Object()->checkMap();
318  PatchMap::Object()->checkMap();
319  }
320 
321  // we always use the centralized building of spanning tree
322  // distributed building of ST called in Node.C only
323  if (proxySendSpanning || proxyRecvSpanning)
324  ProxyMgr::Object()->buildProxySpanningTree2();
325 
326  // this code needs to be turned on if we want to
327  // shift the creation of ST to the load balancer
328 
329 #if 0
330  if (proxySendSpanning || proxyRecvSpanning)
331  {
332  if (firstphase)
333  ProxyMgr::Object()->buildProxySpanningTree2();
334  else
335  if (CkMyPe() == 0)
336  ProxyMgr::Object()->sendSpanningTrees();
337 
338  firstphase = 0;
339  }
340 #endif
341 
342  if (!CkMyPe())
343  CkStartQD(CkIndex_ComputeMgr::doneUpdateLocalComputes(), &thishandle);
344 }

Member Data Documentation

◆ callMeBackCB

CkCallback ComputeMgr::callMeBackCB

Definition at line 158 of file ComputeMgr.h.

◆ computeGlobalObject

ComputeGlobal* ComputeMgr::computeGlobalObject

Definition at line 154 of file ComputeMgr.h.

Referenced by ComputeMgr(), and recvComputeGlobalResults().
◆ computeGlobalResultsMsgMasterSeq

int ComputeMgr::computeGlobalResultsMsgMasterSeq

Definition at line 157 of file ComputeMgr.h.

Referenced by ComputeMgr(), and sendComputeGlobalResults().

◆ computeGlobalResultsMsgs

ResizeArray<ComputeGlobalResultsMsg*> ComputeMgr::computeGlobalResultsMsgs

Definition at line 155 of file ComputeMgr.h.

Referenced by enableComputeGlobalResults(), and recvComputeGlobalResults().

◆ computeGlobalResultsMsgSeq

int ComputeMgr::computeGlobalResultsMsgSeq

Definition at line 156 of file ComputeMgr.h.

Referenced by ComputeMgr(), enableComputeGlobalResults(), and recvComputeGlobalResults().


The documentation for this class was generated from the following files:

ComputeMgr.h
ComputeMgr.C