NAMD
ComputeMgr Class Reference

#include <ComputeMgr.h>

Inheritance diagram for ComputeMgr: [diagram not reproduced here]

Public Member Functions

 ComputeMgr ()
 
 ~ComputeMgr ()
 
void createComputes (ComputeMap *map)
 
void updateComputes (int, CkGroupID)
 
void updateComputes2 (CkQdMsg *)
 
void updateComputes3 ()
 
void splitComputes ()
 
void splitComputes2 (CkQdMsg *)
 
void updateLocalComputes ()
 
void updateLocalComputes2 (CkQdMsg *)
 
void updateLocalComputes3 ()
 
void updateLocalComputes4 (CkQdMsg *)
 
void updateLocalComputes5 ()
 
void doneUpdateLocalComputes ()
 
void sendComputeGlobalConfig (ComputeGlobalConfigMsg *)
 
void recvComputeGlobalConfig (ComputeGlobalConfigMsg *)
 
void sendComputeGlobalData (ComputeGlobalDataMsg *)
 
void recvComputeGlobalData (ComputeGlobalDataMsg *)
 
void sendComputeGlobalResults (ComputeGlobalResultsMsg *)
 
void recvComputeGlobalResults (ComputeGlobalResultsMsg *)
 
void enableComputeGlobalResults ()
 
void sendComputeDPMEData (ComputeDPMEDataMsg *)
 
void recvComputeDPMEData (ComputeDPMEDataMsg *)
 
void sendComputeDPMEResults (ComputeDPMEResultsMsg *, int)
 
void recvComputeDPMEResults (ComputeDPMEResultsMsg *)
 
void sendComputeEwaldData (ComputeEwaldMsg *)
 
void recvComputeEwaldData (ComputeEwaldMsg *)
 
void sendComputeEwaldResults (ComputeEwaldMsg *)
 
void recvComputeEwaldResults (ComputeEwaldMsg *)
 
void recvComputeConsForceMsg (ComputeConsForceMsg *)
 
void recvCudaGlobalMasterCreateMsg (std::vector< std::string > args)
 
void recvCudaGlobalMasterRemoveMsg (std::vector< std::string > args)
 
void recvCudaGlobalMasterUpdateMsg (std::vector< std::string > args)
 
void recvCudaGlobalMasterUpdateResultMsg (int tcl_error_code, std::vector< std::string > args)
 
std::string getCudaGlobalMasterUpdateResultString (const std::string &client_name) const
 
int getCudaGlobalMasterUpdateResult (const std::string &client_name) const
 
void sendYieldDevice (int pe)
 
void recvYieldDevice (int pe)
 
void sendAssignPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
 
void recvAssignPatchesOnPe (CudaComputeNonbondedMsg *msg)
 
void sendSkipPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
 
void recvSkipPatchesOnPe (CudaComputeNonbondedMsg *msg)
 
void sendFinishPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
 
void recvFinishPatchesOnPe (CudaComputeNonbondedMsg *msg)
 
void sendFinishPatchOnPe (int pe, CudaComputeNonbonded *c, int i, PatchID patchID)
 
void recvFinishPatchOnPe (CudaComputeNonbondedMsg *msg)
 
void sendOpenBoxesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
 
void recvOpenBoxesOnPe (CudaComputeNonbondedMsg *msg)
 
void sendFinishReductions (int pe, CudaComputeNonbonded *c)
 
void recvFinishReductions (CudaComputeNonbondedMsg *msg)
 
void sendMessageEnqueueWork (int pe, CudaComputeNonbonded *c)
 
void recvMessageEnqueueWork (CudaComputeNonbondedMsg *msg)
 
void sendLaunchWork (int pe, CudaComputeNonbonded *c)
 
void recvLaunchWork (CudaComputeNonbondedMsg *msg)
 
void sendUnregisterBoxesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
 
void recvUnregisterBoxesOnPe (CudaComputeNonbondedMsg *msg)
 

Public Attributes

ComputeGlobal * computeGlobalObject
 
ResizeArray< ComputeGlobalResultsMsg * > computeGlobalResultsMsgs
 
int computeGlobalResultsMsgSeq
 
int computeGlobalResultsMsgMasterSeq
 
CkCallback callMeBackCB
 

Detailed Description

Definition at line 66 of file ComputeMgr.h.

Constructor & Destructor Documentation

◆ ComputeMgr()

ComputeMgr::ComputeMgr ( )

Definition at line 117 of file ComputeMgr.C.

References computeGlobalObject, computeGlobalResultsMsgMasterSeq, and computeGlobalResultsMsgSeq.

118 {
119  CkpvAccess(BOCclass_group).computeMgr = thisgroup;
120  computeGlobalObject = 0;
121  computeGlobalResultsMsgSeq = -1;
122  computeGlobalResultsMsgMasterSeq = -1;
123  computeDPMEObject = 0;
124  computeEwaldObject = 0;
125  computeNonbondedWorkArrays = new ComputeNonbondedWorkArrays;
126  skipSplitting = 0;
127  masterServerObject = NULL;
128 }

◆ ~ComputeMgr()

ComputeMgr::~ComputeMgr ( void  )

Definition at line 130 of file ComputeMgr.C.

References endi(), iINFO(), and iout.

131 {
132  delete computeNonbondedWorkArrays;
133  if (masterServerObject != NULL) delete masterServerObject;
134  for (auto& loader: CudaGlobalMasterClientDlloaders) {
135  if (loader) {
136  iout << iINFO << "Close library " << loader->LibName() << "\n" << endi;
137  loader->DLCloseLib();
138  }
139  }
140 }

Member Function Documentation

◆ createComputes()

void ComputeMgr::createComputes ( ComputeMap *  map)

Definition at line 1037 of file ComputeMgr.C.

References GlobalMasterServer::addClient(), CudaComputeNonbonded::assignPatches(), computeAnglesType, computeAnisoType, computeBondsType, computeCrosstermsType, computeDihedralsType, computeExclsType, computeImpropersType, computeNonbondedCUDA2Type, computeNonbondedPairType, computeNonbondedSelfType, computeOneFourNbTholeType, computeSelfAnglesType, computeSelfAnisoType, computeSelfBondsType, computeSelfCrosstermsType, computeSelfDihedralsType, computeSelfExclsType, computeSelfImpropersType, computeSelfOneFourNbTholeType, computeSelfTholeType, computeTholeType, DebugM, DeviceCUDA::device_shared_with_pe(), deviceCUDA, getCudaComputeNonbonded(), DeviceCUDA::getMasterPe(), CudaComputeNonbonded::initialize(), Node::molecule, Node::myid(), NAMD_BONDEDGPU_ANGLES, NAMD_BONDEDGPU_ANISOS, NAMD_BONDEDGPU_BONDS, NAMD_BONDEDGPU_CROSSTERMS, NAMD_BONDEDGPU_DIHEDRALS, NAMD_BONDEDGPU_EXCLS, NAMD_BONDEDGPU_IMPROPERS, NAMD_BONDEDGPU_ONEFOURENBTHOLES, NAMD_BONDEDGPU_THOLES, Molecule::numAtoms, PatchMap::Object(), Node::Object(), Node::simParameters, simParams, and ComputeMap::type().

Referenced by Node::startup().

1038 {
1039 // #ifdef NAMD_CUDA
1040 // int ComputePmeCUDACounter = 0;
1041 // #endif
1042  Node *node = Node::Object();
1043  SimParameters *simParams = node->simParameters;
1044  int myNode = node->myid();
1045 
1046  if ( simParams->globalForcesOn && !myNode )
1047  {
1048  DebugM(4,"Mgr running on Node "<<CkMyPe()<<"\n");
1049  /* create a master server to allow multiple masters */
1050  masterServerObject = new GlobalMasterServer(this,
1051  PatchMap::Object()->numNodesWithPatches());
1052 
1053  #ifdef NODEGROUP_FORCE_REGISTER
1054  CProxy_PatchData cpdata(CkpvAccess(BOCclass_group).patchData);
1055  PatchData *patchData = cpdata.ckLocalBranch();
1056  patchData->master_mgr = this;
1057  #endif
1058 
1059  /* create the individual global masters */
1060  // masterServerObject->addClient(new GlobalMasterTest());
1061  if (simParams->tclForcesOn)
1062  masterServerObject->addClient(new GlobalMasterTcl());
1063  if (simParams->IMDon && ! (simParams->IMDignore || simParams->IMDignoreForces) )
1064  masterServerObject->addClient(new GlobalMasterIMD());
1065  // SMD is implemented on GPU resident version of NAMD (NAMD3)
1066  if (simParams->SMDOn && !simParams->CUDASOAintegrateMode)
1067  masterServerObject->addClient(
1068  new GlobalMasterSMD(simParams->SMDk, simParams->SMDk2,
1069  simParams->SMDVel,
1070  simParams->SMDDir, simParams->SMDOutputFreq,
1071  simParams->firstTimestep, simParams->SMDFile,
1072  node->molecule->numAtoms)
1073  );
1074 
1075  if (simParams->symmetryOn &&
1076  (simParams->firstTimestep < simParams->symmetryLastStep ||
1077  simParams->symmetryLastStep == -1))
1078  masterServerObject->addClient(new GlobalMasterSymmetry());
1079  if (simParams->TMDOn)
1080  masterServerObject->addClient(new GlobalMasterTMD());
1081  if (simParams->miscForcesOn)
1082  masterServerObject->addClient(new GlobalMasterMisc());
1083  if ( simParams->freeEnergyOn )
1084  masterServerObject->addClient(new GlobalMasterFreeEnergy());
1085  if ( simParams->colvarsOn )
1086  masterServerObject->addClient(new GlobalMasterColvars());
1087 
1088  }
1089 
1090  if ( !myNode && simParams->IMDon && (simParams->IMDignore || simParams->IMDignoreForces) ) {
1091  // GlobalMasterIMD constructor saves pointer to node->IMDOutput object
1092  new GlobalMasterIMD();
1093  }
1094 
1095 #if defined(NAMD_CUDA) || defined(NAMD_HIP)
1096  bool deviceIsMine = ( deviceCUDA->getMasterPe() == CkMyPe() );
1097 #ifdef BONDED_CUDA
1098  // Place bonded forces on Pe different from non-bonded forces
1099  int bondedMasterPe = deviceCUDA->getMasterPe();
1100  // for (int i=0;i < deviceCUDA->getNumPesSharingDevice();i++) {
1101  // int pe = deviceCUDA->getPesSharingDevice(i);
1102  // if (pe != deviceCUDA->getMasterPe()) {
1103  // bondedMasterPe = pe;
1104  // }
1105  // }
1106  bool deviceIsMineBonded = (CkMyPe() == bondedMasterPe);
1107 #endif
1108 #endif
1109 
1110  for (int i=0; i < map->nComputes; i++)
1111  {
1112  if ( ! ( i % 100 ) )
1113  {
1114  }
1115 
1116 #if defined(NAMD_CUDA) || defined(NAMD_HIP)
1117  switch ( map->type(i) )
1118  {
1119  // case computePmeCUDAType:
1120  // // Only create single ComputePmeCUDA object per Pe
1121  // if ( map->computeData[i].node != myNode ) continue;
1122  // if (ComputePmeCUDACounter > 0) continue;
1123  // ComputePmeCUDACounter++;
1124  // break;
1125  case computeNonbondedSelfType:
1126  if ( ! deviceIsMine ) continue;
1127  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1128  break;
1129 
1130  case computeNonbondedPairType:
1131  if ( ! deviceIsMine ) continue;
1132  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1133  break;
1134 
1135 #ifdef BONDED_CUDA
1136  case computeSelfBondsType:
1137  case computeBondsType:
1138  if (simParams->bondedCUDA & NAMD_BONDEDGPU_BONDS) {
1139  if ( ! deviceIsMineBonded ) continue;
1140  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1141  } else {
1142  if ( map->computeData[i].node != myNode ) continue;
1143  }
1144  break;
1145 
1146  case computeSelfAnglesType:
1147  case computeAnglesType:
1148  if (simParams->bondedCUDA & NAMD_BONDEDGPU_ANGLES) {
1149  if ( ! deviceIsMineBonded ) continue;
1150  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1151  } else {
1152  if ( map->computeData[i].node != myNode ) continue;
1153  }
1154  break;
1155 
1156  case computeSelfDihedralsType:
1157  case computeDihedralsType:
1158  if (simParams->bondedCUDA & NAMD_BONDEDGPU_DIHEDRALS) {
1159  if ( ! deviceIsMineBonded ) continue;
1160  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1161  } else {
1162  if ( map->computeData[i].node != myNode ) continue;
1163  }
1164  break;
1165 
1166  case computeSelfImpropersType:
1167  case computeImpropersType:
1168  if (simParams->bondedCUDA & NAMD_BONDEDGPU_IMPROPERS) {
1169  if ( ! deviceIsMineBonded ) continue;
1170  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1171  } else {
1172  if ( map->computeData[i].node != myNode ) continue;
1173  }
1174  break;
1175 
1176  case computeSelfExclsType:
1177  case computeExclsType:
1178  if (simParams->bondedCUDA & NAMD_BONDEDGPU_EXCLS) {
1179  if ( ! deviceIsMineBonded ) continue;
1180  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1181  } else {
1182  if ( map->computeData[i].node != myNode ) continue;
1183  }
1184  break;
1185 
1186  case computeSelfCrosstermsType:
1187  case computeCrosstermsType:
1188  if (simParams->bondedCUDA & NAMD_BONDEDGPU_CROSSTERMS) {
1189  if ( ! deviceIsMineBonded ) continue;
1190  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1191  } else {
1192  if ( map->computeData[i].node != myNode ) continue;
1193  }
1194  break;
1195 
1196  case computeSelfTholeType:
1197  case computeTholeType:
1198  if (simParams->bondedCUDA & NAMD_BONDEDGPU_THOLES) {
1199  if ( ! deviceIsMineBonded ) continue;
1200  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1201  } else {
1202  if ( map->computeData[i].node != myNode ) continue;
1203  }
1204  break;
1205 
1206  case computeSelfAnisoType:
1207  case computeAnisoType:
1208  if (simParams->bondedCUDA & NAMD_BONDEDGPU_ANISOS) {
1209  if ( ! deviceIsMineBonded ) continue;
1210  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1211  } else {
1212  if ( map->computeData[i].node != myNode ) continue;
1213  }
1214  break;
1215 
1216  case computeSelfOneFourNbTholeType:
1217  case computeOneFourNbTholeType:
1218  if (simParams->bondedCUDA & NAMD_BONDEDGPU_ONEFOURENBTHOLES) {
1219  if ( ! deviceIsMineBonded ) continue;
1220  if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
1221  } else {
1222  if ( map->computeData[i].node != myNode ) continue;
1223  }
1224  break;
1225 
1226  case computeBondedCUDAType:
1227  if ( ! deviceIsMineBonded ) continue;
1228  if ( map->computeData[i].node != myNode ) continue;
1229  break;
1230 #endif // BONDED_CUDA
1231 
1232  case computeNonbondedCUDA2Type:
1233  if ( ! deviceIsMine ) continue;
1234 // #ifdef BONDED_CUDA
1235 // case computeBondedCUDAType:
1236 // #endif
1237  default:
1238  if ( map->computeData[i].node != myNode ) continue;
1239  }
1240 #else // defined(NAMD_CUDA) || defined(NAMD_HIP)
1241  if ( map->computeData[i].node != myNode ) continue;
1242 #endif
1243  DebugM(1,"Compute " << i << '\n');
1244  DebugM(1," node = " << map->computeData[i].node << '\n');
1245  DebugM(1," type = " << map->computeData[i].type << '\n');
1246  DebugM(1," numPids = " << map->computeData[i].numPids << '\n');
1247 // DebugM(1," numPidsAllocated = " << map->computeData[i].numPidsAllocated << '\n');
1248  for (int j=0; j < map->computeData[i].numPids; j++)
1249  {
1250  DebugM(1," pid " << map->computeData[i].pids[j].pid << '\n');
1251  if (!((j+1) % 6))
1252  DebugM(1,'\n');
1253  }
1254  DebugM(1,"\n---------------------------------------");
1255  DebugM(1,"---------------------------------------\n");
1256 
1257  createCompute(i, map);
1258 
1259  }
1260 
1261 #if defined(NAMD_CUDA) || defined(NAMD_HIP)
1262  if (deviceIsMine) {
1263  getCudaComputeNonbonded()->assignPatches(this);
1264  getCudaComputeNonbonded()->initialize();
1265  }
1266 #ifdef BONDED_CUDA
1267  if (simParams->bondedCUDA) {
1268  if (deviceIsMineBonded) {
1269  getComputeBondedCUDA()->initialize();
1270  }
1271  }
1272 #endif
1273 #endif
1274 }
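
The placement logic above reduces to a small filter: GPU computes are created only on the device master PE, and only for map entries whose home PE shares that device, while everything else is created on its home PE. A minimal standalone sketch of that filter follows; the ToyType/Entry names and the helper functions are invented stand-ins for the ComputeMap and DeviceCUDA queries, not the real API.

    #include <cstdio>
    #include <vector>

    // Invented stand-ins for ComputeMap entries and DeviceCUDA queries.
    enum ToyType { NonbondedSelf, NonbondedPair, Other };
    struct Entry { ToyType type; int node; };

    static bool sharedWithDevice(int node, const std::vector<int>& pesOnDevice) {
      for (int pe : pesOnDevice) if (pe == node) return true;
      return false;
    }

    int main() {
      const int myPe = 2, masterPe = 2;            // this PE is the device master
      const bool deviceIsMine = (myPe == masterPe);
      const std::vector<int> pesOnDevice{2, 3};    // PEs sharing this GPU
      const std::vector<Entry> map{{NonbondedSelf, 3}, {Other, 2}, {Other, 5}};

      for (size_t i = 0; i < map.size(); i++) {
        bool create = false;
        switch (map[i].type) {
          case NonbondedSelf:
          case NonbondedPair:
            // GPU compute: created once, on the device master PE, for any
            // compute whose home PE shares the device.
            create = deviceIsMine && sharedWithDevice(map[i].node, pesOnDevice);
            break;
          default:
            create = (map[i].node == myPe);        // CPU compute: home PE only
        }
        if (create) std::printf("PE %d creates compute %zu\n", myPe, i);
      }
      return 0;
    }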

◆ doneUpdateLocalComputes()

void ComputeMgr::doneUpdateLocalComputes ( )

Definition at line 348 of file ComputeMgr.C.

References DebugM.

349 {
350 
351 // if (!--updateComputesCount) {
352  DebugM(4, "doneUpdateLocalComputes on Pe("<<CkMyPe()<<")\n");
353  void *msg = CkAllocMsg(0,0,0);
354  CkSendMsgBranch(updateComputesReturnEP,msg,0,updateComputesReturnChareID);
355 // }
356 }

◆ enableComputeGlobalResults()

void ComputeMgr::enableComputeGlobalResults ( )

Definition at line 1407 of file ComputeMgr.C.

References computeGlobalResultsMsgs, computeGlobalResultsMsgSeq, DebugM, ResizeArray< Elem >::del(), NAMD_EVENT_START, NAMD_EVENT_STOP, recvComputeGlobalResults(), and ResizeArray< Elem >::size().

Referenced by ComputeGlobal::doWork().

1408 {
1409  NAMD_EVENT_START(1, NamdProfileEvent::GM_ENABLE_COMP_RESULTS);
1410  computeGlobalResultsMsgSeq += 1;
1411  DebugM(3,"["<<CkMyPe() <<"] enableComputeGlobalResults for "<< computeGlobalResultsMsgs.size() <<" messages seq "<< computeGlobalResultsMsgSeq <<"\n");
1412  for ( int i=0; i<computeGlobalResultsMsgs.size(); ++i ) {
1413  if ( computeGlobalResultsMsgs[i]->seq == computeGlobalResultsMsgSeq ) {
1414  ComputeGlobalResultsMsg *msg = computeGlobalResultsMsgs[i];
1415  computeGlobalResultsMsgs.del(i);
1416  recvComputeGlobalResults(msg);
1417  break;
1418  }
1419  }
1420  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_ENABLE_COMP_RESULTS);
1421  DebugM(3,"["<<CkMyPe() <<"] exiting enableComputeGlobalResults for "<< computeGlobalResultsMsgs.size() <<" messages seq "<< computeGlobalResultsMsgSeq <<"\n");
1422 }

◆ getCudaGlobalMasterUpdateResult()

int ComputeMgr::getCudaGlobalMasterUpdateResult ( const std::string &  client_name) const

Definition at line 1760 of file ComputeMgr.C.

1760  {
1761  return CudaGlobalMasterClientUpdateResults.at(client_name);
1762 }

◆ getCudaGlobalMasterUpdateResultString()

std::string ComputeMgr::getCudaGlobalMasterUpdateResultString ( const std::string &  client_name) const

Definition at line 1764 of file ComputeMgr.C.

1764  {
1765  return CudaGlobalMasterClientUpdateResultStrings.at(client_name);
1766 }

◆ recvAssignPatchesOnPe()

void ComputeMgr::recvAssignPatchesOnPe ( CudaComputeNonbondedMsg *  msg)

Definition at line 1793 of file ComputeMgr.C.

References CudaComputeNonbonded::assignPatchesOnPe(), and CudaComputeNonbondedMsg::c.

1793  {
1794  msg->c->assignPatchesOnPe();
1795  delete msg;
1796 }

◆ recvComputeConsForceMsg()

void ComputeMgr::recvComputeConsForceMsg ( ComputeConsForceMsg *  msg)

Definition at line 1544 of file ComputeMgr.C.

References ComputeConsForceMsg::aid, Molecule::consForce, Molecule::consForceIndexes, ComputeConsForceMsg::f, for(), Node::molecule, Molecule::numAtoms, Node::Object(), and ResizeArray< Elem >::size().

1545 {
1546  Molecule *m = Node::Object()->molecule;
1547  if(CkMyRank()==0){ // there is only one molecule per process
1548  delete [] m->consForceIndexes;
1549  delete [] m->consForce;
1550  int n = msg->aid.size();
1551  if (n > 0)
1552  {
1553  m->consForceIndexes = new int32[m->numAtoms];
1554  m->consForce = new Vector[n];
1555  int i;
1556  for (i=0; i<m->numAtoms; i++) m->consForceIndexes[i] = -1;
1557  for (i=0; i<msg->aid.size(); i++)
1558  {
1559  m->consForceIndexes[msg->aid[i]] = i;
1560  m->consForce[i] = msg->f[i];
1561  }
1562  }
1563  else
1564  {
1565  m->consForceIndexes = NULL;
1566  m->consForce = NULL;
1567  }
1568  }
1569  delete msg;
1570 #ifdef NODEGROUP_FORCE_REGISTER
1571  if(CkMyPe()==0)
1572  {
1573  CProxy_PatchData cpdata(CkpvAccess(BOCclass_group).patchData);
1574  cpdata.setDeviceKernelUpdateCounter();
1575  }
1576 #endif
1577 }
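
The message is unpacked into a sparse-to-dense mapping: consForceIndexes holds one entry per atom (-1 for atoms with no constant force), while consForce stores only the applied forces densely. A short worked example of the same construction, with plain vectors standing in for the Molecule fields:

    #include <cstdio>
    #include <vector>

    struct Vec { double x, y, z; };

    int main() {
      const int numAtoms = 6;
      // Pairs as they would arrive in a ComputeConsForceMsg (aid[i], f[i]).
      std::vector<int> aid{1, 4};
      std::vector<Vec> f{{0, 0, 1}, {2, 0, 0}};

      std::vector<int> consForceIndexes(numAtoms, -1);  // -1: no force applied
      std::vector<Vec> consForce(aid.size());
      for (size_t i = 0; i < aid.size(); i++) {
        consForceIndexes[aid[i]] = (int)i;  // atom id -> slot in consForce
        consForce[i] = f[i];
      }

      for (int a = 0; a < numAtoms; a++)
        if (consForceIndexes[a] >= 0) {
          const Vec& v = consForce[consForceIndexes[a]];
          std::printf("atom %d force (%g, %g, %g)\n", a, v.x, v.y, v.z);
        }
      return 0;
    }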

◆ recvComputeDPMEData()

void ComputeMgr::recvComputeDPMEData ( ComputeDPMEDataMsg *  msg)

Definition at line 1507 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

1508 {
1509  if ( computeDPMEObject )
1510  {
1511 #ifdef DPME
1512  computeDPMEObject->recvData(msg);
1513 #endif
1514  }
1515  else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
1516  else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
1517 }

◆ recvComputeDPMEResults()

void ComputeMgr::recvComputeDPMEResults ( ComputeDPMEResultsMsg *  msg)

Definition at line 1525 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

1526 {
1527  if ( computeDPMEObject )
1528  {
1529 #ifdef DPME
1530  computeDPMEObject->recvResults(msg);
1531 #endif
1532  }
1533  else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
1534  else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
1535 }

◆ recvComputeEwaldData()

void ComputeMgr::recvComputeEwaldData ( ComputeEwaldMsg *  msg)

Definition at line 1470 of file ComputeMgr.C.

References NAMD_die(), and ComputeEwald::recvData().

1471 {
1472  if (computeEwaldObject)
1473  computeEwaldObject->recvData(msg);
1474  else NAMD_die("ComputeMgr::computeEwaldObject in recvData is NULL!");
1475 }

◆ recvComputeEwaldResults()

void ComputeMgr::recvComputeEwaldResults ( ComputeEwaldMsg *  msg)

Definition at line 1482 of file ComputeMgr.C.

References NAMD_die(), PatchMap::Object(), and ComputeEwald::recvResults().

Referenced by sendComputeEwaldResults().

1483 {
1484  if (computeEwaldObject) {
1485  CmiEnableUrgentSend(1);
1486  computeEwaldObject->recvResults(msg);
1487  CmiEnableUrgentSend(0);
1488  }
1489  else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
1490  else NAMD_die("ComputeMgr::computeEwaldObject in recvResults is NULL!");
1491 }

◆ recvComputeGlobalConfig()

void ComputeMgr::recvComputeGlobalConfig ( ComputeGlobalConfigMsg *  )

◆ recvComputeGlobalData()

void ComputeMgr::recvComputeGlobalData ( ComputeGlobalDataMsg *  msg)

Definition at line 1357 of file ComputeMgr.C.

References DebugM, NAMD_die(), NAMD_EVENT_START, NAMD_EVENT_STOP, and GlobalMasterServer::recvData().

1358 {
1359  NAMD_EVENT_START(1, NamdProfileEvent::GM_RECV_COMP_DATA);
1360  if (masterServerObject) // make sure it has been initialized
1361  {
1362  DebugM(3, "["<<CkMyPe()<<"] recvComputeGlobalData calling recvData\n");
1363  masterServerObject->recvData(msg);
1364  }
1365  else NAMD_die("ComputeMgr::masterServerObject is NULL!");
1366  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_RECV_COMP_DATA);
1367 }

◆ recvComputeGlobalResults()

void ComputeMgr::recvComputeGlobalResults ( ComputeGlobalResultsMsg *  msg)

Definition at line 1424 of file ComputeMgr.C.

References ResizeArray< Elem >::add(), computeGlobalObject, computeGlobalResultsMsgs, computeGlobalResultsMsgSeq, DebugM, NAMD_die(), NAMD_EVENT_START, NAMD_EVENT_STOP, PatchMap::Object(), ComputeGlobal::recvResults(), and ComputeGlobalResultsMsg::seq.

Referenced by ComputeGlobal::doWork(), enableComputeGlobalResults(), and sendComputeGlobalData().

1425 {
1426  NAMD_EVENT_START(1, NamdProfileEvent::GM_RCV_COMP_RESULTS);
1427  DebugM(3,"[" << CkMyPe() << "] recvComputeGlobalResults msg->seq "<< msg->seq << " computeGlobalResultsMsgSeq " << computeGlobalResultsMsgSeq << "\n");
1428  if ( computeGlobalObject )
1429  {
1430  if ( msg->seq == computeGlobalResultsMsgSeq ) {
1431  CmiEnableUrgentSend(1);
1432 
1433  computeGlobalObject->recvResults(msg);
1434  // CkPrintf("*** past recvResults on PE %d \n", CkMyPe());
1435  CmiEnableUrgentSend(0);
1436  } else {
1437  // CkPrintf("*** Adding recvComputeGlobalResults on PE %d \n", CkMyPe());
1438  computeGlobalResultsMsgs.add(msg);
1439  }
1440  }
1441  else if ( ! (PatchMap::Object())->numHomePatches() )
1442  {
1443  // CkPrintf("*** ignoring recvComputeGlobalResults on PE %d due to no home patch\n", CkMyPe());
1444  delete msg;
1445  }
1446  else NAMD_die("ComputeMgr::computeGlobalObject is NULL!");
1447  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_RCV_COMP_RESULTS);
1448  // CkPrintf("*** exiting recvComputeGlobalResults on PE %d \n", CkMyPe());
1449 }

◆ recvCudaGlobalMasterCreateMsg()

void ComputeMgr::recvCudaGlobalMasterCreateMsg ( std::vector< std::string >  args)

Definition at line 1579 of file ComputeMgr.C.

References ComputeCUDAMgr::createCudaGlobalMaster(), DebugM, deviceCUDA, endi(), ComputeCUDAMgr::getComputeCUDAMgr(), ComputeCUDAMgr::getCudaGlobalMaster(), DeviceCUDA::getGlobalDevice(), DeviceCUDA::getIsGlobalDevice(), DeviceCUDA::getMasterPe(), iERROR(), iINFO(), iout, NAMD_die(), Node::Object(), Node::simParameters, and simParams.

1579  {
1580 #if (defined(NAMD_CUDA) || defined(NAMD_HIP))
1581  Node *node = Node::Object();
1582  SimParameters *simParams = node->simParameters;
1583  if (simParams->CUDASOAintegrate && simParams->useCudaGlobal) {
1584 #ifdef NODEGROUP_FORCE_REGISTER
1585  if (deviceCUDA->getMasterPe() == CkMyPe()) {
1586  if (deviceCUDA->getIsGlobalDevice()) {
1587  DebugM(3, "Call recvCudaGlobalMasterCreateMsg on master PE " << CkMyPe() << ".\n");
1588  ComputeCUDAMgr *cudaMgr = ComputeCUDAMgr::getComputeCUDAMgr();
1589  cudaMgr->createCudaGlobalMaster();
1590  std::shared_ptr<CudaGlobalMaster::CudaGlobalMasterClient> client = nullptr;
1591  const std::string library_name = args[0];
1592  // Find to see if library_name has been loaded
1593  std::shared_ptr<dlloader::DLLoader<CudaGlobalMaster::CudaGlobalMasterClient>> loader = nullptr;
1594  for (auto it = CudaGlobalMasterClientDlloaders.begin();
1595  it != CudaGlobalMasterClientDlloaders.end(); ++it) {
1596  if ((*it)->LibName() == library_name) {
1597  loader = (*it);
1598  break;
1599  }
1600  }
1601  // Create a new loader if not found
1602  if (loader == nullptr) {
1603  loader = std::shared_ptr<dlloader::DLLoader<CudaGlobalMaster::CudaGlobalMasterClient>>(new dlloader::DLLoader<CudaGlobalMaster::CudaGlobalMasterClient>(library_name));
1604  }
1605  try {
1606  iout << iINFO << "Loading library " << library_name
1607  << " on PE: " << CkMyPe() << "\n" << endi;
1608  loader->DLOpenLib();
1609  client = loader->DLGetInstance();
1610  } catch (std::exception& e) {
1611  iout << iERROR << "Cannot load the shared library " << library_name << "\n" << endi;
1612  NAMD_die(e.what());
1613  }
1614  // Try to initialize the client
1615  try {
1616  client->initialize(args,
1617  deviceCUDA->getGlobalDevice(),
1618  cudaMgr->getCudaGlobalMaster()->getStream());
1619  client->subscribe(cudaMgr->getCudaGlobalMaster());
1620  iout << iINFO << "CudaGlobalMaster client \"" << client->name()
1621  << "\"" << " initialized\n" << endi;
1622  } catch (std::exception& e) {
1623  iout << iERROR << "Cannot initialize the CudaGlobalMaster client from "
1624  << library_name << "\n" << endi;
1625  NAMD_die(e.what());
1626  }
1627  CudaGlobalMasterClientDlloaders.push_back(loader);
1628  } else {
1629  DebugM(3, "Skip recvCudaGlobalMasterCreateMsg on master PE " <<
1630  CkMyPe() << " that is not scheduled for GPU-resident global master.\n");
1631  }
1632  } else {
1633  DebugM(3, "Skip recvCudaGlobalMasterCreateMsg on non-master PE " << CkMyPe() << ".\n");
1634  }
1635 #endif // NODEGROUP_FORCE_REGISTER
1636  } else {
1637  if (!(simParams->CUDASOAintegrate)) {
1638  NAMD_die("GPU-resident mode is not enabled.\n");
1639  }
1640  if (!(simParams->useCudaGlobal)) {
1641  NAMD_die("GPU-resident external forces are not enabled.\n");
1642  }
1643  }
1644  // CmiNodeBarrier();
1645 #endif // (defined(NAMD_CUDA) || defined(NAMD_HIP))
1646 }
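
The loader cache means a given library name is opened at most once and reused for later create messages; a failure to load or initialize terminates the run via NAMD_die. A minimal sketch of that open-once plugin pattern using raw POSIX dlopen/dlsym (link with -ldl); the factory symbol name create_client is made up for illustration and is not the CudaGlobalMaster ABI:

    #include <dlfcn.h>
    #include <cstdio>
    #include <map>
    #include <string>

    // Cache of already-opened libraries, keyed by name, mirroring the
    // CudaGlobalMasterClientDlloaders lookup loop.
    static std::map<std::string, void*> openLibs;

    static void* getHandle(const std::string& lib) {
      auto it = openLibs.find(lib);
      if (it != openLibs.end()) return it->second;   // reuse the existing loader
      void* h = dlopen(lib.c_str(), RTLD_NOW);
      if (!h) { std::fprintf(stderr, "dlopen: %s\n", dlerror()); return nullptr; }
      openLibs[lib] = h;
      return h;
    }

    int main(int argc, char** argv) {
      if (argc < 2) return 0;            // argv[1]: shared library path
      void* h = getHandle(argv[1]);
      if (!h) return 1;
      using Factory = void* (*)();
      if (auto create = (Factory)dlsym(h, "create_client"))
        std::printf("instantiated client %p\n", create());
      dlclose(h);                        // the destructor's DLCloseLib() plays this role
      return 0;
    }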

◆ recvCudaGlobalMasterRemoveMsg()

void ComputeMgr::recvCudaGlobalMasterRemoveMsg ( std::vector< std::string >  args)

Definition at line 1648 of file ComputeMgr.C.

References deviceCUDA, endi(), ComputeCUDAMgr::getComputeCUDAMgr(), ComputeCUDAMgr::getCudaGlobalMaster(), DeviceCUDA::getIsGlobalDevice(), DeviceCUDA::getMasterPe(), iINFO(), iout, NAMD_die(), Node::Object(), Node::simParameters, and simParams.

1648  {
1649 #if (defined(NAMD_CUDA) || defined(NAMD_HIP))
1650  Node *node = Node::Object();
1651  SimParameters *simParams = node->simParameters;
1652  const std::string client_name_to_remove = args[0];
1653  if (simParams->CUDASOAintegrate && simParams->useCudaGlobal) {
1654 #ifdef NODEGROUP_FORCE_REGISTER
1655  if (deviceCUDA->getMasterPe() == CkMyPe()) {
1656  if (deviceCUDA->getIsGlobalDevice()) {
1657  ComputeCUDAMgr *cudaMgr = ComputeCUDAMgr::getComputeCUDAMgr();
1658  std::shared_ptr<CudaGlobalMasterServer> gm = cudaMgr->getCudaGlobalMaster();
1659  if (gm) {
1660  std::shared_ptr<CudaGlobalMaster::CudaGlobalMasterClient> c = nullptr;
1661  const std::vector<std::shared_ptr<CudaGlobalMaster::CudaGlobalMasterClient>>& clients = gm->getClients();
1662  for (size_t i = 0; i < clients.size(); ++i) {
1663  if (client_name_to_remove == clients[i]->name()) {
1664  c = clients[i];
1665  break;
1666  }
1667  }
1668  if (c) {
1669  gm->removeClient(c);
1670  iout << iINFO << "CudaGlobalMasterClient \""
1671  << client_name_to_remove << "\" removed\n" << endi;
1672  } else {
1673  const std::string error = "CudaGlobalMasterClient \""
1674  + client_name_to_remove + "\" not found";
1675  NAMD_die(error.c_str());
1676  }
1677  }
1678  }
1679  }
1680 #endif // NODEGROUP_FORCE_REGISTER
1681  } else {
1682  if (!(simParams->CUDASOAintegrate)) {
1683  NAMD_die("GPU-resident mode is not enabled.\n");
1684  }
1685  if (!(simParams->useCudaGlobal)) {
1686  NAMD_die("GPU-resident external forces are not enabled.\n");
1687  }
1688  }
1689 #endif // (defined(NAMD_CUDA) || defined(NAMD_HIP))
1690 }

◆ recvCudaGlobalMasterUpdateMsg()

void ComputeMgr::recvCudaGlobalMasterUpdateMsg ( std::vector< std::string >  args)

Definition at line 1692 of file ComputeMgr.C.

References deviceCUDA, endi(), ComputeCUDAMgr::getComputeCUDAMgr(), ComputeCUDAMgr::getCudaGlobalMaster(), DeviceCUDA::getIsGlobalDevice(), DeviceCUDA::getMasterPe(), iINFO(), iout, NAMD_die(), Node::Object(), Node::simParameters, and simParams.

1692  {
1693 #if (defined(NAMD_CUDA) || defined(NAMD_HIP))
1694  std::vector<std::string> result_args;
1695  Node *node = Node::Object();
1696  SimParameters *simParams = node->simParameters;
1697  const std::string client_name_to_update = args[0];
1698 #ifdef NAMD_TCL
1699  int error_code = TCL_OK;
1700 #else
1701  int error_code = 0;
1702 #endif
1703  if (simParams->CUDASOAintegrate && simParams->useCudaGlobal) {
1704 #ifdef NODEGROUP_FORCE_REGISTER
1705  if (deviceCUDA->getMasterPe() == CkMyPe()) {
1706  if (deviceCUDA->getIsGlobalDevice()) {
1707  ComputeCUDAMgr *cudaMgr = ComputeCUDAMgr::getComputeCUDAMgr();
1708  std::shared_ptr<CudaGlobalMasterServer> gm = cudaMgr->getCudaGlobalMaster();
1709  if (gm) {
1710  std::shared_ptr<CudaGlobalMaster::CudaGlobalMasterClient> c = nullptr;
1711  const std::vector<std::shared_ptr<CudaGlobalMaster::CudaGlobalMasterClient>>& clients = gm->getClients();
1712  for (size_t i = 0; i < clients.size(); ++i) {
1713  if (client_name_to_update == clients[i]->name()) {
1714  c = clients[i];
1715  break;
1716  }
1717  }
1718  if (c) {
1719  result_args.push_back(client_name_to_update);
1720  error_code = c->updateFromTCLCommand(args);
1721  result_args.push_back(c->getTCLUpdateResult());
1722  iout << iINFO << "CudaGlobalMasterClient \""
1723  << client_name_to_update << "\" updated\n" << endi;
1724  } else {
1725  const std::string error = "CudaGlobalMasterClient \""
1726  + client_name_to_update + "\" not found";
1727  NAMD_die(error.c_str());
1728  }
1729  }
1730  }
1731  }
1732 #endif // NODEGROUP_FORCE_REGISTER
1733  } else {
1734  if (!(simParams->CUDASOAintegrate)) {
1735  NAMD_die("GPU-resident mode is not enabled.\n");
1736  }
1737  if (!(simParams->useCudaGlobal)) {
1738  NAMD_die("GPU-resident external forces are not enabled.\n");
1739  }
1740  }
1741  CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
1742  cm[0].recvCudaGlobalMasterUpdateResultMsg(error_code, result_args);
1743 #endif // (defined(NAMD_CUDA) || defined(NAMD_HIP))
1744 }

◆ recvCudaGlobalMasterUpdateResultMsg()

void ComputeMgr::recvCudaGlobalMasterUpdateResultMsg ( int  tcl_error_code,
std::vector< std::string >  args 
)

Definition at line 1746 of file ComputeMgr.C.

References NAMD_bug().

1746  {
1747  if (CkMyPe() == 0) {
1748  if (!args.empty()) {
1749  CudaGlobalMasterClientUpdateResults[args[0]] = tcl_error_code;
1750  CudaGlobalMasterClientUpdateResultStrings[args[0]] = args[1];
1751  }
1752  } else {
1753  const std::string error =
1754  "recvCudaGlobalMasterUpdateResultMsg is called on " +
1755  std::to_string(CkMyPe()) + " but expected on PE 0!\n";
1756  NAMD_bug(error.c_str());
1757  }
1758 }

◆ recvFinishPatchesOnPe()

void ComputeMgr::recvFinishPatchesOnPe ( CudaComputeNonbondedMsg *  msg)

Definition at line 1820 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::finishPatchesOnPe().

1820  {
1821  msg->c->finishPatchesOnPe();
1822  delete msg;
1823 }

◆ recvFinishPatchOnPe()

void ComputeMgr::recvFinishPatchOnPe ( CudaComputeNonbondedMsg *  msg)

Definition at line 1833 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, CudaComputeNonbonded::finishPatchOnPe(), and CudaComputeNonbondedMsg::i.

1833  {
1834  msg->c->finishPatchOnPe(msg->i);
1835  delete msg;
1836 }

◆ recvFinishReductions()

void ComputeMgr::recvFinishReductions ( CudaComputeNonbondedMsg *  msg)

Definition at line 1858 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::finishReductions().

1858  {
1859  msg->c->finishReductions();
1860  delete msg;
1861 }

◆ recvLaunchWork()

void ComputeMgr::recvLaunchWork ( CudaComputeNonbondedMsg *  msg)

Definition at line 1880 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::launchWork().

1880  {
1881  msg->c->launchWork();
1882  delete msg;
1883 }

◆ recvMessageEnqueueWork()

void ComputeMgr::recvMessageEnqueueWork ( CudaComputeNonbondedMsg *  msg)

Definition at line 1869 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::messageEnqueueWork().

1869  {
1870  msg->c->messageEnqueueWork();
1871  delete msg;
1872 }

◆ recvOpenBoxesOnPe()

void ComputeMgr::recvOpenBoxesOnPe ( CudaComputeNonbondedMsg *  msg)

Definition at line 1847 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::openBoxesOnPe().

1847  {
1848  msg->c->openBoxesOnPe();
1849  delete msg;
1850 }

◆ recvSkipPatchesOnPe()

void ComputeMgr::recvSkipPatchesOnPe ( CudaComputeNonbondedMsg *  msg)

Definition at line 1806 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::skipPatchesOnPe().

1806  {
1807  msg->c->skipPatchesOnPe();
1808  delete msg;
1809 }

◆ recvUnregisterBoxesOnPe()

void ComputeMgr::recvUnregisterBoxesOnPe ( CudaComputeNonbondedMsg *  msg)

Definition at line 1893 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::unregisterBoxesOnPe().

1893  {
1894  msg->c->unregisterBoxesOnPe();
1895  delete msg;
1896 }

◆ recvYieldDevice()

void ComputeMgr::recvYieldDevice ( int  pe)

Definition at line 1773 of file ComputeMgr.C.

1773  {
1774  // XXX MIC support was only code using YieldDevice functionality
1775  // computeNonbondedMICObject->recvYieldDevice(pe);
1776 }

◆ sendAssignPatchesOnPe()

void ComputeMgr::sendAssignPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1785 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::assignPatches().

1785  {
1786  for (int i=0;i < pes.size();i++) {
1787  CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
1788  msg->c = c;
1789  thisProxy[pes[i]].recvAssignPatchesOnPe(msg);
1790  }
1791 }
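
All of the send*OnPe/recv*OnPe pairs in this class share one shape: the sender allocates a fresh CudaComputeNonbondedMsg per target PE, stores the compute pointer in it, and delivers it through the group proxy; the receiver invokes the matching method on the compute and deletes the message. A standalone model of that fan-out, with a direct call standing in for the Charm++ proxy send and toy types in place of the real classes:

    #include <cstdio>
    #include <vector>

    struct ToyCompute {
      void assignPatchesOnPe(int pe) { std::printf("assignPatchesOnPe on PE %d\n", pe); }
    };
    struct ToyMsg { ToyCompute* c; };

    // Receiver: run the method, then free the message, as recvAssignPatchesOnPe does.
    static void recvOnPe(int pe, ToyMsg* msg) {
      msg->c->assignPatchesOnPe(pe);
      delete msg;
    }

    // Sender: one freshly allocated message per destination PE.
    static void sendOnPes(const std::vector<int>& pes, ToyCompute* c) {
      for (size_t i = 0; i < pes.size(); i++) {
        ToyMsg* msg = new ToyMsg;
        msg->c = c;
        recvOnPe(pes[i], msg);   // in NAMD: thisProxy[pes[i]].recvAssignPatchesOnPe(msg)
      }
    }

    int main() {
      ToyCompute c;
      std::vector<int> pes{0, 1, 3};
      sendOnPes(pes, &c);
      return 0;
    }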

◆ sendComputeDPMEData()

void ComputeMgr::sendComputeDPMEData ( ComputeDPMEDataMsg *  msg)

Definition at line 1493 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

1494 {
1495  if ( computeDPMEObject )
1496  {
1497 #ifdef DPME
1498  int node = computeDPMEObject->getMasterNode();
1499  CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
1500  cm.recvComputeDPMEData(msg,node);
1501 #endif
1502  }
1503  else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
1504  else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
1505 }

◆ sendComputeDPMEResults()

void ComputeMgr::sendComputeDPMEResults ( ComputeDPMEResultsMsg *  msg,
int  node 
)

Definition at line 1519 of file ComputeMgr.C.

1520 {
1521  CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
1522  cm[node].recvComputeDPMEResults(msg);
1523 }

◆ sendComputeEwaldData()

void ComputeMgr::sendComputeEwaldData ( ComputeEwaldMsg *  msg)

Definition at line 1454 of file ComputeMgr.C.

References ComputeEwald::getMasterNode(), NAMD_die(), and PatchMap::Object().

Referenced by ComputeEwald::doWork().

1455 {
1456  if (computeEwaldObject)
1457  {
1458  int node = computeEwaldObject->getMasterNode();
1459  CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
1460  cm[node].recvComputeEwaldData(msg);
1461  }
1462  else if (!PatchMap::Object()->numHomePatches())
1463  {
1464  // CkPrintf("skipping message on Pe(%d)\n", CkMyPe());
1465  delete msg;
1466  }
1467  else NAMD_die("ComputeMgr::computeEwaldObject is NULL!");
1468 }

◆ sendComputeEwaldResults()

void ComputeMgr::sendComputeEwaldResults ( ComputeEwaldMsg *  msg)

Definition at line 1477 of file ComputeMgr.C.

References recvComputeEwaldResults().

Referenced by ComputeEwald::recvData().

1478 {
1479  (CProxy_ComputeMgr(CkpvAccess(BOCclass_group).computeMgr)).recvComputeEwaldResults(msg);
1480 }

◆ sendComputeGlobalConfig()

void ComputeMgr::sendComputeGlobalConfig ( ComputeGlobalConfigMsg *  )

◆ sendComputeGlobalData()

void ComputeMgr::sendComputeGlobalData ( ComputeGlobalDataMsg *  msg)

Definition at line 1294 of file ComputeMgr.C.

References SimParameters::CUDASOAintegrate, DebugM, NAMD_EVENT_START, NAMD_EVENT_STOP, Node::Object(), recvComputeGlobalResults(), and Node::simParameters.

Referenced by ComputeGlobal::doWork().

1295 {
1296  NAMD_EVENT_START(1, NamdProfileEvent::GM_SEND_COMP_DATA);
1297  // CkPrintf("*** [%d] Calling sendComputeGlobalData\n", CkMyPe());
1298  #ifdef NODEGROUP_FORCE_REGISTER
1299  SimParameters *sp = Node::Object()->simParameters;
1300  if (sp->CUDASOAintegrate) {
1301  NAMD_EVENT_START(1, NamdProfileEvent::GM_NODELOCK);
1302  CProxy_PatchData cpdata(CkpvAccess(BOCclass_group).patchData);
1303  PatchData *patchData = cpdata.ckLocalBranch();
1304  CmiNodeLock &nl = patchData->nodeLock;
1305  // atomic access to GlobalMasterServer to simulate queueing
1306  if (CkMyPe() != 0)
1307  {
1308  CmiLock(nl);
1309  //CkPrintf("*** [%d] Acquired nodelock!\n", CkMyPe());
1310  patchData->master_mgr->recvComputeGlobalData(msg);
1311  CmiUnlock(nl);
1312  }
1313  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_NODELOCK);
1314  NAMD_EVENT_START(1, NamdProfileEvent::GM_BARRIER);
1315  // Barrier to make sure 0 goes last, since invocation of the clients and
1316  // message coordination has to happen on PE 0 and the last PE to call
1317  // recvComputeGlobalData will trigger all of that on itself
1318  // CmiNodeBarrier();
1319  // CkPrintf("*** sendComputeGlobalData entering barrier 1 on PE %d \n", CkMyPe());
1320  stowSuspendULT();
1321 
1322  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_BARRIER);
1323  if (CkMyPe() == 0)
1324  {
1325  CmiLock(nl);
1326  patchData->master_mgr->recvComputeGlobalData(msg);
1327  CmiUnlock(nl);
1328  }
1329  else
1330  {
1331  // All PEs other than 0 wait here while the clients run and the global
1332  // results messages are prepared and copied into their slots (happens from
1333  // sendComputeGlobalResults on PE0)
1334  // CmiNodeBarrier();
1335  // CkPrintf("before call to stow %d\n",CkMyPe());
1336  // CkPrintf("*** sendComputeGlobalData barrier 3 on PE %d \n", CkMyPe());
1337  stowSuspendULT();
1338  // CkPrintf("*** sendComputeGlobalData out barrier 3 on PE %d \n", CkMyPe());
1339  // CkPrintf("returned from call to stow %d\n",CkMyPe());
1340  }
1341  // Get the message from the slot for this PE and resume execution
1342  ComputeGlobalResultsMsg* resultsMsg = CkpvAccess(ComputeGlobalResultsMsg_instance);
1343  DebugM(3,"["<<CkMyPe()<<"] calling recvComputeGlobalResults\n");
1344  recvComputeGlobalResults(resultsMsg);
1345  } else {
1346  #endif
1347  CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
1348  DebugM(3,"["<<CkMyPe()<<"] msg to recvComputeGlobalData\n");
1349  cm[0].recvComputeGlobalData(msg);
1350  #ifdef NODEGROUP_FORCE_REGISTER
1351  }
1352  #endif
1353  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_SEND_COMP_DATA);
1354  DebugM(3,"["<<CkMyPe()<<"] done sendComputeGlobalData\n");
1355 }
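
In the GPU-resident branch every PE hands its data to the GlobalMasterServer under the node lock, but PE 0 must go last: the final delivery is what runs the clients and fans the results back out, and the stowSuspendULT() calls act as node-level barriers around PE 0's turn. A minimal model of that "everyone else first, rank 0 last" ordering, with std::mutex and C++20 std::barrier standing in for CmiLock and the Charm++ barrier (compile with -std=c++20 -pthread):

    #include <barrier>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    int main() {
      const int nPes = 4;
      std::mutex nodeLock;          // plays the role of patchData->nodeLock
      std::barrier sync(nPes);      // plays the role of stowSuspendULT()

      auto pe = [&](int rank) {
        if (rank != 0) {            // every PE except 0 delivers its data first
          std::lock_guard<std::mutex> g(nodeLock);
          std::printf("PE %d delivered data\n", rank);
        }
        sync.arrive_and_wait();     // ensure PE 0 goes last
        if (rank == 0) {
          std::lock_guard<std::mutex> g(nodeLock);
          std::printf("PE 0 delivered last; clients run and results are copied\n");
        }
        sync.arrive_and_wait();     // everyone else waits for the results
        std::printf("PE %d resumes with results\n", rank);
      };

      std::vector<std::thread> ts;
      for (int r = 0; r < nPes; r++) ts.emplace_back(pe, r);
      for (auto& t : ts) t.join();
      return 0;
    }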

◆ sendComputeGlobalResults()

void ComputeMgr::sendComputeGlobalResults ( ComputeGlobalResultsMsg *  msg)

Definition at line 1369 of file ComputeMgr.C.

References computeGlobalResultsMsgMasterSeq, SimParameters::CUDASOAintegrate, DebugM, NAMD_EVENT_START, NAMD_EVENT_STOP, Node::Object(), ComputeGlobalResultsMsg::seq, and Node::simParameters.

1370 {
1371  NAMD_EVENT_START(1, NamdProfileEvent::GM_SEND_COMP_RESULTS);
1372  msg->seq = ++computeGlobalResultsMsgMasterSeq;
1373  DebugM(3,"["<< CkMyPe()<< "] sendComputeGlobalResults seq "<<msg->seq<<"\n");
1374 
1375  #ifdef NODEGROUP_FORCE_REGISTER
1376  SimParameters *sp = Node::Object()->simParameters;
1377  if (sp->CUDASOAintegrate) {
1378  // Only PE 0 runs this code
1379  // Copy the message into each PE's slot (Assumes single-node with multicore build)
1380  for (int pe = 0; pe < CkMyNodeSize(); pe++) {
1381  if(CkpvAccessOther(ComputeGlobalResultsMsg_instance, pe)!=nullptr)
1382  {
1383  // make sure msg delete happens on the same PE as made the msg to
1384  // avoid unbounded memory pool growth for these unsent messages
1385  delete CkpvAccessOther(ComputeGlobalResultsMsg_instance, pe);
1386  }
1387  CkpvAccessOther(ComputeGlobalResultsMsg_instance, pe) = (ComputeGlobalResultsMsg*)CkCopyMsg((void**)&msg);
1388  }
1389  delete msg;
1390  // Now that copies are done, trigger the barrier to resume the other PEs
1391  // (most other PEs call this barrier from sendComputeGlobalData)
1392  // CkPrintf("this is where we would call awaken\n",CkMyPe());
1393  //CmiNodeBarrier();
1394  // CkPrintf("*** sendComputeGlobalResults entering barrier 2 on PE %d \n", CkMyPe());
1395  stowSuspendULT();
1396  //thisProxy.recvComputeGlobalResults(msg);
1397  } else {
1398  #endif
1399  DebugM(3,"["<< CkMyPe() << "] ComputeMgr::sendComputeGlobalResults invoking bcast recvComputeGlobalResults\n");
1400  thisProxy.recvComputeGlobalResults(msg);
1401  #ifdef NODEGROUP_FORCE_REGISTER
1402  }
1403  #endif
1404  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_SEND_COMP_RESULTS);
1405 }

◆ sendFinishPatchesOnPe()

void ComputeMgr::sendFinishPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1811 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, COMPUTE_PROXY_PRIORITY, PRIORITY_SIZE, Compute::sequence(), and SET_PRIORITY.

Referenced by CudaComputeNonbonded::finishPatches().

1811  {
1812  for (int i=0;i < pes.size();i++) {
1813  CudaComputeNonbondedMsg *msg = new (PRIORITY_SIZE) CudaComputeNonbondedMsg;
1814  SET_PRIORITY(msg, c->sequence(), COMPUTE_PROXY_PRIORITY);
1815  msg->c = c;
1816  thisProxy[pes[i]].recvFinishPatchesOnPe(msg);
1817  }
1818 }

◆ sendFinishPatchOnPe()

void ComputeMgr::sendFinishPatchOnPe ( int  pe,
CudaComputeNonbonded *  c,
int  i,
PatchID  patchID 
)

Definition at line 1825 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, COMPUTE_PROXY_PRIORITY, CudaComputeNonbondedMsg::i, PATCH_PRIORITY, PRIORITY_SIZE, Compute::sequence(), and SET_PRIORITY.

1825  {
1826  CudaComputeNonbondedMsg *msg = new (PRIORITY_SIZE) CudaComputeNonbondedMsg;
1827  SET_PRIORITY(msg, c->sequence(), COMPUTE_PROXY_PRIORITY + PATCH_PRIORITY(patchID));
1828  msg->c = c;
1829  msg->i = i;
1830  thisProxy[pe].recvFinishPatchOnPe(msg);
1831 }

◆ sendFinishReductions()

void ComputeMgr::sendFinishReductions ( int  pe,
CudaComputeNonbonded *  c 
)

Definition at line 1852 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::skipPatchesOnPe().

1852  {
1853  CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
1854  msg->c = c;
1855  thisProxy[pe].recvFinishReductions(msg);
1856 }

◆ sendLaunchWork()

void ComputeMgr::sendLaunchWork ( int  pe,
CudaComputeNonbonded *  c 
)

Definition at line 1874 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::openBoxesOnPe().

1874  {
1875  CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
1876  msg->c = c;
1877  thisProxy[pe].recvLaunchWork(msg);
1878 }

◆ sendMessageEnqueueWork()

void ComputeMgr::sendMessageEnqueueWork ( int  pe,
CudaComputeNonbonded *  c 
)

Definition at line 1863 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::noWork().

1863  {
1864  CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
1865  msg->c = c;
1866  thisProxy[pe].recvMessageEnqueueWork(msg);
1867 }

◆ sendOpenBoxesOnPe()

void ComputeMgr::sendOpenBoxesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1838 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, PRIORITY_SIZE, PROXY_DATA_PRIORITY, Compute::sequence(), and SET_PRIORITY.

Referenced by CudaComputeNonbonded::doWork().

1838  {
1839  for (int i=0;i < pes.size();i++) {
1840  CudaComputeNonbondedMsg *msg = new (PRIORITY_SIZE) CudaComputeNonbondedMsg;
1841  SET_PRIORITY(msg, c->sequence(), PROXY_DATA_PRIORITY+1); // after bonded
1842  msg->c = c;
1843  thisProxy[pes[i]].recvOpenBoxesOnPe(msg);
1844  }
1845 }

◆ sendSkipPatchesOnPe()

void ComputeMgr::sendSkipPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1798 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

1798  {
1799  for (int i=0;i < pes.size();i++) {
1800  CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
1801  msg->c = c;
1802  thisProxy[pes[i]].recvSkipPatchesOnPe(msg);
1803  }
1804 }

◆ sendUnregisterBoxesOnPe()

void ComputeMgr::sendUnregisterBoxesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1885 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::~CudaComputeNonbonded().

1885  {
1886  for (int i=0;i < pes.size();i++) {
1887  CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
1888  msg->c = c;
1889  thisProxy[pes[i]].recvUnregisterBoxesOnPe(msg);
1890  }
1891 }

◆ sendYieldDevice()

void ComputeMgr::sendYieldDevice ( int  pe)

Definition at line 1768 of file ComputeMgr.C.

1768  {
1769  CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
1770  cm[pe].recvYieldDevice(CkMyPe());
1771 }

◆ splitComputes()

void ComputeMgr::splitComputes ( )

Definition at line 175 of file ComputeMgr.C.

References ComputeMap::cloneCompute(), ComputeMap::extendPtrs(), ComputeMap::newNode(), ComputeMap::newNumPartitions(), ComputeMap::node(), ComputeMap::numComputes(), ComputeMap::numPartitions(), ComputeMap::Object(), ComputeMap::setNewNode(), ComputeMap::setNewNumPartitions(), and ComputeMap::setNumPartitions().

176 {
177  if ( ! CkMyRank() ) {
178  ComputeMap *computeMap = ComputeMap::Object();
179  const int nc = computeMap->numComputes();
180 
181  for (int i=0; i<nc; i++) {
182  int nnp = computeMap->newNumPartitions(i);
183  if ( nnp > 0 ) {
184  if ( computeMap->numPartitions(i) != 1 ) {
185  CkPrintf("Warning: unable to partition compute %d\n", i);
186  computeMap->setNewNumPartitions(i,0);
187  continue;
188  }
189  //CkPrintf("splitting compute %d by %d\n",i,nnp);
190  computeMap->setNumPartitions(i,nnp);
191  if (computeMap->newNode(i) == -1) {
192  computeMap->setNewNode(i,computeMap->node(i));
193  }
194  for ( int j=1; j<nnp; ++j ) {
195  int newcid = computeMap->cloneCompute(i,j);
196  //CkPrintf("compute %d partition %d is %d\n",i,j,newcid);
197  }
198  }
199  }
200  computeMap->extendPtrs();
201  }
202 
203  if (!CkMyPe())
204  {
205  CkStartQD(CkIndex_ComputeMgr::splitComputes2((CkQdMsg*)0), &thishandle);
206  }
207 }

◆ splitComputes2()

void ComputeMgr::splitComputes2 ( CkQdMsg *  msg)

Definition at line 209 of file ComputeMgr.C.

210 {
211  delete msg;
212  CProxy_ComputeMgr(thisgroup).updateLocalComputes();
213 }

◆ updateComputes()

void ComputeMgr::updateComputes ( int  ep,
CkGroupID  chareID 
)

Definition at line 142 of file ComputeMgr.C.

References NAMD_bug().

Referenced by LdbCoordinator::ExecuteMigrations().

143 {
144  updateComputesReturnEP = ep;
145  updateComputesReturnChareID = chareID;
146  updateComputesCount = CkNumPes();
147 
148  if (CkMyPe())
149  {
150  NAMD_bug("updateComputes signaled on wrong Pe!");
151  }
152 
153  CkStartQD(CkIndex_ComputeMgr::updateComputes2((CkQdMsg*)0),&thishandle);
154 }
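
updateComputes() kicks off a quiescence-detection driven pipeline: each phase broadcasts work to the group, and PE 0 arms CkStartQD so the next phase starts only after all messages from the current phase have drained (updateComputes → updateComputes2 → updateComputes3 → splitComputes → splitComputes2 → updateLocalComputes → … → doneUpdateLocalComputes). The sketch below models that phase chaining with a toy single-threaded scheduler in place of CkStartQD, and collapses the numbered intermediate phases:

    #include <cstdio>
    #include <functional>
    #include <queue>

    // Toy scheduler: "quiescence" fires when the work queue drains,
    // standing in for CkStartQD's callback-on-quiescence.
    static std::queue<std::function<void()>> work;
    static void startQD(std::function<void()> next) { work.push(std::move(next)); }

    static void doneUpdateLocalComputes() { std::printf("done: notify load balancer\n"); }
    static void updateLocalComputes()     { std::printf("migrate/create computes\n");
                                            startQD(doneUpdateLocalComputes); }
    static void splitComputes()           { std::printf("split partitioned computes\n");
                                            startQD(updateLocalComputes); }
    static void updateComputes()          { std::printf("save compute map changes\n");
                                            startQD(splitComputes); }

    int main() {
      startQD(updateComputes);
      while (!work.empty()) {     // each pop models "the system went quiescent"
        auto f = std::move(work.front()); work.pop();
        f();
      }
      return 0;
    }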

◆ updateComputes2()

void ComputeMgr::updateComputes2 ( CkQdMsg *  msg)

Definition at line 156 of file ComputeMgr.C.

References WorkDistrib::saveComputeMapChanges().

157 {
158  delete msg;
159 
160  CProxy_WorkDistrib wd(CkpvAccess(BOCclass_group).workDistrib);
161  WorkDistrib *workDistrib = wd.ckLocalBranch();
162  workDistrib->saveComputeMapChanges(CkIndex_ComputeMgr::updateComputes3(),thisgroup);
163 }

◆ updateComputes3()

void ComputeMgr::updateComputes3 ( )

Definition at line 165 of file ComputeMgr.C.

166 {
167  if ( skipSplitting ) {
168  CProxy_ComputeMgr(thisgroup).updateLocalComputes();
169  } else {
170  CProxy_ComputeMgr(thisgroup).splitComputes();
171  skipSplitting = 1;
172  }
173 }

◆ updateLocalComputes()

void ComputeMgr::updateLocalComputes ( )

Definition at line 215 of file ComputeMgr.C.

References ResizeArray< Elem >::add(), ComputeMap::compute(), ProxyMgr::createProxy(), Compute::ldObjHandle, LdbCoordinator::Migrate(), ComputeMap::newNode(), ComputeMap::newNumPartitions(), ComputeMap::node(), ComputeMap::numComputes(), ComputeMap::numPids(), ComputeMap::Object(), LdbCoordinator::Object(), ComputeMap::pid(), ComputeMap::registerCompute(), and ResizeArray< Elem >::resize().

216 {
217  ComputeMap *computeMap = ComputeMap::Object();
218  CProxy_ProxyMgr pm(CkpvAccess(BOCclass_group).proxyMgr);
219  ProxyMgr *proxyMgr = pm.ckLocalBranch();
220  LdbCoordinator *ldbCoordinator = LdbCoordinator::Object();
221 
222  computeFlag.resize(0);
223 
224  const int nc = computeMap->numComputes();
225  for (int i=0; i<nc; i++) {
226 
227  if ( computeMap->node(i) == CkMyPe() &&
228  computeMap->newNumPartitions(i) > 1 ) {
229  Compute *c = computeMap->compute(i);
230  ldbCoordinator->Migrate(c->ldObjHandle,CkMyPe());
231  delete c;
232  computeMap->registerCompute(i,NULL);
233  if ( computeMap->newNode(i) == CkMyPe() ) computeFlag.add(i);
234  } else
235  if (computeMap->newNode(i) == CkMyPe() && computeMap->node(i) != CkMyPe())
236  {
237  computeFlag.add(i);
238  for (int n=0; n < computeMap->numPids(i); n++)
239  {
240  proxyMgr->createProxy(computeMap->pid(i,n));
241  }
242  }
243  else if (computeMap->node(i) == CkMyPe() &&
244  (computeMap->newNode(i) != -1 && computeMap->newNode(i) != CkMyPe() ))
245  {
246  // CkPrintf("delete compute %d on pe %d\n",i,CkMyPe());
247  delete computeMap->compute(i);
248  computeMap->registerCompute(i,NULL);
249  }
250  }
251 
252  if (!CkMyPe())
253  {
254  CkStartQD(CkIndex_ComputeMgr::updateLocalComputes2((CkQdMsg*)0), &thishandle);
255  }
256 }
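
Per compute, the loop above distinguishes three local cases: a compute that stays here but is being split (migrate its load-balancer object, delete it, and flag it for re-creation), a compute newly assigned here (create patch proxies and flag it for creation in updateLocalComputes3), and a compute moving away (delete it). A condensed sketch of that branch; the MapEntry field names are invented stand-ins for the ComputeMap accessors:

    #include <cstdio>

    // One compute's map entry, with invented field names for illustration.
    struct MapEntry { int node, newNode, newNumPartitions; };

    static void updateLocal(const MapEntry& e, int myPe) {
      if (e.node == myPe && e.newNumPartitions > 1)
        std::printf("case 1: migrate ldb object, delete, flag for re-creation\n");
      else if (e.newNode == myPe && e.node != myPe)
        std::printf("case 2: create patch proxies, flag for creation\n");
      else if (e.node == myPe && e.newNode != -1 && e.newNode != myPe)
        std::printf("case 3: compute moved away, delete it\n");
    }

    int main() {
      updateLocal({2, 2, 4}, 2);   // split in place on this PE
      updateLocal({1, 2, 0}, 2);   // arriving on this PE
      updateLocal({2, 5, 0}, 2);   // leaving this PE
      return 0;
    }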

◆ updateLocalComputes2()

void ComputeMgr::updateLocalComputes2 ( CkQdMsg *  msg)

Definition at line 259 of file ComputeMgr.C.

260 {
261  delete msg;
262  CProxy_ComputeMgr(thisgroup).updateLocalComputes3();
263 }

◆ updateLocalComputes3()

void ComputeMgr::updateLocalComputes3 ( )

Definition at line 266 of file ComputeMgr.C.

References ResizeArray< Elem >::clear(), ComputeMap::newNode(), ProxyMgr::nodecount, ComputeMap::numComputes(), ComputeMap::Object(), ProxyMgr::removeUnusedProxies(), ComputeMap::setNewNode(), ComputeMap::setNewNumPartitions(), ComputeMap::setNode(), and ResizeArray< Elem >::size().

267 {
268  ComputeMap *computeMap = ComputeMap::Object();
269  CProxy_ProxyMgr pm(CkpvAccess(BOCclass_group).proxyMgr);
270  ProxyMgr *proxyMgr = pm.ckLocalBranch();
271 
272  ProxyMgr::nodecount = CkNumPes();
273 
274  const int nc = computeMap->numComputes();
275 
276  if ( ! CkMyRank() ) {
277  for (int i=0; i<nc; i++) {
278  computeMap->setNewNumPartitions(i,0);
279  if (computeMap->newNode(i) != -1) {
280  computeMap->setNode(i,computeMap->newNode(i));
281  computeMap->setNewNode(i,-1);
282  }
283  }
284  }
285 
286  for(int i=0; i<computeFlag.size(); i++) createCompute(computeFlag[i], computeMap);
287  computeFlag.clear();
288 
289  proxyMgr->removeUnusedProxies();
290 
291  if (!CkMyPe())
292  {
293  CkStartQD(CkIndex_ComputeMgr::updateLocalComputes4((CkQdMsg*)0), &thishandle);
294  }
295 }

◆ updateLocalComputes4()

void ComputeMgr::updateLocalComputes4 ( CkQdMsg *  msg)

Definition at line 298 of file ComputeMgr.C.

References Node::Object(), ComputeMap::Object(), ComputeMap::saveComputeMap(), Node::simParameters, and simParams.

299 {
300  delete msg;
301  CProxy_ComputeMgr(thisgroup).updateLocalComputes5();
302 
303  // store the latest compute map
304  SimParameters *simParams = Node::Object()->simParameters;
305  if (simParams->storeComputeMap) {
306  ComputeMap *computeMap = ComputeMap::Object();
307  computeMap->saveComputeMap(simParams->computeMapFilename);
308  }
309 }

◆ updateLocalComputes5()

void ComputeMgr::updateLocalComputes5 ( )

Definition at line 316 of file ComputeMgr.C.

References ProxyMgr::buildProxySpanningTree2(), PatchMap::checkMap(), ComputeMap::checkMap(), PatchMap::Object(), ComputeMap::Object(), ProxyMgr::Object(), proxyRecvSpanning, proxySendSpanning, and ProxyMgr::sendSpanningTrees().

317 {
318  if ( ! CkMyRank() ) {
319  ComputeMap::Object()->checkMap();
320  PatchMap::Object()->checkMap();
321  }
322 
323  // we always use the centralized building of spanning tree
324  // distributed building of ST called in Node.C only
325  if (proxySendSpanning || proxyRecvSpanning)
326  ProxyMgr::Object()->sendSpanningTrees();
327 
328  // this code needs to be turned on if we want to
329  // shift the creation of ST to the load balancer
330 
331 #if 0
332  if (proxySendSpanning || proxyRecvSpanning)
333  {
334  if (firstphase)
335  ProxyMgr::Object()->buildProxySpanningTree2();
336  else
337  if (CkMyPe() == 0)
338  ProxyMgr::Object()->sendSpanningTrees();
339 
340  firstphase = 0;
341  }
342 #endif
343 
344  if (!CkMyPe())
345  CkStartQD(CkIndex_ComputeMgr::doneUpdateLocalComputes(), &thishandle);
346 }

Member Data Documentation

◆ callMeBackCB

CkCallback ComputeMgr::callMeBackCB

Definition at line 164 of file ComputeMgr.h.

◆ computeGlobalObject

ComputeGlobal* ComputeMgr::computeGlobalObject

Definition at line 160 of file ComputeMgr.h.

Referenced by ComputeMgr(), and recvComputeGlobalResults().

◆ computeGlobalResultsMsgMasterSeq

int ComputeMgr::computeGlobalResultsMsgMasterSeq

Definition at line 163 of file ComputeMgr.h.

Referenced by ComputeMgr(), and sendComputeGlobalResults().

◆ computeGlobalResultsMsgs

ResizeArray<ComputeGlobalResultsMsg*> ComputeMgr::computeGlobalResultsMsgs

Definition at line 161 of file ComputeMgr.h.

Referenced by enableComputeGlobalResults(), and recvComputeGlobalResults().

◆ computeGlobalResultsMsgSeq

int ComputeMgr::computeGlobalResultsMsgSeq

Definition at line 162 of file ComputeMgr.h.

Referenced by ComputeMgr(), enableComputeGlobalResults(), and recvComputeGlobalResults().


The documentation for this class was generated from the following files:

ComputeMgr.h
ComputeMgr.C