NAMD
ParallelIOMgr Class Reference

#include <ParallelIOMgr.h>

Inheritance diagram for ParallelIOMgr:

Public Member Functions

 ParallelIOMgr ()
 
 ~ParallelIOMgr ()
 
void initialize (Node *node)
 
void readPerAtomInfo ()
 
void readInfoForParOutDcdSelection ()
 
void migrateAtomsMGrp ()
 
void recvAtomsMGrp (MoveInputAtomsMsg *msg)
 
void integrateMigratedAtoms ()
 
void updateMolInfo ()
 
void recvMolInfo (MolInfoMsg *msg)
 
void bcastMolInfo (MolInfoMsg *msg)
 
void recvHydroBasedCounter (HydroBasedMsg *msg)
 
void bcastHydroBasedCounter (HydroBasedMsg *msg)
 
void sendDcdParams ()
 
void recvDcdParams (std::vector< uint16 > tags, std::vector< std::string > inputFileNames, std::vector< std::string > outputFileNames, std::vector< int > freqs, std::vector< OUTPUTFILETYPE > types)
 
void calcAtomsInEachPatch ()
 
void recvAtomsCntPerPatch (AtomsCntPerPatchMsg *msg)
 
void sendAtomsToHomePatchProcs ()
 
void ackAtomsToHomePatchProcs ()
 
void recvAtomsToHomePatchProcs (MovePatchAtomsMsg *msg)
 
void createHomePatches ()
 
void initializeDcdSelectionParams ()
 
void freeMolSpace ()
 
int getNumOutputProcs ()
 
bool isOutputProcessor (int pe)
 
void recvClusterSize (ClusterSizeMsg *msg)
 
void integrateClusterSize ()
 
void recvFinalClusterSize (ClusterSizeMsg *msg)
 
void receivePositions (CollectVectorVarMsg *msg)
 
void receiveVelocities (CollectVectorVarMsg *msg)
 
void receiveForces (CollectVectorVarMsg *msg)
 
void disposePositions (int seq, double prevT)
 
void disposeVelocities (int seq, double prevT)
 
void disposeForces (int seq, double prevT)
 
void wrapCoor (int seq, Lattice lat)
 
void recvClusterCoor (ClusterCoorMsg *msg)
 
void recvFinalClusterCoor (ClusterCoorMsg *msg)
 

Public Attributes

CthThread sendAtomsThread
 
int numAcksOutstanding
 

Detailed Description

Definition at line 155 of file ParallelIOMgr.h.

Constructor & Destructor Documentation

◆ ParallelIOMgr()

ParallelIOMgr::ParallelIOMgr ( )

Definition at line 162 of file ParallelIOMgr.C.

163 {
164  CkpvAccess(BOCclass_group).ioMgr = thisgroup;
165 
166  numInputProcs=-1;
167  inputProcArray = NULL;
168  numOutputProcs=-1;
169  outputProcArray = NULL;
170 
171  procsReceived=0;
172  hydroMsgRecved=0;
173 
174  totalMV.x = totalMV.y = totalMV.z = 0.0;
175  totalMass = 0.0;
176  totalCharge = 0.0;
177  numTotalExclusions = 0;
178  numCalcExclusions = 0;
179  numCalcFullExclusions = 0;
180 
181  isOKToRecvHPAtoms = false;
182  hpAtomsList = NULL;
183 
184  clusterID = NULL;
185  clusterSize = NULL;
186 
187 #ifdef MEM_OPT_VERSION
188  midCM = NULL;
189 #endif
190 
191  isWater = NULL;
192 
193  numCSMAck = 0;
194  numReqRecved = 0;
195 
196  sendAtomsThread = 0;
197 
198 #if COLLECT_PERFORMANCE_DATA
199  numFixedAtomLookup = 0;
200 #endif
201 }

◆ ~ParallelIOMgr()

ParallelIOMgr::~ParallelIOMgr ( )

Definition at line 203 of file ParallelIOMgr.C.

204 {
205  delete [] inputProcArray;
206  delete [] outputProcArray;
207  delete [] clusterID;
208  delete [] clusterSize;
209 
210 #ifdef MEM_OPT_VERSION
211  delete midCM;
212 #endif
213 
214  delete [] isWater;
215 }

Member Function Documentation

◆ ackAtomsToHomePatchProcs()

void ParallelIOMgr::ackAtomsToHomePatchProcs ( )

Definition at line 1612 of file ParallelIOMgr.C.

1613 {
1614  --numAcksOutstanding;
1615  if ( sendAtomsThread ) {
1616  CthAwaken(sendAtomsThread);
1617  sendAtomsThread = 0;
1618  }
1619 }

◆ bcastHydroBasedCounter()

void ParallelIOMgr::bcastHydroBasedCounter ( HydroBasedMsg * msg )

Definition at line 1320 of file ParallelIOMgr.C.

References endi(), iINFO(), iout, HydroBasedMsg::numFixedGroups, and HydroBasedMsg::numFixedRigidBonds.

1320  {
1321 #ifdef MEM_OPT_VERSION
1322  //only the rank 0 in the SMP node update the Molecule object
1323  if(CmiMyRank()) {
1324  delete msg;
1325  return;
1326  }
1327  molecule->numFixedRigidBonds = msg->numFixedRigidBonds;
1328  molecule->numFixedGroups = msg->numFixedGroups;
1329  delete msg;
1330 
1331  if(!CkMyPe()) {
1332  iout << iINFO << "****************************\n";
1333  iout << iINFO << "STRUCTURE SUMMARY:\n";
1334  iout << iINFO << molecule->numAtoms << " ATOMS\n";
1335  iout << iINFO << molecule->numBonds << " BONDS\n";
1336  iout << iINFO << molecule->numAngles << " ANGLES\n";
1337  iout << iINFO << molecule->numDihedrals << " DIHEDRALS\n";
1338  iout << iINFO << molecule->numImpropers << " IMPROPERS\n";
1339  iout << iINFO << molecule->numCrossterms << " CROSSTERMS\n";
1340  iout << iINFO << molecule->numExclusions << " EXCLUSIONS\n";
1341 
1342  //****** BEGIN CHARMM/XPLOR type changes
1343  if ((molecule->numMultipleDihedrals) && (simParameters->paraTypeXplorOn)){
1344  iout << iINFO << molecule->numMultipleDihedrals
1345  << " DIHEDRALS WITH MULTIPLE PERIODICITY (BASED ON PSF FILE)\n";
1346  }
1347  if ((molecule->numMultipleDihedrals) && (simParameters->paraTypeCharmmOn)){
1348  iout << iINFO << molecule->numMultipleDihedrals
1349  << " DIHEDRALS WITH MULTIPLE PERIODICITY IGNORED (BASED ON PSF FILE) \n";
1350  iout << iINFO
1351  << " CHARMM MULTIPLICITIES BASED ON PARAMETER FILE INFO! \n";
1352  }
1353  //****** END CHARMM/XPLOR type changes
1354 
1355  if (molecule->numMultipleImpropers){
1356  iout << iINFO << molecule->numMultipleImpropers
1357  << " IMPROPERS WITH MULTIPLE PERIODICITY\n";
1358  }
1359 
1360  if (simParameters->fixedAtomsOn)
1361  iout << iINFO << molecule->numFixedAtoms << " FIXED ATOMS\n";
1362 
1363 
1364  if (simParameters->rigidBonds)
1365  iout << iINFO << molecule->numRigidBonds << " RIGID BONDS\n";
1366 
1367  if (simParameters->fixedAtomsOn && simParameters->rigidBonds)
1368  iout << iINFO << molecule->numFixedRigidBonds <<
1369  " RIGID BONDS BETWEEN FIXED ATOMS\n";
1370 
1371  iout << iINFO << molecule->num_deg_freedom(1)
1372  << " DEGREES OF FREEDOM\n";
1373 
1374  iout << iINFO << molecule->numHydrogenGroups << " HYDROGEN GROUPS\n";
1375  iout << iINFO << molecule->maxHydrogenGroupSize
1376  << " ATOMS IN LARGEST HYDROGEN GROUP\n";
1377  iout << iINFO << molecule->numMigrationGroups << " MIGRATION GROUPS\n";
1378  iout << iINFO << molecule->maxMigrationGroupSize
1379  << " ATOMS IN LARGEST MIGRATION GROUP\n";
1380  if (simParameters->fixedAtomsOn)
1381  {
1382  iout << iINFO << molecule->numFixedGroups <<
1383  " HYDROGEN GROUPS WITH ALL ATOMS FIXED\n";
1384  }
1385 
1386  iout << iINFO << "TOTAL MASS = " << totalMass << " amu\n";
1387  iout << iINFO << "TOTAL CHARGE = " << totalCharge << " e\n";
1388 
1389  BigReal volume = simParameters->lattice.volume();
1390  if ( volume ) {
1391  iout << iINFO << "MASS DENSITY = "
1392  << ((totalMass/volume) / 0.6022) << " g/cm^3\n";
1393  iout << iINFO << "ATOM DENSITY = "
1394  << (molecule->numAtoms/volume) << " atoms/A^3\n";
1395  }
1396 
1397  iout << iINFO << "*****************************\n";
1398  iout << endi;
1399  fflush(stdout);
1400  }
1401 #endif
1402 }
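
A note on the density figures above: totalMass/volume is in amu/A^3, and
1 g/cm^3 = 0.6022 amu/A^3 (from Avogadro's number, 6.022e23 per mol, with
1 A^3 = 1e-24 cm^3), so the MASS DENSITY conversion is

    density [g/cm^3] = (totalMass [amu] / volume [A^3]) / 0.6022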

◆ bcastMolInfo()

void ParallelIOMgr::bcastMolInfo ( MolInfoMsg * msg )

Definition at line 1260 of file ParallelIOMgr.C.

References endi(), iINFO(), iout, MolInfoMsg::numAngles, MolInfoMsg::numBonds, MolInfoMsg::numCalcAngles, MolInfoMsg::numCalcBonds, MolInfoMsg::numCalcCrossterms, MolInfoMsg::numCalcDihedrals, MolInfoMsg::numCalcExclusions, MolInfoMsg::numCalcFullExclusions, MolInfoMsg::numCalcImpropers, MolInfoMsg::numCrossterms, MolInfoMsg::numDihedrals, MolInfoMsg::numExclusions, MolInfoMsg::numImpropers, MolInfoMsg::numRigidBonds, PDBVELFACTOR, MolInfoMsg::totalMass, and MolInfoMsg::totalMV.

1261 {
1262 #ifdef MEM_OPT_VERSION
1263  if(myInputRank!=-1) {
1264  if(!simParameters->comMove) {
1265  //needs to remove the center of mass motion from a molecule
1266  Vector val = msg->totalMV / msg->totalMass;
1267  for (int i=0; i<initAtoms.size(); i++) initAtoms[i].velocity -= val;
1268  }
1269  }
1270 
1271  //only the rank 0 in the SMP node update the Molecule object
1272  if(CmiMyRank()) {
1273  delete msg;
1274  return;
1275  }
1276 
1277  molecule->numBonds = msg->numBonds;
1278  molecule->numCalcBonds = msg->numCalcBonds;
1279  molecule->numAngles = msg->numAngles;
1280  molecule->numCalcAngles = msg->numCalcAngles;
1281  molecule->numDihedrals = msg->numDihedrals;
1282  molecule->numCalcDihedrals = msg->numCalcDihedrals;
1283  molecule->numImpropers = msg->numImpropers;
1284  molecule->numCalcImpropers = msg->numCalcImpropers;
1285  molecule->numCrossterms = msg->numCrossterms;
1286  molecule->numCalcCrossterms = msg->numCalcCrossterms;
1287 
1288  molecule->numTotalExclusions = msg->numExclusions;
1289  molecule->numCalcExclusions = msg->numCalcExclusions;
1290  molecule->numCalcFullExclusions = msg->numCalcFullExclusions;
1291 
1292  molecule->numRigidBonds = msg->numRigidBonds;
1293 
1294 
1295  if(!CkMyPe()) {
1296  iout << iINFO << "LOADED " << molecule->numTotalExclusions << " TOTAL EXCLUSIONS\n" << endi;
1297  if(!simParameters->comMove) {
1298  iout << iINFO << "REMOVING COM VELOCITY "
1299  << (PDBVELFACTOR * (msg->totalMV / msg->totalMass))<< "\n" <<endi;
1300  }
1301  }
1302  delete msg;
1303 #endif
1304 }
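
The removal above subtracts the mass-weighted mean velocity from every atom
when comMove is off:

    v_com = totalMV / totalMass = (sum_i m_i * v_i) / (sum_i m_i)

The value printed on pe 0 is scaled by PDBVELFACTOR, which converts NAMD's
internal velocity units to the external units used in velocity files.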

◆ calcAtomsInEachPatch()

void ParallelIOMgr::calcAtomsInEachPatch ( )

Definition at line 1404 of file ParallelIOMgr.C.

References PatchMap::assignToPatch(), AtomsCntPerPatchMsg::atomsCntList, AtomsCntPerPatchMsg::fixedAtomsCntList, PatchMap::getTmpPatchAtomsList(), PatchMap::initTmpPatchAtomsList(), InputAtom::isMP, InputAtom::isValid, AtomsCntPerPatchMsg::length, NAMD_die(), PatchMap::numPatches(), PatchMap::Object(), AtomsCntPerPatchMsg::pidList, and CompAtom::position.

1405 {
1406  if(myInputRank==-1) return;
1407 
1408  PatchMap *patchMap = PatchMap::Object();
1409  int numPatches = patchMap->numPatches();
1410 
1411  patchMap->initTmpPatchAtomsList();
1412 
1413  //each list contains the atom index to the initAtoms
1414  vector<int> *eachPatchAtomList = patchMap->getTmpPatchAtomsList();
1415 
1416  CProxy_PatchMgr pm(CkpvAccess(BOCclass_group).patchMgr);
1417  PatchMgr *patchMgr = pm.ckLocalBranch();
1418 
1419  int pid=0;
1420  const Lattice lattice = simParameters->lattice;
1421  for(int i=0; i<initAtoms.size(); i++) {
1422  InputAtom *atom = &(initAtoms[i]);
1423  if(!atom->isValid) continue;
1424  if(atom->isMP) {
1425  pid = patchMap->assignToPatch(atom->position, lattice);
1426  }
1427  eachPatchAtomList[pid].push_back(i);
1428  }
1429 
1430  CProxy_ParallelIOMgr pIO(thisgroup);
1431 
1432  int patchCnt = 0;
1433  for(int i=0; i<numPatches; i++) {
1434  int cursize = eachPatchAtomList[i].size();
1435  if(cursize>0) patchCnt++;
1436  }
1437 
1438  AtomsCntPerPatchMsg *msg = NULL;
1439  if(simParameters->fixedAtomsOn) {
1440  msg = new (patchCnt, patchCnt, patchCnt, 0)AtomsCntPerPatchMsg;
1441  } else {
1442  msg = new (patchCnt, patchCnt, 0, 0)AtomsCntPerPatchMsg;
1443  }
1444 
1445  msg->length = patchCnt;
1446  patchCnt = 0;
1447  for(int i=0; i<numPatches; i++) {
1448  int cursize = eachPatchAtomList[i].size();
1449  if(cursize>0) {
1450  if ( cursize > USHRT_MAX ) {
1451  char errstr[512];
1452  sprintf(errstr, "Patch %d exceeds %d atoms.", i, USHRT_MAX);
1453  NAMD_die(errstr);
1454  }
1455  msg->pidList[patchCnt] = i;
1456  msg->atomsCntList[patchCnt] = cursize;
1457  patchCnt++;
1458  }
1459  }
1460 
1461  if(simParameters->fixedAtomsOn) {
1462  patchCnt = 0;
1463  for(int i=0; i<numPatches; i++) {
1464  int cursize = eachPatchAtomList[i].size();
1465  if(cursize>0) {
1466  int fixedCnt = 0;
1467  for(int j=0; j<cursize; j++) {
1468  int aid = eachPatchAtomList[i][j];
1469  //atomFixed is either 0 or 1
1470  fixedCnt += initAtoms[aid].atomFixed;
1471  }
1472  msg->fixedAtomsCntList[patchCnt] = fixedCnt;
1473  patchCnt++;
1474  }
1475  }
1476  }
1477 
1478  pIO[0].recvAtomsCntPerPatch(msg);
1479 
1480 }
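
The four-argument placement new above is Charm++'s varsize-message
allocation: the first three arguments are the lengths of the message's
variable-size arrays (pidList, atomsCntList, and fixedAtomsCntList, the
last allocated with length 0 when fixed atoms are off), and the final 0 is
the number of priority bits. A minimal sketch of the idiom with a
hypothetical message type:

    // .ci declaration:  message DemoMsg { int ids[]; unsigned short cnts[]; };
    DemoMsg *m = new (n, n, 0) DemoMsg;  // two arrays of length n, 0 prio bits
    m->ids[0] = 42;                      // arrays live inline in the message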

◆ createHomePatches()

void ParallelIOMgr::createHomePatches ( )

Definition at line 1685 of file ParallelIOMgr.C.

References PatchMgr::createHomePatch(), PatchMap::node(), PatchMap::numPatches(), PatchMap::numPatchesOnNode(), PatchMap::Object(), Node::Object(), and Node::workDistrib.

1686 {
1687 #ifdef MEM_OPT_VERSION
1688 
1689  int assignedPids = PatchMap::Object()->numPatchesOnNode(CkMyPe());
1690  int numPids = hpIDList.size();
1691  if(numPids==0){
1692  //this node actually contains no homepatches
1693  if(assignedPids == 0) return;
1694 
1695  //Entering the rare condition that all the homepatches this node has
1696  //are empty so that "recvAtomsToHomePatchProcs" is never called!
1697  //But we still need to create those empty homepatches!
1698  CmiAssert(isOKToRecvHPAtoms == false);
1699  PatchMap *patchMap = PatchMap::Object();
1700  CProxy_PatchMgr pm(CkpvAccess(BOCclass_group).patchMgr);
1701  PatchMgr *patchMgr = pm.ckLocalBranch();
1702  for(int i=0; i<patchMap->numPatches(); i++) {
1703  if(patchMap->node(i)==CkMyPe()) {
1704  FullAtomList emptyone;
1705  patchMgr->createHomePatch(i, emptyone);
1706  }
1707  }
1708  return;
1709  }
1710 
1711  CProxy_PatchMgr pm(CkpvAccess(BOCclass_group).patchMgr);
1712  PatchMgr *patchMgr = pm.ckLocalBranch();
1713 
1714  //go through the home patch list
1715  for(int i=0; i<numPids; i++) {
1716  int pid = hpIDList[i];
1717 
1718  //re-sort the atom list of this patch
1719  std::sort(hpAtomsList[i].begin(), hpAtomsList[i].end());
1720  Node::Object()->workDistrib->fillAtomListForOnePatch(pid, hpAtomsList[i]);
1721  patchMgr->createHomePatch(pid, hpAtomsList[i]);
1722  }
1723 
1724  hpIDList.clear();
1725  delete [] hpAtomsList;
1726 
1727  hpAtomsList = NULL;
1728 #endif
1729 }

◆ disposeForces()

void ParallelIOMgr::disposeForces ( int seq, double prevT )

Definition at line 1929 of file ParallelIOMgr.C.

1930 {
1931 #ifdef MEM_OPT_VERSION
1932  double iotime = CmiWallTimer();
1933  midCM->disposeForces(seq);
1934  iotime = CmiWallTimer()-iotime+prevT;
1935 
1936 #if OUTPUT_SINGLE_FILE
1937  //Token-based file output
1938  if(myOutputRank==getMyOutputGroupHighestRank()) {
1939  //notify the CollectionMaster to start the next round
1940  CProxy_CollectionMaster cm(mainMaster);
1941  cm.startNextRoundOutputForce(iotime);
1942  } else {
1943  CProxy_ParallelIOMgr io(thisgroup);
1944  io[outputProcArray[myOutputRank+1]].disposeForces(seq, iotime);
1945  }
1946 #else
1947  //notify the CollectionMaster to start the next round
1948  CProxy_CollectionMaster cm(mainMaster);
1949  cm.startNextRoundOutputForce(iotime);
1950 #endif
1951 
1952 #endif
1953 }
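
Under OUTPUT_SINGLE_FILE, disposeForces (like disposePositions and
disposeVelocities below) passes a token along the output ranks so only one
proc writes at a time, accumulating the I/O time as the token travels; the
highest output rank then notifies the CollectionMaster. A condensed sketch,
where writeMyShare, notifyMaster, and forwardToken are illustrative
stand-ins rather than NAMD functions:

    void tokenStep(int myRank, int numRanks, int seq, double prevT) {
        double t = CmiWallTimer();
        writeMyShare(seq);                  // this rank's slice of the file
        t = CmiWallTimer() - t + prevT;     // I/O time accumulated so far
        if (myRank == numRanks - 1)
            notifyMaster(t);                // last rank closes the round
        else
            forwardToken(myRank + 1, seq, t);  // hand the token onward
    }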

◆ disposePositions()

void ParallelIOMgr::disposePositions ( int seq, double prevT )

Definition at line 1876 of file ParallelIOMgr.C.

References DebugM.

1877 {
1878 #ifdef MEM_OPT_VERSION
1879  DebugM(3, "["<<CkMyPe()<<"]"<<"ParallelIOMgr::disposePositions"<<"\n");
1880  double iotime = CmiWallTimer();
1881  midCM->disposePositions(seq);
1882  iotime = CmiWallTimer()-iotime+prevT;
1883 
1884 #if OUTPUT_SINGLE_FILE
1885  //Token-based file output
1886  if(myOutputRank == getMyOutputGroupHighestRank()) {
1887  //notify the CollectionMaster to start the next round
1888  CProxy_CollectionMaster cm(mainMaster);
1889  cm.startNextRoundOutputPos(iotime);
1890  } else {
1891  CProxy_ParallelIOMgr io(thisgroup);
1892  io[outputProcArray[myOutputRank+1]].disposePositions(seq, iotime);
1893  }
1894 #else
1895  //notify the CollectionMaster to start the next round
1896  CProxy_CollectionMaster cm(mainMaster);
1897  cm.startNextRoundOutputPos(iotime);
1898 #endif
1899 
1900 #endif
1901 }

◆ disposeVelocities()

void ParallelIOMgr::disposeVelocities ( int seq, double prevT )

Definition at line 1903 of file ParallelIOMgr.C.

1904 {
1905 #ifdef MEM_OPT_VERSION
1906  double iotime = CmiWallTimer();
1907  midCM->disposeVelocities(seq);
1908  iotime = CmiWallTimer()-iotime+prevT;
1909 
1910 #if OUTPUT_SINGLE_FILE
1911  //Token-based file output
1912  if(myOutputRank==getMyOutputGroupHighestRank()) {
1913  //notify the CollectionMaster to start the next round
1914  CProxy_CollectionMaster cm(mainMaster);
1915  cm.startNextRoundOutputVel(iotime);
1916  } else {
1917  CProxy_ParallelIOMgr io(thisgroup);
1918  io[outputProcArray[myOutputRank+1]].disposeVelocities(seq, iotime);
1919  }
1920 #else
1921  //notify the CollectionMaster to start the next round
1922  CProxy_CollectionMaster cm(mainMaster);
1923  cm.startNextRoundOutputVel(iotime);
1924 #endif
1925 
1926 #endif
1927 }

◆ freeMolSpace()

void ParallelIOMgr::freeMolSpace ( )

Definition at line 1731 of file ParallelIOMgr.C.

1732 {
1733 #ifdef MEM_OPT_VERSION
1734  molecule->delAtomNames();
1735  molecule->delChargeSpace();
1736 
1737  //???TODO NOT SURE WHETHER freeEnergyOn is support in MEM_OPT_VERSION
1738  //-CHAOMEI
1739  if(!CkMyPe() && !simParameters->freeEnergyOn)
1740  molecule->delMassSpace();
1741 
1742  molecule->delFixedAtoms();
1743 #endif
1744 }

◆ getNumOutputProcs()

int ParallelIOMgr::getNumOutputProcs ( )  [inline]

Definition at line 400 of file ParallelIOMgr.h.

400 { return numOutputProcs; }

◆ initialize()

void ParallelIOMgr::initialize ( Node * node )

Definition at line 222 of file ParallelIOMgr.C.

References endi(), CollectionMgr::getMasterChareID(), iINFO(), iout, Node::molecule, NAMD_bug(), CollectionMgr::Object(), WorkDistrib::peCompactOrderingIndex, WorkDistrib::peDiffuseOrdering, and Node::simParameters.

223 {
224  simParameters = node->simParameters;
225  molecule = node->molecule;
226 
227  numInputProcs = simParameters->numinputprocs;
228  numOutputProcs = simParameters->numoutputprocs;
229  numOutputWrts = simParameters->numoutputwrts;
230 
231  numProxiesPerOutputProc = std::min((int)sqrt(CkNumPes()),(CkNumPes()-1)/numOutputProcs-1);
232  if ( numProxiesPerOutputProc < 2 ) numProxiesPerOutputProc = 0;
233 
234  if(!CkMyPe()) {
235  iout << iINFO << "Running with " <<numInputProcs<<" input processors.\n"<<endi;
236  #if OUTPUT_SINGLE_FILE
237  iout << iINFO << "Running with " <<numOutputProcs<<" output processors ("<<numOutputWrts<<" of them will output simultaneously).\n"<<endi;
238  #else
239  iout << iINFO << "Running with " <<numOutputProcs<<" output processors, and each of them will output to its own separate file.\n"<<endi;
240  #endif
241  if ( numProxiesPerOutputProc ) {
242  iout << iINFO << "Running with " <<numProxiesPerOutputProc<<" proxies per output processor.\n"<<endi;
243  }
244  }
245 
246  //build inputProcArray
247  {
248  inputProcArray = new int[numInputProcs];
249  myInputRank = -1;
250  for(int i=0; i<numInputProcs; ++i) {
251  inputProcArray[i] = WorkDistrib::peDiffuseOrdering[(1+numOutputProcs+i)%CkNumPes()];
252  }
253  std::sort(inputProcArray, inputProcArray+numInputProcs);
254  for(int i=0; i<numInputProcs; ++i) {
255  if ( CkMyPe() == inputProcArray[i] ) {
256  if ( myInputRank != -1 ) NAMD_bug("Duplicate input proc");
257  myInputRank = i;
258  }
259  }
260 
261  if(!CkMyPe()) {
262  iout << iINFO << "INPUT PROC LOCATIONS:";
263  int i;
264  for ( i=0; i<numInputProcs && i < 10; ++i ) {
265  iout << " " << inputProcArray[i];
266  }
267  if ( i<numInputProcs ) iout << " ... " << inputProcArray[numInputProcs-1];
268  iout << "\n" << endi;
269  }
270  }
271 
272  if(myInputRank!=-1) {
273  //NOTE: this could further be optimized by pre-allocate the memory
274  //for incoming atoms --Chao Mei
275  int numMyAtoms = numInitMyAtomsOnInput();
276  initAtoms.resize(numMyAtoms+100); // extra space for orphan hydrogens
277  initAtoms.resize(numMyAtoms);
278  tmpRecvAtoms.resize(0);
279  } else {
280  initAtoms.resize(0);
281  tmpRecvAtoms.resize(0);
282  }
283  hpIDList.resize(0);
284 
285  //build outputProcArray
286  //spread the output processors across all the processors
287  {
288  outputProcArray = new int[numOutputProcs];
289  outputProcFlags = new char[CkNumPes()];
290  outputProxyArray = new int[numOutputProcs*numProxiesPerOutputProc];
291  myOutputProxies = new int[numOutputProcs];
292  myOutputRank = -1;
293  myOutputProxyRank = -1;
294  for(int i=0; i<numOutputProcs; ++i) {
295  outputProcArray[i] = WorkDistrib::peDiffuseOrdering[(1+i)%CkNumPes()];
296  }
297  std::sort(outputProcArray, outputProcArray+numOutputProcs);
298  for(int i=0; i<numOutputProcs*numProxiesPerOutputProc; ++i) {
299  outputProxyArray[i] = WorkDistrib::peDiffuseOrdering[(1+numOutputProcs+i)%CkNumPes()];
300  }
301  std::sort(outputProxyArray, outputProxyArray+numOutputProcs*numProxiesPerOutputProc,
303  for(int i=0; i<CkNumPes(); ++i) {
304  outputProcFlags[i] = 0;
305  }
306  for(int i=0; i<numOutputProcs; ++i) {
307  outputProcFlags[outputProcArray[i]] = 1;
308  if ( CkMyPe() == outputProcArray[i] ) {
309  if ( myOutputRank != -1 ) NAMD_bug("Duplicate output proc");
310  myOutputRank = i;
311  }
312  }
313  for(int i=0; i<numOutputProcs*numProxiesPerOutputProc; ++i) {
314  if ( CkMyPe() == outputProxyArray[i] ) {
315  if ( myOutputRank != -1 ) NAMD_bug("Output proxy is also output proc");
316  if ( myOutputProxyRank != -1 ) NAMD_bug("Duplicate output proxy");
317  myOutputProxyRank = i;
318  }
319  }
320  int myProxySet = (WorkDistrib::peCompactOrderingIndex[CkMyPe()]*numProxiesPerOutputProc)/CkNumPes();
321  for(int i=0; i<numOutputProcs; ++i) {
322  if ( numProxiesPerOutputProc ) {
323  myOutputProxies[i] = outputProxyArray[myProxySet*numOutputProcs+i];
324  } else {
325  myOutputProxies[i] = outputProcArray[i];
326  }
327  }
328 
329  // delay building sequences until after PatchMap is initialized
330  myOutputProxyPositions = 0;
331  myOutputProxyVelocities = 0;
332  myOutputProxyForces = 0;
333 
334  if(!CkMyPe()) {
335  iout << iINFO << "OUTPUT PROC LOCATIONS:";
336  int i;
337  for ( i=0; i<numOutputProcs && i < 10; ++i ) {
338  iout << " " << outputProcArray[i];
339  }
340  if ( i<numOutputProcs ) iout << " ... " << outputProcArray[numOutputProcs-1];
341  iout << "\n" << endi;
342  }
343  }
344 
345 #ifdef MEM_OPT_VERSION
346  if(myOutputRank!=-1) {
347  midCM = new CollectionMidMaster(this);
348  }
349  remoteClusters.clear();
350  csmBuf.resize(0);
351  remoteCoors.clear();
352  ccmBuf.resize(0);
353 
354  mainMaster = CollectionMgr::Object()->getMasterChareID();
355 #endif
356 }
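
A worked example of the proxy sizing at lines 231-232 (values hypothetical):
with CkNumPes() = 1024 and numOutputProcs = 64,

    (int)sqrt(1024)      = 32
    (1024-1)/64 - 1      = 15 - 1 = 14   (integer division)
    numProxiesPerOutputProc = min(32, 14) = 14

Since the result is >= 2, proxies are used; anything smaller is clamped to
0 and collection messages go directly to the output procs.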

◆ initializeDcdSelectionParams()

void ParallelIOMgr::initializeDcdSelectionParams ( )

◆ integrateClusterSize()

void ParallelIOMgr::integrateClusterSize ( )

Definition at line 790 of file ParallelIOMgr.C.

References ClusterSizeMsg::atomsCnt, ClusterSizeMsg::clusterId, and ClusterSizeMsg::srcRank.

791 {
792  if(myOutputRank==-1) return;
793  if(!(simParameters->wrapAll || simParameters->wrapWater)) return;
794 
795  int fromIdx, toIdx; //atoms' range
796  getMyAtomsRangeOnOutput(fromIdx,toIdx);
797 
798  //calculated the final cluster size
799  for(int i=0; i<csmBuf.size(); i++) {
800  ClusterSizeMsg *msg = csmBuf[i];
801  int lidx = msg->clusterId - fromIdx;
802  clusterSize[lidx] += msg->atomsCnt;
803  }
804 
805  CProxy_ParallelIOMgr pIO(thisgroup);
806  for(int i=0; i<csmBuf.size(); i++) {
807  ClusterSizeMsg *msg = csmBuf[i];
808  int lidx = msg->clusterId - fromIdx;
809  msg->atomsCnt = clusterSize[lidx];
810  pIO[outputProcArray[msg->srcRank]].recvFinalClusterSize(msg);
811  }
812  numRemoteReqs = csmBuf.size();
813  csmBuf.resize(0);
814 
815  //There's a possible msg race problem here that recvFinalClusterSize
816  //executes before integrateClusterSize because other proc finishes faster
817  //in calculating the cluster size. The recvFinalClusterSize should be
818  //executed after integrateClusterSize. To avoid this, a self message is
819  //sent to participate the reduction.
820  if(numRemoteClusters!=0){
821  recvFinalClusterSize(NULL);
822  }else{
823  //this output proc already has the final cluster size for each atom
824  int numMyAtoms = toIdx-fromIdx+1;
825  for(int i=0; i<numMyAtoms; i++) {
826  int lidx = clusterID[i]-fromIdx;
827  clusterSize[i] = clusterSize[lidx];
828  }
829 
830  #if 0 //write out cluster debug info
831  char fname[128];
832  sprintf(fname, "cluster.par.%d", CkMyPe());
833  FILE *ofp = fopen(fname, "w");
834  for(int i=0; i<numMyAtoms; i++) {
835  fprintf(ofp, "%d: %d: %d\n", i+fromIdx, clusterID[i], clusterSize[i]);
836  }
837  fclose(ofp);
838  #endif
839  }
840 }
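
The self-call at line 821 implements the race avoidance described in the
comment above: recvFinalClusterSize runs its final pass only once its ack
counter reaches numRemoteClusters + 1, and that extra "+1" slot can only be
filled here, after this proc has integrated its own cluster sizes:

    // sketch of the guard inside recvFinalClusterSize (listing below)
    if (++numCSMAck == numRemoteClusters + 1) { /* safe to finalize */ }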

◆ integrateMigratedAtoms()

void ParallelIOMgr::integrateMigratedAtoms ( )

Definition at line 949 of file ParallelIOMgr.C.

References CompAtomExt::atomFixed, CompAtom::hydrogenGroupSize, InputAtom::isGP, InputAtom::isValid, HydroBasedMsg::numFixedGroups, HydroBasedMsg::numFixedRigidBonds, and FullAtom::rigidBondLength.

950 {
951  if(myInputRank==-1) return;
952 
953  for(int i=0; i<tmpRecvAtoms.size(); i++) {
954  tmpRecvAtoms[i].isValid = true;
955  initAtoms.add(tmpRecvAtoms[i]);
956  }
957  tmpRecvAtoms.clear();
958 
959  //sort atom list based on hydrogenList value
960  std::sort(initAtoms.begin(), initAtoms.end());
961 
962  //now compute the counters inside Molecule such as numFixedRigidBonds
963  //which is based on the hydrogen group info
964 
965  int numFixedRigidBonds = 0;
966  if(molecule->numRigidBonds){
967  int parentIsFixed = 0;
968  for(int i=0; i<initAtoms.size(); i++) {
969  InputAtom *one = &(initAtoms[i]);
970  if(!one->isValid) continue;
971  if(one->isGP) {
972  parentIsFixed = one->atomFixed;
973  InputAtom *a1 = &(initAtoms[i+1]);
974  InputAtom *a2 = &(initAtoms[i+2]);
975  if((one->rigidBondLength>0.0) &&
976  a1->atomFixed && a2->atomFixed) {
977  numFixedRigidBonds++;
978  }
979  }else{
980  if((one->rigidBondLength>0.0) &&
981  one->atomFixed && parentIsFixed) {
982  numFixedRigidBonds++;
983  }
984  }
985  }
986  }
987 
988  int numFixedGroups = 0;
989  if(molecule->numFixedAtoms){
990  for(int i=0; i<initAtoms.size();) {
991  InputAtom *one = &(initAtoms[i]);
992  if(!one->isValid){
993  i++;
994  continue;
995  }
996  if(one->isGP) {
997  int allFixed = 1;
998  for(int j=0; j<one->hydrogenGroupSize; j++){
999  InputAtom *a1 = &(initAtoms[i+j]);
1000  allFixed = allFixed & a1->atomFixed;
1001  if(!allFixed) break;
1002  }
1003  if(allFixed) numFixedGroups++;
1004  i += one->hydrogenGroupSize;
1005  }
1006  }
1007  }
1008 
1009  CProxy_ParallelIOMgr pIO(thisgroup);
1010  HydroBasedMsg *msg = new HydroBasedMsg;
1011  msg->numFixedGroups = numFixedGroups;
1012  msg->numFixedRigidBonds = numFixedRigidBonds;
1013  pIO[0].recvHydroBasedCounter(msg);
1014 }

◆ isOutputProcessor()

bool ParallelIOMgr::isOutputProcessor ( int  pe)

Definition at line 358 of file ParallelIOMgr.C.

358  {
359  return outputProcFlags[pe];
360 }

◆ migrateAtomsMGrp()

void ParallelIOMgr::migrateAtomsMGrp ( )

Definition at line 889 of file ParallelIOMgr.C.

References ResizeArray< Elem >::add(), MoveInputAtomsMsg::atomList, ResizeArray< Elem >::begin(), ResizeArray< Elem >::clear(), MoveInputAtomsMsg::length, and ResizeArray< Elem >::size().

890 {
891  if(myInputRank==-1) return;
892 
893  //1. first get the list of atoms to be migrated
894  //which should be few compared with the number of atoms
895  //initially assigned to this input proc.
896  AtomIDList toMigrateList; //the list of atoms to be migrated
897  //the max distance from this processor of atoms to be sent
898  int maxOffset = 0;
899  for(int i=0; i<initAtoms.size(); i++) {
900  //returns the proc id on which atom MPID resides on
901  int parentRank = atomInitRankOnInput(initAtoms[i].MPID);
902  if(parentRank != myInputRank) {
903  toMigrateList.add(i);
904  initAtoms[i].isValid = false;
905  int tmp = parentRank - myInputRank;
906  tmp = tmp>0 ? tmp : -tmp;
907  if(tmp > maxOffset) maxOffset = tmp;
908  }
909  }
910 
911  //2. prepare atom migration messages
912  //the messages are indexed as [-maxOffset,..., -1,0,1,..., maxOffset]
913  //where the "0" is not used at all. It is added for the sake of
914  //computing the index easily.
915  InputAtomList *migLists = new InputAtomList[2*maxOffset+1];
916  for(int i=0; i<toMigrateList.size(); i++) {
917  int idx = toMigrateList[i];
918  int parentRank = atomInitRankOnInput(initAtoms[idx].MPID);
919  //decide which migList to put this atom
920  int offset = parentRank - myInputRank + maxOffset;
921  migLists[offset].add(initAtoms[idx]);
922  }
923 
924  CProxy_ParallelIOMgr pIO(thisgroup);
925  for(int i=0; i<2*maxOffset+1; i++) {
926  int migLen = migLists[i].size();
927  if(migLen>0) {
928  MoveInputAtomsMsg *msg = new (migLen, 0)MoveInputAtomsMsg;
929  msg->length = migLen;
930  memcpy(msg->atomList, migLists[i].begin(), sizeof(InputAtom)*migLen);
931  int destRank = i-maxOffset+myInputRank;
932  pIO[inputProcArray[destRank]].recvAtomsMGrp(msg);
933  migLists[i].clear();
934  }
935  }
936 
937  toMigrateList.clear();
938  delete [] migLists;
939 }
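
A worked example of the offset indexing above (ranks hypothetical): with
myInputRank = 3 and maxOffset = 2 there are 2*2+1 = 5 migration lists,

    offset i:                        0  1  2  3  4
    destRank = i - maxOffset + 3:    1  2  3  4  5

and the middle slot (i == maxOffset, this proc itself) always stays empty,
which is why the comment notes the "0" relative offset is never used.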

◆ readInfoForParOutDcdSelection()

void ParallelIOMgr::readInfoForParOutDcdSelection ( )

Definition at line 738 of file ParallelIOMgr.C.

References Molecule::dcdSelectionParams, DebugM, IndexFile::getAllElements(), dcd_params::inputFilename, Node::molecule, Node::Object(), and dcd_params::size.

739 {
740 #ifdef MEM_OPT_VERSION
741  // for each selection file, open, scan to fromIdx
742  // keep counting matches until toIdx
743  if(myOutputRank>=0)
744  { // only do this on output processors
745  Molecule *mol = Node::Object()->molecule;
746  int selFromIdx, selToIdx; //atoms' range
747  getMyAtomsRangeOnOutput(selFromIdx, selToIdx);
748  DebugM(3, "["<<CkMyPe()<<"]"<< " ParallelIOMgr::readInfoForParOutDcdSelection from " << selFromIdx << " to " << selToIdx <<"\n");
749  for(int index=0; index < 16; ++index)
750  {
751  char *dcdSelectionInputFile = mol->dcdSelectionParams[index].inputFilename;
752  if(filesystem::path(dcdSelectionInputFile).extension() == ".idx")
753  {
754  // binary index file, each element in it is in the selection
755 
756  IndexFile dcdSelectionInputIdx(dcdSelectionInputFile);
757  std::vector<uint32> indexVec=dcdSelectionInputIdx.getAllElements();
758  // find the range for our from_to on the selection
759  // lower bound will do a binary search so we avoid scanning
760  // irrelevant elements
761  auto start = std::lower_bound(indexVec.begin(), indexVec.end(), selFromIdx);
762  off_t startOffset = std::distance(indexVec.begin(), start);
763  auto end = std::lower_bound(start, indexVec.end(), selToIdx);
764  size_t size = std::distance(start,end);
765  // compute the map(localOffset -> globalId)
766  std::vector <uint32> dcdSelectionIndexReverseMap(size);
767  for(size_t offset=0; offset<size; ++offset)
768  {
769  dcdSelectionIndexReverseMap[offset] = indexVec[startOffset+offset];
770  }
771  mol->dcdSelectionParams[index].size= indexVec.size();
772  midCM->parOut->setDcdSelectionParams(index, startOffset, size, dcdSelectionIndexReverseMap);
773  }
774  else
775  { // no such file present
776  }
777  }
778  }
779 #endif
780 }
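
A self-contained illustration of the lower_bound range selection above
(values hypothetical; elements of an .idx file are assumed sorted
ascending):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    std::vector<std::uint32_t> indexVec = {3, 9, 14, 27, 31, 58};
    std::uint32_t selFromIdx = 10, selToIdx = 31;  // this proc's atom range
    auto start = std::lower_bound(indexVec.begin(), indexVec.end(), selFromIdx);
    auto end   = std::lower_bound(start, indexVec.end(), selToIdx);
    // start -> 14, end -> 31: this proc owns selection entries {14, 27};
    // the binary searches skip all entries outside [selFromIdx, selToIdx).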

◆ readPerAtomInfo()

void ParallelIOMgr::readPerAtomInfo ( )

Definition at line 368 of file ParallelIOMgr.C.

369 {
370 #ifdef MEM_OPT_VERSION
371  if(myInputRank!=-1) {
372  int myAtomLIdx, myAtomUIdx;
373  getMyAtomsInitRangeOnInput(myAtomLIdx, myAtomUIdx);
374 
375  //1. read the file that contains per-atom info such as signature index
376  molecule->read_binary_atom_info(myAtomLIdx, myAtomUIdx, initAtoms);
377 
378  //2. read coordinates and velocities of each atom if the velocity file
379  //exists, otherwise, the velocity of each atom is randomly generated.
380  //This has to be DONE AFTER THE FIRST STEP as the atom mass is required
381  //if the velocity is generated randomly.
382  readCoordinatesAndVelocity();
383 
384  //3. set every atom's output processor rank, i.e. the dest pe this
385  //atom will be sent for writing positions and velocities etc.
386  int oRank=atomRankOnOutput(myAtomLIdx);
387  for(int i=oRank; i<numOutputProcs; i++) {
388  int lIdx, uIdx; //indicates the range of atom ids outputProcArray[i] has
389  getAtomsRangeOnOutput(lIdx, uIdx, i);
390  if(lIdx > myAtomUIdx) break;
391  int fid = lIdx>myAtomLIdx?lIdx:myAtomLIdx;
392  int tid = uIdx>myAtomUIdx?myAtomUIdx:uIdx;
393  for(int j=fid; j<=tid; j++) initAtoms[j-myAtomLIdx].outputRank = i;
394  }
395  }
396 
397  //read clusters
398  if(myOutputRank!=-1) {
399  //only when wrapAll or wrapWater is set, cluster info is required
400  if(!(simParameters->wrapAll || simParameters->wrapWater)) return;
401  readInfoForParOutput();
402  }
403 #endif
404 }

◆ receiveForces()

void ParallelIOMgr::receiveForces ( CollectVectorVarMsg * msg )

Definition at line 1852 of file ParallelIOMgr.C.

References NAMD_bug(), and CollectVectorVarMsg::seq.

1853 {
1854 #ifdef MEM_OPT_VERSION
1855  if ( myOutputRank != -1 ) {
1856  int ready = midCM->receiveForces(msg);
1857  if(ready) {
1858  CProxy_CollectionMaster cm(mainMaster);
1859  cm.receiveOutputForceReady(msg->seq);
1860  }
1861  delete msg;
1862  } else if ( myOutputProxyRank != -1 ) {
1863  if ( ! myOutputProxyForces ) {
1864  myOutputProxyForces = new CollectProxyVectorSequence(calcMyOutputProxyClients());
1865  }
1866  CollectVectorVarMsg *newmsg = myOutputProxyForces->submitData(msg);
1867  if ( newmsg ) thisProxy[outputProcArray[myOutputProxyRank%numOutputProcs]].receiveForces(newmsg);
1868  delete msg;
1869  } else {
1870  NAMD_bug("ParallelIOMgr::receiveForces on bad pe");
1871  }
1872 #endif
1873 }

◆ receivePositions()

void ParallelIOMgr::receivePositions ( CollectVectorVarMsg * msg )

Definition at line 1806 of file ParallelIOMgr.C.

References NAMD_bug(), and CollectVectorVarMsg::seq.

1807 {
1808 #ifdef MEM_OPT_VERSION
1809  if ( myOutputRank != -1 ) {
1810  int ready = midCM->receivePositions(msg);
1811  if(ready) {
1812  CProxy_CollectionMaster cm(mainMaster);
1813  cm.receiveOutputPosReady(msg->seq);
1814  }
1815  delete msg;
1816  } else if ( myOutputProxyRank != -1 ) {
1817  if ( ! myOutputProxyPositions ) {
1818  myOutputProxyPositions = new CollectProxyVectorSequence(calcMyOutputProxyClients());
1819  }
1820  CollectVectorVarMsg *newmsg = myOutputProxyPositions->submitData(msg);
1821  if ( newmsg ) thisProxy[outputProcArray[myOutputProxyRank%numOutputProcs]].receivePositions(newmsg);
1822  delete msg;
1823  } else {
1824  NAMD_bug("ParallelIOMgr::receivePositions on bad pe");
1825  }
1826 #endif
1827 }

◆ receiveVelocities()

void ParallelIOMgr::receiveVelocities ( CollectVectorVarMsg * msg )

Definition at line 1829 of file ParallelIOMgr.C.

References NAMD_bug(), and CollectVectorVarMsg::seq.

1830 {
1831 #ifdef MEM_OPT_VERSION
1832  if ( myOutputRank != -1 ) {
1833  int ready = midCM->receiveVelocities(msg);
1834  if(ready) {
1835  CProxy_CollectionMaster cm(mainMaster);
1836  cm.receiveOutputVelReady(msg->seq);
1837  }
1838  delete msg;
1839  } else if ( myOutputProxyRank != -1 ) {
1840  if ( ! myOutputProxyVelocities ) {
1841  myOutputProxyVelocities = new CollectProxyVectorSequence(calcMyOutputProxyClients());
1842  }
1843  CollectVectorVarMsg *newmsg = myOutputProxyVelocities->submitData(msg);
1844  if ( newmsg ) thisProxy[outputProcArray[myOutputProxyRank%numOutputProcs]].receiveVelocities(newmsg);
1845  delete msg;
1846  } else {
1847  NAMD_bug("ParallelIOMgr::receiveVelocities on bad pe");
1848  }
1849 #endif
1850 }

◆ recvAtomsCntPerPatch()

void ParallelIOMgr::recvAtomsCntPerPatch ( AtomsCntPerPatchMsg * msg )

Definition at line 1482 of file ParallelIOMgr.C.

References AtomsCntPerPatchMsg::atomsCntList, endi(), AtomsCntPerPatchMsg::fixedAtomsCntList, iINFO(), iout, AtomsCntPerPatchMsg::length, NAMD_die(), PatchMap::numPatches(), PatchMap::Object(), Node::Object(), and AtomsCntPerPatchMsg::pidList.

1483 {
1484 #ifdef MEM_OPT_VERSION
1485  PatchMap *patchMap = PatchMap::Object();
1486  for(int i=0; i<msg->length; i++) {
1487  int pid = msg->pidList[i];
1488  int oldNum = patchMap->numAtoms(pid);
1489  if ( oldNum + msg->atomsCntList[i] > USHRT_MAX ) {
1490  char errstr[512];
1491  sprintf(errstr, "Patch %d exceeds %d atoms.", pid, USHRT_MAX);
1492  NAMD_die(errstr);
1493  }
1494  patchMap->setNumAtoms(pid, oldNum+msg->atomsCntList[i]);
1495  if(simParameters->fixedAtomsOn) {
1496  oldNum = patchMap->numFixedAtoms(pid);
1497  patchMap->setNumFixedAtoms(pid, oldNum+msg->fixedAtomsCntList[i]);
1498  }
1499  }
1500  delete msg;
1501 
1502  if(++procsReceived == numInputProcs) {
1503  //print max PATCH info
1504  int maxAtoms = -1;
1505  int maxPatch = -1;
1506  int totalAtoms = 0;
1507  for(int i=0; i<patchMap->numPatches(); i++) {
1508  int cnt = patchMap->numAtoms(i);
1509  totalAtoms += cnt;
1510  if(cnt>maxAtoms) {
1511  maxAtoms = cnt;
1512  maxPatch = i;
1513  }
1514  }
1515  procsReceived = 0;
1516  iout << iINFO << "LARGEST PATCH (" << maxPatch <<
1517  ") HAS " << maxAtoms << " ATOMS\n" << endi;
1518  if ( totalAtoms != Node::Object()->molecule->numAtoms ) {
1519  char errstr[512];
1520  sprintf(errstr, "Incorrect atom count in void ParallelIOMgr::recvAtomsCntPerPatch: %d vs %d", totalAtoms, Node::Object()->molecule->numAtoms);
1521  NAMD_die(errstr);
1522  }
1523  }
1524 #endif
1525 }

◆ recvAtomsMGrp()

void ParallelIOMgr::recvAtomsMGrp ( MoveInputAtomsMsg * msg )

Definition at line 941 of file ParallelIOMgr.C.

References MoveInputAtomsMsg::atomList, and MoveInputAtomsMsg::length.

942 {
943  for(int i=0; i<msg->length; i++) {
944  tmpRecvAtoms.add((msg->atomList)[i]);
945  }
946  delete msg;
947 }

◆ recvAtomsToHomePatchProcs()

void ParallelIOMgr::recvAtomsToHomePatchProcs ( MovePatchAtomsMsg * msg )

Definition at line 1621 of file ParallelIOMgr.C.

References MovePatchAtomsMsg::allAtoms, MovePatchAtomsMsg::from, MovePatchAtomsMsg::patchCnt, MovePatchAtomsMsg::pidList, and MovePatchAtomsMsg::sizeList.

1622 {
1623  CProxy_ParallelIOMgr pIO(thisgroup);
1624  pIO[msg->from].ackAtomsToHomePatchProcs();
1625 
1626  if(!isOKToRecvHPAtoms) {
1627  prepareHomePatchAtomList();
1628  isOKToRecvHPAtoms = true;
1629  }
1630 
1631  int numRecvPatches = msg->patchCnt;
1632  int aid = 0;
1633  for(int i=0; i<numRecvPatches; i++) {
1634  int pid = msg->pidList[i];
1635  int size = msg->sizeList[i];
1636  int idx = binaryFindHPID(pid);
1637  for(int j=0; j<size; j++, aid++) {
1638  hpAtomsList[idx].add(msg->allAtoms[aid]);
1639  }
1640  }
1641  //CkPrintf("Pe %d recvAtomsToHomePatchProcs for %d patches %d atoms\n",CkMyPe(),numRecvPatches,aid);
1642  delete msg;
1643 }

◆ recvClusterCoor()

void ParallelIOMgr::recvClusterCoor ( ClusterCoorMsg * msg )

Definition at line 2030 of file ParallelIOMgr.C.

2030  {
2031  //only add the msg from remote procs
2032  if(msg!=NULL) ccmBuf.add(msg);
2033 
2034  //include a msg sent by itself
2035  if(++numReqRecved == (numRemoteReqs+1)){
2036  numReqRecved = 0;
2037  integrateClusterCoor();
2038  }
2039 }

◆ recvClusterSize()

void ParallelIOMgr::recvClusterSize ( ClusterSizeMsg * msg )

Definition at line 782 of file ParallelIOMgr.C.

783 {
784  csmBuf.add(msg); //added to buffer for reuse to send back to src
785 
786  //update cluster size has to be delayed to integration to prevent
787  //data racing where the clusterSize has not been created!
788 }

◆ recvDcdParams()

void ParallelIOMgr::recvDcdParams ( std::vector< uint16 > tags, std::vector< std::string > inputFileNames, std::vector< std::string > outputFileNames, std::vector< int > freqs, std::vector< OUTPUTFILETYPE > types )

Definition at line 712 of file ParallelIOMgr.C.

References Molecule::dcdSelectionAtoms, Molecule::dcdSelectionParams, dcd_params::frequency, dcd_params::inputFilename, Node::molecule, NAMD_FILENAME_BUFFER_SIZE, Node::Object(), dcd_params::outFilename, dcd_params::tag, and dcd_params::type.

717 {
718  // only the 0th rank updates the molecule object
719  if(!CkMyRank())
720  {
721  Molecule *mol = Node::Object()->molecule;
722  mol->dcdSelectionAtoms = true;
723  for(int index=0; index < 16 ;++index)
724  {
725  mol->dcdSelectionParams[index].tag = tags[index];
726  strncpy(mol->dcdSelectionParams[index].inputFilename, inputFileNames[index].c_str(),NAMD_FILENAME_BUFFER_SIZE);
727  strncpy(mol->dcdSelectionParams[index].outFilename, outputFileNames[index].c_str(),NAMD_FILENAME_BUFFER_SIZE);
728  mol->dcdSelectionParams[index].frequency = freqs[index];
729  mol->dcdSelectionParams[index].type = types[index];
730  }
731  }
732 }

◆ recvFinalClusterCoor()

void ParallelIOMgr::recvFinalClusterCoor ( ClusterCoorMsg * msg )

Definition at line 2102 of file ParallelIOMgr.C.

References ClusterCoorElem::clusterId, ClusterCoorMsg::clusterId, ClusterCoorElem::dsum, ClusterCoorMsg::dsum, ResizeArray< Elem >::size(), Lattice::wrap_delta(), and Lattice::wrap_nearest_delta().

2102  {
2103 #ifdef MEM_OPT_VERSION
2104  if(msg!=NULL){
2105  //only process the message sent from other procs!
2106  ClusterCoorElem one(msg->clusterId);
2107  ClusterCoorElem *ret = remoteCoors.find(one);
2108  ret->dsum = msg->dsum;
2109  delete msg;
2110  }
2111 
2112  if(++numCSMAck == (numRemoteClusters+1)){
2113  //final wrap coor computation
2114  int fromIdx = coorInstance->fromAtomID;
2115  int toIdx = coorInstance->toAtomID;
2116  int numMyAtoms = toIdx-fromIdx+1;
2117  ResizeArray<Vector> &data = coorInstance->data;
2118  ResizeArray<FloatVector> &fdata = coorInstance->fdata;
2119  ClusterCoorElem tmp;
2120  for(int i=0; i<numMyAtoms; i++){
2121  if(!simParameters->wrapAll && !isWater[i]) continue;
2122  int cid = clusterID[i];
2123  int lidx = cid-fromIdx;
2124  if(lidx<0){
2125  //this cid should be inside remoteCoors
2126  tmp.clusterId = cid;
2127  ClusterCoorElem *fone = remoteCoors.find(tmp);
2128  if(data.size()) data[i] += fone->dsum;
2129  if(fdata.size()) fdata[i] = fdata[i] + fone->dsum;
2130  }else{
2131  if(lidx==i){
2132  Lattice *lat = &(coorInstance->lattice);
2133  Vector coni = tmpCoorCon[lidx]/clusterSize[lidx];
2134  tmpCoorCon[lidx] = (simParameters->wrapNearest ?
2135  lat->wrap_nearest_delta(coni) : lat->wrap_delta(coni));
2136  }
2137  if(data.size()) data[i] += tmpCoorCon[lidx];
2138  if(fdata.size()) fdata[i] = fdata[i] + tmpCoorCon[lidx];
2139  }
2140  }
2141 
2142  delete [] tmpCoorCon;
2143  tmpCoorCon = NULL;
2144  CProxy_CollectionMaster cm(mainMaster);
2145  cm.wrapCoorFinished();
2146  numCSMAck = 0;
2147  remoteCoors.clear();
2148  }
2149 #endif
2150 }
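
The wrapping above moves whole clusters rather than individual atoms: the
shift is computed once per cluster from its averaged coordinate and the
same delta is then added to every atom of the cluster, so bonded clusters
are never split across the periodic boundary. In outline (types as in the
listing):

    Vector c     = tmpCoorCon[lidx] / clusterSize[lidx];   // cluster center
    Vector delta = simParameters->wrapNearest
                 ? lat->wrap_nearest_delta(c)              // nearest image
                 : lat->wrap_delta(c);                     // unit-cell image
    // delta is applied to data[i] / fdata[i] for each atom i of the cluster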

◆ recvFinalClusterSize()

void ParallelIOMgr::recvFinalClusterSize ( ClusterSizeMsg * msg )

Definition at line 842 of file ParallelIOMgr.C.

References ClusterElem::atomsCnt, ClusterSizeMsg::atomsCnt, ClusterElem::clusterId, and ClusterSizeMsg::clusterId.

843 {
844  //only process the message sent by other procs
845  if(msg!=NULL) {
846  //indicating a message from other procs
847  ClusterElem one(msg->clusterId);
848  ClusterElem *ret = remoteClusters.find(one);
849  CmiAssert(ret!=NULL);
850  ret->atomsCnt = msg->atomsCnt;
851  }
852  delete msg;
853 
854  //include a msg sent by itself for reduction
855  if(++numCSMAck == (numRemoteClusters+1)) {
856  //recved all the msgs needed to update the cluster size for each atom finally
857  int fromIdx, toIdx; //atoms' range
858  getMyAtomsRangeOnOutput(fromIdx,toIdx);
859  int numMyAtoms = toIdx-fromIdx+1;
860  ClusterElem tmp;
861  for(int i=0; i<numMyAtoms; i++) {
862  int cid = clusterID[i];
863  int lidx = cid-fromIdx;
864  if(lidx<0) {
865  //this cid should be inside remoteClusters
866  tmp.clusterId = cid;
867  ClusterElem *fone = remoteClusters.find(tmp);
868  clusterSize[i] = fone->atomsCnt;
869  } else {
870  clusterSize[i] = clusterSize[lidx];
871  }
872  }
873  numCSMAck = 0;
874  remoteClusters.clear();
875 
876 #if 0 //write out cluster debug info
877  char fname[128];
878  sprintf(fname, "cluster.par.%d", CkMyPe());
879  FILE *ofp = fopen(fname, "w");
880  for(int i=0; i<numMyAtoms; i++) {
881  fprintf(ofp, "%d: %d: %d\n", i+fromIdx, clusterID[i], clusterSize[i]);
882  }
883  fclose(ofp);
884 #endif
885 
886  }
887 }

◆ recvHydroBasedCounter()

void ParallelIOMgr::recvHydroBasedCounter ( HydroBasedMsg * msg )

Definition at line 1307 of file ParallelIOMgr.C.

References HydroBasedMsg::numFixedGroups, and HydroBasedMsg::numFixedRigidBonds.

1307  {
1308  molecule->numFixedRigidBonds += msg->numFixedRigidBonds;
1309  molecule->numFixedGroups += msg->numFixedGroups;
1310 
1311  if(++hydroMsgRecved == numInputProcs){
1312  msg->numFixedRigidBonds = molecule->numFixedRigidBonds;
1313  msg->numFixedGroups = molecule->numFixedGroups;
1314  CProxy_ParallelIOMgr pIO(thisgroup);
1315  pIO.bcastHydroBasedCounter(msg);
1316  hydroMsgRecved = 0;
1317  }else delete msg;
1318 }

◆ recvMolInfo()

void ParallelIOMgr::recvMolInfo ( MolInfoMsg * msg )

Definition at line 1204 of file ParallelIOMgr.C.

References MolInfoMsg::numAngles, MolInfoMsg::numBonds, MolInfoMsg::numCalcAngles, MolInfoMsg::numCalcBonds, MolInfoMsg::numCalcCrossterms, MolInfoMsg::numCalcDihedrals, MolInfoMsg::numCalcExclusions, MolInfoMsg::numCalcFullExclusions, MolInfoMsg::numCalcImpropers, MolInfoMsg::numCrossterms, MolInfoMsg::numDihedrals, MolInfoMsg::numExclusions, MolInfoMsg::numImpropers, MolInfoMsg::numRigidBonds, MolInfoMsg::totalCharge, MolInfoMsg::totalMass, and MolInfoMsg::totalMV.

1205 {
1206  molecule->numBonds += msg->numBonds;
1207  molecule->numCalcBonds += msg->numCalcBonds;
1208  molecule->numAngles += msg->numAngles;
1209  molecule->numCalcAngles += msg->numCalcAngles;
1210  molecule->numDihedrals += msg->numDihedrals;
1211  molecule->numCalcDihedrals += msg->numCalcDihedrals;
1212  molecule->numImpropers += msg->numImpropers;
1213  molecule->numCalcImpropers += msg->numCalcImpropers;
1214  molecule->numCrossterms += msg->numCrossterms;
1215  molecule->numCalcCrossterms += msg->numCalcCrossterms;
1216  numTotalExclusions += msg->numExclusions;
1217  numCalcExclusions += msg->numCalcExclusions;
1218  numCalcFullExclusions += msg->numCalcFullExclusions;
1219  molecule->numRigidBonds += msg->numRigidBonds;
1220 
1221  totalMass += msg->totalMass;
1222  totalCharge += msg->totalCharge;
1223 
1224  if(!simParameters->comMove) {
1225  totalMV += msg->totalMV;
1226  }
1227 
1228  if(++procsReceived == numInputProcs) {
1229  //received all the counters
1230  msg->numBonds = molecule->numBonds;
1231  msg->numCalcBonds = molecule->numCalcBonds;
1232  msg->numAngles = molecule->numAngles;
1233  msg->numCalcAngles = molecule->numCalcAngles;
1234  msg->numDihedrals = molecule->numDihedrals;
1235  msg->numCalcDihedrals = molecule->numCalcDihedrals;
1236  msg->numImpropers = molecule->numImpropers;
1237  msg->numCalcImpropers = molecule->numCalcImpropers;
1238  msg->numCrossterms = molecule->numCrossterms;
1239  msg->numCalcCrossterms = molecule->numCalcCrossterms;
1240  msg->numExclusions = numTotalExclusions/2;
1241  msg->numCalcExclusions = numCalcExclusions/2;
1242  msg->numCalcFullExclusions = numCalcFullExclusions/2;
1243  msg->numRigidBonds = molecule->numRigidBonds;
1244 
1245  msg->totalMass = totalMass;
1246  msg->totalCharge = totalCharge;
1247 
1248  if(!simParameters->comMove) {
1249  msg->totalMV = totalMV;
1250  }
1251 
1252  CProxy_ParallelIOMgr pIO(thisgroup);
1253  pIO.bcastMolInfo(msg);
1254 
1255  //reset to 0 for the next p2p-based reduction on input procs
1256  procsReceived = 0;
1257  } else delete msg;
1258 }
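
recvMolInfo and bcastMolInfo together form a point-to-point reduction
followed by a broadcast: every input proc sends its partial counters to the
ParallelIOMgr on pe 0, which accumulates until procsReceived equals
numInputProcs and then reuses the final message for the group broadcast.
The division by 2 when filling in the exclusion totals offsets each
exclusion having been counted once from each of its two atoms. The counter
idiom, condensed:

    if (++procsReceived == numInputProcs) {  // last contribution arrived
        /* copy the accumulated totals back into msg */
        pIO.bcastMolInfo(msg);               // reuse msg for the broadcast
        procsReceived = 0;                   // reset for the next reduction
    } else delete msg;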

◆ sendAtomsToHomePatchProcs()

void ParallelIOMgr::sendAtomsToHomePatchProcs ( )

Definition at line 1532 of file ParallelIOMgr.C.

References ResizeArray< Elem >::add(), MovePatchAtomsMsg::allAtoms, ResizeArray< Elem >::begin(), call_sendAtomsToHomePatchProcs(), ResizeArray< Elem >::clear(), PatchMap::delTmpPatchAtomsList(), MovePatchAtomsMsg::from, PatchMap::getTmpPatchAtomsList(), PatchMap::node(), PatchMap::numPatches(), PatchMap::Object(), MovePatchAtomsMsg::patchCnt, MovePatchAtomsMsg::pidList, Random::reorder(), ResizeArray< Elem >::size(), and MovePatchAtomsMsg::sizeList.

1533 {
1534 #ifdef MEM_OPT_VERSION
1535  if(myInputRank==-1) return;
1536 
1537  if ( sendAtomsThread == 0 ) {
1538  sendAtomsThread = CthCreate((CthVoidFn)call_sendAtomsToHomePatchProcs,this,0);
1539  CthAwaken(sendAtomsThread);
1540  return;
1541  }
1542  sendAtomsThread = 0;
1543  numAcksOutstanding = 0;
1544 
1545  PatchMap *patchMap = PatchMap::Object();
1546  int numPatches = patchMap->numPatches();
1547  vector<int> *eachPatchAtomList = patchMap->getTmpPatchAtomsList();
1548 
1549  //each element (proc) contains the list of ids of patches which will stay
1550  //on that processor
1551  ResizeArray<int> *procList = new ResizeArray<int>[CkNumPes()];
1552  ResizeArray<int> pesToSend;
1553  for(int i=0; i<numPatches; i++) {
1554  if(eachPatchAtomList[i].size()==0) continue;
1555  int onPE = patchMap->node(i);
1556  if ( procList[onPE].size() == 0 ) pesToSend.add(onPE);
1557  procList[onPE].add(i);
1558  }
1559 
1560  Random(CkMyPe()).reorder(pesToSend.begin(),pesToSend.size());
1561  //CkPrintf("Pe %d ParallelIOMgr::sendAtomsToHomePatchProcs sending to %d pes\n",CkMyPe(),pesToSend.size());
1562 
1563  //go over every processor to send a message if necessary
1564  //TODO: Optimization for local home patches to save temp memory usage??? -CHAOMEI
1565  CProxy_ParallelIOMgr pIO(thisgroup);
1566  for(int k=0; k<pesToSend.size(); k++) {
1567  const int i = pesToSend[k];
1568  int len = procList[i].size();
1569  if(len==0) continue;
1570 
1571  // Sending one message per pe can result in very large messages
1572  // that break Converse so send one message per patch instead.
1573  for(int j=0; j<len; j++) {
1574  int pid = procList[i][j];
1575  int atomCnt = eachPatchAtomList[pid].size();
1576 
1577  if ( numAcksOutstanding >= 10 ) {
1578  sendAtomsThread = CthSelf();
1579  CthSuspend();
1580  }
1581  ++numAcksOutstanding;
1582 
1583  MovePatchAtomsMsg *msg = new (1, 1, atomCnt, 0)MovePatchAtomsMsg;
1584  msg->from = CkMyPe();
1585  msg->patchCnt = 1;
1586  int atomIdx = 0;
1587  msg->pidList[0] = pid;
1588  msg->sizeList[0] = atomCnt;
1589  for(int k=0; k<atomCnt; k++, atomIdx++) {
1590  int aid = eachPatchAtomList[pid][k];
1591  FullAtom one = initAtoms[aid];
1592  //HACK to allow re-sorting the atom list after it is received on the
1593  //home patch processor -Chao Mei
1594  one.hydVal = initAtoms[aid].hydList;
1595  msg->allAtoms[atomIdx] = one;
1596  }
1597  pIO[i].recvAtomsToHomePatchProcs(msg);
1598  }
1599 
1600  procList[i].clear();
1601  }
1602 
1603  //clean up to free space
1604  delete [] procList;
1605  patchMap->delTmpPatchAtomsList();
1606 
1607  //free the space occupied by the list that contains the input atoms
1608  initAtoms.clear();
1609 #endif
1610 }
int size(void) const
Definition: ResizeArray.h:131
static PatchMap * Object()
Definition: PatchMap.h:27
void clear()
Definition: ResizeArray.h:91
int add(const Elem &elem)
Definition: ResizeArray.h:101
void reorder(Elem *a, int n)
Definition: Random.h:234
Definition: Random.h:37
int numPatches(void) const
Definition: PatchMap.h:59
void call_sendAtomsToHomePatchProcs(void *arg)
CthThread sendAtomsThread
FullAtom * allAtoms
Definition: ParallelIOMgr.h:97
std::vector< int > * getTmpPatchAtomsList()
Definition: PatchMap.h:233
iterator begin(void)
Definition: ResizeArray.h:36
void delTmpPatchAtomsList()
Definition: PatchMap.h:226
int node(int pid) const
Definition: PatchMap.h:114
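
The CthSuspend()/CthAwaken() pair above implements credit-based flow control: the sender allows a bounded number of unacknowledged MovePatchAtomsMsg sends (the source checks numAcksOutstanding >= 10), parking its thread when the window is full; each ackAtomsToHomePatchProcs() decrements the count and reawakens it. A sketch of the same throttle using a standard mutex and condition variable as an analogue of the Charm++ thread primitives (only the window size of 10 is taken from the source):

#include <condition_variable>
#include <mutex>

// Credit-based throttle: at most `limit` sends may be unacknowledged.
class AckWindow {
    std::mutex m;
    std::condition_variable cv;
    int outstanding = 0;
    const int limit;
public:
    explicit AckWindow(int lim) : limit(lim) {}

    // Sender side: block while the window is full, then take a credit.
    // Mirrors the CthSuspend() before each per-patch send.
    void beforeSend() {
        std::unique_lock<std::mutex> lk(m);
        cv.wait(lk, [this] { return outstanding < limit; });
        ++outstanding;
    }

    // Ack side: return a credit and wake the sender, as
    // ackAtomsToHomePatchProcs() does with CthAwaken().
    void onAck() {
        std::lock_guard<std::mutex> lk(m);
        --outstanding;
        cv.notify_one();
    }
};

With AckWindow window(10), the sending loop would call window.beforeSend() before each per-patch message and the ack handler window.onAck(), giving the same bounded-outstanding behavior as the suspended CthThread.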

◆ sendDcdParams()

void ParallelIOMgr::sendDcdParams ( )

Definition at line 679 of file ParallelIOMgr.C.

References Molecule::dcdSelectionAtoms, Molecule::dcdSelectionParams, dcd_params::frequency, dcd_params::inputFilename, Node::molecule, Node::Object(), dcd_params::outFilename, dcd_params::tag, and dcd_params::type.

680 {
681 
682  Molecule *mol = Node::Object()->molecule;
683  std::vector<string> inputs;
684  std::vector<string> outputs;
685  std::vector<uint16> tags;
686  std::vector<int> freqs;
687  std::vector<OUTPUTFILETYPE> types;
688  if(mol->dcdSelectionAtoms)
689  {
690  for(int index=0; index<16;++index)
691  {
692  tags.push_back(mol->dcdSelectionParams[index].tag);
693  freqs.push_back(mol->dcdSelectionParams[index].frequency);
694  types.push_back(mol->dcdSelectionParams[index].type);
695  inputs.push_back(mol->dcdSelectionParams[index].inputFilename);
696  outputs.push_back(mol->dcdSelectionParams[index].outFilename);
697  }
698  CProxy_ParallelIOMgr pIO(thisgroup);
699  pIO.recvDcdParams(tags,
700  inputs,
701  outputs,
702  freqs,
703  types
704  );
705  }
706 }
static Node * Object()
Definition: Node.h:86
DCDParams dcdSelectionParams[16]
Definition: Molecule.h:482
int frequency
Definition: common.h:255
char inputFilename[NAMD_FILENAME_BUFFER_SIZE]
Definition: common.h:253
uint16 tag
Definition: common.h:252
Molecule stores the structural information for the system.
Definition: Molecule.h:174
OUTPUTFILETYPE type
Definition: common.h:256
char outFilename[NAMD_FILENAME_BUFFER_SIZE]
Definition: common.h:254
bool dcdSelectionAtoms
Definition: Molecule.h:481
Molecule * molecule
Definition: Node.h:179
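
sendDcdParams() simply flattens the fixed array of 16 DCD-selection slots into parallel vectors so the whole set can travel in one marshalled broadcast to recvDcdParams(). A sketch of that field-by-field packing, where DCDSlot is a stand-in for NAMD's dcd_params struct rather than the real type:

#include <cstdint>
#include <string>
#include <vector>

// Hypothetical stand-in for NAMD's dcd_params entry.
struct DCDSlot {
    uint16_t    tag;
    int         frequency;
    int         type;            // stands in for OUTPUTFILETYPE
    std::string inputFilename;
    std::string outFilename;
};

// Flatten an array of slots into parallel vectors, one per field,
// ready to be passed as arguments of a single marshalled call.
void packDcdParams(const DCDSlot (&slots)[16],
                   std::vector<uint16_t> &tags,
                   std::vector<int> &freqs,
                   std::vector<int> &types,
                   std::vector<std::string> &inputs,
                   std::vector<std::string> &outputs)
{
    for (const DCDSlot &s : slots) {
        tags.push_back(s.tag);
        freqs.push_back(s.frequency);
        types.push_back(s.type);
        inputs.push_back(s.inputFilename);
        outputs.push_back(s.outFilename);
    }
}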

◆ updateMolInfo()

void ParallelIOMgr::updateMolInfo ( )

Definition at line 1016 of file ParallelIOMgr.C.

References AtomSignature::angleCnt, AtomSignature::angleSigs, atomSigPool, AtomSignature::bondCnt, AtomSignature::bondSigs, AtomSignature::crosstermCnt, AtomSignature::crosstermSigs, AtomSignature::dihedralCnt, AtomSignature::dihedralSigs, ExclusionSignature::fullExclCnt, ExclusionSignature::fullOffset, AtomSignature::gromacsPairCnt, AtomSignature::gromacsPairSigs, AtomSignature::improperCnt, AtomSignature::improperSigs, ExclusionSignature::modExclCnt, ExclusionSignature::modOffset, MolInfoMsg::numAngles, MolInfoMsg::numBonds, MolInfoMsg::numCalcAngles, MolInfoMsg::numCalcBonds, MolInfoMsg::numCalcCrossterms, MolInfoMsg::numCalcDihedrals, MolInfoMsg::numCalcExclusions, MolInfoMsg::numCalcFullExclusions, MolInfoMsg::numCalcImpropers, MolInfoMsg::numCalcLJPairs, MolInfoMsg::numCrossterms, MolInfoMsg::numDihedrals, MolInfoMsg::numExclusions, MolInfoMsg::numImpropers, MolInfoMsg::numLJPairs, MolInfoMsg::numRigidBonds, TupleSignature::offset, MolInfoMsg::totalCharge, MolInfoMsg::totalMass, and MolInfoMsg::totalMV.

1017 {
1018 #ifdef MEM_OPT_VERSION
1019  if(myInputRank==-1) return;
1020 
1021  CProxy_ParallelIOMgr pIO(thisgroup);
1022 
1023  MolInfoMsg *msg = new MolInfoMsg;
1024  msg->numBonds = msg->numCalcBonds = 0;
1025  msg->numAngles = msg->numCalcAngles = 0;
1026  msg->numDihedrals = msg->numCalcDihedrals = 0;
1027  msg->numImpropers = msg->numCalcImpropers = 0;
1028  msg->numCrossterms = msg->numCalcCrossterms = 0;
1029  msg->numExclusions = msg->numCalcExclusions = 0;
1030  int64 numFullExclusions = msg->numCalcFullExclusions = 0;
1031  // JLai
1032  msg->numLJPairs = msg->numCalcLJPairs = 0;
1033  // End of JLai
1034  msg->numRigidBonds = 0;
1035  msg->totalMass = 0.0;
1036  msg->totalCharge = 0.0;
1037 
1038  //calculate the tuples this input processor has
1039  AtomSignature *atomSigPool = molecule->atomSigPool;
1040  ExclusionSignature *exclSigPool = molecule->exclSigPool;
1041  for(int i=0; i<initAtoms.size(); i++) {
1042  AtomSignature *thisSig = &atomSigPool[initAtoms[i].sigId];
1043  msg->numBonds += thisSig->bondCnt;
1044  msg->numAngles += thisSig->angleCnt;
1045  msg->numDihedrals += thisSig->dihedralCnt;
1046  msg->numImpropers += thisSig->improperCnt;
1047  msg->numCrossterms += thisSig->crosstermCnt;
1048  // JLai
1049  msg->numLJPairs += thisSig->gromacsPairCnt;
1050  // End of JLai
1051 
1052  ExclusionSignature *exclSig = &exclSigPool[initAtoms[i].exclId];
1053  msg->numExclusions += (exclSig->fullExclCnt + exclSig->modExclCnt);
1054  numFullExclusions += exclSig->fullExclCnt;
1055 
1056  if(initAtoms[i].rigidBondLength > 0.0) msg->numRigidBonds++;
1057 
1058  msg->totalMass += initAtoms[i].mass;
1059  msg->totalCharge += initAtoms[i].charge;
1060  }
1061 
1062  //deal with the numCalc* counters, which depend on fixed atoms!
1063  if(molecule->numFixedAtoms>0 && ! simParameters->fixedAtomsForces) {
1064  //if there are fixed atoms, calcExclusions needs to be calculated.
1065  //Since it's possible an atom inside this exclusion set is on
1066  //another input processor, we have to resort to the global fixed atoms
1067  //info inside the Molecule object. The number of such accesses should
1068  //be very small! --Chao Mei
1069  int sAId = initAtoms[0].id;
1070  int remoteCnt=0; //stats info
1071  for(int i=0; i<initAtoms.size(); i++) {
1072  //When all the atoms in the set are fixed, the elem (Bond etc.)
1073  //is not counted as a calc*.
1074  int myAId = initAtoms[i].id;
1075  AtomSignature *thisSig = &atomSigPool[initAtoms[i].sigId];
1076  ExclusionSignature *exclSig = &exclSigPool[initAtoms[i].exclId];
1077  if(!initAtoms[i].atomFixed) {
1078  msg->numCalcBonds += thisSig->bondCnt;
1079  msg->numCalcAngles += thisSig->angleCnt;
1080  msg->numCalcDihedrals += thisSig->dihedralCnt;
1081  msg->numCalcImpropers += thisSig->improperCnt;
1082  msg->numCalcCrossterms += thisSig->crosstermCnt;
1083  msg->numCalcExclusions+=(exclSig->fullExclCnt+exclSig->modExclCnt);
1084  msg->numCalcFullExclusions+=(exclSig->fullExclCnt);
1085  continue;
1086  }
1087 
1088  //1. Bonds
1089  for(int j=0; j<thisSig->bondCnt; j++) {
1090  TupleSignature *bsig = &(thisSig->bondSigs[j]);
1091  int a1 = myAId + bsig->offset[0];
1092  if(!isAtomFixed(sAId, a1)) msg->numCalcBonds++;
1093  }
1094 
1095  //2. Angles
1096  for(int j=0; j<thisSig->angleCnt; j++) {
1097  TupleSignature *bsig = &(thisSig->angleSigs[j]);
1098  int a1 = myAId + bsig->offset[0];
1099  int a2 = myAId + bsig->offset[1];
1100  if(!isAtomFixed(sAId, a1) || !isAtomFixed(sAId, a2))
1101  msg->numCalcAngles++;
1102  }
1103 
1104  //3. Dihedrals
1105  for(int j=0; j<thisSig->dihedralCnt; j++) {
1106  TupleSignature *bsig = &(thisSig->dihedralSigs[j]);
1107  int a1 = myAId + bsig->offset[0];
1108  int a2 = myAId + bsig->offset[1];
1109  int a3 = myAId + bsig->offset[2];
1110  if(!isAtomFixed(sAId, a1) ||
1111  !isAtomFixed(sAId, a2) ||
1112  !isAtomFixed(sAId, a3))
1113  msg->numCalcDihedrals++;
1114  }
1115 
1116  //4. Impropers
1117  for(int j=0; j<thisSig->improperCnt; j++) {
1118  TupleSignature *bsig = &(thisSig->improperSigs[j]);
1119  int a1 = myAId + bsig->offset[0];
1120  int a2 = myAId + bsig->offset[1];
1121  int a3 = myAId + bsig->offset[2];
1122  if(!isAtomFixed(sAId, a1) ||
1123  !isAtomFixed(sAId, a2) ||
1124  !isAtomFixed(sAId, a3))
1125  msg->numCalcImpropers++;
1126  }
1127 
1128  //5. Crossterms
1129  for(int j=0; j<thisSig->crosstermCnt; j++) {
1130  TupleSignature *bsig = &(thisSig->crosstermSigs[j]);
1131  int a1 = myAId + bsig->offset[0];
1132  int a2 = myAId + bsig->offset[1];
1133  int a3 = myAId + bsig->offset[2];
1134  int a4 = myAId + bsig->offset[3];
1135  int a5 = myAId + bsig->offset[4];
1136  int a6 = myAId + bsig->offset[5];
1137  int a7 = myAId + bsig->offset[6];
1138 
1139  if(!isAtomFixed(sAId, a1) ||
1140  !isAtomFixed(sAId, a2) ||
1141  !isAtomFixed(sAId, a3) ||
1142  !isAtomFixed(sAId, a4) ||
1143  !isAtomFixed(sAId, a5) ||
1144  !isAtomFixed(sAId, a6) ||
1145  !isAtomFixed(sAId, a7))
1146  msg->numCalcCrossterms++;
1147  }
1148 
1149  //6: Exclusions
1150  //this atom is fixed, check atoms in the exclusion set
1151  for(int j=0; j<exclSig->fullExclCnt; j++) {
1152  int thisAId = exclSig->fullOffset[j]+myAId;
1153  if(!isAtomFixed(sAId, thisAId)) { msg->numCalcExclusions++; msg->numCalcFullExclusions++; }
1154  }
1155  for(int j=0; j<exclSig->modExclCnt; j++) {
1156  int thisAId = exclSig->modOffset[j]+myAId;
1157  if(!isAtomFixed(sAId, thisAId)) msg->numCalcExclusions++;
1158  }
1159 
1160  //7: GromacsPair
1161  for(int j=0; j<thisSig->gromacsPairCnt; j++) {
1162  TupleSignature *bsig = &(thisSig->gromacsPairSigs[j]);
1163  int a1 = myAId + bsig->offset[0];
1164  int a2 = myAId + bsig->offset[1];
1165  if(!isAtomFixed(sAId, a1) ||
1166  !isAtomFixed(sAId, a2))
1167  msg->numCalcLJPairs++;
1168  }
1169  }
1170 #if COLLECT_PERFORMANCE_DATA
1171  printf("Num fixedAtom lookup on proc %d is %d\n", CkMyPe(), numFixedAtomLookup);
1172 #endif
1173  } else {
1174  //no fixed atoms, so each numCalc* equals the corresponding num* count
1175  msg->numCalcBonds = msg->numBonds;
1176  msg->numCalcAngles = msg->numAngles;
1177  msg->numCalcDihedrals = msg->numDihedrals;
1178  msg->numCalcImpropers = msg->numImpropers;
1179  msg->numCalcCrossterms = msg->numCrossterms;
1180  msg->numCalcExclusions = msg->numExclusions;
1181  msg->numCalcFullExclusions = numFullExclusions;
1182  }
1183 
1184 
1185  if(!simParameters->comMove) {
1186  //to remove the center of mass motion from a molecule.
1187  //first calculate the values on every input proc, then reduce.
1188  //For more info, refer to WorkDistrib::remove_com_motion
1189  //-Chao Mei
1190  (msg->totalMV).x = 0.0;
1191  (msg->totalMV).y = 0.0;
1192  (msg->totalMV).z = 0.0;
1193  for (int i=0; i<initAtoms.size(); i++) {
1194  msg->totalMV += initAtoms[i].mass * initAtoms[i].velocity;
1195  }
1196  }
1197 
1198  //always send to the master processor (proc 0)
1199  pIO[0].recvMolInfo(msg);
1200 #endif
1201 }
int64 numCalcFullExclusions
Definition: ParallelIOMgr.h:49
int size(void) const
Definition: ResizeArray.h:131
int numCalcCrossterms
Definition: ParallelIOMgr.h:46
TupleSignature * improperSigs
Definition: structures.h:346
TupleSignature * dihedralSigs
Definition: structures.h:345
int64 numCalcExclusions
Definition: ParallelIOMgr.h:48
TupleSignature * crosstermSigs
Definition: structures.h:347
int numRigidBonds
Definition: ParallelIOMgr.h:53
int numCalcDihedrals
Definition: ParallelIOMgr.h:42
int numDihedrals
Definition: ParallelIOMgr.h:41
TupleSignature * gromacsPairSigs
Definition: structures.h:349
BigReal totalCharge
Definition: ParallelIOMgr.h:60
int64 numExclusions
Definition: ParallelIOMgr.h:47
BigReal totalMass
Definition: ParallelIOMgr.h:57
int numFixedAtoms
Definition: Molecule.h:632
TupleSignature * bondSigs
Definition: structures.h:343
int numCalcImpropers
Definition: ParallelIOMgr.h:44
int numCrossterms
Definition: ParallelIOMgr.h:45
int numCalcLJPairs
Definition: ParallelIOMgr.h:51
int numCalcBonds
Definition: ParallelIOMgr.h:38
Vector totalMV
Definition: ParallelIOMgr.h:58
int numImpropers
Definition: ParallelIOMgr.h:43
int64_t int64
Definition: common.h:39
int gromacsPairCnt
Definition: structures.h:341
TupleSignature * angleSigs
Definition: structures.h:344
HashPool< AtomSigInfo > atomSigPool
Definition: CompressPsf.C:313
int numCalcAngles
Definition: ParallelIOMgr.h:40
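
In the MEM_OPT build each atom carries only a signature id; every tuple in a signature records its partner atoms as offsets relative to the owning atom, so the loops above reconstruct global ids as myAId + offset[k] and count a tuple toward numCalc* unless every member atom is fixed. A minimal sketch of that counting rule, with a trivial set-based isAtomFixed() standing in for the Molecule lookup:

#include <unordered_set>
#include <vector>

// A tuple stores its partner atoms as offsets from the owning atom's id.
struct Tuple { std::vector<int> offset; };

static std::unordered_set<int> fixedAtoms;   // ids of fixed atoms (stub state)
bool isAtomFixed(int atomId) { return fixedAtoms.count(atomId) != 0; }

// A tuple is "calculated" only if at least one member atom can move,
// matching the numCalc* logic in updateMolInfo().
int countCalcTuples(int myAId, bool myAtomFixed,
                    const std::vector<Tuple> &tuples)
{
    int calc = 0;
    for (const Tuple &t : tuples) {
        bool anyMobile = !myAtomFixed;
        for (int off : t.offset)
            anyMobile = anyMobile || !isAtomFixed(myAId + off);
        if (anyMobile) ++calc;
    }
    return calc;
}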

◆ wrapCoor()

void ParallelIOMgr::wrapCoor ( int seq, Lattice lat )

Definition at line 1956 of file ParallelIOMgr.C.

References ResizeArray< Elem >::add(), UniqueSetIter< T >::begin(), ClusterCoorElem::clusterId, ClusterCoorMsg::clusterId, ClusterCoorElem::dsum, ClusterCoorMsg::dsum, UniqueSetIter< T >::end(), ResizeArray< Elem >::size(), and ClusterCoorMsg::srcRank.

1957 {
1958 #ifdef MEM_OPT_VERSION
1959  coorInstance = midCM->getReadyPositions(seq);
1960 
1961  coorInstance->lattice = lat; //record the lattice to use for wrapAll/Water!
1962  int fromAtomID = coorInstance->fromAtomID;
1963  int toAtomID = coorInstance->toAtomID;
1964 
1965  //only reference copies
1966  ResizeArray<Vector> &data = coorInstance->data;
1967  ResizeArray<FloatVector> &fdata = coorInstance->fdata;
1968  //if both data and fdata are non-empty, they hold the same values and
1969  //differ only in precision. Therefore, we only need to compute
1970  //the higher-precision coordinate array. -Chao Mei
1971  int dsize = data.size();
1972  int numMyAtoms = toAtomID-fromAtomID+1;
1973  tmpCoorCon = new Vector[numMyAtoms];
1974  ClusterCoorElem one;
1975  //1. compute wrapped coordinates locally
1976  for(int i=0; i<numMyAtoms; i++){
1977  tmpCoorCon[i] = 0.0;
1978  int cid = clusterID[i];
1979  if(cid<fromAtomID){
1980  //on output procs ahead of me
1981  one.clusterId = cid;
1982  ClusterCoorElem *ret = remoteCoors.find(one);
1983  if(ret==NULL){
1984  if(dsize==0)
1985  one.dsum = fdata[i];
1986  else
1987  one.dsum = data[i];
1988 
1989  remoteCoors.add(one);
1990  }else{
1991  if(dsize==0)
1992  ret->dsum += fdata[i];
1993  else
1994  ret->dsum += data[i];
1995  }
1996  }else{
1997  if(dsize==0)
1998  tmpCoorCon[cid-fromAtomID] += fdata[i];
1999  else
2000  tmpCoorCon[cid-fromAtomID] += data[i];
2001  }
2002  }
2003 
2004  //2. Prepare to send msgs to remote output procs to reduce the
2005  //coordinate sums of each cluster
2006  CmiAssert(numRemoteClusters == remoteCoors.size());
2007  numCSMAck = 0; //set to 0 to prepare recving the final coor update
2008  CProxy_ParallelIOMgr pIO(thisgroup);
2009  ClusterCoorSetIter iter(remoteCoors);
2010  for(iter=iter.begin(); iter!=iter.end(); iter++){
2011  ClusterCoorMsg *msg = new ClusterCoorMsg;
2012  msg->srcRank = myOutputRank;
2013  msg->clusterId = iter->clusterId;
2014  msg->dsum = iter->dsum;
2015  int dstRank = atomRankOnOutput(iter->clusterId);
2016  pIO[outputProcArray[dstRank]].recvClusterCoor(msg);
2017  }
2018 
2019  //Just send a local NULL msg to indicate that wrapping of the
2020  //local coordinates has finished.
2021  recvClusterCoor(NULL);
2022 #endif
2023 }
Elem * find(const Elem &elem)
Definition: UniqueSet.h:60
int size(void) const
Definition: ResizeArray.h:131
int size(void) const
Definition: UniqueSet.h:58
Definition: Vector.h:72
int add(const Elem &elem)
Definition: UniqueSet.h:52
void recvClusterCoor(ClusterCoorMsg *msg)
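
The first loop in wrapCoor() splits the per-cluster coordinate sums by ownership: clusters anchored at or after fromAtomID accumulate locally into tmpCoorCon, while clusters anchored on an earlier output rank are batched into a find-or-add set and later mailed to the owning rank for the final reduction. A sketch of that split, using std::unordered_map as an analogue of NAMD's UniqueSet (localSums is assumed to be pre-sized to this rank's atom range):

#include <unordered_map>
#include <vector>

struct Vec {
    double x = 0, y = 0, z = 0;
    Vec &operator+=(const Vec &o) { x += o.x; y += o.y; z += o.z; return *this; }
};

// Split per-atom positions into local cluster sums and per-remote-cluster
// sums destined for the output rank that owns each cluster's first atom.
void accumulateClusterSums(const std::vector<Vec> &pos,
                           const std::vector<int> &clusterID,
                           int fromAtomID,
                           std::vector<Vec> &localSums,              // indexed by cid - fromAtomID
                           std::unordered_map<int, Vec> &remoteSums) // keyed by cluster id
{
    for (size_t i = 0; i < pos.size(); ++i) {
        int cid = clusterID[i];
        if (cid < fromAtomID)
            remoteSums[cid] += pos[i];   // cluster anchored on an earlier rank
        else
            localSums[cid - fromAtomID] += pos[i];
    }
}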

Member Data Documentation

◆ numAcksOutstanding

int ParallelIOMgr::numAcksOutstanding

Definition at line 386 of file ParallelIOMgr.h.

◆ sendAtomsThread

CthThread ParallelIOMgr::sendAtomsThread

Definition at line 384 of file ParallelIOMgr.h.


The documentation for this class was generated from the following files: