// NAMD: ComputeGlobal.C

/**
***  Copyright (c) 1995, 1996, 1997, 1998, 1999, 2000 by
***  The Board of Trustees of the University of Illinois.
***  All rights reserved.
**/

/*
   Forwards atoms to the master node for force evaluation.
*/
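
/*
   Message flow, as implemented in this file:

   1. doWork() runs each timestep; it opens the patch position boxes and
      calls sendData() (on the very first call it only sends the lattice).
   2. sendData() packs requested atom positions, group centers of mass,
      grid object values, and previously saved total forces into a
      ComputeGlobalDataMsg and hands it to
      ComputeMgr::sendComputeGlobalData() for delivery to the master.
   3. The master replies with a ComputeGlobalResultsMsg, processed by
      recvResults(), which applies the returned atom, group, and grid
      object forces to the local patches and closes the boxes.
   4. saveTotalForces() is called by each HomePatch after force
      evaluation; the saved forces are shipped with the next step's
      data message.
*/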

#include "InfoStream.h"
#include "Node.h"
#include "PatchMap.h"
#include "PatchMap.inl"
#include "AtomMap.h"
#include "ComputeGlobal.h"
#include "ComputeGlobalMsgs.h"
#include "GridForceGrid.h"
#include "PatchMgr.h"
#include "Molecule.h"
#include "ReductionMgr.h"
#include "ComputeMgr.h"
#include "ComputeMgr.decl.h"
#include "SimParameters.h"
#include <stdio.h>
#include <algorithm>

#include "Debug.h"

#include "GridForceGrid.inl"
#include "MGridforceParams.h"

// CLIENTS

ComputeGlobal::ComputeGlobal(ComputeID c, ComputeMgr *m)
  : ComputeHomePatches(c)
{
  DebugM(3,"Constructing client\n");
  aid.resize(0);
  gdef.resize(0);
  comm = m;
  firsttime = 1;
  isRequested = 0;
  isRequestedAllocSize = 0;
  endRequested = 0;
  numGroupsRequested = 0;
  SimParameters *sp = Node::Object()->simParameters;
  dofull = (sp->GBISserOn || sp->GBISOn || sp->fullDirectOn || sp->FMAOn || sp->PMEOn);
  forceSendEnabled = 0;
  if ( sp->tclForcesOn ) forceSendEnabled = 1;
  if ( sp->colvarsOn ) forceSendEnabled = 1;
  forceSendActive = 0;
  fid.resize(0);
  totalForce.resize(0);
  gfcount = 0;
  groupTotalForce.resize(0);
  reduction = ReductionMgr::Object()->willSubmit(REDUCTIONS_BASIC);
  int numPatches = PatchMap::Object()->numPatches();
  forcePtrs = new Force*[numPatches];
  atomPtrs = new FullAtom*[numPatches];
  gridForcesPtrs = new ForceList **[numPatches];
  numGridObjects = numActiveGridObjects = 0;
  for ( int i = 0; i < numPatches; ++i ) {
    forcePtrs[i] = NULL;
    atomPtrs[i] = NULL;
    gridForcesPtrs[i] = NULL;
  }
}

ComputeGlobal::~ComputeGlobal()
{
  delete[] isRequested;
  delete[] forcePtrs;
  deleteGridObjects();
  delete[] gridForcesPtrs;
  delete[] atomPtrs;
  delete reduction;
}

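// Installs a new set of requests from the master: the list of individual
// atom IDs, the flattened group definitions (atom IDs with -1 as a group
// separator), and the requested grid object indices.  When total force
// reporting is enabled it also rebuilds isRequested (bit 1 = individual
// atom, bit 2 = group member) and the sorted atom-to-group map gpair
// used by saveTotalForces().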
void ComputeGlobal::configure(AtomIDList &newaid, AtomIDList &newgdef, IntList &newgridobjid) {
  DebugM(4,"Receiving configuration (" << newaid.size() <<
         " atoms, " << newgdef.size() << " atoms/groups and " <<
         newgridobjid.size() << " grid objects) on client\n" << endi);

  AtomIDList::iterator a, a_e;

  if ( forceSendEnabled ) {
    // clear previous data
    int max = -1;
    for (a=newaid.begin(),a_e=newaid.end(); a!=a_e; ++a) {
      if ( *a > max ) max = *a;
    }
    for (a=newgdef.begin(),a_e=newgdef.end(); a!=a_e; ++a) {
      if ( *a > max ) max = *a;
    }
    endRequested = max+1;
    if ( endRequested > isRequestedAllocSize ) {
      delete [] isRequested;
      isRequestedAllocSize = endRequested+10;
      isRequested = new char[isRequestedAllocSize];
      memset(isRequested, 0, isRequestedAllocSize);
    } else {
      for (a=aid.begin(),a_e=aid.end(); a!=a_e; ++a) {
        isRequested[*a] = 0;
      }
      for (a=gdef.begin(),a_e=gdef.end(); a!=a_e; ++a) {
        if ( *a != -1 ) isRequested[*a] = 0;
      }
    }
    // reserve space
    gpair.resize(0);
    gpair.resize(newgdef.size());
    gpair.resize(0);
  }

  // store data
  aid.swap(newaid);
  gdef.swap(newgdef);

  if (newgridobjid.size()) configureGridObjects(newgridobjid);

  if ( forceSendEnabled ) {
    int newgcount = 0;
    for (a=aid.begin(),a_e=aid.end(); a!=a_e; ++a) {
      isRequested[*a] = 1;
    }
    for (a=gdef.begin(),a_e=gdef.end(); a!=a_e; ++a) {
      if ( *a == -1 ) ++newgcount;
      else {
        isRequested[*a] |= 2;
        gpair.add(intpair(*a,newgcount));
      }
    }
    std::sort(gpair.begin(),gpair.end());
    numGroupsRequested = newgcount;
  }
}

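// Frees the per-patch, per-grid force buffers allocated by
// configureGridObjects() and resets the grid object counters.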
void ComputeGlobal::deleteGridObjects()
{
  if (numGridObjects == 0) return;
  ResizeArrayIter<PatchElem> ap(patchList);
  for (ap = ap.begin(); ap != ap.end(); ap++) {
    ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
    if (gridForces != NULL) {
      for (size_t ig = 0; ig < numGridObjects; ig++) {
        if (gridForces[ig] != NULL) {
          delete gridForces[ig];
          gridForces[ig] = NULL;
        }
      }
      delete [] gridForces;
      // clear the per-patch entry itself, not just the local copy
      gridForcesPtrs[ap->p->getPatchID()] = NULL;
    }
  }
  numGridObjects = numActiveGridObjects = 0;
}

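// Marks the grid objects requested by the master as active and allocates
// one ForceList per active grid on every home patch to hold the per-atom
// grid forces computed by computeGridObjects().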
void ComputeGlobal::configureGridObjects(IntList &newgridobjid)
{
  Molecule *mol = Node::Object()->molecule;

  deleteGridObjects();

  numGridObjects = mol->numGridforceGrids;
  numActiveGridObjects = 0;

  gridObjActive.resize(numGridObjects);
  gridObjActive.setall(0);

  IntList::const_iterator goid_i = newgridobjid.begin();
  IntList::const_iterator goid_e = newgridobjid.end();
  for ( ; goid_i != goid_e; goid_i++) {
    if ((*goid_i < 0) || (*goid_i >= numGridObjects)) {
      NAMD_bug("Requested illegal gridForceGrid index.");
    } else {
      DebugM(3,"Adding grid with index " << *goid_i << " to ComputeGlobal\n");
      gridObjActive[*goid_i] = 1;
      numActiveGridObjects++;
    }
  }

  for (size_t ig = 0; ig < numGridObjects; ig++) {
    DebugM(3,"Grid index " << ig << " active = "
           << gridObjActive[ig] << "\n" << endi);
  }

  ResizeArrayIter<PatchElem> ap(patchList);
  for (ap = ap.begin(); ap != ap.end(); ap++) {
    gridForcesPtrs[ap->p->getPatchID()] = new ForceList *[numGridObjects];
    ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
    for (size_t ig = 0; ig < numGridObjects; ig++) {
      if (gridObjActive[ig]) {
        gridForces[ig] = new ForceList;
      } else {
        gridForces[ig] = NULL;
      }
    }
  }
}
200 
201 #if 0
202 void ComputeGlobal::recvConfig(ComputeGlobalConfigMsg *msg) {
203  DebugM(3,"Receiving configure on client\n");
204  configure(msg->aid,msg->gdef);
205  delete msg;
206  sendData();
207 }
208 #endif
209 
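// Handles the master's reply: adds the returned per-atom forces, group
// forces (received as accelerations and scaled by each atom's mass), and
// grid object forces to the local patches, submits the external
// force/virial reductions, applies any reconfiguration, and resends
// coordinates immediately if the master asks for another round.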
void ComputeGlobal::recvResults(ComputeGlobalResultsMsg *msg) {
  DebugM(3,"Receiving results (" << msg->aid.size() << " forces, "
         << msg->newgdef.size() << " new group atoms) on client\n");

  forceSendActive = msg->totalforces;
  if ( forceSendActive && ! forceSendEnabled ) NAMD_bug("ComputeGlobal::recvResults forceSendActive without forceSendEnabled");

  // set the forces only if we aren't going to resend the data
  int setForces = !msg->resendCoordinates;

  if(setForces) { // we are requested to
    // Store forces to patches
    AtomMap *atomMap = AtomMap::Object();
    const Lattice & lattice = patchList[0].p->lattice;
    ResizeArrayIter<PatchElem> ap(patchList);
    Force **f = forcePtrs;
    FullAtom **t = atomPtrs;
    Force extForce = 0.;
    Tensor extVirial;

    for (ap = ap.begin(); ap != ap.end(); ap++) {
      (*ap).r = (*ap).forceBox->open();
      f[(*ap).patchID] = (*ap).r->f[Results::normal];
      t[(*ap).patchID] = (*ap).p->getAtomList().begin();
    }

    AtomIDList::iterator a = msg->aid.begin();
    AtomIDList::iterator a_e = msg->aid.end();
    ForceList::iterator f2 = msg->f.begin();
    for ( ; a != a_e; ++a, ++f2 ) {
      DebugM(1,"processing atom "<<(*a)<<", F="<<(*f2)<<"...\n");
      /* XXX if (*a) is out of bounds here we get a segfault */
      LocalID localID = atomMap->localID(*a);
      if ( localID.pid == notUsed || ! f[localID.pid] ) continue;
      Force f_atom = (*f2);
      f[localID.pid][localID.index] += f_atom;
      FullAtom &atom = t[localID.pid][localID.index];
      Position x_orig = atom.position;
      Transform trans = atom.transform;
      Position x_atom = lattice.reverse_transform(x_orig,trans);
      extForce += f_atom;
      extVirial += outer(f_atom,x_atom);
    }
    DebugM(1,"done with the loop\n");

    // calculate forces for atoms in groups
    AtomIDList::iterator g_i, g_e;
    g_i = gdef.begin(); g_e = gdef.end();
    ForceList::iterator gf_i = msg->gforce.begin();
    //iout << iDEBUG << "recvResults\n" << endi;
    for ( ; g_i != g_e; ++g_i, ++gf_i ) {
      //iout << iDEBUG << *gf_i << '\n' << endi;
      Vector accel = (*gf_i);
      for ( ; *g_i != -1; ++g_i ) {
        //iout << iDEBUG << *g_i << '\n' << endi;
        LocalID localID = atomMap->localID(*g_i);
        if ( localID.pid == notUsed || ! f[localID.pid] ) continue;
        FullAtom &atom = t[localID.pid][localID.index];
        Force f_atom = accel * atom.mass;
        f[localID.pid][localID.index] += f_atom;
        Position x_orig = atom.position;
        Transform trans = atom.transform;
        Position x_atom = lattice.reverse_transform(x_orig,trans);
        extForce += f_atom;
        extVirial += outer(f_atom,x_atom);
      }
    }
    DebugM(1,"done with the groups\n");

    if (numActiveGridObjects > 0) {
      applyGridObjectForces(msg, &extForce, &extVirial);
    }

    ADD_VECTOR_OBJECT(reduction,REDUCTION_EXT_FORCE_NORMAL,extForce);
    ADD_TENSOR_OBJECT(reduction,REDUCTION_VIRIAL_NORMAL,extVirial);
    reduction->submit();
  }
  // done setting the forces, close boxes below

  // Get reconfiguration if present
  if ( msg->reconfig ) {
    DebugM(3,"Reconfiguring\n");
    configure(msg->newaid, msg->newgdef, msg->newgridobjid);
  }

  // send another round of data if requested

  if(msg->resendCoordinates) {
    DebugM(3,"Sending requested data right away\n");
    sendData();
  }

  groupTotalForce.resize(numGroupsRequested);
  for ( int i=0; i<numGroupsRequested; ++i ) groupTotalForce[i] = 0;

  if(setForces) {
    ResizeArrayIter<PatchElem> ap(patchList);
    Force **f = forcePtrs;
    FullAtom **t = atomPtrs;
    for (ap = ap.begin(); ap != ap.end(); ap++) {
      CompAtom *x;
      (*ap).positionBox->close(&x);
      (*ap).forceBox->close(&((*ap).r));
      f[(*ap).patchID] = 0;
      t[(*ap).patchID] = 0;
    }
  }

  delete msg;
  DebugM(3,"Done processing results\n");
}

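// Called every timestep.  Caches each home patch's atom array and keeps
// the position boxes open (they are closed in recvResults()).  On the
// first invocation only the lattice is sent (step = -1), so the master
// can configure itself before the first real exchange.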
void ComputeGlobal::doWork()
{
  DebugM(2,"doWork\n");

  ResizeArrayIter<PatchElem> ap(patchList);
  FullAtom **t = atomPtrs;

  for (ap = ap.begin(); ap != ap.end(); ap++) {
    CompAtom *x = (*ap).positionBox->open();
    t[(*ap).patchID] = (*ap).p->getAtomList().begin();
  }

  if(!firsttime) sendData();
  else {
    if ( hasPatchZero ) {
      ComputeGlobalDataMsg *msg = new ComputeGlobalDataMsg;
      msg->lat.add(patchList[0].p->lattice);
      msg->step = -1;
      msg->count = 1;
      msg->patchcount = 0;
      comm->sendComputeGlobalData(msg);
    }
    firsttime = 0;
    comm->enableComputeGlobalResults();
  }
  DebugM(2,"done with doWork\n");
}

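// Collects everything the master needs for this step: positions of
// individually requested atoms (unwrapped with reverse_transform),
// centers of mass of requested groups, grid object values, and the
// total forces saved by saveTotalForces() on the previous step.
// The message is sent only if it carries any payload.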
void ComputeGlobal::sendData()
{
  DebugM(2,"sendData\n");
  // Get positions from patches
  AtomMap *atomMap = AtomMap::Object();
  const Lattice & lattice = patchList[0].p->lattice;
  FullAtom **t = atomPtrs;

  ComputeGlobalDataMsg *msg = new ComputeGlobalDataMsg;

  msg->step = patchList[0].p->flags.step;
  msg->count = 0;
  msg->patchcount = 0;

  AtomIDList::iterator a = aid.begin();
  AtomIDList::iterator a_e = aid.end();
  for ( ; a != a_e; ++a ) {
    LocalID localID = atomMap->localID(*a);
    if ( localID.pid == notUsed || ! t[localID.pid] ) continue;
    msg->aid.add(*a);
    msg->count++;
    FullAtom &atom = t[localID.pid][localID.index];
    Position x_orig = atom.position;
    Transform trans = atom.transform;
    msg->p.add(lattice.reverse_transform(x_orig,trans));
  }

  // calculate group centers of mass
  AtomIDList::iterator g_i, g_e;
  g_i = gdef.begin(); g_e = gdef.end();
  for ( ; g_i != g_e; ++g_i ) {
    Vector com(0,0,0);
    BigReal mass = 0.;
    for ( ; *g_i != -1; ++g_i ) {
      LocalID localID = atomMap->localID(*g_i);
      if ( localID.pid == notUsed || ! t[localID.pid] ) continue;
      msg->count++;
      FullAtom &atom = t[localID.pid][localID.index];
      Position x_orig = atom.position;
      Transform trans = atom.transform;
      com += lattice.reverse_transform(x_orig,trans) * atom.mass;
      mass += atom.mass;
    }
    DebugM(1,"Adding center of mass "<<com<<"\n");
    msg->gcom.add(com);
    msg->gmass.add(mass);
  }

  if (numActiveGridObjects > 0) {
    computeGridObjects(msg);
  }

  msg->fid.swap(fid);
  msg->tf.swap(totalForce);
  fid.resize(0);
  totalForce.resize(0);

  if ( gfcount ) msg->gtf.swap(groupTotalForce);
  msg->count += ( msg->fid.size() + gfcount );
  gfcount = 0;

  DebugM(3,"Sending data (" << msg->p.size() << " positions, "
         << msg->gcom.size() << " groups, " << msg->gridobjvalue.size()
         << " grid objects) on client\n");
  if ( hasPatchZero ) { msg->count++; msg->lat.add(lattice); }
  if ( msg->count || msg->patchcount ) comm->sendComputeGlobalData(msg);
  else delete msg;
  comm->enableComputeGlobalResults();
}

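// Accumulates, for one grid, the per-atom forces and the total grid
// energy over the atoms of a patch.  Templated on the concrete grid
// type (GridforceFullMainGrid or GridforceLiteGrid), presumably so that
// compute_VdV() can be resolved at compile time for each grid flavor.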
template<class T> void ComputeGlobal::computeGridForceGrid(FullAtomList::iterator aii,
                                                           FullAtomList::iterator aei,
                                                           ForceList::iterator gfii,
                                                           Lattice const &lattice,
                                                           int gridIndex,
                                                           T *grid,
                                                           BigReal &gridObjValue)
{
  ForceList::iterator gfi = gfii;
  FullAtomList::iterator ai = aii;
  FullAtomList::iterator ae = aei;
  Molecule *mol = Node::Object()->molecule;
  for ( ; ai != ae; ai++, gfi++) {
    *gfi = Vector(0.0, 0.0, 0.0);
    if (! mol->is_atom_gridforced(ai->id, gridIndex)) {
      continue;
    }
    Real scale;
    Charge charge;
    Vector dV;
    float V;
    mol->get_gridfrc_params(scale, charge, ai->id, gridIndex);
    Position pos = grid->wrap_position(ai->position, lattice);
    DebugM(1, "id = " << ai->id << ", scale = " << scale
           << ", charge = " << charge << ", position = " << pos << "\n");
    if (grid->compute_VdV(pos, V, dV)) {
      // out-of-bounds atom
      continue;
    }
    // ignore global gfScale
    *gfi = -charge * scale * dV;
    gridObjValue += charge * scale * V;
    DebugM(1, "id = " << ai->id << ", force = " << *gfi << "\n");
  }
  DebugM(3, "gridObjValue = " << gridObjValue << "\n" << endi);
}

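// Evaluates all active grid objects on every home patch: stores the
// per-atom grid forces in the per-patch ForceLists and accumulates each
// grid's energy into msg->gridobjvalue for the master.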
void ComputeGlobal::computeGridObjects(ComputeGlobalDataMsg *msg)
{
  DebugM(3,"computeGridObjects\n" << endi);
  Molecule *mol = Node::Object()->molecule;
  const Lattice &lattice = patchList[0].p->lattice;

  if (mol->numGridforceGrids < 1) {
    NAMD_bug("No grids loaded in memory but ComputeGlobal has been requested to use them.");
  }

  msg->gridobjindex.resize(numActiveGridObjects);
  msg->gridobjindex.setall(-1);
  msg->gridobjvalue.resize(numActiveGridObjects);
  msg->gridobjvalue.setall(0.0);

  size_t ig = 0, gridobjcount = 0;

  // loop over home patches
  ResizeArrayIter<PatchElem> ap(patchList);
  for (ap = ap.begin(); ap != ap.end(); ap++) {

    msg->patchcount++;

    int const numAtoms = ap->p->getNumAtoms();
    ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];

    gridobjcount = 0;
    for (ig = 0; ig < numGridObjects; ig++) {

      DebugM(2,"Processing grid index " << ig << "\n" << endi);

      // Only process here objects requested by the GlobalMasters
      if (!gridObjActive[ig]) {
        DebugM(2,"Skipping grid index " << ig << "; it is handled by "
               "ComputeGridForce\n" << endi);
        continue;
      }

      ForceList *gridForcesGrid = gridForces[ig];
      gridForcesGrid->resize(numAtoms);

      ForceList::iterator gfi = gridForcesGrid->begin();
      FullAtomList::iterator ai = ap->p->getAtomList().begin();
      FullAtomList::iterator ae = ap->p->getAtomList().end();

      DebugM(2, "computeGridObjects(): patch = " << ap->p->getPatchID()
             << ", grid index = " << ig << "\n" << endi);
      GridforceGrid *grid = mol->get_gridfrc_grid(ig);

      msg->gridobjindex[gridobjcount] = ig;
      BigReal &gridobjvalue = msg->gridobjvalue[gridobjcount];

      if (grid->get_grid_type() == GridforceGrid::GridforceGridTypeFull) {

        GridforceFullMainGrid *g = dynamic_cast<GridforceFullMainGrid *>(grid);
        computeGridForceGrid(ai, ae, gfi, ap->p->lattice, ig, g, gridobjvalue);

      } else if (grid->get_grid_type() == GridforceGrid::GridforceGridTypeLite) {

        GridforceLiteGrid *g = dynamic_cast<GridforceLiteGrid *>(grid);
        computeGridForceGrid(ai, ae, gfi, ap->p->lattice, ig, g, gridobjvalue);
      }

      gridobjcount++;
    }
  }

  for (gridobjcount = 0; gridobjcount < numActiveGridObjects; gridobjcount++) {
    DebugM(3, "Total gridObjValue[" << msg->gridobjindex[gridobjcount]
           << "] = " << msg->gridobjvalue[gridobjcount] << "\n");
  }

  DebugM(2,"computeGridObjects done\n");
}

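// Applies the scalar forces returned by the master for each grid object:
// each stored per-atom grid force is sign-flipped back to a gradient and
// scaled by the received value, then added to the patch forces along
// with its virial contribution.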
void ComputeGlobal::applyGridObjectForces(ComputeGlobalResultsMsg *msg,
                                          Force *extForce_in,
                                          Tensor *extVirial_in)
{
  if (msg->gridobjforce.size() == 0) return;

  if (msg->gridobjforce.size() != numActiveGridObjects) {
    NAMD_bug("ComputeGlobal received a different number of grid forces than active grids.");
  }

  Molecule *mol = Node::Object()->molecule;
  const Lattice &lattice = patchList[0].p->lattice;
  AtomMap *atomMap = AtomMap::Object();
  Force &extForce = *extForce_in;
  Tensor &extVirial = *extVirial_in;

  // map applied forces from the message; it holds one value per *active*
  // grid, so advance its iterator only when a grid is active
  BigRealList gridObjForces;
  gridObjForces.resize(numGridObjects);
  gridObjForces.setall(0.0);
  BigRealList::iterator gridobjforce_i = msg->gridobjforce.begin();
  BigRealList::iterator gridobjforce_e = msg->gridobjforce.end();
  int ig;
  for (ig = 0; gridobjforce_i != gridobjforce_e; ig++) {
    if (!gridObjActive[ig]) continue;
    gridObjForces[ig] = *(gridobjforce_i++);
  }

  // loop over home patches
  ResizeArrayIter<PatchElem> ap(patchList);
  for (ap = ap.begin(); ap != ap.end(); ap++) {

    ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];

    for (ig = 0; ig < numGridObjects; ig++) {

      if (!gridObjActive[ig]) continue;

      DebugM(2, "gof = " << gridObjForces[ig] << "\n" << endi);

      ForceList *gridForcesGrid = gridForces[ig];

      FullAtomList::iterator ai = ap->p->getAtomList().begin();
      FullAtomList::iterator ae = ap->p->getAtomList().end();
      Force *f = ap->r->f[Results::normal];
      ForceList::iterator gfi = gridForcesGrid->begin();

      for ( ; ai != ae; ai++, gfi++) {
        if (! mol->is_atom_gridforced(ai->id, ig)) {
          *gfi = Vector(0.0, 0.0, 0.0);
          continue;
        }
        LocalID localID = atomMap->localID(ai->id);
        // forces were stored; flipping sign to get gradients
        Vector const gridforceatom(-1.0 * (*gfi) * gridObjForces[ig]);
        DebugM(2, "id = " << ai->id
               << ", pid = " << localID.pid
               << ", index = " << localID.index
               << ", force = " << gridforceatom << "\n" << endi);
        f[localID.index] += gridforceatom;
        extForce += gridforceatom;
        Position x_orig = ai->position;
        Transform transform = ai->transform;
        Position x_virial = lattice.reverse_transform(x_orig, transform);
        extVirial += outer(gridforceatom, x_virial);
      }
    }
  }
  // extForce and extVirial are being communicated by calling function
}

// This function is called by each HomePatch after force
// evaluation. It stores the indices and forces of the requested
// atoms here, to be sent to GlobalMasterServer during the next
// time step. The total force is the sum of three components:
// "normal", "nbond" and "slow"; the latter two may be calculated
// less frequently, so their most recent values are stored in
// "f_saved" and used here. If we don't do full electrostatics,
// there's no "slow" part.
void ComputeGlobal::saveTotalForces(HomePatch *homePatch)
{
  if ( ! forceSendEnabled ) NAMD_bug("ComputeGlobal::saveTotalForces called unexpectedly");
  if ( ! forceSendActive ) return;

  if ( Node::Object()->simParameters->accelMDOn && Node::Object()->simParameters->accelMDDebugOn && Node::Object()->simParameters->accelMDdihe ) {
    int num=homePatch->numAtoms;
    FullAtomList &atoms = homePatch->atom;
    ForceList &af=homePatch->f[Results::amdf];

    for (int i=0; i<num; ++i) {
      int index = atoms[i].id;
      if (index < endRequested && (isRequested[index] & 1)) {
        fid.add(index);
        totalForce.add(af[i]);
      }
    }
    return;
  }

  int fixedAtomsOn = Node::Object()->simParameters->fixedAtomsOn;
  int num=homePatch->numAtoms;
  FullAtomList &atoms = homePatch->atom;
  ForceList &f1=homePatch->f[Results::normal], &f2=homePatch->f_saved[Results::nbond],
            &f3=homePatch->f_saved[Results::slow];
  Force f_sum;

  for (int i=0; i<num; ++i) {
    int index = atoms[i].id;
    char reqflag;
    if (index < endRequested && (reqflag = isRequested[index])) {
      f_sum = f1[i]+f2[i];
      if (dofull)
        f_sum += f3[i];
      if ( fixedAtomsOn && atoms[i].atomFixed ) f_sum = 0.;
      if ( reqflag & 1 ) { // individual atom
        fid.add(index);
        totalForce.add(f_sum);
      }
      if ( reqflag & 2 ) { // part of group
        intpair *gpend = gpair.end();
        intpair *gpi = std::lower_bound(gpair.begin(),gpend,intpair(index,0));
        if ( gpi == gpend || gpi->first != index )
          NAMD_bug("ComputeGlobal::saveTotalForces gpair corrupted.");
        do {
          ++gfcount;
          groupTotalForce[gpi->second] += f_sum;
        } while ( ++gpi != gpend && gpi->first == index );
      }
    }
  }
}