NAMD
ComputeGlobal.C
Go to the documentation of this file.
1 
7 /*
8  Forwards atoms to master node for force evaluation.
9 */
10 
11 #include "InfoStream.h"
12 #include "Node.h"
13 #include "PatchMap.h"
14 #include "PatchMap.inl"
15 #include "AtomMap.h"
16 #include "ComputeGlobal.h"
17 #include "ComputeGlobalMsgs.h"
18 #include "GridForceGrid.h"
19 #include "PatchMgr.h"
20 #include "Molecule.h"
21 #include "ReductionMgr.h"
22 #include "ComputeMgr.h"
23 #include "ComputeMgr.decl.h"
24 #include "SimParameters.h"
25 #include "PatchData.h"
26 #include <stdio.h>
27 #include <algorithm>
28 #include "NamdEventsProfiling.h"
29 #define MIN_DEBUG_LEVEL 3
30 //#define DEBUGM
31 #include "Debug.h"
32 #define USE_GLOBALMASTER_VIRIAL_KERNEL 1
33 #include "GridForceGrid.inl"
34 #include "MGridforceParams.h"
35 
36 // CLIENTS
37 
// ComputeGlobal client constructor body. NOTE(review): the signature line is
// elided in this extract (orig. lines 38-39); `m` is presumably the ComputeMgr
// and `sp` the SimParameters — confirm against the full source.
40 {
41  DebugM(3,"Constructing client\n");
// Start with empty requested-atom and group-definition lists.
42  aid.resize(0);
43  gdef.resize(0);
44  comm = m;
45  firsttime = 1;
// Total-force request bookkeeping starts empty / unallocated.
46  isRequested = 0;
47  isRequestedAllocSize = 0;
48  endRequested = 0;
49  numGroupsRequested = 0;
// Full electrostatics is in play if any of these long-range methods is on.
51  dofull = (sp->GBISserOn || sp->GBISOn || sp->fullDirectOn || sp->FMAOn || sp->PMEOn);
// Total forces are returned to the master only for these client types.
52  forceSendEnabled = 0;
53  if ( sp->tclForcesOn ) forceSendEnabled = 1;
54  if ( sp->colvarsOn ) forceSendEnabled = 1;
55  if ( sp->IMDon ) forceSendEnabled = 1;
56  forceSendActive = 0;
57  fid.resize(0);
58  totalForce.resize(0);
59  gfcount = 0;
60  groupTotalForce.resize(0);
// NOTE(review): the bodies of both branches are elided in this extract
// (orig. lines 62 and 64) — presumably reduction-object setup; confirm.
61  if(Node::Object()->simParameters->CUDASOAintegrate) {
63  } else {
65  }
// Per-patch pointer tables indexed by patch id; entries stay null until
// doWork()/recvResults() open the patch boxes.
66  int numPatches = PatchMap::Object()->numPatches();
67  forcePtrs = new Force*[numPatches];
68  atomPtrs = new FullAtom*[numPatches];
69  for ( int i = 0; i < numPatches; ++i ) { forcePtrs[i] = 0; atomPtrs[i] = 0; }
70 
71  if (sp->CUDASOAintegrateMode) {
72  // Allocate memory for numPatches to access SOA data
73  mass_soa = new float*[numPatches];
74  pos_soa_x = new double*[numPatches];
75  pos_soa_y = new double*[numPatches];
76  pos_soa_z = new double*[numPatches];
77  force_soa_x = new double*[numPatches];
78  force_soa_y = new double*[numPatches];
79  force_soa_z = new double*[numPatches];
80  transform_soa_i = new int*[numPatches];
81  transform_soa_j = new int*[numPatches];
82  transform_soa_k = new int*[numPatches];
83  for ( int i = 0; i < numPatches; ++i ) {
84  mass_soa[i] = NULL;
85  pos_soa_x[i] = NULL;
86  pos_soa_y[i] = NULL;
87  pos_soa_z[i] = NULL;
88  force_soa_x[i] = NULL;
89  force_soa_y[i] = NULL;
90  force_soa_z[i] = NULL;
91  transform_soa_i[i] = NULL;
92  transform_soa_j[i] = NULL;
93  transform_soa_k[i] = NULL;
94  }
95  } else {
// Not in SOA mode: null the table pointers so the destructor's
// unconditional delete[] calls are safe no-ops.
96  mass_soa = NULL;
97  pos_soa_x = NULL;
98  pos_soa_y = NULL;
99  pos_soa_z = NULL;
100  force_soa_x = NULL;
101  force_soa_y = NULL;
102  force_soa_z = NULL;
103  transform_soa_i = NULL;
104  transform_soa_j = NULL;
105  transform_soa_k = NULL;
106  }
// Grid-force storage: one ForceList* array per patch, created lazily by
// configureGridObjects() and freed by deleteGridObjects().
107  gridForcesPtrs = new ForceList **[numPatches];
108  numGridObjects = numActiveGridObjects = 0;
109  for ( int i = 0; i < numPatches; ++i ) {
110  forcePtrs[i] = NULL; atomPtrs[i] = NULL;
111  gridForcesPtrs[i] = NULL;
112  }
113 }
114 
// Destructor body (signature line, orig. 115, is elided in this extract).
// Releases request flags, per-patch pointer tables, grid-force storage,
// the reduction handle, and the SOA pointer tables (null in non-SOA runs).
116 {
117  delete[] isRequested;
118  delete[] forcePtrs;
119  deleteGridObjects();
120  delete[] gridForcesPtrs;
121  delete[] atomPtrs;
122  delete reduction;
123 
// delete[] on a null pointer is a no-op, so these guards are redundant
// but harmless.
124  if(mass_soa) delete [] mass_soa;
125  if(pos_soa_x) delete [] pos_soa_x;
126  if(pos_soa_y) delete [] pos_soa_y;
127  if(pos_soa_z) delete [] pos_soa_z;
128  if(force_soa_x) delete [] force_soa_x;
129  if(force_soa_y) delete [] force_soa_y;
130  if(force_soa_z) delete [] force_soa_z;
131  if(transform_soa_i) delete [] transform_soa_i;
132  if(transform_soa_j) delete [] transform_soa_j;
133  if(transform_soa_k) delete [] transform_soa_k;
134 }
135 
136 void ComputeGlobal::configure(AtomIDList &newaid, AtomIDList &newgdef, IntList &newgridobjid) {
137  DebugM(4,"Receiving configuration (" << newaid.size() <<
138  " atoms, " << newgdef.size() << " atoms/groups and " <<
139  newgridobjid.size() << " grid objects) on client\n" << endi);
140 
141  AtomIDList::iterator a, a_e;
142 
143  if ( forceSendEnabled ) {
144  // clear previous data
145  int max = -1;
146  for (a=newaid.begin(),a_e=newaid.end(); a!=a_e; ++a) {
147  if ( *a > max ) max = *a;
148  }
149  for (a=newgdef.begin(),a_e=newgdef.end(); a!=a_e; ++a) {
150  if ( *a > max ) max = *a;
151  }
152  endRequested = max+1;
153  if ( endRequested > isRequestedAllocSize ) {
154  delete [] isRequested;
155  isRequestedAllocSize = endRequested+10;
156  isRequested = new char[isRequestedAllocSize];
157  memset(isRequested, 0, isRequestedAllocSize);
158  } else {
159  for (a=aid.begin(),a_e=aid.end(); a!=a_e; ++a) {
160  isRequested[*a] = 0;
161  }
162  for (a=gdef.begin(),a_e=gdef.end(); a!=a_e; ++a) {
163  if ( *a != -1 ) isRequested[*a] = 0;
164  }
165  }
166  // reserve space
167  gpair.resize(0);
168  gpair.resize(newgdef.size());
169  gpair.resize(0);
170  }
171 
172  // store data
173  aid.swap(newaid);
174  gdef.swap(newgdef);
175 
176  if (newgridobjid.size()) configureGridObjects(newgridobjid);
177 
178  if ( forceSendEnabled ) {
179  int newgcount = 0;
180  for (a=aid.begin(),a_e=aid.end(); a!=a_e; ++a) {
181  isRequested[*a] = 1;
182  }
183  for (a=gdef.begin(),a_e=gdef.end(); a!=a_e; ++a) {
184  if ( *a == -1 ) ++newgcount;
185  else {
186  isRequested[*a] |= 2;
187  gpair.add(intpair(*a,newgcount));
188  }
189  }
190  std::sort(gpair.begin(),gpair.end());
191  numGroupsRequested = newgcount;
192  }
193  DebugM(3,"Done configure on client\n");
194 }
195 
// Frees the per-patch, per-grid ForceList storage and resets the grid
// object counters. NOTE(review): the declaration of the patch iterator
// `ap` (orig. line 199, presumably over patchList) is elided in this extract.
196 void ComputeGlobal::deleteGridObjects()
197 {
198  if (numGridObjects == 0) return;
200  for (ap = ap.begin(); ap != ap.end(); ap++) {
201  ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
202  if (gridForces != NULL) {
203  for (size_t ig = 0; ig < numGridObjects; ig++) {
204  if (gridForces[ig] != NULL) {
205  delete gridForces[ig];
206  gridForces[ig] = NULL;
207  }
208  }
209  delete [] gridForces;
// Note: this nulls only the local copy; gridForcesPtrs[patchID] keeps the
// stale pointer until configureGridObjects() reassigns it.
210  gridForces = NULL;
211  }
212  }
213  numGridObjects = numActiveGridObjects = 0;
214 }
215 
// Records which gridforce grids were requested by the GlobalMasters
// (gridObjActive) and allocates per-patch ForceList storage for the active
// ones. NOTE(review): the patch iterator declaration (orig. line 245) is
// elided in this extract.
216 void ComputeGlobal::configureGridObjects(IntList &newgridobjid)
217 {
218  Molecule *mol = Node::Object()->molecule;
219 
// Drop any storage from a previous configuration first.
220  deleteGridObjects();
221 
222  numGridObjects = mol->numGridforceGrids;
223  numActiveGridObjects = 0;
224 
225  gridObjActive.resize(numGridObjects);
226  gridObjActive.setall(0);
227 
// Validate and activate each requested grid index.
228  IntList::const_iterator goid_i = newgridobjid.begin();
229  IntList::const_iterator goid_e = newgridobjid.end();
230  for ( ; goid_i != goid_e; goid_i++) {
231  if ((*goid_i < 0) || (*goid_i >= numGridObjects)) {
232  NAMD_bug("Requested illegal gridForceGrid index.");
233  } else {
234  DebugM(3,"Adding grid with index " << *goid_i << " to ComputeGlobal\n");
235  gridObjActive[*goid_i] = 1;
236  numActiveGridObjects++;
237  }
238  }
239 
240  for (size_t ig = 0; ig < numGridObjects; ig++) {
241  DebugM(3,"Grid index " << ig << " is active or inactive? "
242  << gridObjActive[ig] << "\n" << endi);
243  }
244 
// One ForceList per active grid per home patch; inactive slots stay null.
246  for (ap = ap.begin(); ap != ap.end(); ap++) {
247  gridForcesPtrs[ap->p->getPatchID()] = new ForceList *[numGridObjects];
248  ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
249  for (size_t ig = 0; ig < numGridObjects; ig++) {
250  if (gridObjActive[ig]) {
251  gridForces[ig] = new ForceList;
252  } else {
253  gridForces[ig] = NULL;
254  }
255  }
256  }
257 }
258 
// Legacy message entry point, compiled out via #if 0; configuration now
// arrives through recvResults()/configure() instead.
259 #if 0
260 void ComputeGlobal::recvConfig(ComputeGlobalConfigMsg *msg) {
261  DebugM(3,"Receiving configure on client\n");
262  configure(msg->aid,msg->gdef);
263  delete msg;
264  sendData();
265 }
266 #endif
267 
// recvResults body (signature line, orig. 268, is elided in this extract;
// the parameter is evidently a ComputeGlobalResultsMsg *msg). Applies the
// master's forces to the local patches (AOS path or CUDA-SOA path),
// accumulates the external force/virial reduction, handles grid-object
// forces, optionally reconfigures, and closes the patch boxes.
269  DebugM(3,"Receiving results (" << msg->aid.size() << " forces, "
270  << msg->newgdef.size() << " new group atoms) on client thread " << CthGetToken(CthSelf())->serialNo <<" msg->resendCoordinates " << msg->resendCoordinates << " msg->totalforces " << msg->totalforces<< "\n");
271 
272  forceSendActive = msg->totalforces;
273  if ( forceSendActive && ! forceSendEnabled ) NAMD_bug("ComputeGlobal::recvResults forceSendActive without forceSendEnabled");
274 
275  // set the forces only if we aren't going to resend the data
276  int setForces = !msg->resendCoordinates;
278  if(setForces) { // we are requested to
279  // Store forces to patches
280  AtomMap *atomMap = AtomMap::Object();
281  const Lattice & lattice = patchList[0].p->lattice;
283  Force **f = forcePtrs;
284  FullAtom **t = atomPtrs;
285  Force extForce = 0.;
286  Tensor extVirial;
287 
// Open each patch's force box and cache per-patch pointers (and, in SOA
// mode, pointers into the patch's SOA arrays).
288  for (ap = ap.begin(); ap != ap.end(); ap++) {
289  (*ap).r = (*ap).forceBox->open();
290  f[(*ap).patchID] = (*ap).r->f[Results::normal];
291  t[(*ap).patchID] = (*ap).p->getAtomList().begin();
292 
293  if (sp->CUDASOAintegrate) {
294  // Assigne the pointer to SOA data structure
295  PatchID pId = (*ap).patchID;
296  mass_soa[pId] = (*ap).p->patchDataSOA.mass;
297  force_soa_x[pId] = (*ap).p->patchDataSOA.f_global_x;
298  force_soa_y[pId] = (*ap).p->patchDataSOA.f_global_y;
299  force_soa_z[pId] = (*ap).p->patchDataSOA.f_global_z;
300  transform_soa_i[pId] = (*ap).p->patchDataSOA.transform_i;
301  transform_soa_j[pId] = (*ap).p->patchDataSOA.transform_j;
302  transform_soa_k[pId] = (*ap).p->patchDataSOA.transform_k;
303  }
304  }
305 
306 
307  AtomIDList::iterator a = msg->aid.begin();
308  AtomIDList::iterator a_e = msg->aid.end();
309  ForceList::iterator f2 = msg->f.begin();
// NOTE(review): orig. line 310 is elided here. As visible, this loop walks
// a/f2 all the way to the end while scaling only a local copy (f_atom is
// discarded), which would leave the per-atom loops below with nothing to
// iterate — confirm against the full source whether line 310 resets or
// guards these iterators.
311  for ( ; a != a_e; ++a, ++f2 ) {
312  Force f_atom;
313  f_atom = (*f2);
314  f_atom.x*=(float) sp->globalMasterFrequency;
315  f_atom.y*=(float) sp->globalMasterFrequency;
316  f_atom.z*=(float) sp->globalMasterFrequency;
317  }
// Per-atom forces: SOA path adds into f_global_* arrays; AOS path adds
// into the Results::normal force array. Virial accumulation is skipped
// here when USE_GLOBALMASTER_VIRIAL_KERNEL is defined (see orig. line 32).
318  if (sp->CUDASOAintegrate) {
319  LocalID localID;
320  PatchID lpid;
321  int lidx;
322  Position x_orig, x_atom;
323  Transform trans;
324  Force f_atom;
325  for ( ; a != a_e; ++a, ++f2 ) {
326  DebugM(1,"processing atom "<<(*a)<<", F="<<(*f2)<<"...\n");
327  /* XXX if (*a) is out of bounds here we get a segfault */
328  localID = atomMap->localID(*a);
329  lpid = localID.pid;
330  lidx = localID.index;
331  if ( lpid == notUsed || ! f[lpid] ) continue;
332  f_atom = (*f2);
333  // printf("NAMD3-recv: atom %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
334  // *a, force_soa_x[lpid][lidx], force_soa_y[lpid][lidx], force_soa_z[lpid][lidx]);
335  // printf("NAMD3-recv: atom %d, Added Force (%8.6f, %8.6f, %8.6f) \n", *a, f_atom.x, f_atom.y, f_atom.z);
336  force_soa_x[lpid][lidx] += f_atom.x;
337  force_soa_y[lpid][lidx] += f_atom.y;
338  force_soa_z[lpid][lidx] += f_atom.z;
339 #ifndef USE_GLOBALMASTER_VIRIAL_KERNEL
340  x_orig.x = pos_soa_x[lpid][lidx];
341  x_orig.y = pos_soa_y[lpid][lidx];
342  x_orig.z = pos_soa_z[lpid][lidx];
343  trans.i = transform_soa_i[lpid][lidx];
344  trans.j = transform_soa_j[lpid][lidx];
345  trans.k = transform_soa_k[lpid][lidx];
346  x_atom = lattice.reverse_transform(x_orig,trans);
347  extForce += f_atom;
348  extVirial += outer(f_atom,x_atom);
349 #endif
350  }
351  } else {
352  for ( ; a != a_e; ++a, ++f2 ) {
353  DebugM(1,"processing atom "<<(*a)<<", F="<<(*f2)<<"...\n");
354  /* XXX if (*a) is out of bounds here we get a segfault */
355  LocalID localID = atomMap->localID(*a);
356  if ( localID.pid == notUsed || ! f[localID.pid] ) continue;
357  Force f_atom = (*f2);
358  // printf("NAMD3-recv: atom %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
359  // *a, f[localID.pid][localID.index].x, f[localID.pid][localID.index].y, f[localID.pid][localID.index].z);
360  // printf("NAMD3-recv: atom %d, Added Force (%8.6f, %8.6f, %8.6f) \n", *a, f_atom.x, f_atom.y, f_atom.z);
361  f[localID.pid][localID.index] += f_atom;
362  FullAtom &atom = t[localID.pid][localID.index];
363  Position x_orig = atom.position;
364  Transform trans = atom.transform;
365  Position x_atom = lattice.reverse_transform(x_orig,trans);
366  extForce += f_atom;
367  extVirial += outer(f_atom,x_atom);
368  }
369  }
370  DebugM(1,"done with the loop\n");
371 
372  // calculate forces for atoms in groups
// gdef is a flattened list of groups, each run terminated by -1; gforce
// holds one acceleration vector per group, converted to per-atom force
// by multiplying with each member atom's mass.
373  AtomIDList::iterator g_i, g_e;
374  g_i = gdef.begin(); g_e = gdef.end();
375  ForceList::iterator gf_i = msg->gforce.begin();
376  //iout << iDEBUG << "recvResults\n" << endi;
377  if (sp->CUDASOAintegrate) {
378  LocalID localID;
379  PatchID lpid;
380  int lidx;
381  Position x_orig, x_atom;
382  Transform trans;
383  Force f_atom;
384  for ( ; g_i != g_e; ++g_i, ++gf_i ) {
385  //iout << iDEBUG << *gf_i << '\n' << endi;
386  Vector accel = (*gf_i);
387  for ( ; *g_i != -1; ++g_i ) {
388  //iout << iDEBUG << *g_i << '\n' << endi;
389  localID = atomMap->localID(*g_i);
390  lpid = localID.pid;
391  lidx = localID.index;
392  if ( lpid == notUsed || ! f[lpid] ) continue;
393  f_atom = accel * mass_soa[lpid][lidx];
394 #if 0
395  if (*g_i < 20) {
396  CkPrintf("NAMD3-recv: group %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
397  *g_i, force_soa_x[lpid][lidx], force_soa_y[lpid][lidx], force_soa_z[lpid][lidx]);
398  CkPrintf("NAMD3-recv: group %d, Added Force (%8.6f, %8.6f, %8.6f) \n", *g_i, f_atom.x, f_atom.y, f_atom.z);
399  }
400 #endif
401  force_soa_x[lpid][lidx] += f_atom.x;
402  force_soa_y[lpid][lidx] += f_atom.y;
403  force_soa_z[lpid][lidx] += f_atom.z;
404 #ifndef USE_GLOBALMASTER_VIRIAL_KERNEL
405  x_orig.x = pos_soa_x[lpid][lidx];
406  x_orig.y = pos_soa_y[lpid][lidx];
407  x_orig.z = pos_soa_z[lpid][lidx];
408  trans.i = transform_soa_i[lpid][lidx];
409  trans.j = transform_soa_j[lpid][lidx];
410  trans.k = transform_soa_k[lpid][lidx];
411  x_atom = lattice.reverse_transform(x_orig,trans);
412  extForce += f_atom;
413  extVirial += outer(f_atom,x_atom);
414 #endif
415  }
416  }
417  } else {
418  for ( ; g_i != g_e; ++g_i, ++gf_i ) {
419  //iout << iDEBUG << *gf_i << '\n' << endi;
420  Vector accel = (*gf_i);
421  for ( ; *g_i != -1; ++g_i ) {
422  //iout << iDEBUG << *g_i << '\n' << endi;
423  LocalID localID = atomMap->localID(*g_i);
424  if ( localID.pid == notUsed || ! f[localID.pid] ) continue;
425  FullAtom &atom = t[localID.pid][localID.index];
426  Force f_atom = accel * atom.mass;
427 #if 0
428  if (*g_i < 20) {
429  CkPrintf("NAMD2-recv: group %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
430  *g_i, f[localID.pid][localID.index].x, f[localID.pid][localID.index].y, f[localID.pid][localID.index].z);
431  CkPrintf("NAMD2-recv: group %d, Added Force (%8.6f, %8.6f, %8.6f) \n", *g_i, f_atom.x, f_atom.y, f_atom.z);
432  }
433 #endif
434  f[localID.pid][localID.index] += f_atom;
435 
436  Position x_orig = atom.position;
437  Transform trans = atom.transform;
438  Position x_atom = lattice.reverse_transform(x_orig,trans);
439  extForce += f_atom;
440  extVirial += outer(f_atom,x_atom);
441  }
442  }
443  }
444  DebugM(1,"done with the groups\n");
445 
446  if (numActiveGridObjects > 0) {
447  applyGridObjectForces(msg, &extForce, &extVirial);
448  }
449  // printf("Finish receiving at step: %d ####################################################\n",
450  // patchList[0].p->flags.step);
451 
// Submit the accumulated external force/virial to the reduction system.
452  ADD_VECTOR_OBJECT(reduction,REDUCTION_EXT_FORCE_NORMAL,extForce);
453  ADD_TENSOR_OBJECT(reduction,REDUCTION_VIRIAL_NORMAL,extVirial);
454  reduction->submit();
455  }
456  // done setting the forces, close boxes below
457 
458  // Get reconfiguration if present
459  if ( msg->reconfig ) {
460  DebugM(3,"Reconfiguring\n");
461  configure(msg->newaid, msg->newgdef, msg->newgridobjid);
462  }
463 
464  // send another round of data if requested
465 
466  if(msg->resendCoordinates) {
467  DebugM(3,"Sending requested data right away\n");
468  // CkPrintf("*** Resending data on PE %d \n", CkMyPe());
469  sendData();
470  }
471 
// Reset per-group total-force accumulators for the next cycle.
472  groupTotalForce.resize(numGroupsRequested);
473  for ( int i=0; i<numGroupsRequested; ++i ) groupTotalForce[i] = 0;
474  DebugM(3,"resized\n");
475  if(setForces) {
476  DebugM(3,"setting forces\n");
478  Force **f = forcePtrs;
479  FullAtom **t = atomPtrs;
// Close the patch boxes (non-SOA path) and clear the cached pointers.
480  for (ap = ap.begin(); ap != ap.end(); ap++) {
481  CompAtom *x;
482  PatchID pId = (*ap).patchID;
483  if (!sp->CUDASOAintegrate) {
484  (*ap).positionBox->close(&x);
485  (*ap).forceBox->close(&((*ap).r));
486  DebugM(1,"closing boxes\n");
487  }
488  f[pId] = 0;
489  t[pId] = 0;
490  if (sp->CUDASOAintegrate) {
491  // XXX Possibly code below is needed by SOAintegrate mode
492  mass_soa[pId] = NULL;
493  pos_soa_x[pId] = NULL;
494  pos_soa_y[pId] = NULL;
495  pos_soa_z[pId] = NULL;
496  force_soa_x[pId] = NULL;
497  force_soa_y[pId] = NULL;
498  force_soa_z[pId] = NULL;
499  transform_soa_i[pId] = NULL;
500  transform_soa_j[pId] = NULL;
501  transform_soa_k[pId] = NULL;
502  DebugM(2,"nulling ptrs\n");
503  }
504  }
505  DebugM(3,"done setting forces\n");
506  }
507 
// Message ownership: in the nodegroup/CUDA path PE 0 frees the message in
// sendComputeGlobalResults, so only delete it here otherwise.
508  #ifdef NODEGROUP_FORCE_REGISTER
509  if (!sp->CUDASOAintegrate) {
510  // CUDASOAintegrate handles this on PE 0 in sendComputeGlobalResults
511  delete msg;
512  }
513  #else
514  delete msg;
515  #endif
516  DebugM(3,"Done processing results\n");
517 }
518 
// doWork body (signature line, orig. 519, is elided in this extract).
// Runs once per step on each client: opens the position boxes, caches
// per-patch pointers (SOA pointers in CUDA mode), and either sends the
// requested data to the master or, on the very first call, sends only the
// lattice so the master can bootstrap. Steps that are not a multiple of
// globalMasterFrequency are skipped.
520 {
521  DebugM(2,"doWork thread " << CthGetToken(CthSelf())->serialNo << "\n");
522 
// NOTE(review): orig. lines 523-524 are elided here — presumably the
// SimParameters pointer and the patch iterator declaration; confirm.
525  FullAtom **t = atomPtrs;
526  int step = patchList[0].p->flags.step;
527  if((step % sp->globalMasterFrequency) ==0)
528  {
529  DebugM(3,"doWork for step " << step <<"\n"<<endi);
530  // if(sp->CUDASOAintegrateOn) {
531  // hasPatchZero = 0;
532  // }
533 
534  for (ap = ap.begin(); ap != ap.end(); ap++) {
535  CompAtom *x = (*ap).positionBox->open();
536  t[(*ap).patchID] = (*ap).p->getAtomList().begin();
537 
538  if (sp->CUDASOAintegrate) {
539  // Assigne the pointer to SOA data structure
540  PatchID pId = (*ap).patchID;
541  mass_soa[pId] = (*ap).p->patchDataSOA.mass;
542  pos_soa_x[pId] = (*ap).p->patchDataSOA.pos_x;
543  pos_soa_y[pId] = (*ap).p->patchDataSOA.pos_y;
544  pos_soa_z[pId] = (*ap).p->patchDataSOA.pos_z;
545  transform_soa_i[pId] = (*ap).p->patchDataSOA.transform_i;
546  transform_soa_j[pId] = (*ap).p->patchDataSOA.transform_j;
547  transform_soa_k[pId] = (*ap).p->patchDataSOA.transform_k;
548  // if(sp->CUDASOAintegrateOn && (pId == 0)) {
549  // hasPatchZero = 1;
550  // }
551  }
552  }
553 
554  if(!firsttime) {
555  // CkPrintf("*** Start NoFirstTime on PE %d \n", CkMyPe());
556  sendData();
557  // CkPrintf("*** End NoFirstTime on PE %d \n", CkMyPe());
558  } else {
559  // CkPrintf("*** Start FirstTime on PE %d \n", CkMyPe());
// First call: the PE owning patch zero sends a minimal message carrying
// only the lattice (step = -1). NOTE(review): the msg allocation line
// (orig. 561) is elided in this extract.
560  if ( hasPatchZero ) {
562  msg->lat.add(patchList[0].p->lattice);
563  msg->step = -1;
564  msg->count = 1;
565  msg->patchcount = 0;
566  // CkPrintf("***DoWork calling sendComputeGlobalData PE %d \n", CkMyPe());
567  comm->sendComputeGlobalData(msg);
568  }
569 #ifdef NODEGROUP_FORCE_REGISTER
// Other PEs in CUDA-SOA mode must still enter the same suspend barriers
// that sendComputeGlobalData would have hit, then pick up the results.
570  else if (sp->CUDASOAintegrate) {
571 
572  // CkPrintf("***DoWork FirstTime barrier 1 on PE %d \n", CkMyPe());
573  comm->stowSuspendULT();
574  // CmiNodeBarrier();
575  // CkPrintf("***DoWork FirstTime barrier 2 on PE %d \n", CkMyPe());
576  comm->stowSuspendULT();
577  // CkPrintf("***DoWork out of barrier 2 on PE %d \n", CkMyPe());
578  // CmiNodeBarrier();
579  ComputeGlobalResultsMsg* resultsMsg = CkpvAccess(ComputeGlobalResultsMsg_instance);
580  // CkPrintf("*** ComputeGlobal::doWork PE (%d) calling recvComputeGlobalResults in doWork at step: %d \n",CkMyPe(), patchList[0].p->flags.step);
581  comm->recvComputeGlobalResults(resultsMsg);
582  }
583 #endif // NODEGROUP_FORCE_REGISTER
584  firsttime = 0;
585  // CkPrintf("*** ComputeGlobal::doWork PE (%d) calling enableComputeGlobalResults in doWork at step: %d \n",CkMyPe(), patchList[0].p->flags.step);
587 
588  // CkPrintf("*** End FirstTime on PE %d \n", CkMyPe());
589  }
590  }
591  else
592  {
593  DebugM(2,"skipping step "<< step <<"\n"<<endi);
594  /* TODO to support CPU side MTS we need to do something to avoid hang some distillation from sendData(); and the reductions
595  ADD_VECTOR_OBJECT(reduction,REDUCTION_EXT_FORCE_NORMAL,extForce);
596  ADD_TENSOR_OBJECT(reduction,REDUCTION_VIRIAL_NORMAL,extVirial);
597  reduction->submit();
598  and as yet undetermined message handling
599  */
600  }
601  DebugM(2,"done with doWork\n");
602 }
603 
// Collects the data requested by the master — positions of requested atoms,
// centers of mass of requested groups, grid-object values, and any saved
// total forces — into a ComputeGlobalDataMsg and sends it. Handles both the
// AOS path and the CUDA-SOA path.
604 void ComputeGlobal::sendData()
605 {
606  DebugM(2,"sendData\n");
607  // Get positions from patches
608  AtomMap *atomMap = AtomMap::Object();
609  const Lattice & lattice = patchList[0].p->lattice;
// NOTE(review): orig. lines 610 and 613-614 are elided in this extract —
// presumably the patch iterator declaration and the allocation of `msg`
// (ComputeGlobalDataMsg); confirm against the full source.
611  FullAtom **t = atomPtrs;
612 
615 
616  msg->step = patchList[0].p->flags.step;
617  msg->count = 0;
618  msg->patchcount = 0;
619 
620  // CkPrintf("*** PE (%d) Start sending at step: %d \n",
621  // CkMyPe(), patchList[0].p->flags.step);
// Requested individual atoms: add id + reverse-transformed position.
622  AtomIDList::iterator a = aid.begin();
623  AtomIDList::iterator a_e = aid.end();
624  NAMD_EVENT_START(1, NamdProfileEvent::GM_MSGPADD);
625  if (sp->CUDASOAintegrate) {
626  LocalID localID;
627  PatchID lpid;
628  int lidx;
629  Position x_orig;
630  Transform trans;
631 
632  for ( ; a != a_e; ++a ) {
633  localID = atomMap->localID(*a);
634  lpid = localID.pid;
635  lidx = localID.index;
636  if ( lpid == notUsed || ! t[lpid] ) continue;
637  msg->aid.add(*a);
638  msg->count++;
639  x_orig.x = pos_soa_x[lpid][lidx];
640  x_orig.y = pos_soa_y[lpid][lidx];
641  x_orig.z = pos_soa_z[lpid][lidx];
642  trans.i = transform_soa_i[lpid][lidx];
643  trans.j = transform_soa_j[lpid][lidx];
644  trans.k = transform_soa_k[lpid][lidx];
645  msg->p.add(lattice.reverse_transform(x_orig,trans));
646  // printf("NAMD3-send: step %d atom %d, POS (%8.6f, %8.6f, %8.6f) \n", patchList[0].p->flags.step, *a, x_orig.x, x_orig.y, x_orig.z);
647  }
648  } else {
649  for ( ; a != a_e; ++a ) {
650  LocalID localID = atomMap->localID(*a);
651  if ( localID.pid == notUsed || ! t[localID.pid] ) continue;
652  msg->aid.add(*a);
653  msg->count++;
654  FullAtom &atom = t[localID.pid][localID.index];
655  Position x_orig = atom.position;
656  Transform trans = atom.transform;
657  msg->p.add(lattice.reverse_transform(x_orig,trans));
658  // printf("NAMD2-send: step %d atom %d, POS (%8.6f, %8.6f, %8.6f) \n", patchList[0].p->flags.step, *a, x_orig.x, x_orig.y, x_orig.z);
659  }
660  }
661  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_MSGPADD);
662  NAMD_EVENT_START(1, NamdProfileEvent::GM_GCOM);
663  // calculate group centers of mass
// Each group is a -1-terminated run in gdef; only mass-weighted partial
// sums over locally-owned atoms are sent (the master completes the COM).
664  AtomIDList::iterator g_i, g_e;
665  g_i = gdef.begin(); g_e = gdef.end();
666  if (sp->CUDASOAintegrate) {
667  LocalID localID;
668  PatchID lpid;
669  int lidx;
670  Position x_orig;
671  Transform trans;
672  for ( ; g_i != g_e; ++g_i ) {
673  Vector com(0,0,0);
674  BigReal mass = 0.;
675  for ( ; *g_i != -1; ++g_i ) {
676  localID = atomMap->localID(*g_i);
677  lpid = localID.pid;
678  lidx = localID.index;
679  if ( lpid == notUsed || ! t[lpid] ) continue;
680  msg->count++;
681  x_orig.x = pos_soa_x[lpid][lidx];
682  x_orig.y = pos_soa_y[lpid][lidx];
683  x_orig.z = pos_soa_z[lpid][lidx];
684  trans.i = transform_soa_i[lpid][lidx];
685  trans.j = transform_soa_j[lpid][lidx];
686  trans.k = transform_soa_k[lpid][lidx];
687  com += lattice.reverse_transform(x_orig,trans) * mass_soa[lpid][lidx];
688  mass += mass_soa[lpid][lidx];
689 #if 0
690  if (*g_i < 20) {
691  printf("NAMD3-send: step %d atom %d, POS (%8.6f, %8.6f, %8.6f) \n", patchList[0].p->flags.step, *g_i, x_orig.x, x_orig.y, x_orig.z);
692  }
693 #endif
694 
695  }
696  // CkPrintf("*** NAMD3-send (%d): step %d group %d, COM (%8.6f, %8.6f, %8.6f) \n",
697  // CkMyPe(), patchList[0].p->flags.step, *g_i, com.x, com.y, com.z);
698  DebugM(1,"Adding center of mass "<<com<<"\n");
699  NAMD_EVENT_START(1, NamdProfileEvent::GM_GCOMADD);
700  msg->gcom.add(com);
701  msg->gmass.add(mass);
702  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_GCOMADD);
703  }
704  } else {
705  for ( ; g_i != g_e; ++g_i ) {
706  Vector com(0,0,0);
707  BigReal mass = 0.;
708  for ( ; *g_i != -1; ++g_i ) {
709  LocalID localID = atomMap->localID(*g_i);
710  if ( localID.pid == notUsed || ! t[localID.pid] ) continue;
711  msg->count++;
712  FullAtom &atom = t[localID.pid][localID.index];
713  Position x_orig = atom.position;
714  Transform trans = atom.transform;
715  com += lattice.reverse_transform(x_orig,trans) * atom.mass;
716  mass += atom.mass;
717 #if 0
718  if (*g_i < 20) {
719  printf("NAMD2-send: step %d atom %d, POS (%8.6f, %8.6f, %8.6f) \n", patchList[0].p->flags.step, *g_i, x_orig.x, x_orig.y, x_orig.z);
720  }
721 #endif
722  }
723  // CkPrintf("*** NAMD2-send (%d): step %d group %d, COM (%8.6f, %8.6f, %8.6f) \n",
724  // CkMyPe(), patchList[0].p->flags.step, *g_i, com.x, com.y, com.z);
725 
726 
727  DebugM(1,"Adding center of mass "<<com<<"\n");
728  NAMD_EVENT_START(1, NamdProfileEvent::GM_GCOMADD);
729  msg->gcom.add(com);
730  msg->gmass.add(mass);
731  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_GCOMADD);
732  }
733  }
734  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_GCOM);
735 // printf("Finish sending at step: %d ####################################################\n",
736 // patchList[0].p->flags.step);
737 
738  if (numActiveGridObjects > 0) {
739  if(!sp->CUDASOAintegrate) // SequencerCUDA handles this
740  computeGridObjects(msg);
741  }
742 
// Move the saved total forces (from saveTotalForces) into the message and
// clear the local accumulators for the next cycle.
743  msg->fid.swap(fid);
744  msg->tf.swap(totalForce);
745  fid.resize(0);
746  totalForce.resize(0);
747 
748  if ( gfcount ) msg->gtf.swap(groupTotalForce);
749  msg->count += ( msg->fid.size() + gfcount );
750  gfcount = 0;
751 
752  DebugM(3,"Sending data (" << msg->p.size() << " positions, "
753  << msg->gcom.size() << " groups, " << msg->gridobjvalue.size()
754  << " grid objects) on client\n");
755  if ( hasPatchZero ) { msg->count++; msg->lat.add(lattice); }
756  if ( msg->count || msg->patchcount )
757  {
758  // CkPrintf("*** ComputeGlobal::sendData PE (%d) calling sendComputeGlobalData step: %d msg->count %d msg->patchcount %d\n", CkMyPe(), patchList[0].p->flags.step,msg->count, msg->patchcount);
759  comm->sendComputeGlobalData(msg);
760  }
761  else
762  {
763  // CkPrintf("*** ComputeGlobal::sendData PE (%d) skipping sendComputeGlobalData step: %d msg->count %d msg->patchcount %d\n", CkMyPe(), patchList[0].p->flags.step,msg->count, msg->patchcount);
764  // comm->sendComputeGlobalData(msg);
765 #ifdef NODEGROUP_FORCE_REGISTER
766  // this PE doesn't have message work to do
768  if (sp->CUDASOAintegrate) {
769  // we need to enter the barriers normally hit in sendComputeGlobalData
770  // CkPrintf("*** ComputeGlobal::sendData PE (%d) about to double stow\n");
771  comm->stowSuspendULT();
772  comm->stowSuspendULT();
773  // and the one in sendComputeGlobalResults
774  // comm->stowSuspendULT();
775  }
776 #endif
777  delete msg;
778  }
// NOTE(review): orig. lines 767 and 781 are elided in this extract
// (line 781 is presumably the enableComputeGlobalResults call referenced
// by the CkPrintf comment below); confirm against the full source.
779  NAMD_EVENT_START(1, NamdProfileEvent::GM_GRESULTS);
780  // CkPrintf("*** ComputeGlobal::sendData PE (%d) calling enableComputeGlobalResults in sendData at step: %d \n", CkMyPe(), patchList[0].p->flags.step);
782  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_GRESULTS);
783 }
784 
// Evaluates one gridforce grid over a range of atoms: stores the per-atom
// grid force (gradient, sign-flipped) into the gfii range and accumulates
// the grid-object energy into gridObjValue. NOTE(review): the parameter
// line declaring `aei` (orig. line 786, the end iterator used below) is
// elided in this extract.
785 template<class T> void ComputeGlobal::computeGridForceGrid(FullAtomList::iterator aii,
787  ForceList::iterator gfii,
788  Lattice const &lattice,
789  int gridIndex,
790  T *grid,
791  BigReal &gridObjValue)
792 {
793  ForceList::iterator gfi = gfii;
794  FullAtomList::iterator ai = aii;
795  FullAtomList::iterator ae = aei;
796  Molecule *mol = Node::Object()->molecule;
797  for ( ; ai != ae; ai++, gfi++) {
// Default to zero force; overwritten below only for gridforced atoms.
798  *gfi = Vector(0.0, 0.0, 0.0);
799  if (! mol->is_atom_gridforced(ai->id, gridIndex)) {
800  continue;
801  }
802  Real scale;
803  Charge charge;
804  Vector dV;
805  float V;
806  mol->get_gridfrc_params(scale, charge, ai->id, gridIndex);
807  Position pos = grid->wrap_position(ai->position, lattice);
808  DebugM(1, "id = " << ai->id << ", scale = " << scale
809  << ", charge = " << charge << ", position = " << pos << "\n");
810  if (grid->compute_VdV(pos, V, dV)) {
811  // out-of-bounds atom
812  continue;
813  }
814  // ignore global gfScale
815  *gfi = -charge * scale * dV;
816  gridObjValue += charge * scale * V;
817  DebugM(1, "id = " << ai->id << ", force = " << *gfi << "\n");
818  }
819  DebugM(3, "gridObjValue = " << gridObjValue << "\n" << endi);
820 }
821 
// Computes the value of every active grid object over all home patches and
// fills msg->gridobjindex/gridobjvalue; per-atom grid forces are cached in
// gridForcesPtrs for later use by applyGridObjectForces(). NOTE(review):
// the patch iterator declaration (orig. line 840) is elided in this extract.
822 void ComputeGlobal::computeGridObjects(ComputeGlobalDataMsg *msg)
823 {
824  DebugM(3,"computeGridObjects\n" << endi);
825  Molecule *mol = Node::Object()->molecule;
826  const Lattice &lattice = patchList[0].p->lattice;
827 
828  if (mol->numGridforceGrids < 1) {
829  NAMD_bug("No grids loaded in memory but ComputeGlobal has been requested to use them.");
830  }
831 
832  msg->gridobjindex.resize(numActiveGridObjects);
833  msg->gridobjindex.setall(-1);
834  msg->gridobjvalue.resize(numActiveGridObjects);
835  msg->gridobjvalue.setall(0.0);
836 
837  size_t ig = 0, gridobjcount = 0;
838 
839  // loop over home patches
841  for (ap = ap.begin(); ap != ap.end(); ap++) {
842 
843  msg->patchcount++;
844 
845  int const numAtoms = ap->p->getNumAtoms();
846  ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
847 
// gridobjcount indexes only the active grids, restarting per patch so the
// same msg->gridobjvalue slot accumulates across patches.
848  gridobjcount = 0;
849  for (ig = 0; ig < numGridObjects; ig++) {
850 
851  DebugM(2,"Processing grid index " << ig << "\n" << endi);
852 
853  // Only process here objects requested by the GlobalMasters
854  if (!gridObjActive[ig]) {
855  DebugM(2,"Skipping grid index " << ig << "; it is handled by "
856  "ComputeGridForce\n" << endi);
857  continue;
858  }
859 
860  ForceList *gridForcesGrid = gridForces[ig];
861  gridForcesGrid->resize(numAtoms);
862 
863  ForceList::iterator gfi = gridForcesGrid->begin();
864  FullAtomList::iterator ai = ap->p->getAtomList().begin();
865  FullAtomList::iterator ae = ap->p->getAtomList().end();
866 
867  DebugM(2, "computeGridObjects(): patch = " << ap->p->getPatchID()
868  << ", grid index = " << ig << "\n" << endi);
869  GridforceGrid *grid = mol->get_gridfrc_grid(ig);
870 
871  msg->gridobjindex[gridobjcount] = ig;
872  BigReal &gridobjvalue = msg->gridobjvalue[gridobjcount];
873 
874  computeGridForceGrid(ai, ae, gfi, ap->p->lattice, ig, grid, gridobjvalue);
875 
876  gridobjcount++;
877  }
878  }
879 
880  for (gridobjcount = 0; gridobjcount < numActiveGridObjects; gridobjcount++) {
881  DebugM(3, "Total gridObjValue[" << msg->gridobjindex[gridobjcount]
882  << "] = " << msg->gridobjvalue[gridobjcount] << "\n");
883  }
884 
885  DebugM(2,"computeGridObjects done\n");
886 }
887 
// Applies the master-supplied scalar force for each grid object to the
// atoms, using the per-atom gradients cached by computeGridObjects(), and
// accumulates the external force/virial into the caller's accumulators.
// NOTE(review): the patch iterator declaration (orig. line 918) is elided
// in this extract.
888 void ComputeGlobal::applyGridObjectForces(ComputeGlobalResultsMsg *msg,
889  Force *extForce_in,
890  Tensor *extVirial_in)
891 {
892  if (msg->gridobjforce.size() == 0) return;
893 
894  if (msg->gridobjforce.size() != numActiveGridObjects) {
895  NAMD_bug("ComputeGlobal received a different number of grid forces than active grids.");
896  }
897 
898  Molecule *mol = Node::Object()->molecule;
899  const Lattice &lattice = patchList[0].p->lattice;
900  AtomMap *atomMap = AtomMap::Object();
901  Force &extForce = *extForce_in;
902  Tensor &extVirial = *extVirial_in;
903 
904  // map applied forces from the message
// The message carries values only for active grids; scatter them into a
// full-size array indexed by grid id, zeros for inactive grids.
905  BigRealList gridObjForces;
906  gridObjForces.resize(numGridObjects);
907  gridObjForces.setall(0.0);
908  BigRealList::iterator gridobjforce_i = msg->gridobjforce.begin();
909  BigRealList::iterator gridobjforce_e = msg->gridobjforce.end();
910  int ig;
911  for (ig = 0; gridobjforce_i != gridobjforce_e ;
912  gridobjforce_i++, ig++) {
913  if (!gridObjActive[ig]) continue;
914  gridObjForces[ig] = *gridobjforce_i;
915  }
916 
917  // loop over home patches
919  for (ap = ap.begin(); ap != ap.end(); ap++) {
920 
921  ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
922 
923  for (ig = 0; ig < numGridObjects; ig++) {
924 
925  if (!gridObjActive[ig]) continue;
926 
927  DebugM(2, "gof = " << gridObjForces[ig] << "\n" << endi);
928 
929  ForceList *gridForcesGrid = gridForces[ig];
930 
931  FullAtomList::iterator ai = ap->p->getAtomList().begin();
932  FullAtomList::iterator ae = ap->p->getAtomList().end();
933  Force *f = ap->r->f[Results::normal];
934  ForceList::iterator gfi = gridForcesGrid->begin();
935 
936  for ( ; ai != ae; ai++, gfi++) {
937  if (! mol->is_atom_gridforced(ai->id, ig)) {
938  *gfi = Vector(0.0, 0.0, 0.0);
939  continue;
940  }
941  LocalID localID = atomMap->localID(ai->id);
942  // forces were stored; flipping sign to get gradients
943  Vector const gridforceatom(-1.0 * (*gfi) * gridObjForces[ig]);
944  DebugM(2, "id = " << ai->id
945  << ", pid = " << localID.pid
946  << ", index = " << localID.index
947  << ", force = " << gridforceatom << "\n" << endi);
948  f[localID.index] += gridforceatom;
949  extForce += gridforceatom;
950  Position x_orig = ai->position;
951  Transform transform = ai->transform;
952  Position x_virial = lattice.reverse_transform(x_orig, transform);
953  extVirial += outer(gridforceatom, x_virial);
954  }
955  }
956  }
957  // extForce and extVirial are being communicated by calling function
958 }
959 
960 // This function is called by each HomePatch after force
961 // evaluation. It stores the indices and forces of the requested
962 // atoms here, to be sent to GlobalMasterServer during the next
963 // time step. The total force is the sum of three components:
964 // "normal", "nbond" and "slow", the latter two may be calculated
965 // less frequently, so their most recent values are stored in
966 // "f_saved" and used here. If we don't do full electrostatics,
967 // there's no "slow" part.
969 {
970  if ( ! forceSendEnabled ) NAMD_bug("ComputeGlobal::saveTotalForces called unexpectedly");
971  if ( ! forceSendActive ) return;
972 
974  if ( simParms->accelMDOn && simParms->accelMDDebugOn && simParms->accelMDdihe ) {
975  int num=homePatch->numAtoms;
976  FullAtomList &atoms = homePatch->atom;
977  ForceList &af=homePatch->f[Results::amdf];
978 
979  for (int i=0; i<num; ++i) {
980  int index = atoms[i].id;
981  if (index < endRequested && isRequested[index] & 1) {
982  fid.add(index);
983  totalForce.add(af[i]);
984  }
985  }
986  return;
987  }
988 
989  // printf("Start saving force at step: %d ####################################################\n",
990  // patchList[0].p->flags.step);
991  int fixedAtomsOn = simParms->fixedAtomsOn;
992  int num=homePatch->numAtoms;
993  FullAtomList &atoms = homePatch->atom;
994  ForceList &f1=homePatch->f[Results::normal], &f2=homePatch->f_saved[Results::nbond],
995  &f3=homePatch->f_saved[Results::slow];
996 
997  double *f1_soa_x = homePatch->patchDataSOA.f_normal_x;
998  double *f1_soa_y = homePatch->patchDataSOA.f_normal_y;
999  double *f1_soa_z = homePatch->patchDataSOA.f_normal_z;
1000  double *f2_soa_x = homePatch->patchDataSOA.f_saved_nbond_x;
1001  double *f2_soa_y = homePatch->patchDataSOA.f_saved_nbond_y;
1002  double *f2_soa_z = homePatch->patchDataSOA.f_saved_nbond_z;
1003  double *f3_soa_x = homePatch->patchDataSOA.f_saved_slow_x;
1004  double *f3_soa_y = homePatch->patchDataSOA.f_saved_slow_y;
1005  double *f3_soa_z = homePatch->patchDataSOA.f_saved_slow_z;
1006  int hasSOA = (simParms->CUDASOAintegrate);
1007  Force f_sum;
1008  double f_sum_x, f_sum_y, f_sum_z;
1009 
1010  #if 0
1011  for (int i=0; i<num; ++i) {
1012  int index = atoms[i].id;
1013  if (index < 20) {
1014  if (hasSOA) {
1015  CkPrintf("ForceSaved: atom %d, ForceN (%8.6f, %8.6f, %8.6f) \n", index, f1_soa_x[i], f1_soa_y[i], f1_soa_z[i]);
1016  CkPrintf(" atom %d, ForceNB (%8.6f, %8.6f, %8.6f) \n", index, f2_soa_x[i], f2_soa_y[i], f2_soa_z[i]);
1017  CkPrintf(" atom %d, ForceSL (%8.6f, %8.6f, %8.6f) \n", index, f3_soa_x[i], f3_soa_y[i], f3_soa_z[i]);
1018  } else {
1019  CkPrintf("ForceSaved: atom %d, ForceN (%8.6f, %8.6f, %8.6f) \n", index, f1[i].x, f1[i].y, f1[i].z);
1020  CkPrintf(" atom %d, ForceNB (%8.6f, %8.6f, %8.6f) \n", index, f2[i].x, f2[i].y, f2[i].z);
1021  // not memory safe to access slow forces all the time like this
1022  // CkPrintf(" atom %d, ForceSL (%8.6f, %8.6f, %8.6f) \n", index, f3[i].x, f3[i].y, f3[i].z);
1023  }
1024  }
1025  }
1026 
1027  printf("PE, PId (%d, %d) Stop saving at step: %d ####################################################\n",
1028  CkMyPe(), homePatch->patchID, patchList[0].p->flags.step);
1029  #endif
1030  if ( ! forceSendActive ) return;
1031  for (int i=0; i<num; ++i) {
1032  int index = atoms[i].id;
1033  char reqflag;
1034  if (index < endRequested && (reqflag = isRequested[index])) {
1035  if (hasSOA) {
1036  f_sum_x = f1_soa_x[i] + f2_soa_x[i];
1037  f_sum_y = f1_soa_y[i] + f2_soa_y[i];
1038  f_sum_z = f1_soa_z[i] + f2_soa_z[i];
1039  if (dofull) {
1040  f_sum_x += f3_soa_x[i];
1041  f_sum_y += f3_soa_y[i];
1042  f_sum_z += f3_soa_z[i];
1043  }
1044  f_sum.x = f_sum_x;
1045  f_sum.y = f_sum_y;
1046  f_sum.z = f_sum_z;
1047  } else {
1048  f_sum = f1[i]+f2[i];
1049  if (dofull)
1050  f_sum += f3[i];
1051  }
1052 
1053  if ( fixedAtomsOn && atoms[i].atomFixed )
1054  f_sum = 0.;
1055 
1056  if ( reqflag & 1 ) { // individual atom
1057  fid.add(index);
1058  totalForce.add(f_sum);
1059  }
1060  if ( reqflag & 2 ) { // part of group
1061  intpair *gpend = gpair.end();
1062  intpair *gpi = std::lower_bound(gpair.begin(),gpend,intpair(index,0));
1063  if ( gpi == gpend || gpi->first != index )
1064  NAMD_bug("ComputeGlobal::saveTotalForces gpair corrupted.");
1065  do {
1066  ++gfcount;
1067  groupTotalForce[gpi->second] += f_sum;
1068  } while ( ++gpi != gpend && gpi->first == index );
1069  }
1070  }
1071  }
1072 }
static Node * Object()
Definition: Node.h:86
#define NAMD_EVENT_STOP(eon, id)
GridforceGrid * get_gridfrc_grid(int gridnum) const
Definition: Molecule.h:1368
int size(void) const
Definition: ResizeArray.h:131
void recvResults(ComputeGlobalResultsMsg *)
NAMD_HOST_DEVICE Position reverse_transform(Position data, const Transform &t) const
Definition: Lattice.h:143
double * f_normal_z
Definition: NamdTypes.h:430
int32 ComputeID
Definition: NamdTypes.h:288
double * f_normal_y
Definition: NamdTypes.h:429
Bool globalMasterScaleByFrequency
static PatchMap * Object()
Definition: PatchMap.h:27
BigRealList gridobjvalue
Partial values of the GridForce objects from this message.
double * f_saved_slow_z
Definition: NamdTypes.h:445
NAMD_HOST_DEVICE Tensor outer(const Vector &v1, const Vector &v2)
Definition: Tensor.h:241
Definition: Vector.h:72
virtual void submit(void)=0
#define ADD_TENSOR_OBJECT(R, RL, D)
Definition: ReductionMgr.h:44
SimParameters * simParameters
Definition: Node.h:181
ComputeHomePatchList patchList
Bool CUDASOAintegrateMode
float Real
Definition: common.h:118
#define DebugM(x, y)
Definition: Debug.h:75
std::ostream & endi(std::ostream &s)
Definition: InfoStream.C:54
BigReal z
Definition: Vector.h:74
int8 i
Definition: NamdTypes.h:45
Position position
Definition: NamdTypes.h:78
int numGridforceGrids
Definition: Molecule.h:626
SubmitReduction * willSubmit(int setID, int size=-1)
Definition: ReductionMgr.C:368
int8 j
Definition: NamdTypes.h:45
void saveTotalForces(HomePatch *)
ResizeArrayIter< T > begin(void) const
static ReductionMgr * Object(void)
Definition: ReductionMgr.h:290
double * f_saved_slow_y
Definition: NamdTypes.h:444
ComputeGlobal(ComputeID, ComputeMgr *)
Definition: ComputeGlobal.C:38
int add(const Elem &elem)
Definition: ResizeArray.h:101
double * f_saved_slow_x
Definition: NamdTypes.h:443
Molecule stores the structural information for the system.
Definition: Molecule.h:174
int second
Definition: ComputeGlobal.h:24
void resize(int i)
Definition: ResizeArray.h:84
int32 index
Definition: NamdTypes.h:300
void setall(const Elem &elem)
Definition: ResizeArray.h:94
uint32 id
Definition: NamdTypes.h:160
ResizeArray< Force > ForceList
Definition: NamdTypes.h:277
double * f_normal_x
Definition: NamdTypes.h:428
int numPatches(void) const
Definition: PatchMap.h:59
#define NAMD_EVENT_START(eon, id)
const Elem * const_iterator
Definition: ResizeArray.h:38
void NAMD_bug(const char *err_msg)
Definition: common.C:195
LocalID localID(AtomID id)
Definition: AtomMap.h:78
ResizeArray< Lattice > lat
IntList gridobjindex
Indices of the GridForce objects contained in this message.
int numAtoms
Definition: Patch.h:151
BigReal x
Definition: Vector.h:74
double * f_saved_nbond_x
Definition: NamdTypes.h:440
virtual ~ComputeGlobal()
void get_gridfrc_params(Real &k, Charge &q, int atomnum, int gridnum) const
Definition: Molecule.h:1362
double * f_saved_nbond_z
Definition: NamdTypes.h:442
void enableComputeGlobalResults()
Definition: ComputeMgr.C:1407
PatchID pid
Definition: NamdTypes.h:299
void recvComputeGlobalResults(ComputeGlobalResultsMsg *)
Definition: ComputeMgr.C:1424
static AtomMap * Object()
Definition: AtomMap.h:37
iterator begin(void)
Definition: ResizeArray.h:36
const PatchID patchID
Definition: Patch.h:150
Definition: Tensor.h:15
iterator end(void)
Definition: ResizeArray.h:37
BigReal y
Definition: Vector.h:74
Mass mass
Definition: NamdTypes.h:218
#define ADD_VECTOR_OBJECT(R, RL, D)
Definition: ReductionMgr.h:28
int count
Numer of atoms processed for this message.
double * f_saved_nbond_y
Definition: NamdTypes.h:441
Bool is_atom_gridforced(int atomnum, int gridnum) const
Definition: Molecule.h:1256
void sendComputeGlobalData(ComputeGlobalDataMsg *)
Definition: ComputeMgr.C:1294
ForceList f[Results::maxNumForces]
Definition: Patch.h:214
int patchcount
Number of patches processed for this message.
void swap(ResizeArray< Elem > &ra)
Definition: ResizeArray.h:64
int32 PatchID
Definition: NamdTypes.h:287
ResizeArrayIter< T > end(void) const
Molecule * molecule
Definition: Node.h:179
float Charge
Definition: NamdTypes.h:38
int globalMasterFrequency
int8 k
Definition: NamdTypes.h:45
double BigReal
Definition: common.h:123
Transform transform
Definition: NamdTypes.h:229