NAMD
ComputeGlobal.C
Go to the documentation of this file.
1 
7 /*
8  Forwards atoms to master node for force evaluation.
9 */
10 
11 #include "InfoStream.h"
12 #include "Node.h"
13 #include "PatchMap.h"
14 #include "PatchMap.inl"
15 #include "AtomMap.h"
16 #include "ComputeGlobal.h"
17 #include "ComputeGlobalMsgs.h"
18 #include "GridForceGrid.h"
19 #include "PatchMgr.h"
20 #include "Molecule.h"
21 #include "ReductionMgr.h"
22 #include "ComputeMgr.h"
23 #include "ComputeMgr.decl.h"
24 #include "SimParameters.h"
25 #include "PatchData.h"
26 #include <stdio.h>
27 #include <algorithm>
28 #include "NamdEventsProfiling.h"
29 #define MIN_DEBUG_LEVEL 3
30 //#define DEBUGM
31 #include "Debug.h"
32 #define USE_GLOBALMASTER_VIRIAL_KERNEL 1
33 #include "GridForceGrid.inl"
34 #include "MGridforceParams.h"
35 
36 // CLIENTS
37 
// --- ComputeGlobal client constructor body ---
// NOTE(review): the function signature is missing from this scraped copy
// (original lines 38-39); presumably ComputeGlobal::ComputeGlobal(ComputeMgr *m)
// given the `comm = m;` assignment below — confirm against the full source.
// Initializes request-tracking state and the per-patch pointer tables used to
// forward atoms to the master node and apply returned forces.
40 {
41  DebugM(3,"Constructing client\n");
42  aid.resize(0);
43  gdef.resize(0);
44  comm = m;
45  firsttime = 1;
46  isRequested = 0;
47  isRequestedAllocSize = 0;
48  endRequested = 0;
49  numGroupsRequested = 0;
// NOTE(review): original line 50 is missing from this copy; it presumably
// initializes `sp` (the SimParameters pointer dereferenced below) — confirm.
51  dofull = (sp->GBISserOn || sp->GBISOn || sp->fullDirectOn || sp->FMAOn || sp->PMEOn);
// Total-force sending is only enabled for the clients that consume it
// (Tcl forces and colvars); it stays inactive until a master requests it.
52  forceSendEnabled = 0;
53  if ( sp->tclForcesOn ) forceSendEnabled = 1;
54  if ( sp->colvarsOn ) forceSendEnabled = 1;
55  forceSendActive = 0;
56  fid.resize(0);
57  totalForce.resize(0);
58  gfcount = 0;
59  groupTotalForce.resize(0);
// Per-patch pointer tables indexed by patch id; entries are filled while a
// patch's boxes are open and cleared again afterwards.
61  int numPatches = PatchMap::Object()->numPatches();
62  forcePtrs = new Force*[numPatches];
63  atomPtrs = new FullAtom*[numPatches];
64  for ( int i = 0; i < numPatches; ++i ) { forcePtrs[i] = 0; atomPtrs[i] = 0; }
65 
66  #ifdef NODEGROUP_FORCE_REGISTER
// Cache the node-wide reduction object from the PatchData nodegroup branch.
67  CProxy_PatchData cpdata(CkpvAccess(BOCclass_group).patchData);
68  PatchData *patchData = cpdata.ckLocalBranch();
69  nodeReduction = patchData->reduction;
70  #endif
71  if (sp->SOAintegrateOn || sp->CUDASOAintegrateMode) {
72  // Allocate memory for numPatches to access SOA data
73  mass_soa = new float*[numPatches];
74  pos_soa_x = new double*[numPatches];
75  pos_soa_y = new double*[numPatches];
76  pos_soa_z = new double*[numPatches];
77  force_soa_x = new double*[numPatches];
78  force_soa_y = new double*[numPatches];
79  force_soa_z = new double*[numPatches];
80  transform_soa_i = new int*[numPatches];
81  transform_soa_j = new int*[numPatches];
82  transform_soa_k = new int*[numPatches];
83  for ( int i = 0; i < numPatches; ++i ) {
84  mass_soa[i] = NULL;
85  pos_soa_x[i] = NULL;
86  pos_soa_y[i] = NULL;
87  pos_soa_z[i] = NULL;
88  force_soa_x[i] = NULL;
89  force_soa_y[i] = NULL;
90  force_soa_z[i] = NULL;
91  transform_soa_i[i] = NULL;
92  transform_soa_j[i] = NULL;
93  transform_soa_k[i] = NULL;
94  }
95  } else {
// Non-SOA runs never touch these tables; keep them null so the destructor's
// guarded deletes are no-ops.
96  mass_soa = NULL;
97  pos_soa_x = NULL;
98  pos_soa_y = NULL;
99  pos_soa_z = NULL;
100  force_soa_x = NULL;
101  force_soa_y = NULL;
102  force_soa_z = NULL;
103  transform_soa_i = NULL;
104  transform_soa_j = NULL;
105  transform_soa_k = NULL;
106  }
// Per-patch array of per-grid force lists; populated by configureGridObjects().
107  gridForcesPtrs = new ForceList **[numPatches];
108  numGridObjects = numActiveGridObjects = 0;
109  for ( int i = 0; i < numPatches; ++i ) {
110  forcePtrs[i] = NULL; atomPtrs[i] = NULL;
111  gridForcesPtrs[i] = NULL;
112  }
113 }
114 
// --- ComputeGlobal client destructor body ---
// NOTE(review): the signature line (original 115) is missing from this
// scraped copy; presumably ComputeGlobal::~ComputeGlobal().
// Releases the request-flag array, per-patch pointer tables, grid-object
// force lists and the reduction handle.
116 {
117  delete[] isRequested;
118  delete[] forcePtrs;
119  deleteGridObjects();
120  delete[] gridForcesPtrs;
121  delete[] atomPtrs;
122  delete reduction;
123 
// The null checks are redundant (delete[] of a null pointer is a no-op) but
// harmless; these SOA tables are only allocated for SOA/CUDA-SOA runs.
124  if(mass_soa) delete [] mass_soa;
125  if(pos_soa_x) delete [] pos_soa_x;
126  if(pos_soa_y) delete [] pos_soa_y;
127  if(pos_soa_z) delete [] pos_soa_z;
128  if(force_soa_x) delete [] force_soa_x;
129  if(force_soa_y) delete [] force_soa_y;
130  if(force_soa_z) delete [] force_soa_z;
131  if(transform_soa_i) delete [] transform_soa_i;
132  if(transform_soa_j) delete [] transform_soa_j;
133  if(transform_soa_k) delete [] transform_soa_k;
134 }
135 
136 void ComputeGlobal::configure(AtomIDList &newaid, AtomIDList &newgdef, IntList &newgridobjid) {
137  DebugM(4,"Receiving configuration (" << newaid.size() <<
138  " atoms, " << newgdef.size() << " atoms/groups and " <<
139  newgridobjid.size() << " grid objects) on client\n" << endi);
140 
141  AtomIDList::iterator a, a_e;
142 
143  if ( forceSendEnabled ) {
144  // clear previous data
145  int max = -1;
146  for (a=newaid.begin(),a_e=newaid.end(); a!=a_e; ++a) {
147  if ( *a > max ) max = *a;
148  }
149  for (a=newgdef.begin(),a_e=newgdef.end(); a!=a_e; ++a) {
150  if ( *a > max ) max = *a;
151  }
152  endRequested = max+1;
153  if ( endRequested > isRequestedAllocSize ) {
154  delete [] isRequested;
155  isRequestedAllocSize = endRequested+10;
156  isRequested = new char[isRequestedAllocSize];
157  memset(isRequested, 0, isRequestedAllocSize);
158  } else {
159  for (a=aid.begin(),a_e=aid.end(); a!=a_e; ++a) {
160  isRequested[*a] = 0;
161  }
162  for (a=gdef.begin(),a_e=gdef.end(); a!=a_e; ++a) {
163  if ( *a != -1 ) isRequested[*a] = 0;
164  }
165  }
166  // reserve space
167  gpair.resize(0);
168  gpair.resize(newgdef.size());
169  gpair.resize(0);
170  }
171 
172  // store data
173  aid.swap(newaid);
174  gdef.swap(newgdef);
175 
176  if (newgridobjid.size()) configureGridObjects(newgridobjid);
177 
178  if ( forceSendEnabled ) {
179  int newgcount = 0;
180  for (a=aid.begin(),a_e=aid.end(); a!=a_e; ++a) {
181  isRequested[*a] = 1;
182  }
183  for (a=gdef.begin(),a_e=gdef.end(); a!=a_e; ++a) {
184  if ( *a == -1 ) ++newgcount;
185  else {
186  isRequested[*a] |= 2;
187  gpair.add(intpair(*a,newgcount));
188  }
189  }
190  std::sort(gpair.begin(),gpair.end());
191  numGroupsRequested = newgcount;
192  }
193  DebugM(3,"Done configure on client\n");
194 }
195 
196 void ComputeGlobal::deleteGridObjects()
197 {
// Frees the per-patch, per-grid ForceList objects allocated by
// configureGridObjects() and resets the grid-object counters.
198  if (numGridObjects == 0) return;
// NOTE(review): original line 199 is missing from this scraped copy; it
// presumably declares the home-patch iterator `ap` used below — confirm
// against the complete source.
200  for (ap = ap.begin(); ap != ap.end(); ap++) {
201  ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
202  if (gridForces != NULL) {
203  for (size_t ig = 0; ig < numGridObjects; ig++) {
204  if (gridForces[ig] != NULL) {
205  delete gridForces[ig];
206  gridForces[ig] = NULL;
207  }
208  }
209  delete [] gridForces;
// Note: this only nulls the local copy of the pointer; the entry in
// gridForcesPtrs[] itself is overwritten on the next configureGridObjects().
210  gridForces = NULL;
211  }
212  }
213  numGridObjects = numActiveGridObjects = 0;
214 }
215 
216 void ComputeGlobal::configureGridObjects(IntList &newgridobjid)
217 {
// Rebuilds grid-object bookkeeping: validates the requested gridforce grid
// indices, marks them active, and allocates one ForceList per active grid
// for every home patch.
218  Molecule *mol = Node::Object()->molecule;
219 
// Drop any previously configured grid objects first.
220  deleteGridObjects();
221 
222  numGridObjects = mol->numGridforceGrids;
223  numActiveGridObjects = 0;
224 
225  gridObjActive.resize(numGridObjects);
226  gridObjActive.setall(0);
227 
// Validate and activate each requested grid index.
228  IntList::const_iterator goid_i = newgridobjid.begin();
229  IntList::const_iterator goid_e = newgridobjid.end();
230  for ( ; goid_i != goid_e; goid_i++) {
231  if ((*goid_i < 0) || (*goid_i >= numGridObjects)) {
232  NAMD_bug("Requested illegal gridForceGrid index.");
233  } else {
234  DebugM(3,"Adding grid with index " << *goid_i << " to ComputeGlobal\n");
235  gridObjActive[*goid_i] = 1;
236  numActiveGridObjects++;
237  }
238  }
239 
240  for (size_t ig = 0; ig < numGridObjects; ig++) {
241  DebugM(3,"Grid index " << ig << " is active or inactive? "
242  << gridObjActive[ig] << "\n" << endi);
243  }
244 
// NOTE(review): original line 245 is missing from this scraped copy; it
// presumably declares the home-patch iterator `ap` used below — confirm.
246  for (ap = ap.begin(); ap != ap.end(); ap++) {
247  gridForcesPtrs[ap->p->getPatchID()] = new ForceList *[numGridObjects];
248  ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
249  for (size_t ig = 0; ig < numGridObjects; ig++) {
250  if (gridObjActive[ig]) {
251  gridForces[ig] = new ForceList;
252  } else {
// Inactive grids (handled by ComputeGridForce instead) get no ForceList.
253  gridForces[ig] = NULL;
254  }
255  }
256  }
257 }
258 
259 #if 0
// Disabled (#if 0) legacy entry point: apply a configuration message and
// immediately send data back.  Kept for reference only; note it predates the
// grid-object argument of the current configure() overload.
260 void ComputeGlobal::recvConfig(ComputeGlobalConfigMsg *msg) {
261  DebugM(3,"Receiving configure on client\n");
262  configure(msg->aid,msg->gdef);
263  delete msg;
264  sendData();
265 }
266 #endif
267 
// --- ComputeGlobal::recvResults(ComputeGlobalResultsMsg *msg) body ---
// Applies the forces computed by the GlobalMaster back onto local patches
// (per-atom forces, per-group forces distributed mass-weighted, and grid
// object forces), accumulates the external force/virial reduction, handles
// an optional reconfiguration, and optionally resends coordinates.
// NOTE(review): this scraped copy is missing original lines 268 (the
// function signature), 277, 282, 310 and 490 — all notes below should be
// verified against the complete source file.
269  DebugM(3,"Receiving results (" << msg->aid.size() << " forces, "
270  << msg->newgdef.size() << " new group atoms) on client thread " << CthGetToken(CthSelf())->serialNo <<" msg->resendCoordinates " << msg->resendCoordinates << " msg->totalforces " << msg->totalforces<< "\n");
271 
272  forceSendActive = msg->totalforces;
273  if ( forceSendActive && ! forceSendEnabled ) NAMD_bug("ComputeGlobal::recvResults forceSendActive without forceSendEnabled");
274 
275  // set the forces only if we aren't going to resend the data
276  int setForces = !msg->resendCoordinates;
278  if(setForces) { // we are requested to
279  // Store forces to patches
280  AtomMap *atomMap = AtomMap::Object();
281  const Lattice & lattice = patchList[0].p->lattice;
283  Force **f = forcePtrs;
284  FullAtom **t = atomPtrs;
285  Force extForce = 0.;
286  Tensor extVirial;
287 
// Open every home patch's force box and cache raw force/atom pointers
// (plus the SOA arrays when SOA/CUDA-SOA integration is active).
288  for (ap = ap.begin(); ap != ap.end(); ap++) {
289  (*ap).r = (*ap).forceBox->open();
290  f[(*ap).patchID] = (*ap).r->f[Results::normal];
291  t[(*ap).patchID] = (*ap).p->getAtomList().begin();
292 
293  if (sp->SOAintegrateOn || sp->CUDASOAintegrateMode) {
294  // Assigne the pointer to SOA data structure
295  PatchID pId = (*ap).patchID;
296  mass_soa[pId] = (*ap).p->patchDataSOA.mass;
297  force_soa_x[pId] = (*ap).p->patchDataSOA.f_global_x;
298  force_soa_y[pId] = (*ap).p->patchDataSOA.f_global_y;
299  force_soa_z[pId] = (*ap).p->patchDataSOA.f_global_z;
300  transform_soa_i[pId] = (*ap).p->patchDataSOA.transform_i;
301  transform_soa_j[pId] = (*ap).p->patchDataSOA.transform_j;
302  transform_soa_k[pId] = (*ap).p->patchDataSOA.transform_k;
303  }
304  }
305 
306 
307  AtomIDList::iterator a = msg->aid.begin();
308  AtomIDList::iterator a_e = msg->aid.end();
309  ForceList::iterator f2 = msg->f.begin();
// NOTE(review): as shown, the loop below scales a *local copy* of each
// force by globalMasterFrequency (multiple-time-stepping weight) but
// discards the result, and it advances a/f2 to the end, which would make
// the per-atom loops that follow no-ops.  Original line 310 is missing from
// this copy, so the real control flow cannot be confirmed here — check the
// complete source before trusting (or changing) this section.
311  for ( ; a != a_e; ++a, ++f2 ) {
312  Force f_atom;
313  f_atom = (*f2);
314  f_atom.x*=(float) sp->globalMasterFrequency;
315  f_atom.y*=(float) sp->globalMasterFrequency;
316  f_atom.z*=(float) sp->globalMasterFrequency;
317  }
// Per-atom requested forces: SOA path accumulates into f_global_* arrays,
// AOS path accumulates into the Results::normal force array.  The virial
// contribution is only computed here when the GPU virial kernel is not used.
318  if (sp->SOAintegrateOn || sp->CUDASOAintegrateMode) {
319  LocalID localID;
320  PatchID lpid;
321  int lidx;
322  Position x_orig, x_atom;
323  Transform trans;
324  Force f_atom;
325  for ( ; a != a_e; ++a, ++f2 ) {
326  DebugM(1,"processing atom "<<(*a)<<", F="<<(*f2)<<"...\n");
327  /* XXX if (*a) is out of bounds here we get a segfault */
328  localID = atomMap->localID(*a);
329  lpid = localID.pid;
330  lidx = localID.index;
// Skip atoms that are not local to this PE's home patches.
331  if ( lpid == notUsed || ! f[lpid] ) continue;
332  f_atom = (*f2);
333  // printf("NAMD3-recv: atom %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
334  // *a, force_soa_x[lpid][lidx], force_soa_y[lpid][lidx], force_soa_z[lpid][lidx]);
335  // printf("NAMD3-recv: atom %d, Added Force (%8.6f, %8.6f, %8.6f) \n", *a, f_atom.x, f_atom.y, f_atom.z);
336  force_soa_x[lpid][lidx] += f_atom.x;
337  force_soa_y[lpid][lidx] += f_atom.y;
338  force_soa_z[lpid][lidx] += f_atom.z;
339 #ifndef USE_GLOBALMASTER_VIRIAL_KERNEL
// Virial uses the unwrapped (reverse-transformed) atom position.
340  x_orig.x = pos_soa_x[lpid][lidx];
341  x_orig.y = pos_soa_y[lpid][lidx];
342  x_orig.z = pos_soa_z[lpid][lidx];
343  trans.i = transform_soa_i[lpid][lidx];
344  trans.j = transform_soa_j[lpid][lidx];
345  trans.k = transform_soa_k[lpid][lidx];
346  x_atom = lattice.reverse_transform(x_orig,trans);
347  extForce += f_atom;
348  extVirial += outer(f_atom,x_atom);
349 #endif
350  }
351  } else {
352  for ( ; a != a_e; ++a, ++f2 ) {
353  DebugM(1,"processing atom "<<(*a)<<", F="<<(*f2)<<"...\n");
354  /* XXX if (*a) is out of bounds here we get a segfault */
355  LocalID localID = atomMap->localID(*a);
356  if ( localID.pid == notUsed || ! f[localID.pid] ) continue;
357  Force f_atom = (*f2);
358  // printf("NAMD3-recv: atom %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
359  // *a, f[localID.pid][localID.index].x, f[localID.pid][localID.index].y, f[localID.pid][localID.index].z);
360  // printf("NAMD3-recv: atom %d, Added Force (%8.6f, %8.6f, %8.6f) \n", *a, f_atom.x, f_atom.y, f_atom.z);
361  f[localID.pid][localID.index] += f_atom;
362  FullAtom &atom = t[localID.pid][localID.index];
363  Position x_orig = atom.position;
364  Transform trans = atom.transform;
365  Position x_atom = lattice.reverse_transform(x_orig,trans);
366  extForce += f_atom;
367  extVirial += outer(f_atom,x_atom);
368  }
369  }
370  DebugM(1,"done with the loop\n");
371 
372  // calculate forces for atoms in groups
// gdef is a -1-terminated list of groups; msg->gforce holds one vector per
// group, applied to each member scaled by the member's mass (so it acts as
// a uniform acceleration on the group).
373  AtomIDList::iterator g_i, g_e;
374  g_i = gdef.begin(); g_e = gdef.end();
375  ForceList::iterator gf_i = msg->gforce.begin();
376  //iout << iDEBUG << "recvResults\n" << endi;
377  if (sp->SOAintegrateOn || sp->CUDASOAintegrateMode) {
378  LocalID localID;
379  PatchID lpid;
380  int lidx;
381  Position x_orig, x_atom;
382  Transform trans;
383  Force f_atom;
384  for ( ; g_i != g_e; ++g_i, ++gf_i ) {
385  //iout << iDEBUG << *gf_i << '\n' << endi;
386  Vector accel = (*gf_i);
387  for ( ; *g_i != -1; ++g_i ) {
388  //iout << iDEBUG << *g_i << '\n' << endi;
389  localID = atomMap->localID(*g_i);
390  lpid = localID.pid;
391  lidx = localID.index;
392  if ( lpid == notUsed || ! f[lpid] ) continue;
393  f_atom = accel * mass_soa[lpid][lidx];
394 #if 0
395  if (*g_i < 20) {
396  CkPrintf("NAMD3-recv: group %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
397  *g_i, force_soa_x[lpid][lidx], force_soa_y[lpid][lidx], force_soa_z[lpid][lidx]);
398  CkPrintf("NAMD3-recv: group %d, Added Force (%8.6f, %8.6f, %8.6f) \n", *g_i, f_atom.x, f_atom.y, f_atom.z);
399  }
400 #endif
401  force_soa_x[lpid][lidx] += f_atom.x;
402  force_soa_y[lpid][lidx] += f_atom.y;
403  force_soa_z[lpid][lidx] += f_atom.z;
404 #ifndef USE_GLOBALMASTER_VIRIAL_KERNEL
405  x_orig.x = pos_soa_x[lpid][lidx];
406  x_orig.y = pos_soa_y[lpid][lidx];
407  x_orig.z = pos_soa_z[lpid][lidx];
408  trans.i = transform_soa_i[lpid][lidx];
409  trans.j = transform_soa_j[lpid][lidx];
410  trans.k = transform_soa_k[lpid][lidx];
411  x_atom = lattice.reverse_transform(x_orig,trans);
412  extForce += f_atom;
413  extVirial += outer(f_atom,x_atom);
414 #endif
415  }
416  }
417  } else {
418  for ( ; g_i != g_e; ++g_i, ++gf_i ) {
419  //iout << iDEBUG << *gf_i << '\n' << endi;
420  Vector accel = (*gf_i);
421  for ( ; *g_i != -1; ++g_i ) {
422  //iout << iDEBUG << *g_i << '\n' << endi;
423  LocalID localID = atomMap->localID(*g_i);
424  if ( localID.pid == notUsed || ! f[localID.pid] ) continue;
425  FullAtom &atom = t[localID.pid][localID.index];
426  Force f_atom = accel * atom.mass;
427 #if 0
428  if (*g_i < 20) {
429  CkPrintf("NAMD2-recv: group %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
430  *g_i, f[localID.pid][localID.index].x, f[localID.pid][localID.index].y, f[localID.pid][localID.index].z);
431  CkPrintf("NAMD2-recv: group %d, Added Force (%8.6f, %8.6f, %8.6f) \n", *g_i, f_atom.x, f_atom.y, f_atom.z);
432  }
433 #endif
434  f[localID.pid][localID.index] += f_atom;
435 
436  Position x_orig = atom.position;
437  Transform trans = atom.transform;
438  Position x_atom = lattice.reverse_transform(x_orig,trans);
439  extForce += f_atom;
440  extVirial += outer(f_atom,x_atom);
441  }
442  }
443  }
444  DebugM(1,"done with the groups\n");
445 
446  if (numActiveGridObjects > 0) {
447  applyGridObjectForces(msg, &extForce, &extVirial);
448  }
449  // printf("Finish receiving at step: %d ####################################################\n",
450  // patchList[0].p->flags.step);
451 
// Submit the external force/virial: the GPU path accumulates into the
// nodegroup reduction (skipped entirely when the GPU virial kernel handles
// it), the CPU path submits through the regular reduction object.
452  #ifdef NODEGROUP_FORCE_REGISTER
453  if (sp->CUDASOAintegrate) {
454 #ifndef USE_GLOBALMASTER_VIRIAL_KERNEL
455  ADD_VECTOR_OBJECT(nodeReduction,REDUCTION_EXT_FORCE_NORMAL,extForce);
456  ADD_TENSOR_OBJECT(nodeReduction,REDUCTION_VIRIAL_NORMAL,extVirial);
457 #endif
458  } else {
459  ADD_VECTOR_OBJECT(reduction,REDUCTION_EXT_FORCE_NORMAL,extForce);
460  ADD_TENSOR_OBJECT(reduction,REDUCTION_VIRIAL_NORMAL,extVirial);
461  reduction->submit();
462  }
463  #else
464  ADD_VECTOR_OBJECT(reduction,REDUCTION_EXT_FORCE_NORMAL,extForce);
465  ADD_TENSOR_OBJECT(reduction,REDUCTION_VIRIAL_NORMAL,extVirial);
466  reduction->submit();
467  #endif
468  }
469  // done setting the forces, close boxes below
470 
471  // Get reconfiguration if present
472  if ( msg->reconfig ) {
473  DebugM(3,"Reconfiguring\n");
474  configure(msg->newaid, msg->newgdef, msg->newgridobjid);
475  }
476 
477  // send another round of data if requested
478 
479  if(msg->resendCoordinates) {
480  DebugM(3,"Sending requested data right away\n");
481  // CkPrintf("*** Resending data on PE %d \n", CkMyPe());
482  sendData();
483  }
484 
// Reset per-group total-force accumulators for the next cycle.
485  groupTotalForce.resize(numGroupsRequested);
486  for ( int i=0; i<numGroupsRequested; ++i ) groupTotalForce[i] = 0;
487  DebugM(3,"resized\n");
488  if(setForces) {
489  DebugM(3,"setting forces\n");
// Close the boxes opened above and null the cached pointers so stale
// entries are never dereferenced on a later step.
// NOTE(review): original line 490 is missing from this copy.
491  Force **f = forcePtrs;
492  FullAtom **t = atomPtrs;
493  for (ap = ap.begin(); ap != ap.end(); ap++) {
494  CompAtom *x;
495  PatchID pId = (*ap).patchID;
496  if (!sp->CUDASOAintegrate) {
497  (*ap).positionBox->close(&x);
498  (*ap).forceBox->close(&((*ap).r));
499  DebugM(1,"closing boxes\n");
500  }
501  f[pId] = 0;
502  t[pId] = 0;
503  if (sp->CUDASOAintegrate || sp->SOAintegrateOn) {
504  // XXX Possibly code below is needed by SOAintegrate mode
505  mass_soa[pId] = NULL;
506  pos_soa_x[pId] = NULL;
507  pos_soa_y[pId] = NULL;
508  pos_soa_z[pId] = NULL;
509  force_soa_x[pId] = NULL;
510  force_soa_y[pId] = NULL;
511  force_soa_z[pId] = NULL;
512  transform_soa_i[pId] = NULL;
513  transform_soa_j[pId] = NULL;
514  transform_soa_k[pId] = NULL;
515  DebugM(2,"nulling ptrs\n");
516  }
517  }
518  DebugM(3,"done setting forces\n");
519  }
520 
// Message ownership: in the CUDA-SOA path the message is shared and freed
// on PE 0 in sendComputeGlobalResults, so it is only deleted here otherwise.
521  #ifdef NODEGROUP_FORCE_REGISTER
522  if (!sp->CUDASOAintegrate) {
523  // CUDASOAintegrate handles this on PE 0 in sendComputeGlobalResults
524  delete msg;
525  }
526  #else
527  delete msg;
528  #endif
529  DebugM(3,"Done processing results\n");
530 }
531 
// --- ComputeGlobal::doWork() body ---
// Per-step entry point: every globalMasterFrequency steps it opens the
// position boxes, caches atom/SOA pointers, and ships requested data to the
// master (with a special handshake on the very first call).
// NOTE(review): this scraped copy is missing original lines 532 (the
// signature), 536-537 (apparently the patch iterator and/or message
// declarations used below), 574 (apparently the allocation of `msg` used in
// the first-time branch) and 599 (apparently the enableComputeGlobalResults
// call) — confirm all of these against the complete source.
533 {
534  DebugM(2,"doWork thread " << CthGetToken(CthSelf())->serialNo << "\n");
535 
538  FullAtom **t = atomPtrs;
539  int step = patchList[0].p->flags.step;
// Multiple-time-stepping gate: only talk to the GlobalMaster every
// globalMasterFrequency steps.
540  if((step % sp->globalMasterFrequency) ==0)
541  {
542  DebugM(3,"doWork for step " << step <<"\n"<<endi);
543  // if(sp->CUDASOAintegrateOn) {
544  // hasPatchZero = 0;
545  // }
546 
// Open each home patch's position box and cache atom (and SOA) pointers
// for use by sendData()/saveTotalForces().
547  for (ap = ap.begin(); ap != ap.end(); ap++) {
548  CompAtom *x = (*ap).positionBox->open();
549  t[(*ap).patchID] = (*ap).p->getAtomList().begin();
550 
551  if (sp->SOAintegrateOn || sp->CUDASOAintegrateMode) {
552  // Assigne the pointer to SOA data structure
553  PatchID pId = (*ap).patchID;
554  mass_soa[pId] = (*ap).p->patchDataSOA.mass;
555  pos_soa_x[pId] = (*ap).p->patchDataSOA.pos_x;
556  pos_soa_y[pId] = (*ap).p->patchDataSOA.pos_y;
557  pos_soa_z[pId] = (*ap).p->patchDataSOA.pos_z;
558  transform_soa_i[pId] = (*ap).p->patchDataSOA.transform_i;
559  transform_soa_j[pId] = (*ap).p->patchDataSOA.transform_j;
560  transform_soa_k[pId] = (*ap).p->patchDataSOA.transform_k;
561  // if(sp->CUDASOAintegrateOn && (pId == 0)) {
562  // hasPatchZero = 1;
563  // }
564  }
565  }
566 
567  if(!firsttime) {
568  // CkPrintf("*** Start NoFirstTime on PE %d \n", CkMyPe());
569  sendData();
570  // CkPrintf("*** End NoFirstTime on PE %d \n", CkMyPe());
571  } else {
// First call: the PE owning patch 0 sends a step==-1 bootstrap message
// carrying only the lattice; other PEs on the CUDA-SOA path must still
// enter the matching node-level synchronization points.
572  // CkPrintf("*** Start FirstTime on PE %d \n", CkMyPe());
573  if ( hasPatchZero ) {
575  msg->lat.add(patchList[0].p->lattice);
576  msg->step = -1;
577  msg->count = 1;
578  msg->patchcount = 0;
579  // CkPrintf("***DoWork calling sendComputeGlobalData PE %d \n", CkMyPe());
580  comm->sendComputeGlobalData(msg);
581  }
582 #ifdef NODEGROUP_FORCE_REGISTER
583  else if (sp->CUDASOAintegrate) {
584 
585  // CkPrintf("***DoWork FirstTime barrier 1 on PE %d \n", CkMyPe());
// Two stowSuspendULT() calls pair with the barriers in
// sendComputeGlobalData on the patch-0 PE (see also sendData()).
586  comm->stowSuspendULT();
587  // CmiNodeBarrier();
588  // CkPrintf("***DoWork FirstTime barrier 2 on PE %d \n", CkMyPe());
589  comm->stowSuspendULT();
590  // CkPrintf("***DoWork out of barrier 2 on PE %d \n", CkMyPe());
591  // CmiNodeBarrier();
592  ComputeGlobalResultsMsg* resultsMsg = CkpvAccess(ComputeGlobalResultsMsg_instance);
593  // CkPrintf("*** ComputeGlobal::doWork PE (%d) calling recvComputeGlobalResults in doWork at step: %d \n",CkMyPe(), patchList[0].p->flags.step);
594  comm->recvComputeGlobalResults(resultsMsg);
595  }
596 #endif // NODEGROUP_FORCE_REGISTER
597  firsttime = 0;
598  // CkPrintf("*** ComputeGlobal::doWork PE (%d) calling enableComputeGlobalResults in doWork at step: %d \n",CkMyPe(), patchList[0].p->flags.step);
600 
601  // CkPrintf("*** End FirstTime on PE %d \n", CkMyPe());
602  }
603  }
604  else
605  {
606  DebugM(2,"skipping step "<< step <<"\n"<<endi);
607  /* TODO to support CPU side MTS we need to do something to avoid hang some distillation from sendData(); and the reductions
608  ADD_VECTOR_OBJECT(reduction,REDUCTION_EXT_FORCE_NORMAL,extForce);
609  ADD_TENSOR_OBJECT(reduction,REDUCTION_VIRIAL_NORMAL,extVirial);
610  reduction->submit();
611  and as yet undetermined message handling
612  */
613  }
614  DebugM(2,"done with doWork\n");
615 }
616 
617 void ComputeGlobal::sendData()
618 {
// Collects everything the GlobalMaster asked for — requested atom
// positions, group centers of mass and masses, grid-object values, and any
// saved total forces — into a ComputeGlobalDataMsg and sends it upstream.
// NOTE(review): this scraped copy is missing original lines 623 (apparently
// the home-patch iterator declaration), 626-627 (apparently the allocation
// of `msg` used throughout), 780 and 794 (apparently the
// enableComputeGlobalResults call) — confirm against the complete source.
619  DebugM(2,"sendData\n");
620  // Get positions from patches
621  AtomMap *atomMap = AtomMap::Object();
622  const Lattice & lattice = patchList[0].p->lattice;
624  FullAtom **t = atomPtrs;
625 
628 
629  msg->step = patchList[0].p->flags.step;
630  msg->count = 0;
631  msg->patchcount = 0;
632 
633  // CkPrintf("*** PE (%d) Start sending at step: %d \n",
634  // CkMyPe(), patchList[0].p->flags.step);
// Individually requested atoms: positions are unwrapped via
// reverse_transform so the master sees continuous trajectories.
635  AtomIDList::iterator a = aid.begin();
636  AtomIDList::iterator a_e = aid.end();
637  NAMD_EVENT_START(1, NamdProfileEvent::GM_MSGPADD);
638  if (sp->SOAintegrateOn || sp->CUDASOAintegrateMode) {
639  LocalID localID;
640  PatchID lpid;
641  int lidx;
642  Position x_orig;
643  Transform trans;
644 
645  for ( ; a != a_e; ++a ) {
646  localID = atomMap->localID(*a);
647  lpid = localID.pid;
648  lidx = localID.index;
// Skip atoms not local to this PE's home patches.
649  if ( lpid == notUsed || ! t[lpid] ) continue;
650  msg->aid.add(*a);
651  msg->count++;
652  x_orig.x = pos_soa_x[lpid][lidx];
653  x_orig.y = pos_soa_y[lpid][lidx];
654  x_orig.z = pos_soa_z[lpid][lidx];
655  trans.i = transform_soa_i[lpid][lidx];
656  trans.j = transform_soa_j[lpid][lidx];
657  trans.k = transform_soa_k[lpid][lidx];
658  msg->p.add(lattice.reverse_transform(x_orig,trans));
659  // printf("NAMD3-send: step %d atom %d, POS (%8.6f, %8.6f, %8.6f) \n", patchList[0].p->flags.step, *a, x_orig.x, x_orig.y, x_orig.z);
660  }
661  } else {
662  for ( ; a != a_e; ++a ) {
663  LocalID localID = atomMap->localID(*a);
664  if ( localID.pid == notUsed || ! t[localID.pid] ) continue;
665  msg->aid.add(*a);
666  msg->count++;
667  FullAtom &atom = t[localID.pid][localID.index];
668  Position x_orig = atom.position;
669  Transform trans = atom.transform;
670  msg->p.add(lattice.reverse_transform(x_orig,trans));
671  // printf("NAMD2-send: step %d atom %d, POS (%8.6f, %8.6f, %8.6f) \n", patchList[0].p->flags.step, *a, x_orig.x, x_orig.y, x_orig.z);
672  }
673  }
674  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_MSGPADD);
675  NAMD_EVENT_START(1, NamdProfileEvent::GM_GCOM);
676  // calculate group centers of mass
// Each PE contributes the mass-weighted partial sum over its local members
// of every group; the master divides by the total mass.
677  AtomIDList::iterator g_i, g_e;
678  g_i = gdef.begin(); g_e = gdef.end();
679  if (sp->SOAintegrateOn || sp->CUDASOAintegrateMode) {
680  LocalID localID;
681  PatchID lpid;
682  int lidx;
683  Position x_orig;
684  Transform trans;
685  for ( ; g_i != g_e; ++g_i ) {
686  Vector com(0,0,0);
687  BigReal mass = 0.;
688  for ( ; *g_i != -1; ++g_i ) {
689  localID = atomMap->localID(*g_i);
690  lpid = localID.pid;
691  lidx = localID.index;
692  if ( lpid == notUsed || ! t[lpid] ) continue;
693  msg->count++;
694  x_orig.x = pos_soa_x[lpid][lidx];
695  x_orig.y = pos_soa_y[lpid][lidx];
696  x_orig.z = pos_soa_z[lpid][lidx];
697  trans.i = transform_soa_i[lpid][lidx];
698  trans.j = transform_soa_j[lpid][lidx];
699  trans.k = transform_soa_k[lpid][lidx];
700  com += lattice.reverse_transform(x_orig,trans) * mass_soa[lpid][lidx];
701  mass += mass_soa[lpid][lidx];
702 #if 0
703  if (*g_i < 20) {
704  printf("NAMD3-send: step %d atom %d, POS (%8.6f, %8.6f, %8.6f) \n", patchList[0].p->flags.step, *g_i, x_orig.x, x_orig.y, x_orig.z);
705  }
706 #endif
707 
708  }
709  // CkPrintf("*** NAMD3-send (%d): step %d group %d, COM (%8.6f, %8.6f, %8.6f) \n",
710  // CkMyPe(), patchList[0].p->flags.step, *g_i, com.x, com.y, com.z);
711  DebugM(1,"Adding center of mass "<<com<<"\n");
712  NAMD_EVENT_START(1, NamdProfileEvent::GM_GCOMADD);
713  msg->gcom.add(com);
714  msg->gmass.add(mass);
715  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_GCOMADD);
716  }
717  } else {
718  for ( ; g_i != g_e; ++g_i ) {
719  Vector com(0,0,0);
720  BigReal mass = 0.;
721  for ( ; *g_i != -1; ++g_i ) {
722  LocalID localID = atomMap->localID(*g_i);
723  if ( localID.pid == notUsed || ! t[localID.pid] ) continue;
724  msg->count++;
725  FullAtom &atom = t[localID.pid][localID.index];
726  Position x_orig = atom.position;
727  Transform trans = atom.transform;
728  com += lattice.reverse_transform(x_orig,trans) * atom.mass;
729  mass += atom.mass;
730 #if 0
731  if (*g_i < 20) {
732  printf("NAMD2-send: step %d atom %d, POS (%8.6f, %8.6f, %8.6f) \n", patchList[0].p->flags.step, *g_i, x_orig.x, x_orig.y, x_orig.z);
733  }
734 #endif
735  }
736  // CkPrintf("*** NAMD2-send (%d): step %d group %d, COM (%8.6f, %8.6f, %8.6f) \n",
737  // CkMyPe(), patchList[0].p->flags.step, *g_i, com.x, com.y, com.z);
738 
739 
740  DebugM(1,"Adding center of mass "<<com<<"\n");
741  NAMD_EVENT_START(1, NamdProfileEvent::GM_GCOMADD);
742  msg->gcom.add(com);
743  msg->gmass.add(mass);
744  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_GCOMADD);
745  }
746  }
747  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_GCOM);
748 // printf("Finish sending at step: %d ####################################################\n",
749 // patchList[0].p->flags.step);
750 
751  if (numActiveGridObjects > 0) {
752  if(!sp->CUDASOAintegrateMode) // SequencerCUDA handles this
753  computeGridObjects(msg);
754  }
755 
// Attach total forces saved on the previous step and reset the local
// accumulators for the next cycle.
756  msg->fid.swap(fid);
757  msg->tf.swap(totalForce);
758  fid.resize(0);
759  totalForce.resize(0);
760 
761  if ( gfcount ) msg->gtf.swap(groupTotalForce);
762  msg->count += ( msg->fid.size() + gfcount );
763  gfcount = 0;
764 
765  DebugM(3,"Sending data (" << msg->p.size() << " positions, "
766  << msg->gcom.size() << " groups, " << msg->gridobjvalue.size()
767  << " grid objects) on client\n");
// Only the PE owning patch 0 contributes the lattice.
768  if ( hasPatchZero ) { msg->count++; msg->lat.add(lattice); }
769  if ( msg->count || msg->patchcount )
770  {
771  // CkPrintf("*** ComputeGlobal::sendData PE (%d) calling sendComputeGlobalData step: %d msg->count %d msg->patchcount %d\n", CkMyPe(), patchList[0].p->flags.step,msg->count, msg->patchcount);
772  comm->sendComputeGlobalData(msg);
773  }
774  else
775  {
776  // CkPrintf("*** ComputeGlobal::sendData PE (%d) skipping sendComputeGlobalData step: %d msg->count %d msg->patchcount %d\n", CkMyPe(), patchList[0].p->flags.step,msg->count, msg->patchcount);
777  // comm->sendComputeGlobalData(msg);
778 #ifdef NODEGROUP_FORCE_REGISTER
779  // this PE doesn't have message work to do
781  if (sp->CUDASOAintegrate) {
782  // we need to enter the barriers normally hit in sendComputeGlobalData
783  // CkPrintf("*** ComputeGlobal::sendData PE (%d) about to double stow\n");
784  comm->stowSuspendULT();
785  comm->stowSuspendULT();
786  // and the one in sendComputeGlobalResults
787  // comm->stowSuspendULT();
788  }
789 #endif
790  delete msg;
791  }
792  NAMD_EVENT_START(1, NamdProfileEvent::GM_GRESULTS);
793  // CkPrintf("*** ComputeGlobal::sendData PE (%d) calling enableComputeGlobalResults in sendData at step: %d \n", CkMyPe(), patchList[0].p->flags.step);
795  NAMD_EVENT_STOP(1, NamdProfileEvent::GM_GRESULTS);
796 }
797 
798 template<class T> void ComputeGlobal::computeGridForceGrid(FullAtomList::iterator aii,
// NOTE(review): original line 799 is missing from this scraped copy; it
// presumably declares the end iterator `aei` used below — confirm against
// the complete source.
800  ForceList::iterator gfii,
801  Lattice const &lattice,
802  int gridIndex,
803  T *grid,
804  BigReal &gridObjValue)
805 {
// Evaluates one gridforce grid over the atoms [aii, aei): writes the
// per-atom force (negated, charge*scale-weighted gradient) into the gfii
// sequence and accumulates charge*scale*V into gridObjValue.
806  ForceList::iterator gfi = gfii;
807  FullAtomList::iterator ai = aii;
808  FullAtomList::iterator ae = aei;
809  Molecule *mol = Node::Object()->molecule;
810  for ( ; ai != ae; ai++, gfi++) {
// Default to zero so atoms not affected by this grid contribute nothing.
811  *gfi = Vector(0.0, 0.0, 0.0);
812  if (! mol->is_atom_gridforced(ai->id, gridIndex)) {
813  continue;
814  }
815  Real scale;
816  Charge charge;
817  Vector dV;
818  float V;
819  mol->get_gridfrc_params(scale, charge, ai->id, gridIndex);
820  Position pos = grid->wrap_position(ai->position, lattice);
821  DebugM(1, "id = " << ai->id << ", scale = " << scale
822  << ", charge = " << charge << ", position = " << pos << "\n");
// compute_VdV returns nonzero for out-of-bounds atoms, which are skipped.
823  if (grid->compute_VdV(pos, V, dV)) {
824  // out-of-bounds atom
825  continue;
826  }
827  // ignore global gfScale
828  *gfi = -charge * scale * dV;
829  gridObjValue += charge * scale * V;
830  DebugM(1, "id = " << ai->id << ", force = " << *gfi << "\n");
831  }
832  DebugM(3, "gridObjValue = " << gridObjValue << "\n" << endi);
833 }
834 
835 void ComputeGlobal::computeGridObjects(ComputeGlobalDataMsg *msg)
836 {
// Evaluates every active gridforce grid on every home patch, filling the
// per-patch ForceLists (used later by applyGridObjectForces) and storing
// the per-grid energies/values into the outgoing message.
837  DebugM(3,"computeGridObjects\n" << endi);
838  Molecule *mol = Node::Object()->molecule;
839  const Lattice &lattice = patchList[0].p->lattice;
840 
841  if (mol->numGridforceGrids < 1) {
842  NAMD_bug("No grids loaded in memory but ComputeGlobal has been requested to use them.");
843  }
844 
845  msg->gridobjindex.resize(numActiveGridObjects);
846  msg->gridobjindex.setall(-1);
847  msg->gridobjvalue.resize(numActiveGridObjects);
848  msg->gridobjvalue.setall(0.0);
849 
850  size_t ig = 0, gridobjcount = 0;
851 
852  // loop over home patches
// NOTE(review): original line 853 is missing from this scraped copy; it
// presumably declares the home-patch iterator `ap` used below — confirm.
854  for (ap = ap.begin(); ap != ap.end(); ap++) {
855 
856  msg->patchcount++;
857 
858  int const numAtoms = ap->p->getNumAtoms();
859  ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
860 
// gridobjcount indexes the compacted list of *active* grids only, so it is
// reset per patch and advanced only when an active grid is processed.
861  gridobjcount = 0;
862  for (ig = 0; ig < numGridObjects; ig++) {
863 
864  DebugM(2,"Processing grid index " << ig << "\n" << endi);
865 
866  // Only process here objects requested by the GlobalMasters
867  if (!gridObjActive[ig]) {
868  DebugM(2,"Skipping grid index " << ig << "; it is handled by "
869  "ComputeGridForce\n" << endi);
870  continue;
871  }
872 
873  ForceList *gridForcesGrid = gridForces[ig];
874  gridForcesGrid->resize(numAtoms);
875 
876  ForceList::iterator gfi = gridForcesGrid->begin();
877  FullAtomList::iterator ai = ap->p->getAtomList().begin();
878  FullAtomList::iterator ae = ap->p->getAtomList().end();
879 
880  DebugM(2, "computeGridObjects(): patch = " << ap->p->getPatchID()
881  << ", grid index = " << ig << "\n" << endi);
882  GridforceGrid *grid = mol->get_gridfrc_grid(ig);
883 
884  msg->gridobjindex[gridobjcount] = ig;
885  BigReal &gridobjvalue = msg->gridobjvalue[gridobjcount];
886 
// Values accumulate across patches into the same msg->gridobjvalue slot.
887  computeGridForceGrid(ai, ae, gfi, ap->p->lattice, ig, grid, gridobjvalue);
888 
889  gridobjcount++;
890  }
891  }
892 
893  for (gridobjcount = 0; gridobjcount < numActiveGridObjects; gridobjcount++) {
894  DebugM(3, "Total gridObjValue[" << msg->gridobjindex[gridobjcount]
895  << "] = " << msg->gridobjvalue[gridobjcount] << "\n");
896  }
897 
898  DebugM(2,"computeGridObjects done\n");
899 }
900 
901 void ComputeGlobal::applyGridObjectForces(ComputeGlobalResultsMsg *msg,
902  Force *extForce_in,
903  Tensor *extVirial_in)
904 {
// Applies the scalar multipliers returned by the master for each grid
// object: each atom receives (-stored gradient) * multiplier, added to the
// normal force array, with contributions accumulated into the caller's
// external force and virial.
905  if (msg->gridobjforce.size() == 0) return;
906 
907  if (msg->gridobjforce.size() != numActiveGridObjects) {
908  NAMD_bug("ComputeGlobal received a different number of grid forces than active grids.");
909  }
910 
911  Molecule *mol = Node::Object()->molecule;
912  const Lattice &lattice = patchList[0].p->lattice;
913  AtomMap *atomMap = AtomMap::Object();
914  Force &extForce = *extForce_in;
915  Tensor &extVirial = *extVirial_in;
916 
917  // map applied forces from the message
// The message carries one value per *active* grid, in order; scatter them
// into a dense array indexed by absolute grid index.
918  BigRealList gridObjForces;
919  gridObjForces.resize(numGridObjects);
920  gridObjForces.setall(0.0);
921  BigRealList::iterator gridobjforce_i = msg->gridobjforce.begin();
922  BigRealList::iterator gridobjforce_e = msg->gridobjforce.end();
923  int ig;
// NOTE(review): `ig` advances every iteration while the message iterator
// only holds active-grid entries; if any grid is inactive the pairing of
// message values with grid indices here looks suspect — verify against the
// complete source before relying on mixed active/inactive configurations.
924  for (ig = 0; gridobjforce_i != gridobjforce_e ;
925  gridobjforce_i++, ig++) {
926  if (!gridObjActive[ig]) continue;
927  gridObjForces[ig] = *gridobjforce_i;
928  }
929 
930  // loop over home patches
// NOTE(review): original line 931 is missing from this scraped copy; it
// presumably declares the home-patch iterator `ap` used below — confirm.
932  for (ap = ap.begin(); ap != ap.end(); ap++) {
933 
934  ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
935 
936  for (ig = 0; ig < numGridObjects; ig++) {
937 
938  if (!gridObjActive[ig]) continue;
939 
940  DebugM(2, "gof = " << gridObjForces[ig] << "\n" << endi);
941 
// Per-atom gradients were stored by computeGridObjects for this patch/grid.
942  ForceList *gridForcesGrid = gridForces[ig];
943 
944  FullAtomList::iterator ai = ap->p->getAtomList().begin();
945  FullAtomList::iterator ae = ap->p->getAtomList().end();
946  Force *f = ap->r->f[Results::normal];
947  ForceList::iterator gfi = gridForcesGrid->begin();
948 
949  for ( ; ai != ae; ai++, gfi++) {
950  if (! mol->is_atom_gridforced(ai->id, ig)) {
951  *gfi = Vector(0.0, 0.0, 0.0);
952  continue;
953  }
954  LocalID localID = atomMap->localID(ai->id);
955  // forces were stored; flipping sign to get gradients
956  Vector const gridforceatom(-1.0 * (*gfi) * gridObjForces[ig]);
957  DebugM(2, "id = " << ai->id
958  << ", pid = " << localID.pid
959  << ", index = " << localID.index
960  << ", force = " << gridforceatom << "\n" << endi);
961  f[localID.index] += gridforceatom;
962  extForce += gridforceatom;
// Virial uses the unwrapped (reverse-transformed) position.
963  Position x_orig = ai->position;
964  Transform transform = ai->transform;
965  Position x_virial = lattice.reverse_transform(x_orig, transform);
966  extVirial += outer(gridforceatom, x_virial);
967  }
968  }
969  }
970  // extForce and extVirial are being communicated by calling function
971 }
972 
973 // This function is called by each HomePatch after force
974 // evaluation. It stores the indices and forces of the requested
975 // atoms here, to be sent to GlobalMasterServer during the next
976 // time step. The total force is the sum of three components:
977 // "normal", "nbond" and "slow", the latter two may be calculated
978 // less frequently, so their most recent values are stored in
979 // "f_saved" and used here. If we don't do full electrostatics,
980 // there's no "slow" part.
982 {
983  if ( ! forceSendEnabled ) NAMD_bug("ComputeGlobal::saveTotalForces called unexpectedly");
984  if ( ! forceSendActive ) return;
985 
// aMD dihedral-boost debug mode: report only the accelMD boost force
// (Results::amdf) for individually requested atoms (isRequested bit 1) and
// skip the normal/nbond/slow accumulation below.
// NOTE(review): the documentation extraction dropped the listing lines just
// above this point (the function signature taking `homePatch` and,
// presumably, the declaration of `simParms`) — confirm against the original
// ComputeGlobal.C.
987  if ( simParms->accelMDOn && simParms->accelMDDebugOn && simParms->accelMDdihe ) {
988  int num=homePatch->numAtoms;
989  FullAtomList &atoms = homePatch->atom;
990  ForceList &af=homePatch->f[Results::amdf];
991 
992  for (int i=0; i<num; ++i) {
993  int index = atoms[i].id;
994  if (index < endRequested && isRequested[index] & 1) {
995  fid.add(index);
996  totalForce.add(af[i]);
997  }
998  }
999  return;
1000  }
1001 
1002  // printf("Start saving force at step: %d ####################################################\n",
1003  // patchList[0].p->flags.step);
1004  int fixedAtomsOn = simParms->fixedAtomsOn;
1005  int num=homePatch->numAtoms;
1006  FullAtomList &atoms = homePatch->atom;
// AoS force arrays: latest normal forces plus the saved nbond/slow forces
// (nbond/slow may be evaluated less often, hence f_saved — see the header
// comment above this function).
1007  ForceList &f1=homePatch->f[Results::normal], &f2=homePatch->f_saved[Results::nbond],
1008  &f3=homePatch->f_saved[Results::slow];
1009 
// SOA mirrors of the same three force components, used when the SOA or
// CUDA-SOA integration path is enabled (hasSOA below).
1010  double *f1_soa_x = homePatch->patchDataSOA.f_normal_x;
1011  double *f1_soa_y = homePatch->patchDataSOA.f_normal_y;
1012  double *f1_soa_z = homePatch->patchDataSOA.f_normal_z;
1013  double *f2_soa_x = homePatch->patchDataSOA.f_saved_nbond_x;
1014  double *f2_soa_y = homePatch->patchDataSOA.f_saved_nbond_y;
1015  double *f2_soa_z = homePatch->patchDataSOA.f_saved_nbond_z;
1016  double *f3_soa_x = homePatch->patchDataSOA.f_saved_slow_x;
1017  double *f3_soa_y = homePatch->patchDataSOA.f_saved_slow_y;
1018  double *f3_soa_z = homePatch->patchDataSOA.f_saved_slow_z;
1019  int hasSOA = (simParms->SOAintegrateOn || simParms->CUDASOAintegrateMode);
1020  Force f_sum;
1021  double f_sum_x, f_sum_y, f_sum_z;
1022 
// Disabled per-atom force dump kept around for debugging.
1023  #if 0
1024  for (int i=0; i<num; ++i) {
1025  int index = atoms[i].id;
1026  if (index < 20) {
1027  if (hasSOA) {
1028  CkPrintf("ForceSaved: atom %d, ForceN (%8.6f, %8.6f, %8.6f) \n", index, f1_soa_x[i], f1_soa_y[i], f1_soa_z[i]);
1029  CkPrintf(" atom %d, ForceNB (%8.6f, %8.6f, %8.6f) \n", index, f2_soa_x[i], f2_soa_y[i], f2_soa_z[i]);
1030  CkPrintf(" atom %d, ForceSL (%8.6f, %8.6f, %8.6f) \n", index, f3_soa_x[i], f3_soa_y[i], f3_soa_z[i]);
1031  } else {
1032  CkPrintf("ForceSaved: atom %d, ForceN (%8.6f, %8.6f, %8.6f) \n", index, f1[i].x, f1[i].y, f1[i].z);
1033  CkPrintf(" atom %d, ForceNB (%8.6f, %8.6f, %8.6f) \n", index, f2[i].x, f2[i].y, f2[i].z);
1034  // not memory safe to access slow forces all the time like this
1035  // CkPrintf(" atom %d, ForceSL (%8.6f, %8.6f, %8.6f) \n", index, f3[i].x, f3[i].y, f3[i].z);
1036  }
1037  }
1038  }
1039 
1040  printf("PE, PId (%d, %d) Stop saving at step: %d ####################################################\n",
1041  CkMyPe(), homePatch->patchID, patchList[0].p->flags.step);
1042  #endif
// (redundant: forceSendActive was already checked at the top of the function)
1043  if ( ! forceSendActive ) return;
1044  for (int i=0; i<num; ++i) {
1045  int index = atoms[i].id;
1046  char reqflag;
// isRequested[] flags: bit 1 = force requested for this atom individually,
// bit 2 = atom belongs to at least one requested group.
1047  if (index < endRequested && (reqflag = isRequested[index])) {
1048  if (hasSOA) {
1049  f_sum_x = f1_soa_x[i] + f2_soa_x[i];
1050  f_sum_y = f1_soa_y[i] + f2_soa_y[i];
1051  f_sum_z = f1_soa_z[i] + f2_soa_z[i];
// dofull adds the slow (full-electrostatics) component when enabled.
1052  if (dofull) {
1053  f_sum_x += f3_soa_x[i];
1054  f_sum_y += f3_soa_y[i];
1055  f_sum_z += f3_soa_z[i];
1056  }
1057  f_sum.x = f_sum_x;
1058  f_sum.y = f_sum_y;
1059  f_sum.z = f_sum_z;
1060  } else {
1061  f_sum = f1[i]+f2[i];
1062  if (dofull)
1063  f_sum += f3[i];
1064  }
1065 
// Fixed atoms report a zero total force.
1066  if ( fixedAtomsOn && atoms[i].atomFixed )
1067  f_sum = 0.;
1068 
1069  if ( reqflag & 1 ) { // individual atom
1070  fid.add(index);
1071  totalForce.add(f_sum);
1072  }
// gpair is sorted by atom id; lower_bound finds the first (atom, group)
// entry for this atom and the do/while accumulates f_sum into every group
// that contains it, counting contributions in gfcount.
1073  if ( reqflag & 2 ) { // part of group
1074  intpair *gpend = gpair.end();
1075  intpair *gpi = std::lower_bound(gpair.begin(),gpend,intpair(index,0));
1076  if ( gpi == gpend || gpi->first != index )
1077  NAMD_bug("ComputeGlobal::saveTotalForces gpair corrupted.");
1078  do {
1079  ++gfcount;
1080  groupTotalForce[gpi->second] += f_sum;
1081  } while ( ++gpi != gpend && gpi->first == index );
1082  }
1083  }
1084  }
1085 }
static Node * Object()
Definition: Node.h:86
#define NAMD_EVENT_STOP(eon, id)
GridforceGrid * get_gridfrc_grid(int gridnum) const
Definition: Molecule.h:1363
int size(void) const
Definition: ResizeArray.h:131
void recvResults(ComputeGlobalResultsMsg *)
NAMD_HOST_DEVICE Position reverse_transform(Position data, const Transform &t) const
Definition: Lattice.h:143
double * f_normal_z
Definition: NamdTypes.h:420
int32 ComputeID
Definition: NamdTypes.h:278
double * f_normal_y
Definition: NamdTypes.h:419
Bool globalMasterScaleByFrequency
static PatchMap * Object()
Definition: PatchMap.h:27
BigRealList gridobjvalue
Partial values of the GridForce objects from this message.
double * f_saved_slow_z
Definition: NamdTypes.h:435
NAMD_HOST_DEVICE Tensor outer(const Vector &v1, const Vector &v2)
Definition: Tensor.h:241
Definition: Vector.h:72
#define ADD_TENSOR_OBJECT(R, RL, D)
Definition: ReductionMgr.h:44
SimParameters * simParameters
Definition: Node.h:181
ComputeHomePatchList patchList
Bool CUDASOAintegrateMode
float Real
Definition: common.h:118
#define DebugM(x, y)
Definition: Debug.h:75
std::ostream & endi(std::ostream &s)
Definition: InfoStream.C:54
BigReal z
Definition: Vector.h:74
int8 i
Definition: NamdTypes.h:44
Position position
Definition: NamdTypes.h:77
int numGridforceGrids
Definition: Molecule.h:624
SubmitReduction * willSubmit(int setID, int size=-1)
Definition: ReductionMgr.C:366
int8 j
Definition: NamdTypes.h:44
void saveTotalForces(HomePatch *)
ResizeArrayIter< T > begin(void) const
static ReductionMgr * Object(void)
Definition: ReductionMgr.h:279
NodeReduction * reduction
Definition: PatchData.h:133
double * f_saved_slow_y
Definition: NamdTypes.h:434
ComputeGlobal(ComputeID, ComputeMgr *)
Definition: ComputeGlobal.C:38
int add(const Elem &elem)
Definition: ResizeArray.h:101
double * f_saved_slow_x
Definition: NamdTypes.h:433
Molecule stores the structural information for the system.
Definition: Molecule.h:175
int second
Definition: ComputeGlobal.h:25
void resize(int i)
Definition: ResizeArray.h:84
int32 index
Definition: NamdTypes.h:290
void setall(const Elem &elem)
Definition: ResizeArray.h:94
uint32 id
Definition: NamdTypes.h:156
ResizeArray< Force > ForceList
Definition: NamdTypes.h:267
double * f_normal_x
Definition: NamdTypes.h:418
int numPatches(void) const
Definition: PatchMap.h:59
#define NAMD_EVENT_START(eon, id)
const Elem * const_iterator
Definition: ResizeArray.h:38
void NAMD_bug(const char *err_msg)
Definition: common.C:195
LocalID localID(AtomID id)
Definition: AtomMap.h:78
ResizeArray< Lattice > lat
IntList gridobjindex
Indices of the GridForce objects contained in this message.
int numAtoms
Definition: Patch.h:151
BigReal x
Definition: Vector.h:74
double * f_saved_nbond_x
Definition: NamdTypes.h:430
virtual ~ComputeGlobal()
void get_gridfrc_params(Real &k, Charge &q, int atomnum, int gridnum) const
Definition: Molecule.h:1357
double * f_saved_nbond_z
Definition: NamdTypes.h:432
void enableComputeGlobalResults()
Definition: ComputeMgr.C:1313
PatchID pid
Definition: NamdTypes.h:289
void recvComputeGlobalResults(ComputeGlobalResultsMsg *)
Definition: ComputeMgr.C:1330
static AtomMap * Object()
Definition: AtomMap.h:37
iterator begin(void)
Definition: ResizeArray.h:36
const PatchID patchID
Definition: Patch.h:150
Definition: Tensor.h:15
iterator end(void)
Definition: ResizeArray.h:37
BigReal y
Definition: Vector.h:74
Mass mass
Definition: NamdTypes.h:208
#define ADD_VECTOR_OBJECT(R, RL, D)
Definition: ReductionMgr.h:28
int count
Number of atoms processed for this message.
double * f_saved_nbond_y
Definition: NamdTypes.h:431
Bool is_atom_gridforced(int atomnum, int gridnum) const
Definition: Molecule.h:1249
void sendComputeGlobalData(ComputeGlobalDataMsg *)
Definition: ComputeMgr.C:1200
void submit(void)
Definition: ReductionMgr.h:324
ForceList f[Results::maxNumForces]
Definition: Patch.h:214
int patchcount
Number of patches processed for this message.
void swap(ResizeArray< Elem > &ra)
Definition: ResizeArray.h:64
int32 PatchID
Definition: NamdTypes.h:277
ResizeArrayIter< T > end(void) const
Molecule * molecule
Definition: Node.h:179
float Charge
Definition: NamdTypes.h:38
int globalMasterFrequency
int8 k
Definition: NamdTypes.h:44
double BigReal
Definition: common.h:123
Transform transform
Definition: NamdTypes.h:219