NAMD
ComputeGlobal Class Reference

#include <ComputeGlobal.h>

Inheritance diagram for ComputeGlobal:
ComputeGlobal → ComputeHomePatches → Compute (derived to base)

Public Member Functions

 ComputeGlobal (ComputeID, ComputeMgr *)
 
virtual ~ComputeGlobal ()
 
void doWork ()
 
void recvResults (ComputeGlobalResultsMsg *)
 
void saveTotalForces (HomePatch *)
 
int getForceSendActive () const
 
- Public Member Functions inherited from ComputeHomePatches
 ComputeHomePatches (ComputeID c)
 
virtual ~ComputeHomePatches ()
 
virtual void initialize ()
 
virtual void atomUpdate ()
 
Flags * getFlags (void)
 
- Public Member Functions inherited from Compute
 Compute (ComputeID)
 
int type ()
 
virtual ~Compute ()
 
void setNumPatches (int n)
 
int getNumPatches ()
 
virtual void patchReady (PatchID, int doneMigration, int seq)
 
virtual int noWork ()
 
virtual void finishPatch (int)
 
int sequence (void)
 
int priority (void)
 
int getGBISPhase (void)
 
virtual void gbisP2PatchReady (PatchID, int seq)
 
virtual void gbisP3PatchReady (PatchID, int seq)
 

Additional Inherited Members

- Public Attributes inherited from Compute
const ComputeID cid
 
LDObjHandle ldObjHandle
 
LocalWorkMsg *const localWorkMsg
 
- Protected Member Functions inherited from Compute
void enqueueWork ()
 
- Protected Attributes inherited from ComputeHomePatches
int useAvgPositions
 
int hasPatchZero
 
ComputeHomePatchList patchList
 
PatchMap * patchMap
 
- Protected Attributes inherited from Compute
int computeType
 
int basePriority
 
int gbisPhase
 
int gbisPhasePriority [3]
 

Detailed Description

Definition at line 36 of file ComputeGlobal.h.

Constructor & Destructor Documentation

◆ ComputeGlobal()

ComputeGlobal::ComputeGlobal ( ComputeID  c,
ComputeMgr *  m 
)

Definition at line 38 of file ComputeGlobal.C.

References SimParameters::colvarsOn, SimParameters::CUDASOAintegrateMode, DebugM, SimParameters::FMAOn, SimParameters::fullDirectOn, SimParameters::GBISOn, SimParameters::GBISserOn, PatchMap::numPatches(), PatchMap::Object(), Node::Object(), ReductionMgr::Object(), SimParameters::PMEOn, PatchData::reduction, REDUCTIONS_BASIC, ResizeArray< Elem >::resize(), Node::simParameters, SimParameters::SOAintegrateOn, SimParameters::tclForcesOn, and ReductionMgr::willSubmit().

38 ComputeGlobal::ComputeGlobal(ComputeID c, ComputeMgr *m)
39    : ComputeHomePatches(c)
40 {
41  DebugM(3,"Constructing client\n");
42  aid.resize(0);
43  gdef.resize(0);
44  comm = m;
45  firsttime = 1;
46  isRequested = 0;
47  isRequestedAllocSize = 0;
48  endRequested = 0;
49  numGroupsRequested = 0;
50  SimParameters *sp = Node::Object()->simParameters;
51  dofull = (sp->GBISserOn || sp->GBISOn || sp->fullDirectOn || sp->FMAOn || sp->PMEOn);
52  forceSendEnabled = 0;
53  if ( sp->tclForcesOn ) forceSendEnabled = 1;
54  if ( sp->colvarsOn ) forceSendEnabled = 1;
55  forceSendActive = 0;
56  fid.resize(0);
57  totalForce.resize(0);
58  gfcount = 0;
59  groupTotalForce.resize(0);
60  reduction = ReductionMgr::Object()->willSubmit(REDUCTIONS_BASIC);
61  int numPatches = PatchMap::Object()->numPatches();
62  forcePtrs = new Force*[numPatches];
63  atomPtrs = new FullAtom*[numPatches];
64  for ( int i = 0; i < numPatches; ++i ) { forcePtrs[i] = 0; atomPtrs[i] = 0; }
65 
66  #ifdef NODEGROUP_FORCE_REGISTER
67  CProxy_PatchData cpdata(CkpvAccess(BOCclass_group).patchData);
68  PatchData *patchData = cpdata.ckLocalBranch();
69  nodeReduction = patchData->reduction;
70  #endif
71  if (sp->SOAintegrateOn || sp->CUDASOAintegrateMode) {
72  // Allocate memory for numPatches to access SOA data
73  mass_soa = new float*[numPatches];
74  pos_soa_x = new double*[numPatches];
75  pos_soa_y = new double*[numPatches];
76  pos_soa_z = new double*[numPatches];
77  force_soa_x = new double*[numPatches];
78  force_soa_y = new double*[numPatches];
79  force_soa_z = new double*[numPatches];
80  transform_soa_i = new int*[numPatches];
81  transform_soa_j = new int*[numPatches];
82  transform_soa_k = new int*[numPatches];
83  for ( int i = 0; i < numPatches; ++i ) {
84  mass_soa[i] = NULL;
85  pos_soa_x[i] = NULL;
86  pos_soa_y[i] = NULL;
87  pos_soa_z[i] = NULL;
88  force_soa_x[i] = NULL;
89  force_soa_y[i] = NULL;
90  force_soa_z[i] = NULL;
91  transform_soa_i[i] = NULL;
92  transform_soa_j[i] = NULL;
93  transform_soa_k[i] = NULL;
94  }
95  } else {
96  mass_soa = NULL;
97  pos_soa_x = NULL;
98  pos_soa_y = NULL;
99  pos_soa_z = NULL;
100  force_soa_x = NULL;
101  force_soa_y = NULL;
102  force_soa_z = NULL;
103  transform_soa_i = NULL;
104  transform_soa_j = NULL;
105  transform_soa_k = NULL;
106  }
107  gridForcesPtrs = new ForceList **[numPatches];
108  numGridObjects = numActiveGridObjects = 0;
109  for ( int i = 0; i < numPatches; ++i ) {
110  forcePtrs[i] = NULL; atomPtrs[i] = NULL;
111  gridForcesPtrs[i] = NULL;
112  }
113 }
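
The constructor sizes every per-patch lookup table (forcePtrs, atomPtrs, gridForcesPtrs, and the SOA mass/position/force/transform arrays) to PatchMap::Object()->numPatches() and null-initializes each slot; doWork() and recvResults() later attach and detach per-patch data in those slots. The following stand-alone sketch (plain C++ with an illustrative PerPatchTable type, not NAMD code) shows the pattern in isolation:

// Sketch of the per-patch pointer-table pattern used by ComputeGlobal:
// one slot per patch, null until a step attaches data for that patch,
// cleared again once the results have been consumed.
#include <vector>

struct Force { double x, y, z; };

class PerPatchTable {
public:
  explicit PerPatchTable(int numPatches) : forcePtrs(numPatches, nullptr) {}
  void attach(int patchID, Force *f) { forcePtrs[patchID] = f; }   // analogous to doWork()
  void detach(int patchID) { forcePtrs[patchID] = nullptr; }       // analogous to the cleanup in recvResults()
  bool active(int patchID) const { return forcePtrs[patchID] != nullptr; }
private:
  std::vector<Force *> forcePtrs;  // indexed by patch ID
};

int main() {
  PerPatchTable table(4);      // imagine PatchMap::Object()->numPatches() == 4
  Force f{};
  table.attach(2, &f);
  bool ok = table.active(2);   // true while patch 2 has data attached
  table.detach(2);
  return ok ? 0 : 1;
}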

◆ ~ComputeGlobal()

ComputeGlobal::~ComputeGlobal ( )
virtual

Definition at line 115 of file ComputeGlobal.C.

116 {
117  delete[] isRequested;
118  delete[] forcePtrs;
119  deleteGridObjects();
120  delete[] gridForcesPtrs;
121  delete[] atomPtrs;
122  delete reduction;
123 
124  if(mass_soa) delete [] mass_soa;
125  if(pos_soa_x) delete [] pos_soa_x;
126  if(pos_soa_y) delete [] pos_soa_y;
127  if(pos_soa_z) delete [] pos_soa_z;
128  if(force_soa_x) delete [] force_soa_x;
129  if(force_soa_y) delete [] force_soa_y;
130  if(force_soa_z) delete [] force_soa_z;
131  if(transform_soa_i) delete [] transform_soa_i;
132  if(transform_soa_j) delete [] transform_soa_j;
133  if(transform_soa_k) delete [] transform_soa_k;
134 }

Member Function Documentation

◆ doWork()

void ComputeGlobal::doWork ( void  )
virtual

Reimplemented from Compute.

Definition at line 532 of file ComputeGlobal.C.

References ResizeArray< Elem >::add(), ResizeArrayIter< T >::begin(), ComputeGlobalDataMsg::count, SimParameters::CUDASOAintegrate, SimParameters::CUDASOAintegrateMode, DebugM, ComputeMgr::enableComputeGlobalResults(), ResizeArrayIter< T >::end(), endi(), SimParameters::globalMasterFrequency, ComputeHomePatches::hasPatchZero, ComputeGlobalDataMsg::lat, Node::Object(), ComputeGlobalDataMsg::patchcount, ComputeHomePatches::patchList, ComputeMgr::recvComputeGlobalResults(), ComputeMgr::sendComputeGlobalData(), Node::simParameters, SimParameters::SOAintegrateOn, and ComputeGlobalDataMsg::step.

533 {
534  DebugM(2,"doWork thread " << CthGetToken(CthSelf())->serialNo << "\n");
535 
536  SimParameters *sp = Node::Object()->simParameters;
537  ResizeArrayIter<PatchElem> ap(patchList);
538  FullAtom **t = atomPtrs;
539  int step = patchList[0].p->flags.step;
540  if((step % sp->globalMasterFrequency) ==0)
541  {
542  DebugM(3,"doWork for step " << step <<"\n"<<endi);
543  // if(sp->CUDASOAintegrateOn) {
544  // hasPatchZero = 0;
545  // }
546 
547  for (ap = ap.begin(); ap != ap.end(); ap++) {
548  CompAtom *x = (*ap).positionBox->open();
549  t[(*ap).patchID] = (*ap).p->getAtomList().begin();
550 
551  if (sp->SOAintegrateOn || sp->CUDASOAintegrateMode) {
552  // Assigne the pointer to SOA data structure
553  PatchID pId = (*ap).patchID;
554  mass_soa[pId] = (*ap).p->patchDataSOA.mass;
555  pos_soa_x[pId] = (*ap).p->patchDataSOA.pos_x;
556  pos_soa_y[pId] = (*ap).p->patchDataSOA.pos_y;
557  pos_soa_z[pId] = (*ap).p->patchDataSOA.pos_z;
558  transform_soa_i[pId] = (*ap).p->patchDataSOA.transform_i;
559  transform_soa_j[pId] = (*ap).p->patchDataSOA.transform_j;
560  transform_soa_k[pId] = (*ap).p->patchDataSOA.transform_k;
561  // if(sp->CUDASOAintegrateOn && (pId == 0)) {
562  // hasPatchZero = 1;
563  // }
564  }
565  }
566 
567  if(!firsttime) {
568  // CkPrintf("*** Start NoFirstTime on PE %d \n", CkMyPe());
569  sendData();
570  // CkPrintf("*** End NoFirstTime on PE %d \n", CkMyPe());
571  } else {
572  // CkPrintf("*** Start FirstTime on PE %d \n", CkMyPe());
573  if ( hasPatchZero ) {
574  ComputeGlobalDataMsg *msg = new ComputeGlobalDataMsg;
575  msg->lat.add(patchList[0].p->lattice);
576  msg->step = -1;
577  msg->count = 1;
578  msg->patchcount = 0;
579  // CkPrintf("***DoWork calling sendComputeGlobalData PE %d \n", CkMyPe());
580  comm->sendComputeGlobalData(msg);
581  }
582 #ifdef NODEGROUP_FORCE_REGISTER
583  else if (sp->CUDASOAintegrate) {
584 
585  // CkPrintf("***DoWork FirstTime barrier 1 on PE %d \n", CkMyPe());
586  comm->stowSuspendULT();
587  // CmiNodeBarrier();
588  // CkPrintf("***DoWork FirstTime barrier 2 on PE %d \n", CkMyPe());
589  comm->stowSuspendULT();
590  // CkPrintf("***DoWork out of barrier 2 on PE %d \n", CkMyPe());
591  // CmiNodeBarrier();
592  ComputeGlobalResultsMsg* resultsMsg = CkpvAccess(ComputeGlobalResultsMsg_instance);
593  // CkPrintf("*** ComputeGlobal::doWork PE (%d) calling recvComputeGlobalResults in doWork at step: %d \n",CkMyPe(), patchList[0].p->flags.step);
594  comm->recvComputeGlobalResults(resultsMsg);
595  }
596 #endif // NODEGROUP_FORCE_REGISTER
597  firsttime = 0;
598  // CkPrintf("*** ComputeGlobal::doWork PE (%d) calling enableComputeGlobalResults in doWork at step: %d \n",CkMyPe(), patchList[0].p->flags.step);
599  comm->enableComputeGlobalResults();
600 
601  // CkPrintf("*** End FirstTime on PE %d \n", CkMyPe());
602  }
603  }
604  else
605  {
606  DebugM(2,"skipping step "<< step <<"\n"<<endi);
607  /* TODO to support CPU side MTS we need to do something to avoid hang some distillation from sendData(); and the reductions
608  ADD_VECTOR_OBJECT(reduction,REDUCTION_EXT_FORCE_NORMAL,extForce);
609  ADD_TENSOR_OBJECT(reduction,REDUCTION_VIRIAL_NORMAL,extVirial);
610  reduction->submit();
611  and as yet undetermined message handling
612  */
613  }
614  DebugM(2,"done with doWork\n");
615 }
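
doWork() assembles and sends a ComputeGlobalDataMsg only on steps where step % globalMasterFrequency == 0; on the very first invocation the rank owning patch zero sends just the lattice with step = -1 so the global master can configure itself before real data flows. The gating itself is easy to see in a tiny stand-alone example (the frequency value below is hypothetical; a value of 1 means every step is processed):

// Stand-alone illustration of the step gating in ComputeGlobal::doWork():
// global-master clients exchange data only on steps that are a multiple of
// the configured globalMasterFrequency.
#include <cstdio>

int main() {
  const int globalMasterFrequency = 4;   // hypothetical configuration value
  for (int step = 0; step <= 8; ++step) {
    if (step % globalMasterFrequency == 0)
      std::printf("step %d: send coordinates, receive forces\n", step);
    else
      std::printf("step %d: skipped by the global master client\n", step);
  }
  return 0;
}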

◆ getForceSendActive()

int ComputeGlobal::getForceSendActive ( ) const
inline

Definition at line 47 of file ComputeGlobal.h.

47 {return forceSendActive;}

◆ recvResults()

void ComputeGlobal::recvResults ( ComputeGlobalResultsMsg *  msg )

Definition at line 268 of file ComputeGlobal.C.

References ADD_TENSOR_OBJECT, ADD_VECTOR_OBJECT, ComputeGlobalResultsMsg::aid, ResizeArray< Elem >::begin(), ResizeArrayIter< T >::begin(), SimParameters::CUDASOAintegrate, SimParameters::CUDASOAintegrateMode, DebugM, ResizeArray< Elem >::end(), ResizeArrayIter< T >::end(), ComputeGlobalResultsMsg::f, ComputeGlobalResultsMsg::gforce, SimParameters::globalMasterFrequency, SimParameters::globalMasterScaleByFrequency, Transform::i, LocalID::index, Transform::j, Transform::k, AtomMap::localID(), FullAtom::mass, NAMD_bug(), ComputeGlobalResultsMsg::newaid, ComputeGlobalResultsMsg::newgdef, ComputeGlobalResultsMsg::newgridobjid, Results::normal, notUsed, AtomMap::Object(), Node::Object(), outer(), ComputeHomePatches::patchList, LocalID::pid, CompAtom::position, ComputeGlobalResultsMsg::reconfig, ComputeGlobalResultsMsg::resendCoordinates, ResizeArray< Elem >::resize(), Lattice::reverse_transform(), Node::simParameters, ResizeArray< Elem >::size(), SimParameters::SOAintegrateOn, SubmitReduction::submit(), ComputeGlobalResultsMsg::totalforces, FullAtom::transform, Vector::x, Vector::y, and Vector::z.

Referenced by ComputeMgr::recvComputeGlobalResults().

268  {
269  DebugM(3,"Receiving results (" << msg->aid.size() << " forces, "
270  << msg->newgdef.size() << " new group atoms) on client thread " << CthGetToken(CthSelf())->serialNo <<" msg->resendCoordinates " << msg->resendCoordinates << " msg->totalforces " << msg->totalforces<< "\n");
271 
272  forceSendActive = msg->totalforces;
273  if ( forceSendActive && ! forceSendEnabled ) NAMD_bug("ComputeGlobal::recvResults forceSendActive without forceSendEnabled");
274 
275  // set the forces only if we aren't going to resend the data
276  int setForces = !msg->resendCoordinates;
277  SimParameters *sp = Node::Object()->simParameters;
278  if(setForces) { // we are requested to
279  // Store forces to patches
280  AtomMap *atomMap = AtomMap::Object();
281  const Lattice & lattice = patchList[0].p->lattice;
282  ResizeArrayIter<PatchElem> ap(patchList);
283  Force **f = forcePtrs;
284  FullAtom **t = atomPtrs;
285  Force extForce = 0.;
286  Tensor extVirial;
287 
288  for (ap = ap.begin(); ap != ap.end(); ap++) {
289  (*ap).r = (*ap).forceBox->open();
290  f[(*ap).patchID] = (*ap).r->f[Results::normal];
291  t[(*ap).patchID] = (*ap).p->getAtomList().begin();
292 
293  if (sp->SOAintegrateOn || sp->CUDASOAintegrateMode) {
294  // Assigne the pointer to SOA data structure
295  PatchID pId = (*ap).patchID;
296  mass_soa[pId] = (*ap).p->patchDataSOA.mass;
297  force_soa_x[pId] = (*ap).p->patchDataSOA.f_global_x;
298  force_soa_y[pId] = (*ap).p->patchDataSOA.f_global_y;
299  force_soa_z[pId] = (*ap).p->patchDataSOA.f_global_z;
300  transform_soa_i[pId] = (*ap).p->patchDataSOA.transform_i;
301  transform_soa_j[pId] = (*ap).p->patchDataSOA.transform_j;
302  transform_soa_k[pId] = (*ap).p->patchDataSOA.transform_k;
303  }
304  }
305 
306 
307  AtomIDList::iterator a = msg->aid.begin();
308  AtomIDList::iterator a_e = msg->aid.end();
309  ForceList::iterator f2 = msg->f.begin();
311  for ( ; a != a_e; ++a, ++f2 ) {
312  Force f_atom;
313  f_atom = (*f2);
314  f_atom.x*=(float) sp->globalMasterFrequency;
315  f_atom.y*=(float) sp->globalMasterFrequency;
316  f_atom.z*=(float) sp->globalMasterFrequency;
317  }
318  if (sp->SOAintegrateOn || sp->CUDASOAintegrateMode) {
319  LocalID localID;
320  PatchID lpid;
321  int lidx;
322  Position x_orig, x_atom;
323  Transform trans;
324  Force f_atom;
325  for ( ; a != a_e; ++a, ++f2 ) {
326  DebugM(1,"processing atom "<<(*a)<<", F="<<(*f2)<<"...\n");
327  /* XXX if (*a) is out of bounds here we get a segfault */
328  localID = atomMap->localID(*a);
329  lpid = localID.pid;
330  lidx = localID.index;
331  if ( lpid == notUsed || ! f[lpid] ) continue;
332  f_atom = (*f2);
333  // printf("NAMD3-recv: atom %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
334  // *a, force_soa_x[lpid][lidx], force_soa_y[lpid][lidx], force_soa_z[lpid][lidx]);
335  // printf("NAMD3-recv: atom %d, Added Force (%8.6f, %8.6f, %8.6f) \n", *a, f_atom.x, f_atom.y, f_atom.z);
336  force_soa_x[lpid][lidx] += f_atom.x;
337  force_soa_y[lpid][lidx] += f_atom.y;
338  force_soa_z[lpid][lidx] += f_atom.z;
339 #ifndef USE_GLOBALMASTER_VIRIAL_KERNEL
340  x_orig.x = pos_soa_x[lpid][lidx];
341  x_orig.y = pos_soa_y[lpid][lidx];
342  x_orig.z = pos_soa_z[lpid][lidx];
343  trans.i = transform_soa_i[lpid][lidx];
344  trans.j = transform_soa_j[lpid][lidx];
345  trans.k = transform_soa_k[lpid][lidx];
346  x_atom = lattice.reverse_transform(x_orig,trans);
347  extForce += f_atom;
348  extVirial += outer(f_atom,x_atom);
349 #endif
350  }
351  } else {
352  for ( ; a != a_e; ++a, ++f2 ) {
353  DebugM(1,"processing atom "<<(*a)<<", F="<<(*f2)<<"...\n");
354  /* XXX if (*a) is out of bounds here we get a segfault */
355  LocalID localID = atomMap->localID(*a);
356  if ( localID.pid == notUsed || ! f[localID.pid] ) continue;
357  Force f_atom = (*f2);
358  // printf("NAMD3-recv: atom %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
359  // *a, f[localID.pid][localID.index].x, f[localID.pid][localID.index].y, f[localID.pid][localID.index].z);
360  // printf("NAMD3-recv: atom %d, Added Force (%8.6f, %8.6f, %8.6f) \n", *a, f_atom.x, f_atom.y, f_atom.z);
361  f[localID.pid][localID.index] += f_atom;
362  FullAtom &atom = t[localID.pid][localID.index];
363  Position x_orig = atom.position;
364  Transform trans = atom.transform;
365  Position x_atom = lattice.reverse_transform(x_orig,trans);
366  extForce += f_atom;
367  extVirial += outer(f_atom,x_atom);
368  }
369  }
370  DebugM(1,"done with the loop\n");
371 
372  // calculate forces for atoms in groups
373  AtomIDList::iterator g_i, g_e;
374  g_i = gdef.begin(); g_e = gdef.end();
375  ForceList::iterator gf_i = msg->gforce.begin();
376  //iout << iDEBUG << "recvResults\n" << endi;
377  if (sp->SOAintegrateOn || sp->CUDASOAintegrateMode) {
378  LocalID localID;
379  PatchID lpid;
380  int lidx;
381  Position x_orig, x_atom;
382  Transform trans;
383  Force f_atom;
384  for ( ; g_i != g_e; ++g_i, ++gf_i ) {
385  //iout << iDEBUG << *gf_i << '\n' << endi;
386  Vector accel = (*gf_i);
387  for ( ; *g_i != -1; ++g_i ) {
388  //iout << iDEBUG << *g_i << '\n' << endi;
389  localID = atomMap->localID(*g_i);
390  lpid = localID.pid;
391  lidx = localID.index;
392  if ( lpid == notUsed || ! f[lpid] ) continue;
393  f_atom = accel * mass_soa[lpid][lidx];
394 #if 0
395  if (*g_i < 20) {
396  CkPrintf("NAMD3-recv: group %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
397  *g_i, force_soa_x[lpid][lidx], force_soa_y[lpid][lidx], force_soa_z[lpid][lidx]);
398  CkPrintf("NAMD3-recv: group %d, Added Force (%8.6f, %8.6f, %8.6f) \n", *g_i, f_atom.x, f_atom.y, f_atom.z);
399  }
400 #endif
401  force_soa_x[lpid][lidx] += f_atom.x;
402  force_soa_y[lpid][lidx] += f_atom.y;
403  force_soa_z[lpid][lidx] += f_atom.z;
404 #ifndef USE_GLOBALMASTER_VIRIAL_KERNEL
405  x_orig.x = pos_soa_x[lpid][lidx];
406  x_orig.y = pos_soa_y[lpid][lidx];
407  x_orig.z = pos_soa_z[lpid][lidx];
408  trans.i = transform_soa_i[lpid][lidx];
409  trans.j = transform_soa_j[lpid][lidx];
410  trans.k = transform_soa_k[lpid][lidx];
411  x_atom = lattice.reverse_transform(x_orig,trans);
412  extForce += f_atom;
413  extVirial += outer(f_atom,x_atom);
414 #endif
415  }
416  }
417  } else {
418  for ( ; g_i != g_e; ++g_i, ++gf_i ) {
419  //iout << iDEBUG << *gf_i << '\n' << endi;
420  Vector accel = (*gf_i);
421  for ( ; *g_i != -1; ++g_i ) {
422  //iout << iDEBUG << *g_i << '\n' << endi;
423  LocalID localID = atomMap->localID(*g_i);
424  if ( localID.pid == notUsed || ! f[localID.pid] ) continue;
425  FullAtom &atom = t[localID.pid][localID.index];
426  Force f_atom = accel * atom.mass;
427 #if 0
428  if (*g_i < 20) {
429  CkPrintf("NAMD2-recv: group %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
430  *g_i, f[localID.pid][localID.index].x, f[localID.pid][localID.index].y, f[localID.pid][localID.index].z);
431  CkPrintf("NAMD2-recv: group %d, Added Force (%8.6f, %8.6f, %8.6f) \n", *g_i, f_atom.x, f_atom.y, f_atom.z);
432  }
433 #endif
434  f[localID.pid][localID.index] += f_atom;
435 
436  Position x_orig = atom.position;
437  Transform trans = atom.transform;
438  Position x_atom = lattice.reverse_transform(x_orig,trans);
439  extForce += f_atom;
440  extVirial += outer(f_atom,x_atom);
441  }
442  }
443  }
444  DebugM(1,"done with the groups\n");
445 
446  if (numActiveGridObjects > 0) {
447  applyGridObjectForces(msg, &extForce, &extVirial);
448  }
449  // printf("Finish receiving at step: %d ####################################################\n",
450  // patchList[0].p->flags.step);
451 
452  #ifdef NODEGROUP_FORCE_REGISTER
453  if (sp->CUDASOAintegrate) {
454 #ifndef USE_GLOBALMASTER_VIRIAL_KERNEL
455  ADD_VECTOR_OBJECT(nodeReduction,REDUCTION_EXT_FORCE_NORMAL,extForce);
456  ADD_TENSOR_OBJECT(nodeReduction,REDUCTION_VIRIAL_NORMAL,extVirial);
457 #endif
458  } else {
459  ADD_VECTOR_OBJECT(reduction,REDUCTION_EXT_FORCE_NORMAL,extForce);
460  ADD_TENSOR_OBJECT(reduction,REDUCTION_VIRIAL_NORMAL,extVirial);
461  reduction->submit();
462  }
463  #else
464  ADD_VECTOR_OBJECT(reduction,REDUCTION_EXT_FORCE_NORMAL,extForce);
465  ADD_TENSOR_OBJECT(reduction,REDUCTION_VIRIAL_NORMAL,extVirial);
466  reduction->submit();
467  #endif
468  }
469  // done setting the forces, close boxes below
470 
471  // Get reconfiguration if present
472  if ( msg->reconfig ) {
473  DebugM(3,"Reconfiguring\n");
474  configure(msg->newaid, msg->newgdef, msg->newgridobjid);
475  }
476 
477  // send another round of data if requested
478 
479  if(msg->resendCoordinates) {
480  DebugM(3,"Sending requested data right away\n");
481  // CkPrintf("*** Resending data on PE %d \n", CkMyPe());
482  sendData();
483  }
484 
485  groupTotalForce.resize(numGroupsRequested);
486  for ( int i=0; i<numGroupsRequested; ++i ) groupTotalForce[i] = 0;
487  DebugM(3,"resized\n");
488  if(setForces) {
489  DebugM(3,"setting forces\n");
490  ResizeArrayIter<PatchElem> ap(patchList);
491  Force **f = forcePtrs;
492  FullAtom **t = atomPtrs;
493  for (ap = ap.begin(); ap != ap.end(); ap++) {
494  CompAtom *x;
495  PatchID pId = (*ap).patchID;
496  if (!sp->CUDASOAintegrate) {
497  (*ap).positionBox->close(&x);
498  (*ap).forceBox->close(&((*ap).r));
499  DebugM(1,"closing boxes\n");
500  }
501  f[pId] = 0;
502  t[pId] = 0;
503  if (sp->CUDASOAintegrate || sp->SOAintegrateOn) {
504  // XXX Possibly code below is needed by SOAintegrate mode
505  mass_soa[pId] = NULL;
506  pos_soa_x[pId] = NULL;
507  pos_soa_y[pId] = NULL;
508  pos_soa_z[pId] = NULL;
509  force_soa_x[pId] = NULL;
510  force_soa_y[pId] = NULL;
511  force_soa_z[pId] = NULL;
512  transform_soa_i[pId] = NULL;
513  transform_soa_j[pId] = NULL;
514  transform_soa_k[pId] = NULL;
515  DebugM(2,"nulling ptrs\n");
516  }
517  }
518  DebugM(3,"done setting forces\n");
519  }
520 
521  #ifdef NODEGROUP_FORCE_REGISTER
522  if (!sp->CUDASOAintegrate) {
523  // CUDASOAintegrate handles this on PE 0 in sendComputeGlobalResults
524  delete msg;
525  }
526  #else
527  delete msg;
528  #endif
529  DebugM(3,"Done processing results\n");
530 }
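
For every force it applies, recvResults() also accumulates extForce += f and extVirial += outer(f, x), where x is the atom's position unwrapped through Lattice::reverse_transform() so that periodic wrapping does not distort the pressure contribution. The sketch below reproduces that outer-product bookkeeping with plain stand-in types (Vec and Ten are illustrative, not NAMD's Vector and Tensor classes):

// Outer-product virial contribution of one externally applied force,
// mirroring extVirial += outer(f_atom, x_atom) above.
#include <cstdio>

struct Vec { double x, y, z; };
struct Ten { double m[3][3]; };

// outer(v1, v2)[i][j] = v1[i] * v2[j]
static Ten outer(const Vec &f, const Vec &r) {
  Ten t{};
  const double a[3] = { f.x, f.y, f.z };
  const double b[3] = { r.x, r.y, r.z };
  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 3; ++j)
      t.m[i][j] = a[i] * b[j];
  return t;
}

int main() {
  Vec f = { 1.0, 0.0, 0.5 };     // external force on one atom
  Vec x = { 2.5, -1.0, 0.75 };   // unwrapped atom coordinate
  Ten v = outer(f, x);           // contribution added to the virial tensor
  std::printf("xx=%g xy=%g xz=%g\n", v.m[0][0], v.m[0][1], v.m[0][2]);
  return 0;
}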

◆ saveTotalForces()

void ComputeGlobal::saveTotalForces ( HomePatch *  homePatch )

Definition at line 981 of file ComputeGlobal.C.

References SimParameters::accelMDDebugOn, SimParameters::accelMDdihe, SimParameters::accelMDOn, ResizeArray< Elem >::add(), Results::amdf, ResizeArray< Elem >::begin(), SimParameters::CUDASOAintegrateMode, ResizeArray< Elem >::end(), Patch::f, PatchDataSOA::f_normal_x, PatchDataSOA::f_normal_y, PatchDataSOA::f_normal_z, PatchDataSOA::f_saved_nbond_x, PatchDataSOA::f_saved_nbond_y, PatchDataSOA::f_saved_nbond_z, PatchDataSOA::f_saved_slow_x, PatchDataSOA::f_saved_slow_y, PatchDataSOA::f_saved_slow_z, intpair::first, SimParameters::fixedAtomsOn, NAMD_bug(), Results::nbond, Results::normal, Patch::numAtoms, Node::Object(), Patch::patchID, ComputeHomePatches::patchList, intpair::second, Node::simParameters, Results::slow, SimParameters::SOAintegrateOn, Vector::x, Vector::y, and Vector::z.

Referenced by Sequencer::integrate(), Sequencer::integrate_SOA(), and Sequencer::minimize().

982 {
983  if ( ! forceSendEnabled ) NAMD_bug("ComputeGlobal::saveTotalForces called unexpectedly");
984  if ( ! forceSendActive ) return;
985 
986  SimParameters *simParms = Node::Object()->simParameters;
987  if ( simParms->accelMDOn && simParms->accelMDDebugOn && simParms->accelMDdihe ) {
988  int num=homePatch->numAtoms;
989  FullAtomList &atoms = homePatch->atom;
990  ForceList &af=homePatch->f[Results::amdf];
991 
992  for (int i=0; i<num; ++i) {
993  int index = atoms[i].id;
994  if (index < endRequested && isRequested[index] & 1) {
995  fid.add(index);
996  totalForce.add(af[i]);
997  }
998  }
999  return;
1000  }
1001 
1002  // printf("Start saving force at step: %d ####################################################\n",
1003  // patchList[0].p->flags.step);
1004  int fixedAtomsOn = simParms->fixedAtomsOn;
1005  int num=homePatch->numAtoms;
1006  FullAtomList &atoms = homePatch->atom;
1007  ForceList &f1=homePatch->f[Results::normal], &f2=homePatch->f_saved[Results::nbond],
1008  &f3=homePatch->f_saved[Results::slow];
1009 
1010  double *f1_soa_x = homePatch->patchDataSOA.f_normal_x;
1011  double *f1_soa_y = homePatch->patchDataSOA.f_normal_y;
1012  double *f1_soa_z = homePatch->patchDataSOA.f_normal_z;
1013  double *f2_soa_x = homePatch->patchDataSOA.f_saved_nbond_x;
1014  double *f2_soa_y = homePatch->patchDataSOA.f_saved_nbond_y;
1015  double *f2_soa_z = homePatch->patchDataSOA.f_saved_nbond_z;
1016  double *f3_soa_x = homePatch->patchDataSOA.f_saved_slow_x;
1017  double *f3_soa_y = homePatch->patchDataSOA.f_saved_slow_y;
1018  double *f3_soa_z = homePatch->patchDataSOA.f_saved_slow_z;
1019  int hasSOA = (simParms->SOAintegrateOn || simParms->CUDASOAintegrateMode);
1020  Force f_sum;
1021  double f_sum_x, f_sum_y, f_sum_z;
1022 
1023  #if 0
1024  for (int i=0; i<num; ++i) {
1025  int index = atoms[i].id;
1026  if (index < 20) {
1027  if (hasSOA) {
1028  CkPrintf("ForceSaved: atom %d, ForceN (%8.6f, %8.6f, %8.6f) \n", index, f1_soa_x[i], f1_soa_y[i], f1_soa_z[i]);
1029  CkPrintf(" atom %d, ForceNB (%8.6f, %8.6f, %8.6f) \n", index, f2_soa_x[i], f2_soa_y[i], f2_soa_z[i]);
1030  CkPrintf(" atom %d, ForceSL (%8.6f, %8.6f, %8.6f) \n", index, f3_soa_x[i], f3_soa_y[i], f3_soa_z[i]);
1031  } else {
1032  CkPrintf("ForceSaved: atom %d, ForceN (%8.6f, %8.6f, %8.6f) \n", index, f1[i].x, f1[i].y, f1[i].z);
1033  CkPrintf(" atom %d, ForceNB (%8.6f, %8.6f, %8.6f) \n", index, f2[i].x, f2[i].y, f2[i].z);
1034  // not memory safe to access slow forces all the time like this
1035  // CkPrintf(" atom %d, ForceSL (%8.6f, %8.6f, %8.6f) \n", index, f3[i].x, f3[i].y, f3[i].z);
1036  }
1037  }
1038  }
1039 
1040  printf("PE, PId (%d, %d) Stop saving at step: %d ####################################################\n",
1041  CkMyPe(), homePatch->patchID, patchList[0].p->flags.step);
1042  #endif
1043  if ( ! forceSendActive ) return;
1044  for (int i=0; i<num; ++i) {
1045  int index = atoms[i].id;
1046  char reqflag;
1047  if (index < endRequested && (reqflag = isRequested[index])) {
1048  if (hasSOA) {
1049  f_sum_x = f1_soa_x[i] + f2_soa_x[i];
1050  f_sum_y = f1_soa_y[i] + f2_soa_y[i];
1051  f_sum_z = f1_soa_z[i] + f2_soa_z[i];
1052  if (dofull) {
1053  f_sum_x += f3_soa_x[i];
1054  f_sum_y += f3_soa_y[i];
1055  f_sum_z += f3_soa_z[i];
1056  }
1057  f_sum.x = f_sum_x;
1058  f_sum.y = f_sum_y;
1059  f_sum.z = f_sum_z;
1060  } else {
1061  f_sum = f1[i]+f2[i];
1062  if (dofull)
1063  f_sum += f3[i];
1064  }
1065 
1066  if ( fixedAtomsOn && atoms[i].atomFixed )
1067  f_sum = 0.;
1068 
1069  if ( reqflag & 1 ) { // individual atom
1070  fid.add(index);
1071  totalForce.add(f_sum);
1072  }
1073  if ( reqflag & 2 ) { // part of group
1074  intpair *gpend = gpair.end();
1075  intpair *gpi = std::lower_bound(gpair.begin(),gpend,intpair(index,0));
1076  if ( gpi == gpend || gpi->first != index )
1077  NAMD_bug("ComputeGlobal::saveTotalForces gpair corrupted.");
1078  do {
1079  ++gfcount;
1080  groupTotalForce[gpi->second] += f_sum;
1081  } while ( ++gpi != gpend && gpi->first == index );
1082  }
1083  }
1084  }
1085 }
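
When an atom's request flag has bit 2 set (part of a requested group), saveTotalForces() locates all of its (atom ID, group index) entries with std::lower_bound in gpair, which this lookup assumes is kept sorted by atom ID, and adds the atom's summed force to every group that contains it. A self-contained sketch of that lookup, reduced to the x component and using made-up data:

// Group-total-force accumulation pattern: gpair maps atom IDs to group
// indices and is sorted by atom ID, so a binary search finds the first
// entry for an atom and a linear scan covers all groups containing it.
#include <algorithm>
#include <cstdio>
#include <vector>

struct IntPair {
  int first, second;   // (atom ID, group index)
  bool operator<(const IntPair &o) const { return first < o.first; }
};

int main() {
  std::vector<IntPair> gpair = { {7, 0}, {7, 2}, {9, 1} };  // atom 7 in groups 0 and 2
  std::vector<double> groupTotalForceX(3, 0.0);

  const int index = 7;          // atom whose total force was just gathered
  const double f_sum_x = 1.5;   // x component of that total force
  auto gpi = std::lower_bound(gpair.begin(), gpair.end(), IntPair{index, 0});
  for ( ; gpi != gpair.end() && gpi->first == index; ++gpi)
    groupTotalForceX[gpi->second] += f_sum_x;

  std::printf("group totals: %g %g %g\n",
              groupTotalForceX[0], groupTotalForceX[1], groupTotalForceX[2]);
  return 0;
}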

The documentation for this class was generated from the following files: