NAMD
ComputeGridForceCUDA.C
#include "ComputeGridForceCUDA.h"
#include "Molecule.h"
#include "Node.h"
#include "HomePatch.h"
#include "SimParameters.h"


#define GF_OVERLAPCHECK_FREQ 1000
#define MIN_DEBUG_LEVEL 4
//#define DEBUGM
#include "Debug.h"
#ifdef NODEGROUP_FORCE_REGISTER

ComputeGridForceCUDA::ComputeGridForceCUDA(
  std::vector<HomePatch*> &a_patchList,
  std::vector<AtomMap*> &atomMapsList,
  cudaStream_t stream)
{
  DebugM(4, "ComputeGridForceCUDA\n" << endi);
  this->stream = stream;
  this->patchList = a_patchList;
  Molecule *molecule = Node::Object()->molecule;
  allocate_device<unsigned int>(&d_tbcatomic, 1);

  // create the list of atoms that need grid forces
  // allocate the grid structures we need on the device
  // extract the data we need from molecule
  // sync grid data from the host to the device

  numGriddedAtoms = createGriddedLists();
}
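
/*
 * Illustrative sketch (not part of NAMD): the allocate_device<T>,
 * allocate_host<T>, and copy_HtoD_sync<T> helpers used throughout this file
 * are assumed here to be thin wrappers over the raw CUDA runtime API, roughly
 * equivalent to the standalone functions below; the real CudaUtils
 * implementations may differ in error handling and host-memory pinning.
 */
#if 0  // example only; kept out of the build
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

template <typename T>
static void example_allocate_device(T **pp, const size_t n) {
  // device allocation sized in elements, mirroring allocate_device<T>(&ptr, n)
  if (cudaMalloc((void **)pp, n * sizeof(T)) != cudaSuccess) {
    fprintf(stderr, "example_allocate_device: cudaMalloc failed\n");
    exit(1);
  }
}

template <typename T>
static void example_copy_HtoD_sync(const T *h_src, T *d_dst, const size_t n) {
  // blocking host-to-device copy, mirroring copy_HtoD_sync<T>(src, dst, n)
  if (cudaMemcpy(d_dst, h_src, n * sizeof(T), cudaMemcpyHostToDevice) != cudaSuccess) {
    fprintf(stderr, "example_copy_HtoD_sync: cudaMemcpy failed\n");
    exit(1);
  }
}
#endif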

/*
 * update griddedAtomsSOAIndex for atoms that are local
 */
int ComputeGridForceCUDA::updateGriddedAtoms(
  std::vector<AtomMap*> &atomMapsList,
  std::vector<CudaLocalRecord> &localRecords,
  std::vector<HomePatch*> &patches,
  const int *h_globalToLocalID,
  bool mGpuOn)
{

  Molecule *molecule = Node::Object()->molecule;
  DebugM(4, "ComputeGridForceCUDA::updateGriddedLists " << numGriddedAtomsLocal << "\n" << endi);

  if (mGpuOn)
  { // we can't use the atomMapsList from SequencerCUDA when mGpuOn is set,
    // so go HomePatch by HomePatch with the vector from DeviceData
    for (int i = 0; i < patches.size(); ++i) {
      HomePatch *p = patches[i];
      for (int j = 0; j < p->getNumAtoms(); j++) {
        for (int gridnum = 0; gridnum < molecule->numGridforceGrids; gridnum++) {
          int gid = p->getAtomList()[j].id; // global ID of the corresponding FullAtom
          if (molecule->is_atom_gridforced(gid, gridnum)) {
            LocalID lid;
            // the local id is a tuple of patch ID and the patch-local index
            lid.pid = p->getPatchID();
            lid.index = j;
            int soaPid = h_globalToLocalID[lid.pid]; // converts global patch ID to its local position in our SOA data structures
            std::pair<int,int> mapkey(gridnum, gid);
            int soaIndex = localRecords[soaPid].bufferOffset + lid.index;
            DebugM(1, "[" << CkMyPe() << "] ComputeGridForceCUDA::updateGriddedLists gid " << gid << " patch " << lid.pid << " atom " << j << " soaIndex[" << griddedAtomsGlobalIndexMap[mapkey] << "] = " << griddedAtomsSOAIndex[griddedAtomsGlobalIndexMap[mapkey]] << "->" << soaIndex << "\n" << endi);
            griddedAtomsSOAIndex[griddedAtomsGlobalIndexMap[mapkey]] = soaIndex;
          }
        }
      }
    }
  }
  else
  { // use the atomMapsList on a single GPU to save time
    for (int i = 0; i < numGriddedAtoms; i++) {
      int gid = griddedAtomsGlobalIndex[i];
      LocalID lid;
      // Search for a valid LocalID in all atom maps
      for (int j = 0; j < atomMapsList.size(); j++)
      {
        lid = atomMapsList[j]->localID(gid);
        if (lid.pid != -1) break;
      }
      // JM NOTE: Fields of lid need to be != -1, because the atom needs to be
      // somewhere; otherwise we have a bug
      if (lid.pid == -1)
      {
        NAMD_bug(" LocalAtomID not found in patchMap");
      }
      int soaPid = h_globalToLocalID[lid.pid]; // converts global patch ID to its local position in our SOA data structures
      int soaIndex = localRecords[soaPid].bufferOffset + lid.index;
      DebugM(1, "ComputeGridForceCUDA::updateGriddedLists soa[" << i << "] <- " << griddedAtomsSOAIndex[i] << "->" << soaIndex << "\n" << endi);
      griddedAtomsSOAIndex[i] = soaIndex;
    }
  }
  // now that we've fixed the SOAIndex for gridded atoms, we need to
  // trim our local atom structures to the gridded atoms in the local
  // patch list. If we knew exactly which ones left and which ones
  // arrived, this could be done more surgically, but this works.

  gridOffsetsLocal.clear();
  griddedAtomsLocalIndex.clear();
  gridOffsetsLocal.push_back(0); // we start at offset 0
  DebugM(3, "ComputeGridForceCUDA::updateGriddedLists grids " << molecule->numGridforceGrids << "\n" << endi);
  for (int gridnum = 0; gridnum < molecule->numGridforceGrids; gridnum++)
  {
    DebugM(2, "ComputeGridForceCUDA::updateGriddedLists patches " << patches.size() << "\n" << endi);
    for (int i = 0; i < patches.size(); ++i) {
      HomePatch *p = patches[i];
      DebugM(1, "ComputeGridForceCUDA::updateGriddedLists patch " << p->getPatchID() << " atoms " << p->getNumAtoms() << "\n" << endi);
      for (int j = 0; j < p->getNumAtoms(); j++) {
        int gid = p->getAtomList()[j].id; // global ID of the corresponding FullAtom
        if (molecule->is_atom_gridforced(gid, gridnum)) {
          std::pair<int,int> mapkey(gridnum, gid);
          int mapoffset = griddedAtomsGlobalIndexMap[mapkey];
          if (mapoffset < 0)
          {
            NAMD_bug("how is this not in our map?");
          }
          griddedAtomsLocalIndex.push_back(mapoffset);
          DebugM(1, "ComputeGridForceCUDA::updateGriddedLists patch " << p->getPatchID() << " atom " << j << " gid " << gid << " gridnum " << gridnum << " mapoffset " << mapoffset << " localIndex.size() " << griddedAtomsLocalIndex.size() << "\n" << endi);
        }
      }
      DebugM(2, "ComputeGridForceCUDA::updateGriddedLists patch " << p->getPatchID() << " gridded atoms " << griddedAtomsLocalIndex.size() << "\n" << endi);
    }
    int size = griddedAtomsLocalIndex.size();
    gridOffsetsLocal.push_back(size);
  }

  numGriddedAtomsLocal = griddedAtomsLocalIndex.size();
  DebugM(4, "ComputeGridForceCUDA::updateGriddedLists numGriddedAtomsLocal now " << numGriddedAtomsLocal << "\n" << endi);
  // copy indices to device
  if (numGriddedAtomsLocal > 0)
  {
    int *soaPtr = griddedAtomsSOAIndex.data();
    copy_HtoD_sync<int>(soaPtr, d_griddedAtomsSOAIndex, griddedAtomsSOAIndex.size());
    int *idxPtr = griddedAtomsLocalIndex.data();
    copy_HtoD_sync<int>(idxPtr, d_griddedAtomsLocalIndex, griddedAtomsLocalIndex.size());
  }
  return numGriddedAtomsLocal;
}
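
/*
 * Illustrative sketch (not part of NAMD): how a gridded atom's LocalID is
 * turned into an index into the device-side SOA buffers in the function
 * above. ToyLocalID and ToyLocalRecord are hypothetical stand-ins for
 * LocalID and CudaLocalRecord; the arithmetic mirrors
 * soaIndex = localRecords[soaPid].bufferOffset + lid.index.
 */
#if 0  // example only; kept out of the build
#include <vector>

struct ToyLocalID     { int pid; int index; };  // stand-in for LocalID
struct ToyLocalRecord { int bufferOffset; };    // stand-in for CudaLocalRecord

static int toy_soa_index(const ToyLocalID &lid,
                         const std::vector<int> &globalToLocalPatch,
                         const std::vector<ToyLocalRecord> &localRecords) {
  int soaPid = globalToLocalPatch[lid.pid];              // global patch ID -> local SOA patch slot
  return localRecords[soaPid].bufferOffset + lid.index;  // patch base offset + in-patch index
}
#endif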


/*
 * We make host side arrays that we can sync over for the atomIdx,
 * gridFrcParams.k, gridFrcParams.q for all the atoms that have
 * is_atom_gridforced == TRUE. That way we don't skip around
 * non-contiguous references into the molecule for the handful of
 * atoms.
 *
 * The SOA index and LocalIndex get updated by migrations, and that is
 * what we'll iterate over.
 */
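
/*
 * Illustrative sketch (not part of NAMD): the gather-then-copy pattern the
 * comment above describes and createGriddedLists() below implements.
 * Per-atom parameters scattered through a large structure are collated once
 * into contiguous host arrays so the device gets a single dense upload.
 * ToyAtomParams and toy_collate are hypothetical.
 */
#if 0  // example only; kept out of the build
#include <vector>

struct ToyAtomParams { float charge; float scale; bool gridforced; };

// Collate parameters of the flagged atoms into dense arrays, and remember
// which global atom each dense slot refers to.
static void toy_collate(const std::vector<ToyAtomParams> &all,
                        std::vector<float> &charge, std::vector<float> &scale,
                        std::vector<int> &globalIndex) {
  for (int i = 0; i < (int)all.size(); i++) {
    if (!all[i].gridforced) continue;
    globalIndex.push_back(i);
    charge.push_back(all[i].charge);
    scale.push_back(all[i].scale);
  }
  // a single host-to-device copy per dense array would follow here
}
#endif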


int ComputeGridForceCUDA::createGriddedLists()
{
  // get the total count across all grids so we can just stream
  // through the whole thing
  DebugM(4, "ComputeGridForceCUDA::createGriddedLists\n" << endi);
  Molecule *molecule = Node::Object()->molecule;
  Atom *atoms = molecule->getAtoms();
  int gridTotalSize = 0;
  numGriddedAtoms = 0;
  numGriddedAtomsLocal = 0;
  gridOffsets.clear();
  gridOffsets.push_back(0);      // 0th grid has 0th offset
  gridOffsetsLocal.push_back(0); // 0th grid has 0th offset
  gridCoordsOffsets.push_back(0);
  griddedAtomsGlobalIndex.clear();
  griddedAtomsSOAIndex.clear();
  for (int gridnum = 0; gridnum < molecule->numGridforceGrids; gridnum++) {
    // atoms can be in multiple grids, so we go grid by grid
    int numAtoms = molecule->numAtoms;
    for (int j = 0; j < numAtoms; j++) {
      if (molecule->is_atom_gridforced(j, gridnum)) {
        griddedAtomsGlobalIndex.push_back(j);
        // we're going to have to look up the reverse of GID to index
        // a lot, so make a map
        std::pair<int,int> mapkey(gridnum, j);
        griddedAtomsGlobalIndexMap[mapkey] = griddedAtomsGlobalIndex.size();
      }
    }
    gridOffsets.push_back(griddedAtomsGlobalIndex.size());
    auto *mol_grid = molecule->get_gridfrc_grid(gridnum);
    GridforceFullBaseGrid *h_grid;
    if (mol_grid->get_grid_type() == 1)
      h_grid = (GridforceFullMainGrid *) mol_grid;
    else
      NAMD_bug("GridforceGridCUDA::createGriddedLists called with unsupported grid type!");
    int size = h_grid->size;
    gridCoordsOffsets.push_back(size);
    gridTotalSize += size;
    DebugM(2, "ComputeGridForceCUDA::createGriddedLists grid " << gridnum << " has " << size << " points and " << gridOffsets[gridnum+1] - gridOffsets[gridnum] << " atoms\n" << endi);
  }

  for (int gridnum = 0; gridnum < molecule->numGridforceGrids; gridnum++) {
    // atoms can be in multiple grids, so we go grid by grid
    for (int i = 0; i < patchList.size(); i++) {
      HomePatch *p = patchList[i];
      for (int j = 0; j < p->getNumAtoms(); j++) {
        int gid = p->getAtomList()[j].id; // global ID of the corresponding FullAtom
        if (molecule->is_atom_gridforced(gid, gridnum)) {
          std::pair<int,int> mapkey(gridnum, gid);
          int globalIndex = griddedAtomsGlobalIndexMap[mapkey];
          griddedAtomsLocalIndex.push_back(globalIndex);
        }
      }
    }
    gridOffsetsLocal.push_back(griddedAtomsLocalIndex.size());
  }

  numGriddedAtoms = griddedAtomsGlobalIndex.size();
  DebugM(3, "ComputeGridForceCUDA::createGriddedLists numGriddedAtoms " << numGriddedAtoms << " numAtoms " << molecule->numAtoms << " grids " << molecule->numGridforceGrids << "\n" << endi);
  // size to fit all the gridded atoms
  griddedAtomsSOAIndex.resize(numGriddedAtoms);
  numGriddedAtomsLocal = 0;
  if (numGriddedAtoms > 0)
  {
    allocate_host<float>(&h_gridded_charge, numGriddedAtoms);
    allocate_host<float>(&h_gridded_scale, numGriddedAtoms);
    allocate_device<float>(&d_gridded_charge, numGriddedAtoms);
    allocate_device<float>(&d_gridded_scale, numGriddedAtoms);
    allocate_device<GridforceGridCUDA>(&d_grids, molecule->numGridforceGrids);
    allocate_device<int>(&d_griddedAtomsSOAIndex, numGriddedAtoms);
    allocate_device<int>(&d_griddedAtomsLocalIndex, numGriddedAtoms);
    /*
     * we flatten all of the grids into one device array of grid values
     */
    allocate_device<float>(&d_gridCoords, gridTotalSize);
    // for the multiple grid case, we need grid specific buffers for
    // these, because their kernel launches may run in parallel
    allocate_device<double>(&d_extEnergy_G, molecule->numGridforceGrids);
    allocate_device<double3>(&d_netForce_G, molecule->numGridforceGrids*3);
    allocate_device<cudaTensor>(&d_extVirial_G, molecule->numGridforceGrids);
    allocate_host<double>(&h_extEnergy_G, molecule->numGridforceGrids);
    allocate_host<double3>(&h_netForce_G, molecule->numGridforceGrids*3);
    allocate_host<cudaTensor>(&h_extVirial_G, molecule->numGridforceGrids);

    for (int gridnum = 0; gridnum < molecule->numGridforceGrids; gridnum++) {
      int offset = gridOffsets[gridnum];
      auto *mol_grid = molecule->get_gridfrc_grid(gridnum);
      GridforceFullBaseGrid *h_grid;
      if (mol_grid->get_grid_type() == 1)
        h_grid = (GridforceFullMainGrid *) mol_grid;
      else
        NAMD_bug("GridforceGridCUDA::createGriddedLists called with unsupported grid type!");
      int grid_size = h_grid->size;
      for (int gridIdx = offset; gridIdx < gridOffsets[gridnum+1]; gridIdx++)
      {
        int gid = griddedAtomsGlobalIndex[gridIdx];
        if (molecule->is_atom_gridforced(gid, gridnum)) {
          molecule->get_gridfrc_params(h_gridded_scale[gridIdx], h_gridded_charge[gridIdx], gid, gridnum);
        }
      }

      h_grids.push_back(GridforceGridCUDA(h_grid->get_k0(),
                                          h_grid->get_k1(),
                                          h_grid->get_k2(),
                                          h_grid->size,
                                          h_grid->dk[0],
                                          h_grid->dk[1],
                                          h_grid->dk[2],
                                          h_grid->factor,
                                          h_grid->get_origin(),
                                          h_grid->get_center(),
                                          h_grid->cont[0],
                                          h_grid->cont[1],
                                          h_grid->cont[2],
                                          h_grid->gapinv[0],
                                          h_grid->gapinv[1],
                                          h_grid->gapinv[2],
                                          h_grid->gap[0],
                                          h_grid->gap[1],
                                          h_grid->gap[2],
                                          h_grid->inv,
                                          h_grid->offset[0],
                                          h_grid->offset[1],
                                          h_grid->offset[2],
                                          h_grid->get_scale(),
                                          &d_gridCoords[gridCoordsOffsets[gridnum]]));
      copy_HtoD_sync<float>(h_grid->grid, &d_gridCoords[gridCoordsOffsets[gridnum]], h_grid->size);
    }
    GridforceGridCUDA *gPtr = h_grids.data();
    copy_HtoD_sync<GridforceGridCUDA>(gPtr, d_grids, molecule->numGridforceGrids);
    copy_HtoD_sync<float>(h_gridded_charge, d_gridded_charge, numGriddedAtoms);
    copy_HtoD_sync<float>(h_gridded_scale, d_gridded_scale, numGriddedAtoms);
    cudaCheck(cudaMemset(d_tbcatomic, 0, sizeof(unsigned int))); // sets the scalar atomic counter to zero
  }
  return numGriddedAtoms;
}
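
/*
 * Illustrative sketch (not part of NAMD): gridOffsets and gridOffsetsLocal
 * are prefix-sum (CSR-style) offset arrays, so the gridded atoms belonging
 * to grid g occupy the half-open range [offsets[g], offsets[g+1]) of the
 * flattened index arrays built above. A minimal standalone demonstration
 * of that layout:
 */
#if 0  // example only; kept out of the build
#include <cstdio>
#include <vector>

int main() {
  // Flattened per-grid atom lists: grid 0 owns three entries, grid 1 owns two.
  std::vector<int> flatAtomIndex = {10, 42, 77, 42, 99};
  std::vector<int> offsets       = {0, 3, 5};  // CSR-style prefix sums

  for (size_t g = 0; g + 1 < offsets.size(); g++) {
    printf("grid %zu:", g);
    for (int i = offsets[g]; i < offsets[g + 1]; i++)
      printf(" %d", flatAtomIndex[i]);  // atom 42 appears under both grids
    printf("\n");
  }
  return 0;
}
#endif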



void ComputeGridForceCUDA::doForce(
  const int doEnergy,
  const int doVirial,
  const Lattice lat,
  const int timeStep,
  const double *d_pos_x,
  const double *d_pos_y,
  const double *d_pos_z,
  const char3 *d_transform,
  double *f_normal_x,
  double *f_normal_y,
  double *f_normal_z,
  cudaStream_t stream)
{
  // iterate through the grids like the non-CUDA code path
  DebugM(4, "ComputeGridForceCUDA::doForce\n" << endi);
  Molecule *mol = Node::Object()->molecule;
  if (numGriddedAtomsLocal > 0)
  {
    HomePatch *homePatch = patchList[0];
    // EJB - the CPU side code has this check, so it is blindly reimplemented here
    if (homePatch->flags.step % GF_OVERLAPCHECK_FREQ == 0) {
      for (int gridnuml = 0; gridnuml < mol->numGridforceGrids; gridnuml++) {
        // only check every GF_OVERLAPCHECK_FREQ steps
        if (simParams->langevinPistonOn || simParams->berendsenPressureOn) {
          // check for grid overlap only if pressure control is on; the check
          // is not needed without pressure control, since it is also
          // performed on startup
          GridforceGrid *grid = mol->get_gridfrc_grid(gridnuml);
          if (!grid->fits_lattice(homePatch->lattice)) {
            char errmsg[512];
            if (grid->get_checksize()) {
              sprintf(errmsg, "Warning: Periodic cell basis too small for Gridforce grid %d. Set gridforcechecksize off in configuration file to ignore.\n", gridnuml);
              NAMD_die(errmsg);
            }
          }
        }
      }
    }
    // The atoms that need gridding, along with their static data
    // (charge, scaling, etc.), are already collated. Launch the
    // computeGridForce kernel with the grid, the collated arrays of charge
    // and scale, and the atom position arrays, to accumulate forces into the
    // device side force arrays along with the energy and virial contribution
    // from each grid-forced atom.
    for (int gridnum = 0; gridnum < mol->numGridforceGrids; gridnum++) {
      DebugM(3, "ComputeGridForceCUDA::doForce grid " << gridnum << " from local offset " << gridOffsetsLocal[gridnum] << " to " << gridOffsetsLocal[gridnum+1] << "\n" << endi);
      h_extEnergy_G[gridnum] = 0.;
      h_netForce_G[gridnum].x = 0;
      h_netForce_G[gridnum].y = 0;
      h_netForce_G[gridnum].z = 0;
      h_extVirial_G[gridnum].xx = 0;
      h_extVirial_G[gridnum].xy = 0;
      h_extVirial_G[gridnum].xz = 0;
      h_extVirial_G[gridnum].yx = 0;
      h_extVirial_G[gridnum].yy = 0;
      h_extVirial_G[gridnum].yz = 0;
      h_extVirial_G[gridnum].zx = 0;
      h_extVirial_G[gridnum].zy = 0;
      h_extVirial_G[gridnum].zz = 0;
      computeGridForce(doEnergy, doVirial, d_grids[gridnum], lat,
                       d_pos_x, d_pos_y, d_pos_z, d_transform,
                       d_griddedAtomsSOAIndex,
                       &d_griddedAtomsLocalIndex[gridOffsetsLocal[gridnum]],
                       d_gridded_charge, d_gridded_scale,
                       f_normal_x, f_normal_y, f_normal_z,
                       &h_netForce_G[gridnum], &d_netForce_G[gridnum],
                       &h_extEnergy_G[gridnum], &d_extEnergy_G[gridnum],
                       &h_extVirial_G[gridnum], &d_extVirial_G[gridnum],
                       gridOffsetsLocal[gridnum+1] - gridOffsetsLocal[gridnum],
                       d_tbcatomic, stream);
    }
  }
  else
  {
    // CkPrintf("[%d] computeGridForce NO griddedAtoms %d\n", CkMyPe(), numGriddedAtoms);
  }
}
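
/*
 * Illustrative sketch (not part of NAMD): the computeGridForce kernel
 * launched above is defined elsewhere. Conceptually, each gridded atom
 * samples the tabulated grid potential at its position and receives
 * F = -scale * charge * grad V. NAMD's grids use a higher-order
 * interpolation together with the full lattice transform; the standalone
 * host functions below substitute simple trilinear interpolation on an
 * axis-aligned grid purely to show the idea. All names, the flattened
 * index layout, and the energy convention here are hypothetical.
 */
#if 0  // example only; kept out of the build
#include <cmath>

// Trilinear sample of a scalar field V stored as V[(i*ny + j)*nz + k] on an
// axis-aligned grid with uniform spacing h and origin org; returns the value
// and writes the real-space gradient. Assumes pos lies strictly inside the
// grid: no boundary or periodic handling in this sketch.
static double toy_sample_grid(const float *V, int nx, int ny, int nz,
                              const double org[3], double h,
                              const double pos[3], double grad[3]) {
  (void)nx;  // bounds are not checked in this sketch
  double fi = (pos[0] - org[0]) / h;
  double fj = (pos[1] - org[1]) / h;
  double fk = (pos[2] - org[2]) / h;
  int i = (int)std::floor(fi), j = (int)std::floor(fj), k = (int)std::floor(fk);
  double u = fi - i, v = fj - j, w = fk - k;  // fractional cell coordinates
  auto at = [&](int a, int b, int c) { return (double)V[(a*ny + b)*nz + c]; };
  double c000 = at(i,j,k),     c100 = at(i+1,j,k);
  double c010 = at(i,j+1,k),   c110 = at(i+1,j+1,k);
  double c001 = at(i,j,k+1),   c101 = at(i+1,j,k+1);
  double c011 = at(i,j+1,k+1), c111 = at(i+1,j+1,k+1);
  double val = c000*(1-u)*(1-v)*(1-w) + c100*u*(1-v)*(1-w)
             + c010*(1-u)*v*(1-w)     + c110*u*v*(1-w)
             + c001*(1-u)*(1-v)*w     + c101*u*(1-v)*w
             + c011*(1-u)*v*w         + c111*u*v*w;
  grad[0] = ((c100-c000)*(1-v)*(1-w) + (c110-c010)*v*(1-w)
           + (c101-c001)*(1-v)*w     + (c111-c011)*v*w) / h;
  grad[1] = ((c010-c000)*(1-u)*(1-w) + (c110-c100)*u*(1-w)
           + (c011-c001)*(1-u)*w     + (c111-c101)*u*w) / h;
  grad[2] = ((c001-c000)*(1-u)*(1-v) + (c101-c100)*u*(1-v)
           + (c011-c010)*(1-u)*v     + (c111-c110)*u*v) / h;
  return val;
}

// Per-atom energy and force in the spirit of the kernel launch above:
// E = scale * charge * V(pos), F = -scale * charge * grad V(pos).
static void toy_grid_force(const float *V, int nx, int ny, int nz,
                           const double org[3], double h,
                           const double pos[3], double charge, double scale,
                           double f[3], double &energy) {
  double grad[3];
  double val = toy_sample_grid(V, nx, ny, nz, org, h, pos, grad);
  energy = scale * charge * val;
  for (int d = 0; d < 3; d++) f[d] = -scale * charge * grad[d];
}
#endif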

void ComputeGridForceCUDA::zeroOutEnergyVirialForcesAcrossGrids(
  double *h_extEnergy, double3 *h_netForce, cudaTensor *h_virial)
{
  Molecule *mol = Node::Object()->molecule;
  h_extEnergy[0] = 0;
  h_netForce->x = 0;
  h_netForce->y = 0;
  h_netForce->z = 0;
  h_virial->xx = 0;
  h_virial->xy = 0;
  h_virial->xz = 0;
  h_virial->yx = 0;
  h_virial->yy = 0;
  h_virial->yz = 0;
  h_virial->zx = 0;
  h_virial->zy = 0;
  h_virial->zz = 0;
  for (int gridnum = 0; gridnum < mol->numGridforceGrids; gridnum++) {
    h_extEnergy_G[gridnum] = 0.;
    h_netForce_G[gridnum].x = 0;
    h_netForce_G[gridnum].y = 0;
    h_netForce_G[gridnum].z = 0;
    h_extVirial_G[gridnum].xx = 0;
    h_extVirial_G[gridnum].xy = 0;
    h_extVirial_G[gridnum].xz = 0;
    h_extVirial_G[gridnum].yx = 0;
    h_extVirial_G[gridnum].yy = 0;
    h_extVirial_G[gridnum].yz = 0;
    h_extVirial_G[gridnum].zx = 0;
    h_extVirial_G[gridnum].zy = 0;
    h_extVirial_G[gridnum].zz = 0;
  }
}

void ComputeGridForceCUDA::sumEnergyVirialForcesAcrossGrids(double *h_extEnergy,
                                                            double3 *h_netForce,
                                                            cudaTensor *h_virial)
{
  Molecule *mol = Node::Object()->molecule;
  for (int gridnum = 0; gridnum < mol->numGridforceGrids; gridnum++) {
    DebugM(2, "ComputeGridForceCUDA::sumEnergyVirialForcesAcrossGrids in grid " << gridnum << " energy " << h_extEnergy_G[gridnum] << "\n" << endi);
    h_extEnergy[0] += h_extEnergy_G[gridnum];
    h_netForce->x += h_netForce_G[gridnum].x;
    h_netForce->y += h_netForce_G[gridnum].y;
    h_netForce->z += h_netForce_G[gridnum].z;
    h_virial[0] += h_extVirial_G[gridnum];
    DebugM(2, "ComputeGridForceCUDA::sumEnergyVirialForcesAcrossGrids out grid " << gridnum << " energy " << h_extEnergy_G[gridnum] << "\n" << endi);
  }
}
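
/*
 * Illustrative sketch (not part of NAMD): the fold above relies on one
 * accumulator slot per grid and on cudaTensor providing an operator+=.
 * ToyVec3 and ToyTensor below are hypothetical stand-ins for double3 and
 * cudaTensor; the reduction mirrors sumEnergyVirialForcesAcrossGrids().
 */
#if 0  // example only; kept out of the build
struct ToyVec3 { double x, y, z; };
struct ToyTensor {
  double xx, xy, xz, yx, yy, yz, zx, zy, zz;
  ToyTensor &operator+=(const ToyTensor &o) {
    xx += o.xx; xy += o.xy; xz += o.xz;
    yx += o.yx; yy += o.yy; yz += o.yz;
    zx += o.zx; zy += o.zy; zz += o.zz;
    return *this;
  }
};

// Fold per-grid partial results (one slot per grid) into the caller's totals.
static void toy_sum_across_grids(int numGrids, const double *energy_G,
                                 const ToyVec3 *force_G, const ToyTensor *virial_G,
                                 double &energy, ToyVec3 &force, ToyTensor &virial) {
  for (int g = 0; g < numGrids; g++) {
    energy  += energy_G[g];
    force.x += force_G[g].x;
    force.y += force_G[g].y;
    force.z += force_G[g].z;
    virial  += virial_G[g];
  }
}
#endif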

void ComputeGridForceCUDA::destroyGriddedLists()
{
  DebugM(4, "ComputeGridForceCUDA::destroyGriddedLists\n" << endi);
  if (numGriddedAtoms > 0) {
    deallocate_host<float>(&h_gridded_charge);
    deallocate_host<float>(&h_gridded_scale);
    deallocate_device<float>(&d_gridded_charge);
    deallocate_device<float>(&d_gridded_scale);
    deallocate_device<float>(&d_gridCoords);
    deallocate_device<int>(&d_griddedAtomsSOAIndex);
    deallocate_device<int>(&d_griddedAtomsLocalIndex);
    deallocate_device<GridforceGridCUDA>(&d_grids);
    deallocate_device<double>(&d_extEnergy_G);
    deallocate_device<double3>(&d_netForce_G);
    deallocate_device<cudaTensor>(&d_extVirial_G);
    deallocate_host<double>(&h_extEnergy_G);
    deallocate_host<double3>(&h_netForce_G);
    deallocate_host<cudaTensor>(&h_extVirial_G);
  }
}


ComputeGridForceCUDA::~ComputeGridForceCUDA()
{
  DebugM(4, "ComputeGridForceCUDA::~ComputeGridForceCUDA\n" << endi);
  destroyGriddedLists();
  deallocate_device<unsigned int>(&d_tbcatomic);
}


#endif //NODEGROUP_FORCE_REGISTER