// NAMD
// ComputeOneFourNbTholes.C
// (doxygen navigation header from the generated documentation page; this text
//  is page residue, not part of the original source file)
1 #include "ComputeNonbondedUtil.h"
3 #include "Molecule.h"
4 #include "common.h"
5 #include "structures.h"
6 #include "Debug.h"
7 #include "PressureProfile.h"
8 
9 // static initialization
14 
16  Molecule* mol, int* count, int32*** byatom,
17  OneFourNbThole** structarray) {
18 #ifdef MEM_OPT_VERSION
19  NAMD_die("Should not be called in OneFourNbTholeElem::getMoleculePointers in memory optimized version!");
20 #else
21  *count = mol->numOneFourNbTholes;
22  *byatom = mol->oneFourNbTholesByAtom;
23  *structarray = mol->oneFourNbTholes;
24 #endif
25 }
26 
28  Parameters *p, const OneFourNbTholeValue **v) {
29  *v = NULL;
30 }
31 
 33  OneFourNbTholeElem *tuples, int ntuple,
 34  BigReal *reduction, BigReal *pressureProfileData) {
// NOTE(review): this listing is a garbled doxygen extraction. The hyperlinked
// source lines were dropped by the scraper -- including the
// "void OneFourNbTholeElem::computeForce(" signature on the original line 32
// (see the doxygen declaration "static void computeForce(OneFourNbTholeElem *,
// int, BigReal *, BigReal *)") -- and the leading integer on each remaining
// line is a doxygen line number fused into the text, not code. Recover the
// pristine file before building.
//
// Computes the 1-4 nonbonded Thole (Drude dipole screening) correction for
// each tuple of four particles: heavy atom i, its Drude particle, heavy
// atom j, its Drude particle. Accumulates forces into the patch force
// arrays and, when requested, energy, virial, and pressure-profile
// contributions into `reduction` / `pressureProfileData`.
 35  const Lattice & lattice = tuples[0].p[0]->p->lattice;
 36  const bool doEnergy = tuples[0].p[0]->p->flags.doEnergy;
 37  const bool doVirial = tuples[0].p[0]->p->flags.doVirial;
 39  /*
 40  * Although CHARMM Drude force field expects the 1-4 scaling factor to be 1.0,
 41  * I try my best to generalize the case here. The GPU nonbonded kernel differs
 42  * from the CPU kernel as the former always calculate the full 1-4 interactions
 43  * without scaling (or just with a scaling factor 1.0), and then the bonded
 44  * kernel "modifiedExclusionForce" in ComputeBondedCUDAKernel.cu actually
 45  * computes a complementary part scaled by -(1.0 - ComputeNonbondedUtil::scale14).
 46  *
 47  * As a result, the following scaling factor uses the same convention. In the
 48  * GPU kernel, the NbThole correction of 1-4 interactions is computed by
 49  * calcForceEnergyNbThole, so the following code should only computes the
 50  * complementary part.
 51  *
 52  * For the CPU kernel, the NbThole correction in ComputeNonbondedBase.h skips
 53  * modified exclusions, so we just compute the 1-4 NbThole correction with
 54  * scaling below.
 55  */
 56  BigReal s;
 57 #if defined (NAMD_CUDA) || defined(NAMD_HIP)
 58  s = -(1.0 - ComputeNonbondedUtil::scale14);
 59 #else
// NOTE(review): the CPU-path assignment to `s` (original line 60; per the
// comment block above, presumably s = ComputeNonbondedUtil::scale14) was
// dropped by the extraction -- confirm against the pristine source.
 61 #endif
// Nothing to do when the scaling factor vanishes (e.g. scale14 == 1 on GPU).
 62  if (s == 0.0) return;
// NOTE(review): original line 63 (dropped) presumably obtained the
// SimParameters pointer referenced as `simParams` below -- confirm.
 64  if (simParams->alchOn && ntuple > 0) {
 65  NAMD_die("Alchemical calculation for 1-4 atom pairs with nonbonded "
 66  "Thole parameters is not supported.");
 67  }
 68  for (int ituple=0; ituple<ntuple; ++ituple) {
 69  const OneFourNbTholeElem &tup = tuples[ituple];
 70  // const AtomID (&atomID)[OneFourNbTholeElem::size](tup.atomID);
// NOTE(review): original lines 71-72 (dropped) presumably bound the
// `localIndex` and `p` aliases (per-tuple local atom indices and
// TuplePatchElem pointers) used throughout the loop body.
 73  const OneFourNbTholeValue* const(&value)(tup.value);
 74  DebugM(3, "::computeForce() localIndex = " << localIndex[0] << " "
 75  << localIndex[1] << " " << localIndex[2] << " "
 76  << localIndex[3] << std::endl);
// `aa` is the damping coefficient a in the Thole screening factor exp(-a r)
// used below.
 77  const BigReal aa = value->aa;
 78  // Calculate the vectors between atoms
 79  const Position & rai = p[0]->x[localIndex[0]].position; // atom i
 80  const Position & rdi = p[1]->x[localIndex[1]].position; // atom i's Drude
 81  const Position & raj = p[2]->x[localIndex[2]].position; // atom j
 82  const Position & rdj = p[3]->x[localIndex[3]].position; // atom j's Drude
 83 
// Coulomb-prefactored charge products for the four site pairs between the
// two heavy/Drude pairs: atom-atom, atom-Drude, Drude-atom, Drude-Drude.
// NOTE(review): the definition of CC is not visible in this listing --
// presumably it folds COULOMB, the dielectric, and the scale factor s;
// confirm against the pristine source.
 84  const BigReal qqaa = CC * p[0]->x[localIndex[0]].charge * p[2]->x[localIndex[2]].charge;
 85  const BigReal qqad = CC * p[0]->x[localIndex[0]].charge * p[3]->x[localIndex[3]].charge;
 86  const BigReal qqda = CC * p[1]->x[localIndex[1]].charge * p[2]->x[localIndex[2]].charge;
 87  const BigReal qqdd = CC * p[1]->x[localIndex[1]].charge * p[3]->x[localIndex[3]].charge;
 88 
 89  // r_ij = r_i - r_j
 90  const Vector raa = lattice.delta(rai,raj); // shortest vector image: rai - raj
 91  const Vector rad = lattice.delta(rai,rdj); // shortest vector image: rai - rdj
 92  const Vector rda = lattice.delta(rdi,raj); // shortest vector image: rdi - raj
 93  const Vector rdd = lattice.delta(rdi,rdj); // shortest vector image: rdi - rdj
 94 
 95  // 1/r, r = |r_ij|
 96  const BigReal raa_invlen = raa.rlength(); // reciprocal of length
 97  const BigReal rad_invlen = rad.rlength();
 98  const BigReal rda_invlen = rda.rlength();
 99  const BigReal rdd_invlen = rdd.rlength();
 100 
 101  // ar
 102  const BigReal auaa = aa / raa_invlen;
 103  const BigReal auad = aa / rad_invlen;
 104  const BigReal auda = aa / rda_invlen;
 105  const BigReal audd = aa / rdd_invlen;
 106 
 107  // exp(-ar)
 108  const BigReal expauaa = exp(-auaa);
 109  const BigReal expauad = exp(-auad);
 110  const BigReal expauda = exp(-auda);
 111  const BigReal expaudd = exp(-audd);
 112 
 113  // (1 + ar/2)
 114  BigReal polyauaa = 1 + 0.5*auaa;
 115  BigReal polyauad = 1 + 0.5*auad;
 116  BigReal polyauda = 1 + 0.5*auda;
 117  BigReal polyaudd = 1 + 0.5*audd;
 118 
 119  BigReal ethole;
 120  if (doEnergy) {
// Thole-screened correction energy: each site pair contributes
// -(qq/r) * (1 + ar/2) * exp(-ar), i.e. minus the screened portion of the
// Coulomb interaction.
 121  ethole = 0;
 122  ethole += qqaa * raa_invlen * (-polyauaa * expauaa);
 123  ethole += qqad * rad_invlen * (-polyauad * expauad);
 124  ethole += qqda * rda_invlen * (-polyauda * expauda);
 125  ethole += qqdd * rdd_invlen * (-polyaudd * expaudd);
 126  }
 127 
// Promote the polynomial in place: 1 + ar*(1 + ar/2) = 1 + ar + (ar)^2/2,
// which is the factor appearing in the radial derivative of the energy above.
 128  polyauaa = 1 + auaa*polyauaa;
 129  polyauad = 1 + auad*polyauad;
 130  polyauda = 1 + auda*polyauda;
 131  polyaudd = 1 + audd*polyaudd;
 132 
 133  const BigReal raa_invlen3 = raa_invlen * raa_invlen * raa_invlen;
 134  const BigReal rad_invlen3 = rad_invlen * rad_invlen * rad_invlen;
 135  const BigReal rda_invlen3 = rda_invlen * rda_invlen * rda_invlen;
 136  const BigReal rdd_invlen3 = rdd_invlen * rdd_invlen * rdd_invlen;
 137 
// Radial force coefficient df = (1/r) dU/dr
//   = (qq/r^3) * (1 + ar + (ar)^2/2) * exp(-ar),
// so the force on the first site of each pair is f = -df * r_vec.
 138  const BigReal dfaa = qqaa * raa_invlen3 * (polyauaa*expauaa);
 139  const BigReal dfad = qqad * rad_invlen3 * (polyauad*expauad);
 140  const BigReal dfda = qqda * rda_invlen3 * (polyauda*expauda);
 141  const BigReal dfdd = qqdd * rdd_invlen3 * (polyaudd*expaudd);
 142 
 143  const Vector faa = -dfaa * raa;
 144  const Vector fad = -dfad * rad;
 145  const Vector fda = -dfda * rda;
 146  const Vector fdd = -dfdd * rdd;
 147 
// Apply equal-and-opposite forces: each site of pair i interacts with both
// sites of pair j (Newton's third law on each of the four interactions).
 148  p[0]->f[localIndex[0]] += faa + fad;
 149  p[1]->f[localIndex[1]] += fda + fdd;
 150  p[2]->f[localIndex[2]] -= faa + fda;
 151  p[3]->f[localIndex[3]] -= fad + fdd;
 152 
 153  if (doEnergy) {
 154  reduction[OneFourNbTholeEnergyIndex] += ethole;
 155  }
 156 
// Virial: accumulate the outer product f (x) r summed over the four pairs.
 157  if (doVirial) {
 158  reduction[virialIndex_XX] += faa.x * raa.x + fad.x * rad.x
 159  + fda.x * rda.x + fdd.x * rdd.x;
 160  reduction[virialIndex_XY] += faa.x * raa.y + fad.x * rad.y
 161  + fda.x * rda.y + fdd.x * rdd.y;
 162  reduction[virialIndex_XZ] += faa.x * raa.z + fad.x * rad.z
 163  + fda.x * rda.z + fdd.x * rdd.z;
 164  reduction[virialIndex_YX] += faa.y * raa.x + fad.y * rad.x
 165  + fda.y * rda.x + fdd.y * rdd.x;
 166  reduction[virialIndex_YY] += faa.y * raa.y + fad.y * rad.y
 167  + fda.y * rda.y + fdd.y * rdd.y;
 168  reduction[virialIndex_YZ] += faa.y * raa.z + fad.y * rad.z
 169  + fda.y * rda.z + fdd.y * rdd.z;
 170  reduction[virialIndex_ZX] += faa.z * raa.x + fad.z * rad.x
 171  + fda.z * rda.x + fdd.z * rdd.x;
 172  reduction[virialIndex_ZY] += faa.z * raa.y + fad.z * rad.y
 173  + fda.z * rda.y + fdd.z * rdd.y;
 174  reduction[virialIndex_ZZ] += faa.z * raa.z + fad.z * rad.z
 175  + fda.z * rda.z + fdd.z * rdd.z;
 176  }
 177 
// Pressure profile: bin each of the four sites into a z-slab and record the
// per-pair virial diagonal into the corresponding slab pair.
 178  if (pressureProfileData) {
 179  BigReal zai = p[0]->x[localIndex[0]].position.z;
 180  BigReal zdi = p[1]->x[localIndex[1]].position.z;
 181  BigReal zaj = p[2]->x[localIndex[2]].position.z;
 182  BigReal zdj = p[3]->x[localIndex[3]].position.z;
 183  int nai = (int)floor((zai-pressureProfileMin)/pressureProfileThickness);
 184  int ndi = (int)floor((zdi-pressureProfileMin)/pressureProfileThickness);
 185  int naj = (int)floor((zaj-pressureProfileMin)/pressureProfileThickness);
 186  int ndj = (int)floor((zdj-pressureProfileMin)/pressureProfileThickness);
// NOTE(review): original lines 187-190 (dropped) presumably clamped the four
// slab indices via pp_clamp(), and the dropped lines 196/199/202/205 were the
// pp_reduction(...) call heads whose argument lists survive below -- confirm
// against the pristine source.
 191  int pai = p[0]->x[localIndex[0]].partition;
 192  int pdi = p[1]->x[localIndex[1]].partition;
 193  int paj = p[2]->x[localIndex[2]].partition;
 194  int pdj = p[3]->x[localIndex[3]].partition;
 195  int pn = pressureProfileAtomTypes;
 197  pai, paj, pn, faa.x * raa.x, faa.y * raa.y, faa.z * raa.z,
 198  pressureProfileData);
 200  pai, pdj, pn, fad.x * rad.x, fad.y * rad.y, fad.z * rad.z,
 201  pressureProfileData);
 203  pdi, paj, pn, fda.x * rda.x, fda.y * rda.y, fda.z * rda.z,
 204  pressureProfileData);
 206  pdi, pdj, pn, fdd.x * rdd.x, fdd.y * rdd.y, fdd.z * rdd.z,
 207  pressureProfileData);
 208  }
 209  }
 210 }
211 
 213 {
// Folds this compute's accumulated data into the global reduction.
// NOTE(review): garbled extraction -- the signature (original line 212;
// per the doxygen declaration, "static void submitReductionData(BigReal *,
// SubmitReduction *)") and original lines 214-217 (presumably the energy
// item submission) were dropped; only the virial tensor submission below
// survives. Confirm against the pristine source.
 218  ADD_TENSOR(reduction,REDUCTION_VIRIAL_NORMAL,data,virialIndex);
 219 }
220 
// Tuple hash: packs the four atom IDs into one int (8 bits each, so IDs
// >= 256 alias -- acceptable for a hash) and masks off the sign bit so the
// result is non-negative.
// NOTE(review): the hyperlinked signature line (original line 221) was
// dropped by the extraction.
 222  return 0x7FFFFFFF &((atomID[0]<<24) + (atomID[1]<<16) + (atomID[2]<<8) + atomID[3]);
 223 }
224 
// Presumably the default constructor of OneFourNbTholeElem (its signature,
// original line 225, was dropped by the extraction -- TODO confirm);
// zero-initializes the `scale` member.
 226  scale = 0;
 227 }
228 
// Memory-optimized builds do not support this tuple type: abort with a clear
// message. NOTE(review): the enclosing signature (original line 229) was
// dropped by the extraction -- presumably the MEM_OPT_VERSION variant of one
// of the tuple hooks; confirm against the pristine source.
 230  NAMD_die("Can't use OneFourNbThole with memory optimized version of NAMD.");
 231 }
232 
// Presumably a constructor taking a pointer `a` to the molecule's
// OneFourNbThole record plus a parameter pointer `v` (its signature,
// original line 233, was dropped by the extraction -- TODO confirm).
// Copies the four atom IDs out of the record and keeps the record pointer
// itself as the tuple's parameter value.
 234  atomID[0] = a->atom1;
 235  atomID[1] = a->atom2;
 236  atomID[2] = a->atom3;
 237  atomID[3] = a->atom4;
 238  value = a; // expect v to be NULL
 239 }
240 
 242  AtomID atom0, AtomID atom1, AtomID atom2, AtomID atom3) {
// Constructor from four explicit atom IDs; its opening signature line
// (original line 241) was dropped by the extraction. The two heavy/Drude
// pairs are swapped as a unit so the tuple is stored canonically with the
// lower heavy-atom ID first, making equivalent tuples compare equal
// regardless of input order.
 243  // atoms arranged: HEAVY DRUDE HEAVY DRUDE
 244  if (atom0 > atom2) { // Swap heavy atoms so lowest is first!
 245  std::swap(atom0, atom2);
 246  std::swap(atom1, atom3);
 247  }
 248  atomID[0] = atom0;
 249  atomID[1] = atom1;
 250  atomID[2] = atom2;
 251  atomID[3] = atom3;
 252 }
253 
255  return (a.atomID[0] == atomID[0] && a.atomID[1] == atomID[1] &&
256  a.atomID[2] == atomID[2] && a.atomID[3] == atomID[3]);
257 }
258 
260  return (atomID[0] < a.atomID[0] ||
261  (atomID[0] == a.atomID[0] &&
262  (atomID[1] < a.atomID[1] ||
263  (atomID[1] == a.atomID[1] &&
264  (atomID[2] < a.atomID[2] ||
265  (atomID[2] == a.atomID[2] &&
266  atomID[3] < a.atomID[3]
267  ))))));
268 }
static Node * Object()
Definition: Node.h:86
static void getParameterPointers(Parameters *, const OneFourNbTholeValue **)
Lattice & lattice
Definition: Patch.h:127
#define ADD_TENSOR(R, RL, D, DL)
Definition: ReductionMgr.h:33
TuplePatchElem * p[size]
void pp_reduction(int nslabs, int n1, int n2, int atype1, int atype2, int numtypes, BigReal vxx, BigReal vyy, BigReal vzz, BigReal *reduction)
int numOneFourNbTholes
Definition: Molecule.h:616
Definition: Vector.h:72
SimParameters * simParameters
Definition: Node.h:181
#define COULOMB
Definition: common.h:53
int32_t int32
Definition: common.h:38
BigReal & item(int i)
Definition: ReductionMgr.h:336
#define DebugM(x, y)
Definition: Debug.h:75
BigReal z
Definition: Vector.h:74
Position position
Definition: NamdTypes.h:78
static BigReal pressureProfileThickness
void swap(Array< T > &s, Array< T > &t)
Definition: MsmMap.h:319
Molecule stores the structural information for the system.
Definition: Molecule.h:174
Flags flags
Definition: Patch.h:128
const OneFourNbTholeValue * value
Charge charge
Definition: NamdTypes.h:79
void pp_clamp(int &n, int nslabs)
static void getMoleculePointers(Molecule *, int *, int32 ***, OneFourNbThole **)
int doEnergy
Definition: PatchTypes.h:20
uint8 partition
Definition: NamdTypes.h:81
BigReal x
Definition: Vector.h:74
void NAMD_die(const char *err_msg)
Definition: common.C:147
int operator<(const OneFourNbTholeElem &a) const
#define simParams
Definition: Output.C:131
int32 AtomID
Definition: NamdTypes.h:35
int doVirial
Definition: PatchTypes.h:21
int operator==(const OneFourNbTholeElem &a) const
NAMD_HOST_DEVICE BigReal rlength(void) const
Definition: Vector.h:210
static void computeForce(OneFourNbTholeElem *, int, BigReal *, BigReal *)
static void submitReductionData(BigReal *, SubmitReduction *)
double BigReal
Definition: common.h:123
NAMD_HOST_DEVICE Vector delta(const Position &pos1, const Position &pos2) const
Definition: Lattice.h:149
static BigReal pressureProfileMin