7 #ifndef COMPUTESELFTUPLES_H
8 #define COMPUTESELFTUPLES_H
// NOTE(review): this chunk is a damaged extraction — the integers fused into
// the start of many lines are the ORIGINAL file's line numbers, and they jump
// (14, 17, 22, 24, ...), so many interior source lines are missing. The code
// below is kept byte-identical; comments describe only what the visible
// fragments show. Recover the pristine file before attempting any code change.
//
// SelfTuples<T,S,P>: derives from HomeTuples<T,S,P>. T is the tuple element
// type (bond/angle/dihedral-like), S presumably its molecule-side struct, and
// P its parameter type — TODO confirm against HomeTuples' declaration.
14 template <
class T,
class S,
class P>
class SelfTuples :
public HomeTuples<T, S, P> {
// Constructor: forwards the optional tuple-type tag to the HomeTuples base.
17 SelfTuples(
int type=-1) : HomeTuples<T,S,P>(type) {}
// Fragment of loadTuples(...); the start of the signature is missing. The
// visible trailing parameter is an optional list of patch IDs to restrict
// the load to (empty vector == "all patches in tuplePatchList", see below).
22 const std::vector<int>& pids = std::vector<int>()) {
// Self tuples ignore isBasePatch; a non-NULL value is only warned about.
24 if (isBasePatch != NULL) {
25 iout <<
iWARN <<
"Non-NULL isBasePatch detected in SelfTuples::loadTuples()" <<
endi;
// MEM_OPT_VERSION path: per-atom tuple info comes from compressed
// "signatures" instead of whole-molecule arrays.
30 #ifdef MEM_OPT_VERSION
31 typename ElemTraits<T>::signature *allSigs;
42 #ifdef MEM_OPT_VERSION
43 allSigs = ElemTraits<T>::get_sig_pointer(node->
molecule);
// Non-MEM_OPT path: static per-type accessors expose the molecule's full
// tuple arrays (tuplesByAtom index lists + tupleStructs storage).
45 T::getMoleculePointers(node->
molecule,
46 &numTuples, &tuplesByAtom, &tupleStructs);
49 T::getParameterPointers(node->
parameters, &tupleValues);
// Rebuild the tuple list from scratch on every (re)load.
51 this->tupleList.clear();
// LES (locally enhanced sampling) scaling factor; rhs truncated here.
60 Real invLesFactor = lesOn ?
// Iterate either over every patch in tuplePatchList or only over the
// patches named in pids.
77 if (pids.size() == 0) ai = ai.begin();
79 int numPid = (pids.size() == 0) ? tuplePatchList.
size() : pids.size();
81 for (
int ipid=0;ipid < numPid;ipid++) {
86 if (pids.size() == 0) {
// For each atom j in the current patch...
99 for (
int j=0; j < numAtoms; j++)
// MEM_OPT: look up this atom's signature, then enumerate its tuples.
101 #ifdef MEM_OPT_VERSION
102 typename ElemTraits<T>::signature *thisAtomSig =
103 &allSigs[ElemTraits<T>::get_sig_id(atomExt[j])];
105 T::getTupleInfo(thisAtomSig, &numTuples, &allTuples);
106 for(
int k=0; k<numTuples; k++) {
107 T t(atomExt[j].
id, &allTuples[k], tupleValues);
// Non-MEM_OPT: walk the -1-terminated per-atom tuple index list.
110 int32 *curTuple = tuplesByAtom[atomExt[j].
id];
112 for( ; *curTuple != -1; ++curTuple) {
113 T t(&tupleStructs[*curTuple],tupleValues);
// Map the first atom to its local patch; used to require all atoms
// of the tuple to live in the same home patch ("self" tuples).
116 aid[0] = atomMap->
localID(t.atomID[0]);
117 int homepatch = aid[0].
pid;
// FEP partition handling: partition > 2 marks "ss" atoms; a partition
// distance of 2 between atoms marks an "sd" (soft-dual?) tuple —
// TODO confirm the exact alchemical semantics upstream.
122 int is_fep_ss = partition[0] > 2;
124 int fep_tuple_type = 0;
125 for (i=1; i < T::size; i++) {
126 aid[i] = atomMap->
localID(t.atomID[i]);
127 samepatch = samepatch && ( homepatch == aid[i].
pid );
132 is_fep_ss &= partition[i] > 2;
133 is_fep_sd |= (abs(partition[i] - partition[0]) == 2);
134 fep_tuple_type = partition[i]; }
// sdScaling: tuples explicitly listed as "unperturbed" bonds/angles/
// dihedrals are exempted from sd scaling (is_fep_sd cleared).
136 if (sdScaling && is_fep_sd) {
137 for (i=0; i < num_unpert_bonds; i++) {
139 && t.atomID[0]==unpert_bonds[i].
atom1
140 && t.atomID[1]==unpert_bonds[i].
atom2) is_fep_sd = 0;
142 for (i=0; i < num_unpert_angles; i++) {
144 && t.atomID[0]==unpert_angles[i].
atom1
145 && t.atomID[1]==unpert_angles[i].
atom2
146 && t.atomID[2]==unpert_angles[i].
atom3) is_fep_sd = 0;
148 for (i=0; i < num_unpert_dihedrals; i++) {
150 && t.atomID[0]==unpert_dihedrals[i].
atom1
151 && t.atomID[1]==unpert_dihedrals[i].
atom2
152 && t.atomID[2]==unpert_dihedrals[i].
atom3
153 && t.atomID[3]==unpert_dihedrals[i].
atom4) is_fep_sd = 0;
// Solute scaling applies to tuples smaller than 4 atoms only when
// soluteScalingAll is set — TODO confirm; condition truncated here.
156 if (T::size < 4 && !soluteScalingAll) has_ss =
false;
// Final per-tuple scale: 1.0, LES inverse factor, or solute scaling
// factor; FEP ss/sd tuples are instead scaled by lambda terms.
158 t.scale = (!has_les && !has_ss) ? 1.0 : ( has_les ? invLesFactor : soluteScalingFactor );
159 if (is_fep_ss) t.scale = (fep_tuple_type == 4) ? OneMinusLambda : Lambda;
160 if (is_fep_sd && sdScaling) t.scale = (fep_tuple_type == 4 || fep_tuple_type == 2) ? OneMinusLambda : Lambda;
// Record local indices for every atom of the tuple.
163 for(i=0; i < T::size; i++) {
165 t.localIndex[i] = aid[i].
index;
// MEM_OPT: skip tuples whose atoms are all fixed (allfixed); otherwise
// the tuple is appended unconditionally (three push_back sites belong
// to different #ifdef/else branches in the missing lines).
167 #ifdef MEM_OPT_VERSION
172 for(i=0; i<T::size; i++){
176 if(!allfixed) this->tupleList.push_back(t);
178 this->tupleList.push_back(t);
181 this->tupleList.push_back(t);
// NOTE(review): garbled extraction continues — fused leading integers are
// original line numbers and they jump, so interior lines are missing. Code is
// kept byte-identical; comments are descriptive only.
//
// Legacy (#ifndef USE_HOMETUPLES) member loadTuples(void): same tuple-loading
// algorithm as SelfTuples::loadTuples above, but iterating the whole
// tuplePatchList and using tupleList.resize(0)/add() instead of
// clear()/push_back(). Keep the two copies in sync when the pristine file is
// edited.
195 #ifndef USE_HOMETUPLES
198 virtual void loadTuples(
void) {
201 #ifdef MEM_OPT_VERSION
202 typename ElemTraits<T>::signature *allSigs;
204 int32 **tuplesByAtom;
208 const P *tupleValues;
// Fetch molecule-wide tuple data (signature-based under MEM_OPT_VERSION).
211 #ifdef MEM_OPT_VERSION
212 allSigs = ElemTraits<T>::get_sig_pointer(node->
molecule);
214 T::getMoleculePointers(node->
molecule,
215 &numTuples, &tuplesByAtom, &tupleStructs);
218 T::getParameterPointers(node->
parameters, &tupleValues);
// Reset the list; capacity is presumably retained by resize(0).
220 this->tupleList.resize(0);
// VLA-style per-tuple FEP partition buffer (T::size is a compile-time
// constant of the tuple type).
223 int partition[T::size];
229 Real invLesFactor = lesOn ?
// Walk every patch in the tuple patch list.
247 for ( ai = ai.
begin(); ai != ai.
end(); ai++ )
256 for (
int j=0; j < numAtoms; j++)
258 #ifdef MEM_OPT_VERSION
259 typename ElemTraits<T>::signature *thisAtomSig =
260 &allSigs[ElemTraits<T>::get_sig_id(atomExt[j])];
262 T::getTupleInfo(thisAtomSig, &numTuples, &allTuples);
263 for(
int k=0; k<numTuples; k++) {
264 T t(atomExt[j].
id, &allTuples[k], tupleValues);
// Non-MEM_OPT: -1-terminated tuple index list for this atom.
267 int32 *curTuple = tuplesByAtom[atomExt[j].
id];
269 for( ; *curTuple != -1; ++curTuple) {
270 T t(&tupleStructs[*curTuple],tupleValues);
// All tuple atoms must resolve to the same home patch (samepatch).
273 aid[0] = this->atomMap->
localID(t.atomID[0]);
274 int homepatch = aid[0].
pid;
279 int is_fep_ss = partition[0] > 2;
281 int fep_tuple_type = 0;
282 for (i=1; i < T::size; i++) {
283 aid[i] = this->atomMap->
localID(t.atomID[i]);
284 samepatch = samepatch && ( homepatch == aid[i].
pid );
289 is_fep_ss &= partition[i] > 2;
290 is_fep_sd |= (abs(partition[i] - partition[0]) == 2);
291 fep_tuple_type = partition[i]; }
293 if (T::size < 4 && !soluteScalingAll) has_ss =
false;
// sdScaling exemptions: explicitly unperturbed bonds/angles/dihedrals
// clear is_fep_sd (mirrors the pids-aware loadTuples above).
294 if (sdScaling && is_fep_sd) {
295 for (i=0; i < num_unpert_bonds; i++) {
297 && t.atomID[0]==unpert_bonds[i].
atom1
298 && t.atomID[1]==unpert_bonds[i].
atom2) is_fep_sd = 0;
300 for (i=0; i < num_unpert_angles; i++) {
302 && t.atomID[0]==unpert_angles[i].
atom1
303 && t.atomID[1]==unpert_angles[i].
atom2
304 && t.atomID[2]==unpert_angles[i].
atom3) is_fep_sd = 0;
306 for (i=0; i < num_unpert_dihedrals; i++) {
308 && t.atomID[0]==unpert_dihedrals[i].
atom1
309 && t.atomID[1]==unpert_dihedrals[i].
atom2
310 && t.atomID[2]==unpert_dihedrals[i].
atom3
311 && t.atomID[3]==unpert_dihedrals[i].
atom4) is_fep_sd = 0;
// Per-tuple scale: default / LES / solute scaling, overridden by FEP
// lambda factors for ss and (when sdScaling) sd tuples.
315 t.scale = (!has_les && !has_ss) ? 1.0 : ( has_les ? invLesFactor : soluteScalingFactor );
316 if (is_fep_ss) t.scale = (fep_tuple_type == 4) ? OneMinusLambda : Lambda;
317 if (is_fep_sd && sdScaling) t.scale = (fep_tuple_type == 4 || fep_tuple_type == 2) ? OneMinusLambda : Lambda;
320 for(i=0; i < T::size; i++) {
322 t.localIndex[i] = aid[i].
index;
// MEM_OPT: tuples with all atoms fixed are skipped; the three add()
// sites belong to different #ifdef/else branches in the missing lines.
324 #ifdef MEM_OPT_VERSION
329 for(i=0; i<T::size; i++){
333 if(!allfixed) this->tupleList.add(t);
335 this->tupleList.add(t);
338 this->tupleList.add(t);
// Destructor fragment (enclosing signature missing): unregister this
// compute's position/avg-position/force boxes from every patch it touched.
357 for (ap = ap.
begin(); ap != ap.
end(); ap++) {
358 ap->p->unregisterPositionPickup(
this,&(ap->positionBox));
359 ap->p->unregisterAvgPositionPickup(
this,&(ap->avgPositionBox));
360 ap->p->unregisterForceDeposit(
this,&(ap->forceBox));
// NOTE(review): fragments of two further methods (enclosing signatures are in
// the missing lines — presumably initialize() and doWork(); TODO confirm
// against the pristine file). Code kept byte-identical.
//
// initialize() fragment: under USE_HOMETUPLES the tuple machinery is delegated
// to a freshly allocated SelfTuples strategy object; the patch list is rebuilt
// and tuple loading is flagged for the next doWork().
370 #ifdef USE_HOMETUPLES
371 this->tuples =
new SelfTuples<T, S, P>();
374 this->tuplePatchList.
clear();
378 this->setNumPatches(this->tuplePatchList.
size());
380 this->doLoadTuples =
true;
// doWork() fragment: CkMyPe() is the Charm++ processor rank; the trace
// timestamps bracket the compute for TRACE_COMPUTE_OBJECTS profiling.
382 int myNode = CkMyPe();
396 #ifdef TRACE_COMPUTE_OBJECTS
397 double traceObjStartTime = CmiWallTimer();
402 #ifdef TRACE_COMPUTE_OBJECTS
Elem * find(const Elem &elem)
#define COMPUTE_PROXY_PRIORITY
int num_alch_unpert_Dihedrals
#define TRACE_COMPOBJ_IDOFFSET
static void partition(int *order, const FullAtom *atoms, int begin, int end)
static PatchMap * Object()
Angle * alch_unpert_angles
SimParameters * simParameters
int num_alch_unpert_Bonds
std::ostream & endi(std::ostream &s)
std::ostream & iWARN(std::ostream &s)
int num_alch_unpert_Angles
int add(const Elem &elem)
Dihedral * alch_unpert_dihedrals
virtual ~ComputeSelfTuples()
UniqueSetIter< T > begin(void) const
#define COMPUTE_HOME_PRIORITY
BigReal soluteScalingFactor
ComputeSelfTuples(ComputeID c, PatchID p)
unsigned char get_ss_type(int anum) const
LocalID localID(AtomID id)
virtual void doWork(void)
unsigned char get_fep_type(int anum) const
UniqueSetIter< T > end(void) const
virtual void initialize(void)
#define PATCH_PRIORITY(PID)