// NOTE(review): this whole chunk is an extraction of a C++ header with the
// original file's line numbers fused into the code text (the stray integers
// below, e.g. "7", "8", "99", "100") and statements split mid-token across
// lines.  Code tokens are left byte-identical; only comments are added.
//
// Primary template of a traits struct: for a generic tuple element type T,
// signatures come from the Molecule's atom signature pool and are indexed by
// the per-atom sigId.  (The `signature` type it returns is presumably a
// typedef declared on a line not visible in this chunk — TODO confirm.)
7 #ifndef COMPUTEHOMETUPLES_H 8 #define COMPUTEHOMETUPLES_H 99 #ifdef MEM_OPT_VERSION 100 template <
class T>
struct ElemTraits {
// Returns the molecule-wide atom signature pool.
102 static signature* get_sig_pointer(
Molecule *mol) {
return mol->atomSigPool; }
// Returns this atom's index into that pool.
103 static int get_sig_id(
const CompAtomExt &a) {
return a.sigId; }
// Specialization for ExclElem: exclusion tuples read their signatures from
// the molecule's exclusion signature pool and index it with the atom's
// exclId instead of sigId.  (Closing brace of the struct is not visible in
// this extracted chunk.)
106 template <>
struct ElemTraits <
ExclElem> {
// Returns the molecule-wide exclusion signature pool.
108 static signature* get_sig_pointer(
Molecule *mol) {
return mol->exclSigPool; }
// Returns this atom's index into the exclusion pool.
109 static int get_sig_id(
const CompAtomExt &a) {
return a.exclId; }
// Fragment of the abstract Tuples base class (USE_HOMETUPLES build): holds a
// tuple-type tag set at construction and declares the virtual interface that
// HomeTuples (below) implements.
113 #ifdef USE_HOMETUPLES 121 Tuples(
int type) : type(type) {}
// Accessor for the tuple-type tag.
135 int getType() {
return type;}
// Pure virtual interface: report tuple count into a reduction, query the
// number of tuples, and expose the raw tuple array.
136 virtual void submitTupleCount(
SubmitReduction *reduction,
int tupleCount)=0;
138 virtual int getNumTuples()=0;
139 virtual void* getTupleList()=0;
// Trailing parameter of a virtual loadTuples(...) declaration whose opening
// is not visible in this chunk (default: empty patch-id list).
141 const std::vector<int>& pids = std::vector<int>())=0;
// HomeTuples<T,S,P>: concrete Tuples implementation that stores the tuples
// owned by this compute in a std::vector<T>.  T is the tuple element type,
// S/P are presumably the structure and parameter types passed to T's static
// helpers — TODO confirm from the full header.
148 template <
class T,
class S,
class P>
class HomeTuples :
public Tuples {
150 std::vector<T> tupleList;
154 HomeTuples(
int type=-1) : Tuples(type) {}
// Raw access to the contiguous tuple storage (requires C++11 .data(); the
// visible "#if __cplusplus < 201103L" suggests a pre-C++11 fallback exists
// on lines not shown here).
156 #if __cplusplus < 201103L 160 virtual void* getTupleList() final {
161 return (
void*)tupleList.data();
// Adds this compute's tuple count to the per-type checksum reduction item.
164 virtual void submitTupleCount(
SubmitReduction *reduction,
int tupleCount)
final {
165 reduction->item(T::reductionChecksumLabel) += (
BigReal)tupleCount;
175 virtual int getNumTuples() final {
176 return tupleList.size();
// loadTuples (fragment): walks the patches (all of tuplePatchList, or only
// the given pids) and their atoms, instantiates tuple objects of type T,
// decides which tuples this compute owns via the downstream-patch rule,
// applies LES/solute-scaling/alchemical scale factors, and appends owned
// tuples to tupleList.  NOTE(review): collapsed #ifdef branches and fused
// line numbers make this span non-compilable as shown; tokens untouched.
180 const std::vector<int>& pids = std::vector<int>()) {
// Guard: the base-patch ownership table must exist before loading tuples.
182 if (isBasePatch == NULL) {
183 NAMD_bug(
"NULL isBasePatch detected in HomeTuples::loadTuples()");
// MEM_OPT_VERSION discovers tuples through per-atom signatures; the other
// build walks a per-atom index (tuplesByAtom) into tupleStructs.
188 #ifdef MEM_OPT_VERSION 189 typename ElemTraits<T>::signature *allSigs;
191 int32 **tuplesByAtom;
195 const P *tupleValues;
200 #ifdef MEM_OPT_VERSION 201 allSigs = ElemTraits<T>::get_sig_pointer(node->
molecule);
203 T::getMoleculePointers(node->
molecule,
204 &numTuples, &tuplesByAtom, &tupleStructs);
207 T::getParameterPointers(node->
parameters, &tupleValues);
// Scale factor used when locally-enhanced sampling is on (RHS truncated in
// this extraction; presumably 1/lesFactor — TODO confirm).
218 Real invLesFactor = lesOn ?
// Choose iteration source: the whole tuplePatchList or just the pids given.
234 if (pids.size() == 0) ai = ai.begin();
236 int numPid = (pids.size() == 0) ? tuplePatchList.
size() : pids.size();
238 for (
int ipid=0;ipid < numPid;ipid++) {
243 if (pids.size() == 0) {
246 atomExt = (*ai).xExt;
// Per-atom tuple discovery.
258 for (
int j=0; j < numAtoms; j++)
261 #ifdef MEM_OPT_VERSION 262 typename ElemTraits<T>::signature *thisAtomSig =
263 &allSigs[ElemTraits<T>::get_sig_id(atomExt[j])];
265 T::getTupleInfo(thisAtomSig, &numTuples, &allTuples);
266 for(
int k=0; k<numTuples; k++) {
267 T t(atomExt[j].
id, &allTuples[k], tupleValues);
// Non-MEM_OPT path: follow the -1-terminated tuple index list for atom j.
270 int32 *curTuple = tuplesByAtom[atomExt[j].
id];
271 for( ; *curTuple != -1; ++curTuple) {
272 T t(&tupleStructs[*curTuple],tupleValues);
// Map each member atom to a local patch; track whether all share one patch.
275 aid[0] = atomMap->
localID(t.atomID[0]);
276 int homepatch = aid[0].
pid;
283 int fep_tuple_type = 0;
284 for (i=1; i < T::size; i++) {
285 aid[i] = atomMap->
localID(t.atomID[i]);
286 samepatch = samepatch && ( homepatch == aid[i].
pid );
// FEP: tuples listed in the "unperturbed" bond/angle/dihedral tables are
// exempted from alchemical scaling (is_fep_sd cleared on a match).
295 if (sdScaling && is_fep_sd) {
304 for (i=0; i < num_unpert_bonds; i++) {
306 && t.atomID[0]==unpert_bonds[i].atom1
307 && t.atomID[1]==unpert_bonds[i].atom2) is_fep_sd = 0;
309 for (i=0; i < num_unpert_angles; i++) {
311 && t.atomID[0]==unpert_angles[i].atom1
312 && t.atomID[1]==unpert_angles[i].atom2
313 && t.atomID[2]==unpert_angles[i].atom3) is_fep_sd = 0;
315 for (i=0; i < num_unpert_dihedrals; i++) {
317 && t.atomID[0]==unpert_dihedrals[i].atom1
318 && t.atomID[1]==unpert_dihedrals[i].atom2
319 && t.atomID[2]==unpert_dihedrals[i].atom3
320 && t.atomID[3]==unpert_dihedrals[i].atom4) is_fep_sd = 0;
// Solute scaling is restricted to 4-atom tuples unless soluteScalingAll.
323 if (T::size < 4 && !soluteScalingAll) has_ss =
false;
// Tuples wholly inside one patch are skipped here (presumably handled by
// that patch's own compute — TODO confirm against the full file).
324 if ( samepatch )
continue;
// Scale selection: LES factor, solute-scaling factor, or alchemical
// lambda/(1-lambda) depending on which feature flags fired above.
325 t.scale = (!has_les && !has_ss) ? 1.0 : ( has_les ? invLesFactor : soluteScalingFactor );
326 if (is_fep_ss) t.scale = (fep_tuple_type == 4) ? OneMinusLambda : Lambda;
327 if (is_fep_sd && sdScaling) t.scale = (fep_tuple_type == 4 || fep_tuple_type == 2) ? OneMinusLambda : Lambda;
// Ownership: fold all member atoms' patches through downstream(); keep the
// tuple only if the resulting patch is a base patch of this compute.
329 for (i=1; i < T::size; i++) {
330 homepatch = patchMap->
downstream(homepatch,aid[i].pid);
332 if ( homepatch !=
notUsed && isBasePatch[homepatch] ) {
334 for (i=0; i < T::size; i++) {
// Diagnostic path: a member atom's patch is missing from tuplePatchList.
337 #ifdef MEM_OPT_VERSION 340 iout <<
iWARN <<
"Tuple " << *curTuple <<
" with atoms ";
343 for( erri = 0; erri < T::size; erri++ ) {
344 iout << t.atomID[erri] <<
"(" << aid[erri].
pid <<
") ";
346 iout <<
"missing patch " << aid[i].
pid <<
"\n" <<
endi;
// Record each atom's local index within its patch for force lookup.
349 t.localIndex[i] = aid[i].
index;
// MEM_OPT_VERSION: skip tuples whose atoms are all fixed (allfixed);
// otherwise the tuple is stored unconditionally.
352 #ifdef MEM_OPT_VERSION 356 for(i=0; i<T::size; i++){
360 if(!allfixed) tupleList.push_back(t);
362 tupleList.push_back(t);
365 tupleList.push_back(t);
// Legacy (#ifndef USE_HOMETUPLES) loadTuples fragment: same algorithm as the
// HomeTuples version above — discover tuples per atom, map atoms to local
// patches, apply scaling, keep tuples owned via downstream() — but iterates
// the whole tuplePatchList and stores into a ResizeArray via tupleList.add.
// NOTE(review): extraction artifacts (fused line numbers, split tokens like
// "atom1 479") make this span non-compilable as shown; tokens untouched.
380 #ifndef USE_HOMETUPLES 384 #ifdef MEM_OPT_VERSION 385 typename ElemTraits<T>::signature *allSigs;
387 int32 **tuplesByAtom;
391 const P *tupleValues;
394 #ifdef MEM_OPT_VERSION 395 allSigs = ElemTraits<T>::get_sig_pointer(node->
molecule);
397 T::getMoleculePointers(node->
molecule,
398 &numTuples, &tuplesByAtom, &tupleStructs);
401 T::getParameterPointers(node->
parameters, &tupleValues);
// LES scale factor (RHS truncated in this extraction).
412 Real invLesFactor = lesOn ?
// Iterate every patch in tuplePatchList.
429 for ( ai = ai.
begin(); ai != ai.
end(); ai++ )
437 for (
int j=0; j < numAtoms; j++)
// MEM_OPT: per-atom signature lookup; non-MEM_OPT: -1-terminated index list.
440 #ifdef MEM_OPT_VERSION 441 typename ElemTraits<T>::signature *thisAtomSig =
442 &allSigs[ElemTraits<T>::get_sig_id(atomExt[j])];
444 T::getTupleInfo(thisAtomSig, &numTuples, &allTuples);
445 for(
int k=0; k<numTuples; k++) {
446 T t(atomExt[j].
id, &allTuples[k], tupleValues);
449 int32 *curTuple = tuplesByAtom[atomExt[j].
id];
450 for( ; *curTuple != -1; ++curTuple) {
451 T t(&tupleStructs[*curTuple],tupleValues);
// Map member atoms to local patches; track same-patch membership.
454 aid[0] = atomMap->
localID(t.atomID[0]);
455 int homepatch = aid[0].
pid;
462 int fep_tuple_type = 0;
463 for (i=1; i < T::size; i++) {
464 aid[i] = atomMap->
localID(t.atomID[i]);
465 samepatch = samepatch && ( homepatch == aid[i].
pid );
// FEP: exempt tuples found in the "unperturbed" tables from scaling.
475 if (sdScaling && is_fep_sd) {
476 for (i=0; i < num_unpert_bonds; i++) {
478 && t.atomID[0]==unpert_bonds[i].
atom1 479 && t.atomID[1]==unpert_bonds[i].
atom2) is_fep_sd = 0;
481 for (i=0; i < num_unpert_angles; i++) {
483 && t.atomID[0]==unpert_angles[i].
atom1 484 && t.atomID[1]==unpert_angles[i].
atom2 485 && t.atomID[2]==unpert_angles[i].
atom3) is_fep_sd = 0;
487 for (i=0; i < num_unpert_dihedrals; i++) {
489 && t.atomID[0]==unpert_dihedrals[i].
atom1 490 && t.atomID[1]==unpert_dihedrals[i].
atom2 491 && t.atomID[2]==unpert_dihedrals[i].
atom3 492 && t.atomID[3]==unpert_dihedrals[i].
atom4) is_fep_sd = 0;
// Solute scaling restricted to 4-atom tuples unless soluteScalingAll.
495 if (T::size < 4 && !soluteScalingAll) has_ss =
false;
// Skip tuples wholly contained in a single patch.
496 if ( samepatch )
continue;
// Select the tuple's force scale factor (LES / solute scaling / lambda).
497 t.scale = (!has_les && !has_ss) ? 1.0 : ( has_les ? invLesFactor : soluteScalingFactor );
498 if (is_fep_ss) t.scale = (fep_tuple_type == 4) ? OneMinusLambda : Lambda;
499 if (is_fep_sd && sdScaling) t.scale = (fep_tuple_type == 4 || fep_tuple_type == 2) ? OneMinusLambda : Lambda;
// Ownership via downstream() over all member atoms' patches.
501 for (i=1; i < T::size; i++) {
502 homepatch = patchMap->
downstream(homepatch,aid[i].pid);
504 if ( homepatch !=
notUsed && isBasePatch[homepatch] ) {
506 for (i=0; i < T::size; i++) {
// Diagnostic: warn when a member atom's patch is missing.
509 #ifdef MEM_OPT_VERSION 512 iout <<
iWARN <<
"Tuple " << *curTuple <<
" with atoms ";
515 for( erri = 0; erri < T::size; erri++ ) {
516 iout << t.atomID[erri] <<
"(" << aid[erri].
pid <<
") ";
518 iout <<
"missing patch " << aid[i].
pid <<
"\n" <<
endi;
521 t.localIndex[i] = aid[i].
index;
// MEM_OPT: store only if not all member atoms are fixed.
524 #ifdef MEM_OPT_VERSION 529 for(i=0; i<T::size; i++){
533 if(!allfixed) tupleList.add(t);
// ComputeHomeTuples fragments: the HomeTuples member (USE_HOMETUPLES build),
// two constructor bodies (with and without an explicit patch-id list) that
// set up pressure-profile storage and the isBasePatch ownership table, and
// the destructor.
551 #ifdef USE_HOMETUPLES 552 HomeTuples<T, S, P>* tuples;
// Constructor variant 1: allocate per-slab pressure-profile accumulators
// (3 components per slab per atom-type pair) when profiling is enabled,
// otherwise leave the pointers NULL.
579 pressureProfileSlabs = T::pressureProfileSlabs =
584 int numAtomTypePairs = n*n;
585 pressureProfileData =
new BigReal[3*pressureProfileSlabs*numAtomTypePairs];
587 pressureProfileReduction = NULL;
588 pressureProfileData = NULL;
590 doLoadTuples =
false;
// Constructor variant 2 (USE_HOMETUPLES): same setup, plus building the
// isBasePatch flag array from the supplied patch-id list.
592 #ifdef USE_HOMETUPLES 607 pressureProfileSlabs = T::pressureProfileSlabs =
612 int numAtomTypePairs = n*n;
613 pressureProfileData =
new BigReal[3*pressureProfileSlabs*numAtomTypePairs];
615 pressureProfileReduction = NULL;
616 pressureProfileData = NULL;
618 doLoadTuples =
false;
620 isBasePatch =
new char[nPatches];
622 for (i=0; i<nPatches; ++i) { isBasePatch[i] = 0; }
623 for (i=0; i<pids.
size(); ++i) { isBasePatch[pids[i]] = 1; }
// Destructor: releases the ownership table, reduction handle, profile
// buffer, and (USE_HOMETUPLES) the HomeTuples object.
// NOTE(review): pressureProfileData is allocated with new BigReal[...]
// above but freed here with plain `delete` — should be `delete []`
// (mismatched new[]/delete is undefined behavior).  Flagged only; this
// garbled extraction is left byte-identical.
624 #ifdef USE_HOMETUPLES 633 delete [] isBasePatch;
634 delete pressureProfileReduction;
635 delete pressureProfileData;
636 #ifdef USE_HOMETUPLES 637 if (tuples != NULL)
delete tuples;
// initialize fragment: allocates the HomeTuples helper (USE_HOMETUPLES),
// rebuilds tuplePatchList from the base patches and (in the CUDA/HIP build
// branch) their upstream neighbors, then sizes the compute accordingly.
647 #if defined(NAMD_CUDA) || defined(NAMD_HIP) 651 #ifdef USE_HOMETUPLES 652 tuples =
new HomeTuples<T, S, P>();
// Start from an empty patch list each (re)initialization.
656 tuplePatchList.
clear();
// Register every base patch...
660 for (pid=0; pid<nPatches; ++pid) {
661 if ( isBasePatch[pid] ) {
// ...and, in a second pass, the neighbor patches of each base patch.
662 #if defined(NAMD_CUDA) || defined(NAMD_HIP) 673 for (pid=0; pid<nPatches; ++pid)
if ( isBasePatch[pid] ) {
675 for (
int i = 0; i < numNeighbors; ++i ) {
// The compute covers one unit of work per patch in tuplePatchList.
677 #if defined(NAMD_CUDA) || defined(NAMD_HIP) 685 setNumPatches(tuplePatchList.
size());
// doWork fragment: per-step driver — open patch boxes, zero the reduction
// accumulators, (re)load tuples if flagged, compute forces over the tuple
// list, submit reductions and pressure-profile data, then close the boxes.
710 for (ap = ap.
begin(); ap != ap.
end(); ap++) {
// Zero the per-type reduction scratch and the pressure-profile buffer.
719 BigReal reductionData[T::reductionDataSize];
721 int numAtomTypes = T::pressureProfileAtomTypes;
722 int numAtomTypePairs = numAtomTypes*numAtomTypes;
724 for (
int i = 0; i < T::reductionDataSize; ++i ) reductionData[i] = 0;
725 if (pressureProfileData) {
726 memset(pressureProfileData, 0, 3*pressureProfileSlabs*numAtomTypePairs*
sizeof(
BigReal));
729 newap = newap.
begin();
// Slab geometry along the lattice c-axis: thickness and lower bound.
731 T::pressureProfileThickness = lattice.
c().
z / pressureProfileSlabs;
732 T::pressureProfileMin = lattice.
origin().
z - 0.5*lattice.
z;
// Reload the tuple list when migration has invalidated it.
736 if ( doLoadTuples ) {
737 #ifdef USE_HOMETUPLES 742 doLoadTuples =
// Fetch the tuple array either from the HomeTuples helper or directly
// from the local ResizeArray, depending on the build.
745 #ifdef USE_HOMETUPLES 746 T *al = (T *)tuples->getTupleList();
747 const int ntuple = tuples->getNumTuples();
749 T *al = tupleList.
750 const int ntuple = tupleList.
// Force evaluation over all owned tuples.
752 if ( ntuple ) T::computeForce(al, ntuple, reductionData, pressureProfileData);
753 tupleCount += ntuple;
// Submit energy/virial reductions plus the tuple-count checksum.
758 T::submitReductionData(reductionData,reduction);
759 reduction->
item(T::reductionChecksumLabel) += (
BigReal)tupleCount;
// Fold the per-type-pair profile data into the symmetric (ii<=jj)
// packed layout of the pressure-profile reduction, then submit.
762 if (pressureProfileReduction) {
766 const int arraysize = 3*pressureProfileSlabs;
767 const BigReal *data = pressureProfileData;
768 for (
int i=0; i<numAtomTypes; i++) {
769 for (
int j=0; j<numAtomTypes; j++) {
772 if (ii > jj) {
int tmp=ii; ii=jj; jj=tmp; }
773 const int reductionOffset =
774 (ii*numAtomTypes - (ii*(ii+1))/2 + jj)*arraysize;
775 for (
int k=0; k<arraysize; k++) {
776 pressureProfileReduction->
item(reductionOffset+k) += data[k];
781 pressureProfileReduction->
submit();
// Close/return the patch boxes opened at the top of doWork.
786 for (ap = ap.
begin(); ap != ap.
end(); ap++) {
// NOTE(review): the remainder of this chunk is not source code but an
// appended cross-reference index of one-line declarations (it reads like
// documentation-generator hover text — it even includes prose briefs such
// as "Molecule stores the structural information for the system.").  It is
// preserved verbatim below; nothing here is part of the header's logic.
Elem * find(const Elem &elem)
UniqueSet< TuplePatchElem > TuplePatchList
#define COMPUTE_PROXY_PRIORITY
int num_alch_unpert_Dihedrals
Box< Patch, CompAtom > * registerAvgPositionPickup(Compute *cid)
NAMD_HOST_DEVICE Vector c() const
static ProxyMgr * Object()
static void partition(int *order, const FullAtom *atoms, int begin, int end)
static PatchMap * Object()
Angle * alch_unpert_angles
virtual void submit(void)=0
SimParameters * simParameters
int num_alch_unpert_Bonds
BigReal * pressureProfileData
void startWork(const LDObjHandle &handle)
std::ostream & endi(std::ostream &s)
virtual ~ComputeHomeTuples()
int downstream(int pid1, int pid2)
int upstreamNeighbors(int pid, PatchID *neighbor_ids)
TuplePatchList tuplePatchList
ComputeHomeTuples(ComputeID c)
SubmitReduction * pressureProfileReduction
SubmitReduction * willSubmit(int setID, int size=-1)
std::ostream & iWARN(std::ostream &s)
int num_alch_unpert_Angles
unsigned char get_ss_type(int anum) const
int add(const Elem &elem)
static ReductionMgr * Object(void)
Patch * patch(PatchID pid)
virtual void initialize(void)
Dihedral * alch_unpert_dihedrals
Molecule stores the structural information for the system.
UniqueSetIter< T > begin(void) const
TuplePatchElem(Patch *p_param, Compute *cid)
int numPatches(void) const
void NAMD_bug(const char *err_msg)
TuplePatchElem(PatchID pid=-1)
BigReal soluteScalingFactor
int operator<(const TuplePatchElem &elem) const
LocalID localID(AtomID id)
PatchID getPatchID() const
Box< Patch, CompAtom > * positionBox
void createProxy(PatchID pid)
UniqueSetIter< T > end(void) const
virtual void doWork(void)
static LdbCoordinator * Object()
static AtomMap * Object()
unsigned char get_fep_type(int anum) const
void endWork(const LDObjHandle &handle)
ResizeArray< T > tupleList
int pressureProfileAtomTypes
int operator==(const TuplePatchElem &elem) const
SubmitReduction * reduction
ComputeHomeTuples(ComputeID c, PatchIDList &pids)
virtual void loadTuples(void)
Box< Patch, CompAtom > * avgPositionBox
UniqueSetIter< TuplePatchElem > TuplePatchListIter
Box< Patch, Results > * forceBox
NAMD_HOST_DEVICE Vector origin() const
void close(Data **const t)
Box< Patch, CompAtom > * registerPositionPickup(Compute *cid)
CompAtomExt * getCompAtomExtInfo()
Box< Patch, Results > * registerForceDeposit(Compute *cid)