#ifndef COMPUTEHOMETUPLES_H
#define COMPUTEHOMETUPLES_H

#ifdef MEM_OPT_VERSION
// ElemTraits maps a tuple element type to its signature pool in the
// memory-optimized build; the generic case reads the per-atom signatures.
template <class T> struct ElemTraits {
  typedef AtomSignature signature;
  static signature* get_sig_pointer(Molecule *mol) { return mol->atomSigPool; }
  static int get_sig_id(const CompAtomExt &a) { return a.sigId; }
};
// Specialization for exclusions, which use the exclusion signature pool.
template <> struct ElemTraits <ExclElem> {
  typedef ExclusionSignature signature;
  static signature* get_sig_pointer(Molecule *mol) { return mol->exclSigPool; }
  static int get_sig_id(const CompAtomExt &a) { return a.exclId; }
};
#endif  // MEM_OPT_VERSION
#ifdef USE_HOMETUPLES
//
// Abstract base class for HomeTuples and SelfTuples; stores the tuple type.
//
class Tuples {
private:
  int type;
protected:
  Tuples(int type) : type(type) {}
public:
  int getType() { return type; }
  virtual void submitTupleCount(SubmitReduction *reduction, int tupleCount) = 0;
  virtual int getNumTuples() = 0;
  virtual void* getTupleList() = 0;
  virtual void loadTuples(TuplePatchList& tuplePatchList, const char* isBasePatch,
                          AtomMap *atomMap,
                          const std::vector<int>& pids = std::vector<int>()) = 0;
};
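
//
// HomeTuples owns the tuple list for a set of home patches. It is parameterized
// by the tuple element type T, the molecular tuple struct S, and the parameter
// type P (for example, bonds elsewhere in NAMD use the triple BondElem, Bond,
// BondValue).
//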
template <class T, class S, class P>
class HomeTuples : public Tuples {
protected:
  std::vector<T> tupleList;

public:

  HomeTuples(int type=-1) : Tuples(type) {}

// Allow 'final' on the overrides below even with pre-C++11 compilers.
#if __cplusplus < 201103L
#define final
#endif

  virtual void* getTupleList() final {
    return (void*)tupleList.data();
  }

  virtual void submitTupleCount(SubmitReduction *reduction, int tupleCount) final {
    reduction->item(T::reductionChecksumLabel) += (BigReal)tupleCount;
  }

  virtual int getNumTuples() final {
    return tupleList.size();
  }

  virtual void loadTuples(TuplePatchList& tuplePatchList, const char* isBasePatch,
                          AtomMap *atomMap,
                          const std::vector<int>& pids = std::vector<int>()) {
    if (isBasePatch == NULL) {
      NAMD_bug("NULL isBasePatch detected in HomeTuples::loadTuples()");
    }

    int numTuples;

#ifdef MEM_OPT_VERSION
    typename ElemTraits<T>::signature *allSigs;
#else
    int32 **tuplesByAtom;
    S *tupleStructs;
#endif

    const P *tupleValues;

    Node *node = Node::Object();
    PatchMap *patchMap = PatchMap::Object();

#ifdef MEM_OPT_VERSION
    allSigs = ElemTraits<T>::get_sig_pointer(node->molecule);
#else
    T::getMoleculePointers(node->molecule,
        &numTuples, &tuplesByAtom, &tupleStructs);
#endif

    T::getParameterPointers(node->parameters, &tupleValues);
    // ... (simulation parameters: lesOn, soluteScalingOn, FEP lambdas,
    //      and the alchemically unperturbed bond/angle/dihedral lists)
    Real invLesFactor = lesOn ? 1.0/node->simParameters->lesFactor : 1.0;

    // Cycle through each patch and gather all tuples.
    TuplePatchListIter ai(tuplePatchList);
    if (pids.size() == 0) ai = ai.begin();

    int numPid = (pids.size() == 0) ? tuplePatchList.size() : pids.size();

    for (int ipid=0; ipid < numPid; ipid++) {

      // Take the next patch, either from the iterator or from the explicit pid list.
      int numAtoms;
      CompAtomExt *atomExt;
      if (pids.size() == 0) {
        numAtoms = (*ai).p->getNumAtoms();
        atomExt = (*ai).xExt;
        ai++;
      } else {
        TuplePatchElem *tpe = tuplePatchList.find(TuplePatchElem(pids[ipid]));
        numAtoms = tpe->p->getNumAtoms();
        atomExt = tpe->xExt;
      }
      // Cycle through each atom in the patch and load up its tuples.
      for (int j=0; j < numAtoms; j++) {
#ifdef MEM_OPT_VERSION
        typename ElemTraits<T>::signature *thisAtomSig =
            &allSigs[ElemTraits<T>::get_sig_id(atomExt[j])];
        TupleSignature *allTuples;
        T::getTupleInfo(thisAtomSig, &numTuples, &allTuples);
        for (int k=0; k<numTuples; k++) {
          T t(atomExt[j].id, &allTuples[k], tupleValues);
#else
        // Get the list of all tuples owned by this atom.
        int32 *curTuple = tuplesByAtom[atomExt[j].id];
        for ( ; *curTuple != -1; ++curTuple) {
          T t(&tupleStructs[*curTuple], tupleValues);
#endif
          int i;
          LocalID aid[T::size];
          aid[0] = atomMap->localID(t.atomID[0]);
          int homepatch = aid[0].pid;
          int samepatch = 1;
          // ... (per-tuple LES / solute-scaling / FEP flags: has_les, has_ss,
          //      is_fep_ss, is_fep_sd)
          int fep_tuple_type = 0;
          for (i=1; i < T::size; i++) {
            aid[i] = atomMap->localID(t.atomID[i]);
            samepatch = samepatch && ( homepatch == aid[i].pid );
            // ...
          }
          if (sdScaling && is_fep_sd) {
            // A term listed among the alchemically unperturbed bonds/angles/dihedrals
            // is not lambda-scaled.
            for (i=0; i < num_unpert_bonds; i++) {
              if (T::size == 2
                  && t.atomID[0]==unpert_bonds[i].atom1
                  && t.atomID[1]==unpert_bonds[i].atom2) is_fep_sd = 0;
            }
            for (i=0; i < num_unpert_angles; i++) {
              if (T::size == 3
                  && t.atomID[0]==unpert_angles[i].atom1
                  && t.atomID[1]==unpert_angles[i].atom2
                  && t.atomID[2]==unpert_angles[i].atom3) is_fep_sd = 0;
            }
            for (i=0; i < num_unpert_dihedrals; i++) {
              if (T::size == 4
                  && t.atomID[0]==unpert_dihedrals[i].atom1
                  && t.atomID[1]==unpert_dihedrals[i].atom2
                  && t.atomID[2]==unpert_dihedrals[i].atom3
                  && t.atomID[3]==unpert_dihedrals[i].atom4) is_fep_sd = 0;
            }
          }
          if (T::size < 4 && !soluteScalingAll) has_ss = false;
          if ( samepatch ) continue;
          t.scale = (!has_les && !has_ss) ? 1.0 : ( has_les ? invLesFactor : soluteScalingFactor );
          if (is_fep_ss) t.scale = (fep_tuple_type == 4) ? OneMinusLambda : Lambda;
          if (is_fep_sd && sdScaling) t.scale = (fep_tuple_type == 4 || fep_tuple_type == 2) ? OneMinusLambda : Lambda;
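
          // Each tuple has a single owner: the patch reached by repeatedly taking
          // PatchMap::downstream() over the patches of all its atoms. Only that
          // patch, and only if it is one of this compute's base patches, stores
          // the tuple below.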
          for (i=1; i < T::size; i++) {
            homepatch = patchMap->downstream(homepatch, aid[i].pid);
          }

          if ( homepatch != notUsed && isBasePatch[homepatch] ) {
            TuplePatchElem *p;
            for (i=0; i < T::size; i++) {
              t.p[i] = p = tuplePatchList.find(TuplePatchElem(aid[i].pid));
              if ( ! p ) {
#ifdef MEM_OPT_VERSION
                iout << iWARN << "Tuple with atoms ";
#else
                iout << iWARN << "Tuple " << *curTuple << " with atoms ";
#endif
                int erri;
                for ( erri = 0; erri < T::size; erri++ ) {
                  iout << t.atomID[erri] << "(" << aid[erri].pid << ") ";
                }
                iout << "missing patch " << aid[i].pid << "\n" << endi;
                break;
              }
              t.localIndex[i] = aid[i].index;
            }
            // ...
#ifdef MEM_OPT_VERSION
            // Avoid adding tuples whose atoms are all fixed.
            if ( node->simParameters->fixedAtomsOn && !node->simParameters->fixedAtomsForces ) {
              int allfixed = 1;
              for (i=0; i<T::size; i++) {
                // ... (AND the fixed-atom flag of each tuple atom into allfixed)
              }
              if (!allfixed) tupleList.push_back(t);
            } else {
              tupleList.push_back(t);
            }
#else
            tupleList.push_back(t);
#endif
          }
        } // tuple loop
      } // atom loop
    } // patch loop
  }
};
#endif  // USE_HOMETUPLES
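
//
// ComputeHomeTuples is the Compute that evaluates one class of bonded tuples
// for a set of home patches. When USE_HOMETUPLES is defined the tuple list is
// managed by the HomeTuples object above; otherwise the legacy loadTuples()
// member below fills a ResizeArray directly.
//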
template <class T, class S, class P> class ComputeHomeTuples : public Compute {

protected:

#ifndef USE_HOMETUPLES
  virtual void loadTuples(void) {
    int numTuples;

#ifdef MEM_OPT_VERSION
    typename ElemTraits<T>::signature *allSigs;
#else
    int32 **tuplesByAtom;
    S *tupleStructs;
#endif

    const P *tupleValues;

    Node *node = Node::Object();

#ifdef MEM_OPT_VERSION
    allSigs = ElemTraits<T>::get_sig_pointer(node->molecule);
#else
    T::getMoleculePointers(node->molecule,
        &numTuples, &tuplesByAtom, &tupleStructs);
#endif

    T::getParameterPointers(node->parameters, &tupleValues);
    // ... (simulation parameters, FEP lambdas, and unperturbed term lists,
    //      as in HomeTuples::loadTuples above)
    Real invLesFactor = lesOn ? 1.0/node->simParameters->lesFactor : 1.0;

    // Cycle through each patch and gather all tuples.
    TuplePatchListIter ai(tuplePatchList);

    for ( ai = ai.begin(); ai != ai.end(); ai++ ) {
      int numAtoms = (*ai).p->getNumAtoms();
      CompAtomExt *atomExt = (*ai).xExt;

      // Cycle through each atom in the patch and load up its tuples.
      for (int j=0; j < numAtoms; j++) {
#ifdef MEM_OPT_VERSION
        typename ElemTraits<T>::signature *thisAtomSig =
            &allSigs[ElemTraits<T>::get_sig_id(atomExt[j])];
        TupleSignature *allTuples;
        T::getTupleInfo(thisAtomSig, &numTuples, &allTuples);
        for (int k=0; k<numTuples; k++) {
          T t(atomExt[j].id, &allTuples[k], tupleValues);
#else
        // Get the list of all tuples owned by this atom.
        int32 *curTuple = tuplesByAtom[atomExt[j].id];
        for ( ; *curTuple != -1; ++curTuple) {
          T t(&tupleStructs[*curTuple], tupleValues);
#endif
          int i;
          LocalID aid[T::size];
          aid[0] = atomMap->localID(t.atomID[0]);
          int homepatch = aid[0].pid;
          int samepatch = 1;
          // ... (per-tuple LES / solute-scaling / FEP flags)
          int fep_tuple_type = 0;
          for (i=1; i < T::size; i++) {
            aid[i] = atomMap->localID(t.atomID[i]);
            samepatch = samepatch && ( homepatch == aid[i].pid );
            // ...
          }
          if (sdScaling && is_fep_sd) {
            // A term listed among the alchemically unperturbed bonds/angles/dihedrals
            // is not lambda-scaled.
            for (i=0; i < num_unpert_bonds; i++) {
              if (T::size == 2
                  && t.atomID[0]==unpert_bonds[i].atom1
                  && t.atomID[1]==unpert_bonds[i].atom2) is_fep_sd = 0;
            }
            for (i=0; i < num_unpert_angles; i++) {
              if (T::size == 3
                  && t.atomID[0]==unpert_angles[i].atom1
                  && t.atomID[1]==unpert_angles[i].atom2
                  && t.atomID[2]==unpert_angles[i].atom3) is_fep_sd = 0;
            }
            for (i=0; i < num_unpert_dihedrals; i++) {
              if (T::size == 4
                  && t.atomID[0]==unpert_dihedrals[i].atom1
                  && t.atomID[1]==unpert_dihedrals[i].atom2
                  && t.atomID[2]==unpert_dihedrals[i].atom3
                  && t.atomID[3]==unpert_dihedrals[i].atom4) is_fep_sd = 0;
            }
          }
          if (T::size < 4 && !soluteScalingAll) has_ss = false;
          if ( samepatch ) continue;
          t.scale = (!has_les && !has_ss) ? 1.0 : ( has_les ? invLesFactor : soluteScalingFactor );
          if (is_fep_ss) t.scale = (fep_tuple_type == 4) ? OneMinusLambda : Lambda;
          if (is_fep_sd && sdScaling) t.scale = (fep_tuple_type == 4 || fep_tuple_type == 2) ? OneMinusLambda : Lambda;
          // The tuple is stored only by the downstream-most base patch among its atoms.
          for (i=1; i < T::size; i++) {
            homepatch = patchMap->downstream(homepatch, aid[i].pid);
          }

          if ( homepatch != notUsed && isBasePatch[homepatch] ) {
            TuplePatchElem *p;
            for (i=0; i < T::size; i++) {
              t.p[i] = p = tuplePatchList.find(TuplePatchElem(aid[i].pid));
              if ( ! p ) {
#ifdef MEM_OPT_VERSION
                iout << iWARN << "Tuple with atoms ";
#else
                iout << iWARN << "Tuple " << *curTuple << " with atoms ";
#endif
                int erri;
                for ( erri = 0; erri < T::size; erri++ ) {
                  iout << t.atomID[erri] << "(" << aid[erri].pid << ") ";
                }
                iout << "missing patch " << aid[i].pid << "\n" << endi;
                break;
              }
              t.localIndex[i] = aid[i].index;
            }
            // ...
#ifdef MEM_OPT_VERSION
            // Avoid adding tuples whose atoms are all fixed.
            if ( node->simParameters->fixedAtomsOn && !node->simParameters->fixedAtomsForces ) {
              int allfixed = 1;
              for (i=0; i<T::size; i++) {
                // ... (AND the fixed-atom flag of each tuple atom into allfixed)
              }
              if (!allfixed) tupleList.add(t);
            } else {
              tupleList.add(t);
            }
#else
            tupleList.add(t);
#endif
          }
        } // tuple loop
      } // atom loop
    } // patch loop
  }
#endif  // !USE_HOMETUPLES
#ifdef USE_HOMETUPLES
  HomeTuples<T, S, P>* tuples;
#endif

  // ... (other members: tuplePatchList, patchMap, atomMap, reduction,
  //      pressureProfileReduction, pressureProfileData, pressureProfileSlabs,
  //      doLoadTuples, isBasePatch)

public:

  ComputeHomeTuples(ComputeID c) : Compute(c) {
    // ... (look up PatchMap/AtomMap, register the basic reduction)
    SimParameters *simParams = Node::Object()->simParameters;
    if (simParams->pressureProfileOn) {
      pressureProfileSlabs = T::pressureProfileSlabs =
          simParams->pressureProfileSlabs;
      // ... (register the bonded pressure-profile reduction)
      int n = T::pressureProfileAtomTypes;
      int numAtomTypePairs = n*n;
      pressureProfileData = new BigReal[3*pressureProfileSlabs*numAtomTypePairs];
    } else {
      pressureProfileReduction = NULL;
      pressureProfileData = NULL;
    }
    doLoadTuples = false;
#ifdef USE_HOMETUPLES
    tuples = NULL;
#endif
  }

  ComputeHomeTuples(ComputeID c, PatchIDList &pids) : Compute(c) {
    // ... (look up PatchMap/AtomMap, register the basic reduction)
    SimParameters *simParams = Node::Object()->simParameters;
    if (simParams->pressureProfileOn) {
      pressureProfileSlabs = T::pressureProfileSlabs =
          simParams->pressureProfileSlabs;
      // ... (register the bonded pressure-profile reduction)
      int n = T::pressureProfileAtomTypes;
      int numAtomTypePairs = n*n;
      pressureProfileData = new BigReal[3*pressureProfileSlabs*numAtomTypePairs];
    } else {
      pressureProfileReduction = NULL;
      pressureProfileData = NULL;
    }
    doLoadTuples = false;

    // Mark the base patches this compute is responsible for.
    int nPatches = patchMap->numPatches();
    isBasePatch = new char[nPatches];
    int i;
    for (i=0; i<nPatches; ++i) { isBasePatch[i] = 0; }
    for (i=0; i<pids.size(); ++i) { isBasePatch[pids[i]] = 1; }
#ifdef USE_HOMETUPLES
    tuples = NULL;
#endif
  }

  virtual ~ComputeHomeTuples() {
    // ...
    delete [] isBasePatch;
    delete pressureProfileReduction;
    delete [] pressureProfileData;   // allocated with new[] in the constructors
#ifdef USE_HOMETUPLES
    if (tuples != NULL) delete tuples;
#endif
  }
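
  // initialize() builds tuplePatchList from this compute's base patches plus
  // their upstream-neighbor patches, so every atom of every owned tuple is
  // reachable through an open position/force box in doWork().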
  virtual void initialize(void) {
#if defined(NAMD_CUDA) || defined(NAMD_HIP)
    // ... (GPU builds grab the ProxyMgr so proxies can be created below)
#endif
#ifdef USE_HOMETUPLES
    tuples = new HomeTuples<T, S, P>();
#endif

    // Register this compute with each patch in the tuple patch list.
    tuplePatchList.clear();

    int nPatches = patchMap->numPatches();
    int pid;
    for (pid=0; pid<nPatches; ++pid) {
      if ( isBasePatch[pid] ) {
#if defined(NAMD_CUDA) || defined(NAMD_HIP)
        // ... (create a proxy for the patch on GPU builds)
#endif
        // ... (add TuplePatchElem(patchMap->patch(pid), this) to tuplePatchList)
      }
    }

    // Gather all proxy (upstream-neighbor) patches as well.
    for (pid=0; pid<nPatches; ++pid) if ( isBasePatch[pid] ) {
      PatchID neighbors[PatchMap::MaxOneAway];
      int numNeighbors = patchMap->upstreamNeighbors(pid, neighbors);
      for ( int i = 0; i < numNeighbors; ++i ) {
        // ... (skip neighbors already in tuplePatchList)
#if defined(NAMD_CUDA) || defined(NAMD_HIP)
        // ... (create a proxy for the neighbor patch on GPU builds)
#endif
        // ... (add the neighbor patch to tuplePatchList)
      }
    }
    setNumPatches(tuplePatchList.size());
    doLoadTuples = true;
  }
  virtual void doWork(void) {
    // ... (notify the load balancer that work has started)

    // Open boxes: register that we are using positions and will deposit forces.
    TuplePatchListIter ap(tuplePatchList);
    for (ap = ap.begin(); ap != ap.end(); ap++) {
      // ... (open positionBox, avgPositionBox, and forceBox; fetch CompAtomExt info)
    }

    BigReal reductionData[T::reductionDataSize];
    int tupleCount = 0;
    int numAtomTypes = T::pressureProfileAtomTypes;
    int numAtomTypePairs = numAtomTypes*numAtomTypes;

    for ( int i = 0; i < T::reductionDataSize; ++i ) reductionData[i] = 0;
    if (pressureProfileData) {
      memset(pressureProfileData, 0,
             3*pressureProfileSlabs*numAtomTypePairs*sizeof(BigReal));
      TuplePatchListIter newap(tuplePatchList);
      newap = newap.begin();
      const Lattice &lattice = newap->p->lattice;
      T::pressureProfileThickness = lattice.c().z / pressureProfileSlabs;
      T::pressureProfileMin = lattice.origin().z - 0.5*lattice.c().z;
    }
    if ( ! Node::Object()->simParameters->commOnly ) {
      if ( doLoadTuples ) {
#ifdef USE_HOMETUPLES
        tuples->loadTuples(tuplePatchList, isBasePatch, atomMap);
#else
        loadTuples();
#endif
        doLoadTuples = false;
      }
      // Hand the tuple list to the element type for force evaluation.
#ifdef USE_HOMETUPLES
      T *al = (T *)tuples->getTupleList();
      const int ntuple = tuples->getNumTuples();
#else
      T *al = tupleList.begin();
      const int ntuple = tupleList.size();
#endif
      if ( ntuple ) T::computeForce(al, ntuple, reductionData, pressureProfileData);
      tupleCount += ntuple;
    }
    // ... (notify the load balancer that work has finished)

    T::submitReductionData(reductionData,reduction);
    reduction->item(T::reductionChecksumLabel) += (BigReal)tupleCount;
    reduction->submit();
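
    // computeForce() accumulated pressure-profile contributions per ordered
    // type pair (i,j) in blocks of 3*pressureProfileSlabs values; the reduction
    // keeps only unordered pairs, so the (i,j) and (j,i) blocks are folded into
    // the same triangular slot below.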
    if (pressureProfileReduction) {
      const int arraysize = 3*pressureProfileSlabs;
      const BigReal *data = pressureProfileData;
      for (int i=0; i<numAtomTypes; i++) {
        for (int j=0; j<numAtomTypes; j++) {
          int ii = i;
          int jj = j;
          if (ii > jj) { int tmp=ii; ii=jj; jj=tmp; }
          const int reductionOffset =
              (ii*numAtomTypes - (ii*(ii+1))/2 + jj)*arraysize;
          for (int k=0; k<arraysize; k++) {
            pressureProfileReduction->item(reductionOffset+k) += data[k];
          }
          data += arraysize;
        }
      }
      pressureProfileReduction->submit();
    }
    // Close boxes: we are done with positions and are depositing forces.
    for (ap = ap.begin(); ap != ap.end(); ap++) {
      // ... (close positionBox, avgPositionBox, and forceBox for each patch)
    }
  }
};

#endif  // COMPUTEHOMETUPLES_H