7 #ifndef COMPUTEHOMETUPLES_H
8 #define COMPUTEHOMETUPLES_H
99 #ifdef MEM_OPT_VERSION
// NOTE(review): this chunk is an extraction fragment — the original file's
// line numbers are fused into the text and interior lines (including this
// struct's typedef and closing brace) are missing from view.
// Traits class mapping a tuple element type T to the signature pool it
// indexes in Molecule. Primary template: per-atom signatures via
// Molecule::atomSigPool, indexed by CompAtomExt::sigId.
100 template <
class T>
struct ElemTraits {
// Signature pool for ordinary (bond/angle/dihedral-style) tuple elements.
102 static signature* get_sig_pointer(
Molecule *mol) {
return mol->atomSigPool; }
// Index of this atom's signature within that pool.
103 static int get_sig_id(
const CompAtomExt &a) {
return a.sigId; }
// Specialization for exclusion elements: exclusions live in a separate
// signature pool (exclSigPool) and use a separate per-atom index (exclId).
// NOTE(review): closing brace of this specialization is outside the visible
// fragment.
106 template <>
struct ElemTraits <
ExclElem> {
108 static signature* get_sig_pointer(
Molecule *mol) {
return mol->exclSigPool; }
109 static int get_sig_id(
const CompAtomExt &a) {
return a.exclId; }
113 #ifdef USE_HOMETUPLES
// Abstract base for a typed tuple list. Stores an integer 'type' tag set at
// construction; concrete subclasses (e.g. HomeTuples below) implement the
// pure virtuals to expose their tuple storage and to report a tuple count
// into a reduction.
121 Tuples(
int type) : type(type) {}
// Accessor for the type tag passed to the constructor.
126 int getType() {
return type;}
// Adds tupleCount into the reduction item identified by the subclass.
127 virtual void submitTupleCount(
SubmitReduction *reduction,
int tupleCount)=0;
129 virtual int getNumTuples()=0;
130 virtual void* getTupleList()=0;
// NOTE(review): the line below is the tail of a pure-virtual declaration
// (presumably loadTuples) whose leading part is missing from this fragment;
// the default argument lets callers omit the patch-id list.
132 const std::vector<int>& pids = std::vector<int>())=0;
// HomeTuples<T,S,P>: concrete Tuples implementation holding a std::vector<T>
// of tuple objects. T is the tuple/force element type, P its parameter type;
// S's role is not visible in this fragment.
// NOTE(review): extraction fragment — many interior lines and closing braces
// are missing; code below is kept byte-identical.
139 template <
class T,
class S,
class P>
class HomeTuples :
public Tuples {
141 std::vector<T> tupleList;
145 HomeTuples(
int type=-1) : Tuples(type) {}
147 #if __cplusplus < 201103L
// Raw pointer to contiguous tuple storage (vector::data()).
151 virtual void* getTupleList() final {
152 return (
void*)tupleList.data();
// Folds the tuple count into the checksum reduction item declared by T.
155 virtual void submitTupleCount(
SubmitReduction *reduction,
int tupleCount)
final {
156 reduction->item(T::reductionChecksumLabel) += (
BigReal)tupleCount;
166 virtual int getNumTuples() final {
167 return tupleList.size();
// loadTuples: rebuilds tupleList by scanning atoms of the owned patches
// (all of tuplePatchList when pids is empty, else just the listed pids),
// assigning each tuple to the downstream home patch of its atoms.
171 const std::vector<int>& pids = std::vector<int>()) {
173 if (isBasePatch == NULL) {
174 NAMD_bug(
"NULL isBasePatch detected in HomeTuples::loadTuples()");
179 #ifdef MEM_OPT_VERSION
180 typename ElemTraits<T>::signature *allSigs;
182 int32 **tuplesByAtom;
186 const P *tupleValues;
// Fetch molecule-wide tuple tables and parameters for element type T.
191 #ifdef MEM_OPT_VERSION
192 allSigs = ElemTraits<T>::get_sig_pointer(node->
molecule);
194 T::getMoleculePointers(node->
molecule,
195 &numTuples, &tuplesByAtom, &tupleStructs);
198 T::getParameterPointers(node->
parameters, &tupleValues);
// NOTE(review): right-hand side of this initializer is missing from the
// fragment (presumably 1/lesFactor when locally-enhanced sampling is on).
209 Real invLesFactor = lesOn ?
225 if (pids.size() == 0) ai = ai.begin();
227 int numPid = (pids.size() == 0) ? tuplePatchList.
size() : pids.size();
// Outer loop over the selected patches.
229 for (
int ipid=0;ipid < numPid;ipid++) {
234 if (pids.size() == 0) {
237 atomExt = (*ai).xExt;
// Per-atom scan: enumerate tuples anchored on each atom j.
247 for (
int j=0; j < numAtoms; j++)
250 #ifdef MEM_OPT_VERSION
251 typename ElemTraits<T>::signature *thisAtomSig =
252 &allSigs[ElemTraits<T>::get_sig_id(atomExt[j])];
254 T::getTupleInfo(thisAtomSig, &numTuples, &allTuples);
255 for(
int k=0; k<numTuples; k++) {
256 T t(atomExt[j].
id, &allTuples[k], tupleValues);
// Non-MEM_OPT path: walk the -1-terminated tuple index list for this atom.
259 int32 *curTuple = tuplesByAtom[atomExt[j].
id];
260 for( ; *curTuple != -1; ++curTuple) {
261 T t(&tupleStructs[*curTuple],tupleValues);
// Map first atom to its local patch; start the FEP/LES classification.
264 aid[0] = atomMap->
localID(t.atomID[0]);
265 int homepatch = aid[0].
pid;
270 int is_fep_ss = partition[0] > 2;
272 int fep_tuple_type = 0;
273 for (i=1; i < T::size; i++) {
274 aid[i] = atomMap->
localID(t.atomID[i]);
275 samepatch = samepatch && ( homepatch == aid[i].
pid );
280 is_fep_ss &= partition[i] > 2;
281 is_fep_sd |= (abs(partition[i] - partition[0]) == 2);
282 fep_tuple_type = partition[i]; }
// Alchemical single-topology/dual-topology: tuples matched against the
// "unperturbed" bond/angle/dihedral lists are exempted from sd scaling.
284 if (sdScaling && is_fep_sd) {
293 for (i=0; i < num_unpert_bonds; i++) {
295 && t.atomID[0]==unpert_bonds[i].
atom1
296 && t.atomID[1]==unpert_bonds[i].
atom2) is_fep_sd = 0;
298 for (i=0; i < num_unpert_angles; i++) {
300 && t.atomID[0]==unpert_angles[i].
atom1
301 && t.atomID[1]==unpert_angles[i].
atom2
302 && t.atomID[2]==unpert_angles[i].
atom3) is_fep_sd = 0;
304 for (i=0; i < num_unpert_dihedrals; i++) {
306 && t.atomID[0]==unpert_dihedrals[i].
atom1
307 && t.atomID[1]==unpert_dihedrals[i].
atom2
308 && t.atomID[2]==unpert_dihedrals[i].
atom3
309 && t.atomID[3]==unpert_dihedrals[i].
atom4) is_fep_sd = 0;
312 if (T::size < 4 && !soluteScalingAll) has_ss =
false;
// Tuples entirely within one patch are handled elsewhere; skip here.
313 if ( samepatch )
continue;
// Scale priority: LES factor, then solute scaling, then FEP lambda terms.
314 t.scale = (!has_les && !has_ss) ? 1.0 : ( has_les ? invLesFactor : soluteScalingFactor );
315 if (is_fep_ss) t.scale = (fep_tuple_type == 4) ? OneMinusLambda : Lambda;
316 if (is_fep_sd && sdScaling) t.scale = (fep_tuple_type == 4 || fep_tuple_type == 2) ? OneMinusLambda : Lambda;
// Fold all atoms' patches down to a single downstream home patch.
318 for (i=1; i < T::size; i++) {
319 homepatch = patchMap->
downstream(homepatch,aid[i].pid);
// Only keep the tuple if this compute owns the resulting base patch.
321 if ( homepatch !=
notUsed && isBasePatch[homepatch] ) {
323 for (i=0; i < T::size; i++) {
326 #ifdef MEM_OPT_VERSION
// Diagnostic: an atom's patch is not in tuplePatchList — warn with ids.
329 iout << iWARN <<
"Tuple " << *curTuple <<
" with atoms ";
332 for( erri = 0; erri < T::size; erri++ ) {
333 iout << t.atomID[erri] <<
"(" << aid[erri].
pid <<
") ";
335 iout <<
"missing patch " << aid[i].
pid <<
"\n" <<
endi;
338 t.localIndex[i] = aid[i].
index;
// MEM_OPT build: drop tuples whose atoms are all fixed; otherwise store.
341 #ifdef MEM_OPT_VERSION
345 for(i=0; i<T::size; i++){
349 if(!allfixed) tupleList.push_back(t);
351 tupleList.push_back(t);
354 tupleList.push_back(t);
369 #ifndef USE_HOMETUPLES
// Legacy (non-USE_HOMETUPLES) loadTuples: same algorithm as the HomeTuples
// version above, but iterates tuplePatchList directly and appends to a
// ResizeArray via tupleList.add(t) instead of std::vector::push_back.
// NOTE(review): extraction fragment — interior lines and braces missing.
373 #ifdef MEM_OPT_VERSION
374 typename ElemTraits<T>::signature *allSigs;
376 int32 **tuplesByAtom;
380 const P *tupleValues;
383 #ifdef MEM_OPT_VERSION
384 allSigs = ElemTraits<T>::get_sig_pointer(node->
molecule);
386 T::getMoleculePointers(node->
molecule,
387 &numTuples, &tuplesByAtom, &tupleStructs);
390 T::getParameterPointers(node->
parameters, &tupleValues);
395 int partition[T::size];
// NOTE(review): initializer truncated here, as in the variant above.
401 Real invLesFactor = lesOn ?
// Iterate every patch element this compute touches.
418 for ( ai = ai.
begin(); ai != ai.
end(); ai++ )
426 for (
int j=0; j < numAtoms; j++)
429 #ifdef MEM_OPT_VERSION
430 typename ElemTraits<T>::signature *thisAtomSig =
431 &allSigs[ElemTraits<T>::get_sig_id(atomExt[j])];
433 T::getTupleInfo(thisAtomSig, &numTuples, &allTuples);
434 for(
int k=0; k<numTuples; k++) {
435 T t(atomExt[j].
id, &allTuples[k], tupleValues);
438 int32 *curTuple = tuplesByAtom[atomExt[j].
id];
439 for( ; *curTuple != -1; ++curTuple) {
440 T t(&tupleStructs[*curTuple],tupleValues);
443 aid[0] = atomMap->
localID(t.atomID[0]);
444 int homepatch = aid[0].
pid;
449 int is_fep_ss = partition[0] > 2;
451 int fep_tuple_type = 0;
452 for (i=1; i < T::size; i++) {
453 aid[i] = atomMap->
localID(t.atomID[i]);
454 samepatch = samepatch && ( homepatch == aid[i].
pid );
459 is_fep_ss &= partition[i] > 2;
460 is_fep_sd |= (abs(partition[i] - partition[0]) == 2);
461 fep_tuple_type = partition[i];
// Exempt tuples listed as alchemically unperturbed from sd scaling.
464 if (sdScaling && is_fep_sd) {
465 for (i=0; i < num_unpert_bonds; i++) {
467 && t.atomID[0]==unpert_bonds[i].
atom1
468 && t.atomID[1]==unpert_bonds[i].
atom2) is_fep_sd = 0;
470 for (i=0; i < num_unpert_angles; i++) {
472 && t.atomID[0]==unpert_angles[i].
atom1
473 && t.atomID[1]==unpert_angles[i].
atom2
474 && t.atomID[2]==unpert_angles[i].
atom3) is_fep_sd = 0;
476 for (i=0; i < num_unpert_dihedrals; i++) {
478 && t.atomID[0]==unpert_dihedrals[i].
atom1
479 && t.atomID[1]==unpert_dihedrals[i].
atom2
480 && t.atomID[2]==unpert_dihedrals[i].
atom3
481 && t.atomID[3]==unpert_dihedrals[i].
atom4) is_fep_sd = 0;
484 if (T::size < 4 && !soluteScalingAll) has_ss =
false;
485 if ( samepatch )
continue;
486 t.scale = (!has_les && !has_ss) ? 1.0 : ( has_les ? invLesFactor : soluteScalingFactor );
487 if (is_fep_ss) t.scale = (fep_tuple_type == 4) ? OneMinusLambda : Lambda;
488 if (is_fep_sd && sdScaling) t.scale = (fep_tuple_type == 4 || fep_tuple_type == 2) ? OneMinusLambda : Lambda;
490 for (i=1; i < T::size; i++) {
491 homepatch = patchMap->
downstream(homepatch,aid[i].pid);
493 if ( homepatch !=
notUsed && isBasePatch[homepatch] ) {
495 for (i=0; i < T::size; i++) {
498 #ifdef MEM_OPT_VERSION
501 iout << iWARN <<
"Tuple " << *curTuple <<
" with atoms ";
504 for( erri = 0; erri < T::size; erri++ ) {
505 iout << t.atomID[erri] <<
"(" << aid[erri].
pid <<
") ";
507 iout <<
"missing patch " << aid[i].
pid <<
"\n" <<
endi;
510 t.localIndex[i] = aid[i].
index;
513 #ifdef MEM_OPT_VERSION
518 for(i=0; i<T::size; i++){
// Difference from the HomeTuples variant: ResizeArray::add, not push_back.
522 if(!allfixed) tupleList.add(t);
540 #ifdef USE_HOMETUPLES
// Owned tuple container when the USE_HOMETUPLES path is compiled in;
// allocated in initialize() and freed in the destructor below.
541 HomeTuples<T, S, P>* tuples;
// Constructor fragments: set up pressure-profile buffers when enabled
// (3 slabs-worth of BigReal per atom-type pair), otherwise null them out.
// NOTE(review): right-hand sides and surrounding if/else are missing.
568 pressureProfileSlabs = T::pressureProfileSlabs =
573 int numAtomTypePairs = n*n;
574 pressureProfileData =
new BigReal[3*pressureProfileSlabs*numAtomTypePairs];
576 pressureProfileReduction = NULL;
577 pressureProfileData = NULL;
579 doLoadTuples =
false;
581 #ifdef USE_HOMETUPLES
// Second constructor (PatchIDList overload): same pressure-profile setup,
// plus the isBasePatch ownership bitmap over all patches.
596 pressureProfileSlabs = T::pressureProfileSlabs =
601 int numAtomTypePairs = n*n;
602 pressureProfileData =
new BigReal[3*pressureProfileSlabs*numAtomTypePairs];
604 pressureProfileReduction = NULL;
605 pressureProfileData = NULL;
607 doLoadTuples =
false;
609 isBasePatch =
new char[nPatches];
611 for (i=0; i<nPatches; ++i) { isBasePatch[i] = 0; }
612 for (i=0; i<pids.
size(); ++i) { isBasePatch[pids[i]] = 1; }
613 #ifdef USE_HOMETUPLES
// Destructor fragments.
622 delete [] isBasePatch;
623 delete pressureProfileReduction;
// NOTE(review): pressureProfileData is allocated with new BigReal[...]
// above but released here with plain delete — mismatched delete[]/delete
// is undefined behavior; verify against the full source and fix there.
624 delete pressureProfileData;
625 #ifdef USE_HOMETUPLES
626 if (tuples != NULL)
delete tuples;
636 #if defined(NAMD_CUDA) || defined(NAMD_HIP)
// initialize(): allocate the HomeTuples container (USE_HOMETUPLES builds),
// then rebuild tuplePatchList from the base patches and their upstream
// neighbors, and record the resulting patch count.
// NOTE(review): loop bodies are elided in this fragment.
640 #ifdef USE_HOMETUPLES
641 tuples =
new HomeTuples<T, S, P>();
645 tuplePatchList.
clear();
// First pass: add every patch this compute is the base for.
649 for (pid=0; pid<nPatches; ++pid) {
650 if ( isBasePatch[pid] ) {
651 #if defined(NAMD_CUDA) || defined(NAMD_HIP)
// Second pass: add neighbor patches of each base patch.
662 for (pid=0; pid<nPatches; ++pid)
if ( isBasePatch[pid] ) {
664 for (
int i = 0; i < numNeighbors; ++i ) {
666 #if defined(NAMD_CUDA) || defined(NAMD_HIP)
674 setNumPatches(tuplePatchList.
size());
// doWork(): open position/force boxes on every patch, (re)load tuples if
// flagged, evaluate forces, submit reductions, then close the boxes.
// NOTE(review): extraction fragment — interior lines and braces missing.
700 for (ap = ap.
begin(); ap != ap.
end(); ap++) {
701 ap->x = ap->positionBox->open();
702 ap->xExt = ap->p->getCompAtomExtInfo();
// Average positions are only needed when MOLLY is active this step.
703 if ( ap->p->flags.doMolly ) ap->x_avg = ap->avgPositionBox->open();
704 ap->r = ap->forceBox->open();
709 BigReal reductionData[T::reductionDataSize];
711 int numAtomTypes = T::pressureProfileAtomTypes;
712 int numAtomTypePairs = numAtomTypes*numAtomTypes;
// Zero the per-step accumulators before force evaluation.
714 for (
int i = 0; i < T::reductionDataSize; ++i ) reductionData[i] = 0;
715 if (pressureProfileData) {
716 memset(pressureProfileData, 0, 3*pressureProfileSlabs*numAtomTypePairs*
sizeof(
BigReal));
// Derive slab geometry from the first patch's lattice (c-axis z extent).
719 newap = newap.
begin();
720 const Lattice &lattice = newap->p->lattice;
721 T::pressureProfileThickness = lattice.
c().
z / pressureProfileSlabs;
722 T::pressureProfileMin = lattice.
origin().
z - 0.5*lattice.
c().
z;
726 if ( doLoadTuples ) {
727 #ifdef USE_HOMETUPLES
732 doLoadTuples =
false;
// Pick tuple storage from the container (USE_HOMETUPLES) or the member
// ResizeArray, then evaluate all tuples in one call.
735 #ifdef USE_HOMETUPLES
736 T *al = (T *)tuples->getTupleList();
737 const int ntuple = tuples->getNumTuples();
739 T *al = tupleList.
begin();
740 const int ntuple = tupleList.
size();
742 if ( ntuple ) T::computeForce(al, ntuple, reductionData, pressureProfileData);
743 tupleCount += ntuple;
748 T::submitReductionData(reductionData,reduction);
749 reduction->
item(T::reductionChecksumLabel) += (
BigReal)tupleCount;
752 if (pressureProfileReduction) {
756 const int arraysize = 3*pressureProfileSlabs;
757 const BigReal *data = pressureProfileData;
758 for (
int i=0; i<numAtomTypes; i++) {
759 for (
int j=0; j<numAtomTypes; j++) {
// Symmetric pair (i,j): canonicalize so ii <= jj before computing the
// triangular-matrix offset on the next lines.
762 if (ii > jj) {
int tmp=ii; ii=jj; jj=tmp; }
763 const int reductionOffset =
764 (ii*numAtomTypes - (ii*(ii+1))/2 + jj)*arraysize;
765 for (
int k=0; k<arraysize; k++) {
766 pressureProfileReduction->
item(reductionOffset+k) += data[k];
771 pressureProfileReduction->
submit();
// Release all boxes opened at the top of doWork().
776 for (ap = ap.
begin(); ap != ap.
end(); ap++) {
777 ap->positionBox->close(&(ap->x));
778 if ( ap->p->flags.doMolly ) ap->avgPositionBox->close(&(ap->x_avg));
779 ap->forceBox->close(&(ap->r));
Elem * find(const Elem &elem)
UniqueSet< TuplePatchElem > TuplePatchList
#define COMPUTE_PROXY_PRIORITY
int num_alch_unpert_Dihedrals
Box< Patch, CompAtom > * registerAvgPositionPickup(Compute *cid)
static ProxyMgr * Object()
static void partition(int *order, const FullAtom *atoms, int begin, int end)
static PatchMap * Object()
Angle * alch_unpert_angles
SimParameters * simParameters
int num_alch_unpert_Bonds
BigReal * pressureProfileData
void startWork(const LDObjHandle &handle)
std::ostream & endi(std::ostream &s)
virtual ~ComputeHomeTuples()
int downstream(int pid1, int pid2)
int upstreamNeighbors(int pid, PatchID *neighbor_ids)
TuplePatchList tuplePatchList
ComputeHomeTuples(ComputeID c)
SubmitReduction * pressureProfileReduction
SubmitReduction * willSubmit(int setID, int size=-1)
std::ostream & iWARN(std::ostream &s)
int num_alch_unpert_Angles
int add(const Elem &elem)
static ReductionMgr * Object(void)
Patch * patch(PatchID pid)
virtual void initialize(void)
Dihedral * alch_unpert_dihedrals
UniqueSetIter< T > begin(void) const
TuplePatchElem(Patch *p_param, Compute *cid)
void NAMD_bug(const char *err_msg)
TuplePatchElem(PatchID pid=-1)
BigReal soluteScalingFactor
unsigned char get_ss_type(int anum) const
LocalID localID(AtomID id)
Box< Patch, CompAtom > * positionBox
void createProxy(PatchID pid)
virtual void doWork(void)
static LdbCoordinator * Object()
static AtomMap * Object()
unsigned char get_fep_type(int anum) const
void endWork(const LDObjHandle &handle)
ResizeArray< T > tupleList
UniqueSetIter< T > end(void) const
int pressureProfileAtomTypes
int numPatches(void) const
SubmitReduction * reduction
ComputeHomeTuples(ComputeID c, PatchIDList &pids)
virtual void loadTuples(void)
Box< Patch, CompAtom > * avgPositionBox
UniqueSetIter< TuplePatchElem > TuplePatchListIter
Box< Patch, Results > * forceBox
int operator<(const TuplePatchElem &elem) const
Box< Patch, CompAtom > * registerPositionPickup(Compute *cid)
int operator==(const TuplePatchElem &elem) const
Box< Patch, Results > * registerForceDeposit(Compute *cid)