| version 1.1064 | version 1.1065 |
|---|---|
| … | … |
| #ifndef COMPUTEHOMETUPLES_H | #ifndef COMPUTEHOMETUPLES_H |
| #define COMPUTEHOMETUPLES_H | #define COMPUTEHOMETUPLES_H |
| | |
| | #ifdef USE_HOMETUPLES |
| | #include <vector> |
| | #endif |
| #include "NamdTypes.h" | #include "NamdTypes.h" |
| #include "common.h" | #include "common.h" |
| #include "structures.h" | #include "structures.h" |
| … | … |
| #include "AtomMap.h" | #include "AtomMap.h" |
| #include "ComputeHomeTuples.h" | #include "ComputeHomeTuples.h" |
| #include "PatchMgr.h" | #include "PatchMgr.h" |
| | #include "ProxyMgr.h" |
| #include "HomePatchList.h" | #include "HomePatchList.h" |
| #include "Molecule.h" | #include "Molecule.h" |
| #include "Parameters.h" | #include "Parameters.h" |
| … | … |
| }; | }; |
| #endif | #endif |
| | |
| | #ifdef USE_HOMETUPLES |
| | // |
| | // Simple base class for HomeTuples and SelfTuples that stores the type of the tuple |
| | // |
| | class Tuples { |
| | private: |
| | int type; |
| | protected: |
| | Tuples(int type) : type(type) {} |
| | public: |
| | // Tuple types |
| | enum {BOND=0, ANGLE, DIHEDRAL, IMPROPER, EXCLUSION, CROSSTERM, NUM_TUPLE_TYPES}; |
| | |
| | int getType() {return type;} |
| | virtual void submitTupleCount(SubmitReduction *reduction, int tupleCount)=0; |
| | // virtual void copyTupleData(void* tupleData)=0; |
| | virtual int getNumTuples()=0; |
| | virtual void* getTupleList()=0; |
| | virtual void loadTuples(TuplePatchList& tuplePatchList, const char* isBasePatch, AtomMap *atomMap, |
| | const std::vector<int>& pids = std::vector<int>())=0; |
| | }; |
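
The new `Tuples` base class above lets the CUDA-side bonded code keep one heterogeneous collection of tuple containers and recover the concrete element type at runtime from the type tag. Below is a minimal, self-contained sketch of that pattern; `BondElem` here is a simplified two-atom stand-in, and the single-parameter `HomeTuples<T>` is a reduced version of NAMD's three-parameter template, not the real interface.

```cpp
#include <cstdio>
#include <vector>

// Type-tagged base, as in the Tuples class above: a runtime tuple-type
// tag plus a virtual, type-erased view of the underlying storage.
class Tuples {
  int type;
protected:
  explicit Tuples(int type) : type(type) {}
public:
  enum {BOND=0, ANGLE, NUM_TUPLE_TYPES};
  int getType() const { return type; }
  virtual int getNumTuples() = 0;
  virtual void* getTupleList() = 0;
  virtual ~Tuples() {}
};

// Typed container, as in HomeTuples<T,S,P>: owns a std::vector<T> and
// exposes it through the type-erased base-class interface.
template <class T> class HomeTuples : public Tuples {
  std::vector<T> tupleList;
public:
  explicit HomeTuples(int type) : Tuples(type) {}
  void add(const T& t) { tupleList.push_back(t); }
  int getNumTuples() { return (int)tupleList.size(); }
  void* getTupleList() { return (void*)tupleList.data(); }
};

struct BondElem { int atomID[2]; };  // hypothetical 2-atom tuple element

int main() {
  HomeTuples<BondElem> bonds(Tuples::BOND);
  BondElem b = {{3, 7}};
  bonds.add(b);
  Tuples* t = &bonds;  // stored type-erased, e.g. in a std::vector<Tuples*>
  if (t->getType() == Tuples::BOND) {
    // The tag tells the caller which concrete type to cast back to.
    BondElem* list = static_cast<BondElem*>(t->getTupleList());
    std::printf("%d bond(s); first = (%d,%d)\n",
                t->getNumTuples(), list[0].atomID[0], list[0].atomID[1]);
  }
  return 0;
}
```

Type erasure is the point of the design: one registration path can hold bonds, angles, dihedrals, etc. behind `Tuples*` and hand each raw array to the bonded kernel setup without a per-type code path.
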
| | |
| | // |
| | // HomeTuples class. These are created and stored in ComputeBondedCUDA::registerCompute() |
| | // e.g.: new HomeTuples<BondElem, Bond, BondValue>(BOND) |
| | // |
| | template <class T, class S, class P> class HomeTuples : public Tuples { |
| | protected: |
| | std::vector<T> tupleList; |
| | |
| | public: |
| | |
| | HomeTuples(int type=-1) : Tuples(type) {} |
| | |
| | #if __cplusplus < 201103L |
| | #define final |
| | #endif |
| | |
| | virtual void* getTupleList() final { |
| | return (void*)tupleList.data(); |
| | } |
| | |
| | virtual void submitTupleCount(SubmitReduction *reduction, int tupleCount) final { |
| | reduction->item(T::reductionChecksumLabel) += (BigReal)tupleCount; |
| | } |
| | |
| | // virtual void copyTupleData(void* tupleData) final { |
| | // for (int i=0;i < tupleList.size();i++) { |
| | // tupleData[i] = |
| | // } |
| | // T::loadTupleData(tupleData); |
| | // } |
| | |
| | virtual int getNumTuples() final { |
| | return tupleList.size(); |
| | } |
| | |
| | virtual void loadTuples(TuplePatchList& tuplePatchList, const char* isBasePatch, AtomMap *atomMap, |
| | const std::vector<int>& pids = std::vector<int>()) { |
| | |
| | if (isBasePatch == NULL) { |
| | NAMD_bug("NULL isBasePatch detected in HomeTuples::loadTuples()"); |
| | } |
| | |
| | int numTuples; |
| | |
| | #ifdef MEM_OPT_VERSION |
| | typename ElemTraits<T>::signature *allSigs; |
| | #else |
| | int32 **tuplesByAtom; |
| | /* const (need to propagate const) */ S *tupleStructs; |
| | #endif |
| | |
| | const P *tupleValues; |
| | Node *node = Node::Object(); |
| | PatchMap *patchMap = PatchMap::Object(); |
| | // AtomMap *atomMap = AtomMap::Object(); |
| | |
| | #ifdef MEM_OPT_VERSION |
| | allSigs = ElemTraits<T>::get_sig_pointer(node->molecule); |
| | #else |
| | T::getMoleculePointers(node->molecule, |
| | &numTuples, &tuplesByAtom, &tupleStructs); |
| | #endif |
| | |
| | T::getParameterPointers(node->parameters, &tupleValues); |
| | |
| | tupleList.clear(); |
| | |
| | LocalID aid[T::size]; |
| | |
| | const int lesOn = node->simParameters->lesOn; |
| | Real invLesFactor = lesOn ? |
| | 1.0/node->simParameters->lesFactor : |
| | 1.0; |
| | |
| | // cycle through each patch and gather all tuples |
| | TuplePatchListIter ai(tuplePatchList); |
| | if (pids.size() == 0) ai = ai.begin(); |
| | |
| | int numPid = (pids.size() == 0) ? tuplePatchList.size() : pids.size(); |
| | |
| | for (int ipid=0;ipid < numPid;ipid++) { |
| | // Patch *patch; |
| | int numAtoms; |
| | CompAtomExt *atomExt; |
| | // Take next patch |
| | if (pids.size() == 0) { |
| | Patch* patch = (*ai).p; |
| | numAtoms = patch->getNumAtoms(); |
| | atomExt = (*ai).xExt; |
| | ai++; |
| | } else { |
| | TuplePatchElem *tpe = tuplePatchList.find(TuplePatchElem(pids[ipid])); |
| | Patch* patch = tpe->p; |
| | numAtoms = patch->getNumAtoms(); |
| | atomExt = tpe->xExt; |
| | } |
| | |
| | // cycle through each atom in the patch and load up tuples |
| | for (int j=0; j < numAtoms; j++) |
| | { |
| | /* cycle through each tuple */ |
| | #ifdef MEM_OPT_VERSION |
| | typename ElemTraits<T>::signature *thisAtomSig = |
| | &allSigs[ElemTraits<T>::get_sig_id(atomExt[j])]; |
| | TupleSignature *allTuples; |
| | T::getTupleInfo(thisAtomSig, &numTuples, &allTuples); |
| | for(int k=0; k<numTuples; k++) { |
| | T t(atomExt[j].id, &allTuples[k], tupleValues); |
| | #else |
| | /* get list of all tuples for the atom */ |
| | int32 *curTuple = tuplesByAtom[atomExt[j].id]; |
| | for( ; *curTuple != -1; ++curTuple) { |
| | T t(&tupleStructs[*curTuple],tupleValues); |
| | #endif |
| | register int i; |
| | aid[0] = atomMap->localID(t.atomID[0]); |
| | int homepatch = aid[0].pid; |
| | int samepatch = 1; |
| | int has_les = lesOn && node->molecule->get_fep_type(t.atomID[0]); |
| | for (i=1; i < T::size; i++) { |
| | aid[i] = atomMap->localID(t.atomID[i]); |
| | samepatch = samepatch && ( homepatch == aid[i].pid ); |
| | has_les |= lesOn && node->molecule->get_fep_type(t.atomID[i]); |
| | } |
| | if ( samepatch ) continue; |
| | t.scale = has_les ? invLesFactor : 1; |
| | for (i=1; i < T::size; i++) { |
| | homepatch = patchMap->downstream(homepatch,aid[i].pid); |
| | } |
| | if ( homepatch != notUsed && isBasePatch[homepatch] ) { |
| | TuplePatchElem *p; |
| | for (i=0; i < T::size; i++) { |
| | t.p[i] = p = tuplePatchList.find(TuplePatchElem(aid[i].pid)); |
| | if ( ! p ) { |
| | #ifdef MEM_OPT_VERSION |
| | iout << iWARN << "Tuple with atoms "; |
| | #else |
| | iout << iWARN << "Tuple " << *curTuple << " with atoms "; |
| | #endif |
| | int erri; |
| | for( erri = 0; erri < T::size; erri++ ) { |
| | iout << t.atomID[erri] << "(" << aid[erri].pid << ") "; |
| | } |
| | iout << "missing patch " << aid[i].pid << "\n" << endi; |
| | break; |
| | } |
| | t.localIndex[i] = aid[i].index; |
| | } |
| | if ( ! p ) continue; |
| | #ifdef MEM_OPT_VERSION |
| | //avoid adding Tuples whose atoms are all fixed |
| | if(node->simParameters->fixedAtomsOn && !node->simParameters->fixedAtomsForces) { |
| | int allfixed = 1; |
| | for(i=0; i<T::size; i++){ |
| | CompAtomExt *one = &(t.p[i]->xExt[aid[i].index]); |
| | allfixed = allfixed & one->atomFixed; |
| | } |
| | if(!allfixed) tupleList.push_back(t); |
| | }else{ |
| | tupleList.push_back(t); |
| | } |
| | #else |
| | tupleList.push_back(t); |
| | #endif |
| | } |
| | } |
| | } |
| | } |
| | } |
| | |
| | }; |
| | #endif |
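
The heart of `loadTuples()` above is the ownership rule in its inner loop: a tuple's atom patch IDs are folded through `patchMap->downstream()` into a single owner patch, and the tuple is kept only when that owner is one of this compute's base patches, so each tuple is loaded exactly once across all computes. Here is a standalone sketch of that rule under a made-up `downstream()` (real NAMD derives the downstream patch from the spatial patch topology, not from a minimum of IDs):

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for PatchMap::downstream(): reduce two patch
// IDs to one owner. Taking the minimum is enough to show the rule.
int downstream(int pid1, int pid2) { return std::min(pid1, pid2); }

// Mirrors the homepatch fold in HomeTuples::loadTuples(): fold all of
// a tuple's patch IDs into one downstream patch, then accept the tuple
// only if that patch is flagged as one of our base patches.
bool shouldLoad(const std::vector<int>& atomPids,
                const std::vector<char>& isBasePatch) {
  int homepatch = atomPids[0];
  for (size_t i = 1; i < atomPids.size(); i++)
    homepatch = downstream(homepatch, atomPids[i]);
  return isBasePatch[homepatch] != 0;
}

int main() {
  std::vector<char> isBasePatch = {1, 0, 1, 0};  // we own patches 0 and 2

  // An angle whose three atoms sit in patches 2, 3 and 1:
  // downstream(2,3) -> 2, then downstream(2,1) -> 1. Patch 1 is not
  // ours, so some other compute loads this tuple instead of us.
  std::vector<int> anglePids = {2, 3, 1};
  std::printf("load? %s\n", shouldLoad(anglePids, isBasePatch) ? "yes" : "no");
  return 0;
}
```
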
| | |
| template <class T, class S, class P> class ComputeHomeTuples : public Compute { | template <class T, class S, class P> class ComputeHomeTuples : public Compute { |
| | |
| protected: | protected: |
| | |
| | #ifndef USE_HOMETUPLES |
| virtual void loadTuples(void) { | virtual void loadTuples(void) { |
| int numTuples; | int numTuples; |
| | |
| … | … |
| } | } |
| } | } |
| } | } |
| | #endif |
| | |
| int doLoadTuples; | int doLoadTuples; |
| | |
| protected: | protected: |
| | |
| | #ifdef USE_HOMETUPLES |
| | HomeTuples<T, S, P>* tuples; |
| | TuplePatchList tuplePatchList; |
| | #else |
| ResizeArray<T> tupleList; | ResizeArray<T> tupleList; |
| TuplePatchList tuplePatchList; | TuplePatchList tuplePatchList; |
| | #endif |
| | |
| PatchMap *patchMap; | PatchMap *patchMap; |
| AtomMap *atomMap; | AtomMap *atomMap; |
| … | … |
| } | } |
| doLoadTuples = false; | doLoadTuples = false; |
| isBasePatch = 0; | isBasePatch = 0; |
| | #ifdef USE_HOMETUPLES |
| | tuples = NULL; |
| | #endif |
| } | } |
| | |
| ComputeHomeTuples(ComputeID c, PatchIDList &pids) : Compute(c) { | ComputeHomeTuples(ComputeID c, PatchIDList &pids) : Compute(c) { |
| … | … |
| int i; | int i; |
| for (i=0; i<nPatches; ++i) { isBasePatch[i] = 0; } | for (i=0; i<nPatches; ++i) { isBasePatch[i] = 0; } |
| for (i=0; i<pids.size(); ++i) { isBasePatch[pids[i]] = 1; } | for (i=0; i<pids.size(); ++i) { isBasePatch[pids[i]] = 1; } |
| | #ifdef USE_HOMETUPLES |
| | tuples = NULL; |
| | #endif |
| } | } |
| | |
| public: | public: |
| … | … |
| delete [] isBasePatch; | delete [] isBasePatch; |
| delete pressureProfileReduction; | delete pressureProfileReduction; |
| delete pressureProfileData; | delete pressureProfileData; |
| | #ifdef USE_HOMETUPLES |
| | if (tuples != NULL) delete tuples; |
| | #endif |
| } | } |
| | |
| //====================================================================== | //====================================================================== |
| … | … |
| //====================================================================== | //====================================================================== |
| virtual void initialize(void) { | virtual void initialize(void) { |
| | |
| | #ifdef NAMD_CUDA |
| | ProxyMgr *proxyMgr = ProxyMgr::Object(); |
| | #endif |
| | |
| | #ifdef USE_HOMETUPLES |
| | tuples = new HomeTuples<T, S, P>(); |
| | #endif |
| | |
| // Start with empty list | // Start with empty list |
| tuplePatchList.clear(); | tuplePatchList.clear(); |
| | |
| … | … |
| int pid; | int pid; |
| for (pid=0; pid<nPatches; ++pid) { | for (pid=0; pid<nPatches; ++pid) { |
| if ( isBasePatch[pid] ) { | if ( isBasePatch[pid] ) { |
| | #ifdef NAMD_CUDA |
| | proxyMgr->createProxy(pid); |
| | #endif |
| Patch *patch = patchMap->patch(pid); | Patch *patch = patchMap->patch(pid); |
| tuplePatchList.add(TuplePatchElem(patch, this)); | tuplePatchList.add(TuplePatchElem(patch, this)); |
| } | } |
| … | … |
| int numNeighbors = patchMap->upstreamNeighbors(pid,neighbors); | int numNeighbors = patchMap->upstreamNeighbors(pid,neighbors); |
| for ( int i = 0; i < numNeighbors; ++i ) { | for ( int i = 0; i < numNeighbors; ++i ) { |
| if ( ! tuplePatchList.find(TuplePatchElem(neighbors[i])) ) { | if ( ! tuplePatchList.find(TuplePatchElem(neighbors[i])) ) { |
| | #ifdef NAMD_CUDA |
| | proxyMgr->createProxy(neighbors[i]); |
| | #endif |
| Patch *patch = patchMap->patch(neighbors[i]); | Patch *patch = patchMap->patch(neighbors[i]); |
| tuplePatchList.add(TuplePatchElem(patch, this)); | tuplePatchList.add(TuplePatchElem(patch, this)); |
| } | } |
| … | … |
| | |
| if ( ! Node::Object()->simParameters->commOnly ) { | if ( ! Node::Object()->simParameters->commOnly ) { |
| if ( doLoadTuples ) { | if ( doLoadTuples ) { |
| | #ifdef USE_HOMETUPLES |
| | tuples->loadTuples(tuplePatchList, isBasePatch, AtomMap::Object()); |
| | #else |
| loadTuples(); | loadTuples(); |
| | #endif |
| doLoadTuples = false; | doLoadTuples = false; |
| } | } |
| // take triplet and pass with tuple info to force eval | // take triplet and pass with tuple info to force eval |
| | #ifdef USE_HOMETUPLES |
| | T *al = (T *)tuples->getTupleList(); |
| | const int ntuple = tuples->getNumTuples(); |
| | #else |
| T *al = tupleList.begin(); | T *al = tupleList.begin(); |
| const int ntuple = tupleList.size(); | const int ntuple = tupleList.size(); |
| | #endif |
| if ( ntuple ) T::computeForce(al, ntuple, reductionData, pressureProfileData); | if ( ntuple ) T::computeForce(al, ntuple, reductionData, pressureProfileData); |
| tupleCount += ntuple; | tupleCount += ntuple; |
| } | } |
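
The `invLesFactor` logic earlier in `loadTuples()` deserves a worked example: with locally enhanced sampling, a region is replicated `lesFactor` times, so any tuple touching at least one enhanced atom gets `t.scale = 1/lesFactor` and the copies together contribute one tuple's worth of energy. A standalone rewrite of that scale computation follows, with made-up `fepType` data standing in for `molecule->get_fep_type()`:

```cpp
#include <cstdio>

int main() {
  const int lesOn = 1;
  const int lesFactor = 4;  // number of LES copies of the enhanced region
  // Matches loadTuples(): scale factor applied to enhanced tuples.
  const double invLesFactor = lesOn ? 1.0 / lesFactor : 1.0;

  // Per-atom flags standing in for molecule->get_fep_type(atomID):
  // nonzero marks an atom in the locally enhanced region.
  int fepType[3] = {0, 2, 0};

  // As in the tuple loop: the tuple is "enhanced" if any of its atoms is.
  int has_les = 0;
  for (int i = 0; i < 3; i++) has_les |= lesOn && fepType[i];

  double scale = has_les ? invLesFactor : 1.0;  // t.scale in the real code
  std::printf("tuple scale = %g\n", scale);     // prints 0.25
  return 0;
}
```
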