// NOTE(review): this chunk is an extraction with the original file's line
// numbers fused into the text (the leading "7", "14", ... on each line) and
// with interior lines missing (numbering jumps 15->23, 29->38, 59->65, ...).
// Code is preserved byte-for-byte below; reconcile against the full source
// before making functional edits.
//
// Constructor: scans the molecule for positionally-restrained atoms,
// records their restraint parameters (force constant k and reference
// position) in host-side vectors, then allocates matching device buffers
// and uploads the data synchronously.
7 #define MIN_DEBUG_LEVEL 2 11 #ifdef NODEGROUP_FORCE_REGISTER 13 ComputeRestraintsCUDA::ComputeRestraintsCUDA(
14 std::vector<HomePatch*> &patchList,
15 std::vector<AtomMap*> &atomMapsList,
// Count of atoms with restraints; sizes every buffer allocated below.
23 nConstrainedAtoms = 0;
// Cache restraint-mode flags from simulation parameters.
29 selConsOn =
simParams->selectConstraintsOn;
38 movConsOn =
simParams->movingConstraintsOn;
55 spheConsOn =
simParams->sphericalConstraintsOn;
// Center of the spherical restraint, copied component-wise.
57 spheConsCenter.x =
simParams->sphericalConstrCenter.x;
58 spheConsCenter.y =
simParams->sphericalConstrCenter.y;
59 spheConsCenter.z =
simParams->sphericalConstrCenter.z;
// Walk every atom by global ID; restrained atoms get an entry in the
// global-ID -> compact-index map plus parallel k / reference-position
// arrays (lines testing is_atom_constrained / get_cons_params are not
// visible in this extraction — confirm against the full source).
65 for(
int gid = 0; gid < numAtoms; gid++){
68 constrainedAtomsIndexMap[gid]=h_constrainedID.size();
69 h_constrainedID.push_back(gid);
// Reference position stored as struct-of-arrays for coalesced GPU access.
74 h_cons_x.push_back(refPos.
x);
75 h_cons_y.push_back(refPos.
y);
76 h_cons_z.push_back(refPos.
z);
// Keep the CUDA stream handed in by the caller for later kernel launches.
79 this->stream = stream;
// Host staging buffer for SOA indices plus device-side copies of every
// per-restrained-atom array; d_tbcatomic is a single thread-block-counter
// word used for device-wide reduction bookkeeping (presumably — confirm
// against computeRestrainingForce's kernel).
81 allocate_host<int>(&h_constrainedSOA, nConstrainedAtoms);
82 allocate_device<unsigned int>(&d_tbcatomic, 1);
83 allocate_device<int>(&d_constrainedSOA, nConstrainedAtoms);
84 allocate_device<int>(&d_constrainedID, nConstrainedAtoms);
85 allocate_device<double>(&d_k, nConstrainedAtoms);
86 allocate_device<double>(&d_cons_x, nConstrainedAtoms);
87 allocate_device<double>(&d_cons_y, nConstrainedAtoms);
88 allocate_device<double>(&d_cons_z, nConstrainedAtoms);
// Synchronous uploads: constructor-time one-shot copies, no stream needed.
90 copy_HtoD_sync<double>(h_k.data(), d_k, nConstrainedAtoms);
91 copy_HtoD_sync<double>(h_cons_x.data(), d_cons_x, nConstrainedAtoms);
92 copy_HtoD_sync<double>(h_cons_y.data(), d_cons_y, nConstrainedAtoms);
93 copy_HtoD_sync<double>(h_cons_z.data(), d_cons_z, nConstrainedAtoms);
94 copy_HtoD_sync<int>(h_constrainedID.data(), d_constrainedID, nConstrainedAtoms);
// Zero the atomic counter so the first kernel launch starts clean.
96 cudaCheck(cudaMemset(d_tbcatomic, 0,
sizeof(
unsigned int)));
// Rebuilds the mapping from each restrained atom's global ID to its current
// SOA buffer index after atom migration, then re-uploads the device copies.
// NOTE(review): interior lines are missing from this extraction (numbering
// jumps 104->108, 119->127, ...); code preserved byte-for-byte.
101 void ComputeRestraintsCUDA::updateRestrainedAtoms(
102 std::vector<AtomMap*> &atomMapsList,
103 std::vector<CudaLocalRecord> &localRecords,
104 const int* h_globalToLocalID
// DebugM message says "ComputeGridForceCUDA" — looks like a copy/paste
// leftover from a sibling class; the string is runtime output so it is
// left untouched here. TODO confirm and correct in the full source.
108 DebugM(4,
"ComputeGridForceCUDA::updateRestrainedAtoms "<< nConstrainedAtoms <<
"\n"<<
endi);
// Rebuilt from scratch each call; holds the compact indices of restrained
// atoms found in the local atom maps.
109 constrainedLocalAtomsIndex.clear();
112 for(
int i = 0; i < nConstrainedAtoms; i++){
114 gid = h_constrainedID[i];
// Linear search across all atom maps; a hit is signalled by pid != -1.
117 for(
int j = 0 ; j < atomMapsList.size(); j++){
118 lid = atomMapsList[j]->localID(gid);
119 if( lid.
pid != -1)
break;
// A restrained atom must be resident somewhere — anything else is fatal.
127 NAMD_bug(
" LocalAtomID not found in patchMap");
// Translate (patch ID, in-patch index) into a flat SOA buffer index.
134 int soaPid = h_globalToLocalID[lid.
pid];
135 int soaIndex = localRecords[soaPid].bufferOffset + lid.
index;
136 int mapoffset= constrainedAtomsIndexMap[gid];
137 h_constrainedSOA[mapoffset] = soaIndex;
138 constrainedLocalAtomsIndex.push_back(mapoffset);
139 DebugM(2,
"ComputeRestraintsCUDA::updateRestrainedAtoms gid " << gid <<
" mapoffset "<< mapoffset <<
" constrainedLocalAtomsIndexSize "<<constrainedLocalAtomsIndex.size() <<
"\n" <<
endi);
// Push the refreshed SOA indices (and the compact index list, reusing
// d_constrainedID as its device home) back to the GPU.
144 copy_HtoD_sync<int>(h_constrainedSOA, d_constrainedSOA, nConstrainedAtoms);
145 int* idxPtr = constrainedLocalAtomsIndex.data();
147 copy_HtoD_sync<int>(idxPtr, d_constrainedID, constrainedLocalAtomsIndex.size());
// Launches the restraining-force kernel for the locally resident restrained
// atoms. NOTE(review): only fragments of this function survive in this
// extraction (original lines 153-170, 172-173, 175-177, 179-221 are absent);
// code preserved byte-for-byte — do not edit without the full source.
152 void ComputeRestraintsCUDA::doForce(
// Nothing restrained on this rank this step: skip the kernel launch.
171 if (constrainedLocalAtomsIndex.empty())
return;
174 computeRestrainingForce(
// Kernel is sized by the count of locally mapped restrained atoms.
178 constrainedLocalAtomsIndex.size(),
// Destructor: releases every device buffer allocated in the constructor.
// NOTE(review): the host buffer h_constrainedSOA (allocate_host in the
// constructor) is not freed in the lines visible here — confirm against
// the full source that it is deallocated (possible leak otherwise).
222 ComputeRestraintsCUDA::~ComputeRestraintsCUDA()
224 deallocate_device<unsigned int>(&d_tbcatomic);
225 deallocate_device<int>(&d_constrainedSOA);
226 deallocate_device<int>(&d_constrainedID);
227 deallocate_device<double>(&d_cons_x);
228 deallocate_device<double>(&d_cons_y);
229 deallocate_device<double>(&d_cons_z);
230 deallocate_device<double>(&d_k);
233 #endif // NODEGROUP_FORCE_REGISTER
SimParameters * simParameters
std::ostream & endi(std::ostream &s)
Molecule stores the structural information for the system.
void NAMD_bug(const char *err_msg)
void get_cons_params(Real &k, Vector &refPos, int atomnum) const
Bool is_atom_constrained(int atomnum) const