23 #include "ComputeMgr.decl.h"
// --- ComputeGlobal constructor (FRAGMENT) ---
// NOTE(review): this file is a fragmentary extraction — original line
// numbers are embedded in the text and interior lines are missing.
// Comments only; do not alter logic without the complete source file.
38 DebugM(3,
"Constructing client\n");
// Request bookkeeping starts empty; the isRequested buffer is (re)grown
// on demand in configure() (see the endRequested > isRequestedAllocSize
// check there).
44 isRequestedAllocSize = 0;
46 numGroupsRequested = 0;
// Enable total-force sending when the colvars module is active.
// (presumably `sp` is the SimParameters object — TODO confirm against
// the missing lines)
51 if ( sp->
colvarsOn ) forceSendEnabled = 1;
// Per-patch lookup tables, one slot per patch, NULL-initialized below.
59 forcePtrs =
new Force*[numPatches];
60 atomPtrs =
new FullAtom*[numPatches];
61 gridForcesPtrs =
new ForceList **[numPatches];
// No grid objects are configured until configureGridObjects() runs.
62 numGridObjects = numActiveGridObjects = 0;
63 for (
int i = 0; i < numPatches; ++i ) {
64 forcePtrs[i] = NULL; atomPtrs[i] = NULL;
65 gridForcesPtrs[i] = NULL;
// Destructor FRAGMENT: frees the per-patch table of ForceList* arrays.
// NOTE(review): the inner ForceList arrays are presumably released by
// deleteGridObjects() before this — confirm against the missing lines.
74 delete[] gridForcesPtrs;
// --- ComputeGlobal::configure (FRAGMENT) ---
// Installs a new request configuration: requested atom ids (newaid),
// group definitions (newgdef, -1-terminated id runs), and grid object
// indices (newgridobjid). Interior lines are missing from this extract.
80 DebugM(4,
"Receiving configuration (" << newaid.
size() <<
81 " atoms, " << newgdef.
size() <<
" atoms/groups and " <<
82 newgridobjid.
size() <<
" grid objects) on client\n" <<
endi);
// When force sending is enabled, size the isRequested flag array to the
// largest id referenced by either the atom list or the group definitions.
86 if ( forceSendEnabled ) {
89 for (a=newaid.
begin(),a_e=newaid.
end(); a!=a_e; ++a) {
90 if ( *a > max ) max = *a;
92 for (a=newgdef.
begin(),a_e=newgdef.
end(); a!=a_e; ++a) {
93 if ( *a > max ) max = *a;
// Grow the flag buffer with slack (+10) and zero it; old contents are
// discarded (flags are fully rebuilt below).
96 if ( endRequested > isRequestedAllocSize ) {
97 delete [] isRequested;
98 isRequestedAllocSize = endRequested+10;
99 isRequested =
new char[isRequestedAllocSize];
100 memset(isRequested, 0, isRequestedAllocSize);
// Clear flags for the previously-configured atom and group ids
// (-1 entries in gdef are group terminators, not atom ids).
102 for (a=aid.
begin(),a_e=aid.
end(); a!=a_e; ++a) {
105 for (a=gdef.
begin(),a_e=gdef.
end(); a!=a_e; ++a) {
106 if ( *a != -1 ) isRequested[*a] = 0;
119 if (newgridobjid.
size()) configureGridObjects(newgridobjid);
// Re-mark flags for the new configuration: group members get bit 2
// (individually-requested atoms presumably get bit 1 in the elided
// lines — saveTotalForces tests `& 1`). newgcount counts groups via
// their -1 terminators.
121 if ( forceSendEnabled ) {
123 for (a=aid.
begin(),a_e=aid.
end(); a!=a_e; ++a) {
126 for (a=gdef.
begin(),a_e=gdef.
end(); a!=a_e; ++a) {
127 if ( *a == -1 ) ++newgcount;
129 isRequested[*a] |= 2;
134 numGroupsRequested = newgcount;
// Releases all per-patch, per-grid ForceList storage and resets the grid
// object counters. FRAGMENT: loop variable `ap` is declared in elided
// lines (presumably a ResizeArrayIter over patchList — TODO confirm).
138 void ComputeGlobal::deleteGridObjects()
// Fast exit when nothing was ever configured.
140 if (numGridObjects == 0)
return;
142 for (ap = ap.begin(); ap != ap.end(); ap++) {
143 ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
144 if (gridForces != NULL) {
145 for (
size_t ig = 0; ig < numGridObjects; ig++) {
146 if (gridForces[ig] != NULL) {
// Delete each per-grid force list, then NULL the slot so a repeat call
// is safe.
147 delete gridForces[ig];
148 gridForces[ig] = NULL;
// Free the array of ForceList pointers itself (allocated in
// configureGridObjects); the gridForcesPtrs slot reset is presumably in
// the elided lines.
151 delete [] gridForces;
155 numGridObjects = numActiveGridObjects = 0;
// Marks which gridforce grids this compute handles (gridObjActive flags),
// validates the requested indices, and allocates per-patch ForceList*
// arrays. FRAGMENT: iterator declarations and some braces are elided.
158 void ComputeGlobal::configureGridObjects(
IntList &newgridobjid)
165 numActiveGridObjects = 0;
// One active/inactive flag per known grid object.
167 gridObjActive.
resize(numGridObjects);
// Validate and activate each requested grid index.
172 for ( ; goid_i != goid_e; goid_i++) {
173 if ((*goid_i < 0) || (*goid_i >= numGridObjects)) {
174 NAMD_bug(
"Requested illegal gridForceGrid index.");
176 DebugM(3,
"Adding grid with index " << *goid_i <<
" to ComputeGlobal\n");
177 gridObjActive[*goid_i] = 1;
178 numActiveGridObjects++;
// Debug dump of the final active/inactive map.
182 for (
size_t ig = 0; ig < numGridObjects; ig++) {
183 DebugM(3,
"Grid index " << ig <<
" is active or inactive? "
184 << gridObjActive[ig] <<
"\n" <<
endi);
// Per patch: allocate one ForceList* slot per grid object; inactive
// grids stay NULL (active-slot allocation is presumably in elided line
// 193-194 — TODO confirm).
188 for (ap = ap.begin(); ap != ap.end(); ap++) {
189 gridForcesPtrs[ap->p->getPatchID()] =
new ForceList *[numGridObjects];
190 ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
191 for (
size_t ig = 0; ig < numGridObjects; ig++) {
192 if (gridObjActive[ig]) {
195 gridForces[ig] = NULL;
// Entry point for a configuration message from the master: forwards the
// atom ids and group definitions to configure(). FRAGMENT: the tail
// (presumably message cleanup / initial data send) is elided.
202 void ComputeGlobal::recvConfig(ComputeGlobalConfigMsg *msg) {
203 DebugM(3,
"Receiving configure on client\n");
204 configure(msg->aid,msg->gdef);
// --- ComputeGlobal::recvResults (FRAGMENT) ---
// Applies forces received from the master to local atoms and groups,
// accumulates external force/virial, then closes the patch boxes.
// Many interior lines (declarations, box opens, group handling) elided.
211 DebugM(3,
"Receiving results (" << msg->
aid.
size() <<
" forces, "
212 << msg->
newgdef.
size() <<
" new group atoms) on client\n");
// Sanity check: per-message force sending requires the feature to have
// been enabled at configure time.
215 if ( forceSendActive && ! forceSendEnabled )
NAMD_bug(
"ComputeGlobal::recvResults forceSendActive without forceSendEnabled");
225 Force **f = forcePtrs;
// Open the force boxes and cache atom-list iterators per patch id.
230 for (ap = ap.
begin(); ap != ap.
end(); ap++) {
231 (*ap).r = (*ap).forceBox->open();
233 t[(*ap).patchID] = (*ap).p->getAtomList().begin();
// Apply each received per-atom force: localID maps the global atom id
// to (patch id, index within patch).
239 for ( ; a != a_e; ++a, ++f2 ) {
240 DebugM(1,
"processing atom "<<(*a)<<
", F="<<(*f2)<<
"...\n");
244 Force f_atom = (*f2);
245 f[localID.
pid][localID.
index] += f_atom;
// Virial contribution of this applied force (x_atom set in elided lines).
251 extVirial +=
outer(f_atom,x_atom);
253 DebugM(1,
"done with the loop\n");
// Group forces: walk gdef, one -1-terminated run of atom ids per group;
// the per-member share (mass-weighted, presumably — elided) is applied
// to each member.
257 g_i = gdef.
begin(); g_e = gdef.
end();
260 for ( ; g_i != g_e; ++g_i, ++gf_i ) {
263 for ( ; *g_i != -1; ++g_i ) {
269 f[localID.
pid][localID.
index] += f_atom;
274 extVirial +=
outer(f_atom,x_atom);
277 DebugM(1,
"done with the groups\n");
279 if (numActiveGridObjects > 0) {
280 applyGridObjectForces(msg, &extForce, &extVirial);
// If the message carries a new group definition, reconfigure and resend
// requested data immediately (reconfigure body elided).
291 DebugM(3,
"Reconfiguring\n");
298 DebugM(3,
"Sending requested data right away\n");
// Reset the per-group total-force accumulators for the next cycle.
302 groupTotalForce.
resize(numGroupsRequested);
303 for (
int i=0; i<numGroupsRequested; ++i ) groupTotalForce[i] = 0;
307 Force **f = forcePtrs;
// Close position/force boxes and clear the per-patch lookup slots.
309 for (ap = ap.
begin(); ap != ap.
end(); ap++) {
311 (*ap).positionBox->close(&x);
312 (*ap).forceBox->close(&((*ap).r));
313 f[(*ap).patchID] = 0;
314 t[(*ap).patchID] = 0;
319 DebugM(3,
"Done processing results\n");
// --- ComputeGlobal::doWork (FRAGMENT) ---
// Per-step driver: caches atom-list iterators per patch and, except on
// the first invocation, sends position data to the master. Most of the
// body (box opens, first-time handling) is elided.
329 for (ap = ap.
begin(); ap != ap.
end(); ap++) {
331 t[(*ap).patchID] = (*ap).p->getAtomList().begin();
// First call skips sending (presumably configuration has not arrived
// yet — TODO confirm against elided lines).
334 if(!firsttime) sendData();
347 DebugM(2,
"done with doWork\n");
// --- ComputeGlobal::sendData (FRAGMENT) ---
// Builds a ComputeGlobalDataMsg with requested atom positions, group
// centers of mass, grid object values, and accumulated group total
// forces, then sends it to the master. Most message-filling code elided.
350 void ComputeGlobal::sendData()
// Requested individual atoms (loop body elided).
366 for ( ; a != a_e; ++a ) {
// Group centers of mass: one -1-terminated id run per group in gdef.
379 g_i = gdef.
begin(); g_e = gdef.
end();
380 for ( ; g_i != g_e; ++g_i ) {
383 for ( ; *g_i != -1; ++g_i ) {
393 DebugM(1,
"Adding center of mass "<<com<<
"\n");
398 if (numActiveGridObjects > 0) {
399 computeGridObjects(msg);
// Hand the accumulated group total forces to the message without
// copying (swap), only when any were collected.
407 if ( gfcount ) msg->
gtf.
swap(groupTotalForce);
411 DebugM(3,
"Sending data (" << msg->
p.
size() <<
" positions, "
413 <<
" grid objects) on client\n");
// --- per-atom grid evaluation (FRAGMENT, presumably
// computeGridForceGrid — enclosing signature elided) ---
// For each atom: wrap its position into the grid's lattice, evaluate the
// grid potential V and gradient dV, and accumulate force and energy
// scaled by the atom's gridforce charge and scale factor.
432 for ( ; ai != ae; ai++, gfi++) {
// Default contribution is zero (e.g. atom outside grid / not gridforced).
433 *gfi =
Vector(0.0, 0.0, 0.0);
442 Position pos = grid->wrap_position(ai->position, lattice);
443 DebugM(1,
"id = " << ai->id <<
", scale = " << scale
444 <<
", charge = " << charge <<
", position = " << pos <<
"\n");
445 if (grid->compute_VdV(pos, V, dV)) {
// Force is the negative gradient of the scaled potential; the grid
// object's scalar value accumulates the scaled potential itself.
450 *gfi = -charge * scale * dV;
451 gridObjValue += charge * scale * V;
452 DebugM(1,
"id = " << ai->id <<
", force = " << *gfi <<
"\n");
454 DebugM(3,
"gridObjValue = " << gridObjValue <<
"\n" <<
endi);
// Guard (from elided surrounding code): reaching here without a grid
// in memory is a programming error.
464 NAMD_bug(
"No grids loaded in memory but ComputeGlobal has been requested to use them.");
// --- ComputeGlobal::computeGridObjects (FRAGMENT) ---
// For every patch and every grid object handled by this compute, sizes
// the per-atom force list and evaluates the grid (computeGridForceGrid),
// accumulating per-grid scalar values into the outgoing message.
472 size_t ig = 0, gridobjcount = 0;
476 for (ap = ap.begin(); ap != ap.end(); ap++) {
480 int const numAtoms = ap->p->getNumAtoms();
481 ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
484 for (ig = 0; ig < numGridObjects; ig++) {
486 DebugM(2,
"Processing grid index " << ig <<
"\n" <<
endi);
// Inactive grids are owned by ComputeGridForce, not by this compute.
489 if (!gridObjActive[ig]) {
490 DebugM(2,
"Skipping grid index " << ig <<
"; it is handled by "
491 "ComputeGridForce\n" <<
endi);
// One force slot per atom in this patch for this grid.
495 ForceList *gridForcesGrid = gridForces[ig];
496 gridForcesGrid->
resize(numAtoms);
502 DebugM(2,
"computeGridObjects(): patch = " << ap->p->getPatchID()
503 <<
", grid index = " << ig <<
"\n" <<
endi);
// Two call sites — presumably one per concrete grid type dispatched in
// elided lines (TODO confirm); both accumulate into gridobjvalue.
512 computeGridForceGrid(ai, ae, gfi, ap->p->lattice, ig, g, gridobjvalue);
517 computeGridForceGrid(ai, ae, gfi, ap->p->lattice, ig, g, gridobjvalue);
// Final accumulation over active grids (body elided).
524 for (gridobjcount = 0; gridobjcount < numActiveGridObjects; gridobjcount++) {
529 DebugM(2,
"computeGridObjects done\n");
// --- ComputeGlobal::applyGridObjectForces (FRAGMENT) ---
// Distributes the master-supplied scalar force on each grid object back
// onto atoms: per-atom force = -(stored grid gradient) * (grid object
// force), accumulating external force and virial. Interior declarations
// (iterators, f, localID, x_virial) are elided.
// Count mismatch between received forces and active grids is fatal.
539 NAMD_bug(
"ComputeGlobal received a different number of grid forces than active grids.");
545 Force &extForce = *extForce_in;
546 Tensor &extVirial = *extVirial_in;
// Expand the compact per-active-grid force list from the message into a
// full-size array indexed by grid object id, zero for inactive grids.
550 gridObjForces.
resize(numGridObjects);
551 gridObjForces.
setall(0.0);
555 for (ig = 0; gridobjforce_i != gridobjforce_e ;
556 gridobjforce_i++, ig++) {
557 if (!gridObjActive[ig])
continue;
558 gridObjForces[ig] = *gridobjforce_i;
// Per patch, per active grid: scale each atom's stored gradient by the
// grid object force and apply it.
563 for (ap = ap.begin(); ap != ap.end(); ap++) {
565 ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
567 for (ig = 0; ig < numGridObjects; ig++) {
569 if (!gridObjActive[ig])
continue;
571 DebugM(2,
"gof = " << gridObjForces[ig] <<
"\n" <<
endi);
573 ForceList *gridForcesGrid = gridForces[ig];
580 for ( ; ai != ae; ai++, gfi++) {
// Reset the stored slot after reading it (next step re-fills it).
582 *gfi =
Vector(0.0, 0.0, 0.0);
587 Vector const gridforceatom(-1.0 * (*gfi) * gridObjForces[ig]);
588 DebugM(2,
"id = " << ai->id
589 <<
", pid = " << localID.
pid
590 <<
", index = " << localID.
index
591 <<
", force = " << gridforceatom <<
"\n" <<
endi);
592 f[localID.
index] += gridforceatom;
593 extForce += gridforceatom;
// Virial from this applied force (x_virial computed in elided lines).
597 extVirial +=
outer(gridforceatom, x_virial);
// --- ComputeGlobal::saveTotalForces (FRAGMENT) ---
// Collects total forces for requested atoms (isRequested bit 1) and for
// members of requested groups (other isRequested bits), accumulating
// group sums into groupTotalForce via a gpair index built in elided code.
// Calling this without forceSendEnabled is a programming error; with the
// feature enabled but inactive this step, it is a no-op.
614 if ( ! forceSendEnabled )
NAMD_bug(
"ComputeGlobal::saveTotalForces called unexpectedly");
615 if ( ! forceSendActive )
return;
// First pass (presumably a fast path selected in elided lines — TODO
// confirm): bit 1 means the atom's force is reported individually.
622 for (
int i=0; i<num; ++i) {
623 int index = atoms[i].id;
624 if (index < endRequested && isRequested[index] & 1) {
626 totalForce.
add(af[i]);
// Second pass: any request flag; f_sum is assembled in elided lines and
// zeroed for fixed atoms so they report no force.
639 for (
int i=0; i<num; ++i) {
640 int index = atoms[i].id;
642 if (index < endRequested && (reqflag = isRequested[index])) {
646 if ( fixedAtomsOn && atoms[i].atomFixed ) f_sum = 0.;
649 totalForce.
add(f_sum);
// Group membership pairs (gpi..gpend) must be sorted by atom id; a
// mismatch here means the gpair table is corrupted.
654 if ( gpi == gpend || gpi->
first != index )
655 NAMD_bug(
"ComputeGlobal::saveTotalForces gpair corrupted.");
// An atom may belong to several groups; add its force to each.
658 groupTotalForce[gpi->
second] += f_sum;
659 }
while ( ++gpi != gpend && gpi->
first == index );
void recvResults(ComputeGlobalResultsMsg *)
const Elem * const_iterator
static PatchMap * Object()
BigRealList gridobjvalue
Partial values of the GridForce objects from this message.
#define ADD_TENSOR_OBJECT(R, RL, D)
SimParameters * simParameters
ComputeHomePatchList patchList
static __thread atom * atoms
std::ostream & endi(std::ostream &s)
SubmitReduction * willSubmit(int setID, int size=-1)
void saveTotalForces(HomePatch *)
static ReductionMgr * Object(void)
void get_gridfrc_params(Real &k, Charge &q, int atomnum, int gridnum) const
ComputeGlobal(ComputeID, ComputeMgr *)
ResizeArray< Force > ForceList
ResizeArrayIter< T > end(void) const
void NAMD_bug(const char *err_msg)
LocalID localID(AtomID id)
ResizeArray< Lattice > lat
IntList gridobjindex
Indices of the GridForce objects contained in this message.
void setall(const Elem &elem)
__global__ void const int const TileList *__restrict__ TileExcl *__restrict__ const int *__restrict__ const int const float2 *__restrict__ cudaTextureObject_t const int *__restrict__ const float3 const float3 const float3 const float4 *__restrict__ const float cudaTextureObject_t cudaTextureObject_t float const PatchPairRecord *__restrict__ const int *__restrict__ const int2 *__restrict__ const unsigned int *__restrict__ unsigned int *__restrict__ int *__restrict__ int *__restrict__ TileListStat *__restrict__ const BoundingBox *__restrict__ float *__restrict__ float *__restrict__ float *__restrict__ float *__restrict__ float *__restrict__ float *__restrict__ float *__restrict__ float *__restrict__ const int numPatches
GridforceGrid * get_gridfrc_grid(int gridnum) const
void enableComputeGlobalResults()
static AtomMap * Object()
Tensor outer(const Vector &v1, const Vector &v2)
int add(const Elem &elem)
BlockRadixSort::TempStorage sort
int numPatches(void) const
void swap(ResizeArray< Elem > &ra)
Position reverse_transform(Position data, const Transform &t) const
#define ADD_VECTOR_OBJECT(R, RL, D)
int count
Numer of atoms processed for this message.
k< npairi;++k){TABENERGY(const int numtypes=simParams->tableNumTypes;const float table_spacing=simParams->tableSpacing;const int npertype=(int)(namdnearbyint(simParams->tableMaxDist/simParams->tableSpacing)+1);) int table_i=(r2iilist[2 *k] >> 14)+r2_delta_expc;const int j=pairlisti[k];#define p_j BigReal diffa=r2list[k]-r2_table[table_i];#define table_four_i TABENERGY(register const int tabtype=-1-(lj_pars->A< 0?lj_pars->A:0);) BigReal kqq=kq_i *p_j-> charge
void sendComputeGlobalData(ComputeGlobalDataMsg *)
ForceList f[Results::maxNumForces]
int patchcount
Number of patches processed for this message.
ResizeArrayIter< T > begin(void) const
Bool is_atom_gridforced(int atomnum, int gridnum) const