OptPmeCompute Class Reference

#include <OptPme.h>

Inheritance diagram for OptPmeCompute: derives from ComputeHomePatches, which derives from Compute.

Public Member Functions

 OptPmeCompute (ComputeID c)
virtual ~OptPmeCompute ()
void doWork ()
void doWorkOnPeer ()
void sendPencils ()
void copyPencils (OptPmeGridMsg *)
void ungridForces_init ()
void ungridForces_compute (int istart, int iend)
void ungridForces_finalize ()
void setMgr (OptPmeMgr *mgr)
int getNumLocalAtoms ()

Public Attributes

double * zline_storage
float * sp_zstorage

Detailed Description

Definition at line 53 of file OptPme.h.


Constructor & Destructor Documentation

OptPmeCompute::OptPmeCompute ( ComputeID  c  ) 

Definition at line 589 of file OptPme.C.

References DebugM, Node::Object(), ReductionMgr::Object(), REDUCTIONS_BASIC, Node::simParameters, simParams, ComputeHomePatches::useAvgPositions, and ReductionMgr::willSubmit().

00589                                         :
00590   ComputeHomePatches(c)
00591 {
00592   DebugM(4,"OptPmeCompute created.\n");
00593 
00594   CProxy_OptPmeMgr::ckLocalBranch(
00595         CkpvAccess(BOCclass_group).computePmeMgr)->setCompute(this);
00596 
00597   _initialized = false;
00598 
00599   useAvgPositions = 1;
00600   localResults = NULL;
00601 
00602   reduction = ReductionMgr::Object()->willSubmit(REDUCTIONS_BASIC);
00603   SimParameters *simParams = Node::Object()->simParameters;
00604 }

OptPmeCompute::~OptPmeCompute (  )  [virtual]

Definition at line 804 of file OptPme.C.

References zline_storage.

00805 {
00806   delete [] zline_storage;
00807   delete [] sp_zstorage;
00808   delete [] q_arr;
00809 }


Member Function Documentation

void OptPmeCompute::copyPencils ( OptPmeGridMsg *  msg  ) 

Definition at line 983 of file OptPme.C.

References PmeGrid::dim2, OptPmeGridMsg::qgrid, OptPmeGridMsg::xlen, OptPmeGridMsg::xstart, OptPmeGridMsg::ylen, and OptPmeGridMsg::ystart.

Referenced by OptPmeMgr::recvUngrid().

00983                                                   {
00984 
00985   if (!_initialized) initializeOptPmeCompute();
00986 
00987   int ibegin = msg->xstart;
00988   int iend   = msg->xstart + msg->xlen;
00989   int jbegin = msg->ystart;
00990   int jend   = msg->ystart + msg->ylen;
00991   int fcount = zlen * msg->xlen * msg->ylen;
00992 
00993   float *qmsg = msg->qgrid;
00994   double *data = q_arr[ibegin * myGrid.dim2 + jbegin];  
00995 
00996 #pragma disjoint (*qmsg, *data)
00997 #pragma unroll(8)
00998   for ( int k=0; k<fcount; ++k ) 
00999     data[k] = *(qmsg++);
01000 }
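The single flat copy above is valid only if the z-lines covered by the message are contiguous in memory, so that data[k] for k in [0, fcount) spans the whole xlen × ylen × zlen block starting at q_arr[ibegin * dim2 + jbegin]. A minimal sketch of that presumed layout, with hypothetical setup code (the actual allocation happens in initializeOptPmeCompute()):

    // Presumed layout: each (i,j) grid column owns zlen contiguous doubles
    // inside zline_storage, and consecutive columns are adjacent in memory.
    double  *zline_storage = new double[nzlines * zlen];
    double **q_arr         = new double*[myGrid.K1 * myGrid.dim2];
    int line = 0;
    for (int i = xstart; i < xstart + xlen; ++i)
      for (int j = ystart; j < ystart + ylen; ++j)
        q_arr[i * myGrid.dim2 + j] = zline_storage + (line++) * zlen;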

void OptPmeCompute::doWork (  )  [virtual]

Reimplemented from Compute.

Definition at line 812 of file OptPme.C.

References OptPmeMgr::_iter, ResizeArrayIter< T >::begin(), PmeParticle::cg, DebugM, ResizeArrayIter< T >::end(), ComputeNonbondedUtil::ewaldcof, many_to_many_start, PmeGrid::order, ComputeHomePatches::patchList, scale_n_copy_coordinates(), SQRT_PI, OptPmeMgr::subcompute_msgs, SubmitReduction::submit(), and x.

00813 {
00814   DebugM(4,"Entering OptPmeCompute::doWork().\n");
00815 
00816 #ifdef TRACE_COMPUTE_OBJECTS
00817   double traceObjStartTime = CmiWallTimer();
00818 #endif
00819 
00820   if (!_initialized) initializeOptPmeCompute();
00821 
00822   ResizeArrayIter<PatchElem> ap(patchList);
00823 
00824   // Skip computations if nothing to do.
00825   if ( ! patchList[0].p->flags.doFullElectrostatics )
00826   {
00827     for (ap = ap.begin(); ap != ap.end(); ap++) {
00828       CompAtom *x = (*ap).positionBox->open();
00829       Results *r = (*ap).forceBox->open();
00830       (*ap).positionBox->close(&x);
00831       (*ap).forceBox->close(&r);
00832     }
00833     reduction->submit();
00834     return;
00835   }
00836 
00837   myMgr->_iter ++;  //this is a pme step
00838 
00839   // allocate storage
00840   numLocalAtoms = 0;
00841   for (ap = ap.begin(); ap != ap.end(); ap++) {
00842     numLocalAtoms += (*ap).p->getNumAtoms();
00843   }
00844 
00845   Lattice &lattice = patchList[0].p->flags.lattice;
00846 
00847   localData = new PmeParticle[numLocalAtoms];
00848   // get positions and charges
00849   PmeParticle * data_ptr = localData;
00850 
00851   int natoms = 0;
00852   //  if (myMgr->constant_pressure)
00853   //resetPatchCoordinates(lattice);  //Update patch coordinates with new lattice
00854 
00855   for (ap = ap.begin(); ap != ap.end(); ap++) {
00856 #ifdef NETWORK_PROGRESS
00857     CmiNetworkProgress();
00858 #endif
00859     
00860     CompAtom *x = (*ap).positionBox->open();
00861     if ( patchList[0].p->flags.doMolly ) {
00862       (*ap).positionBox->close(&x);
00863       x = (*ap).avgPositionBox->open();
00864     }
00865     int numAtoms = (*ap).p->getNumAtoms();        
00866     int order_1  = myGrid.order - 1;
00867     scale_n_copy_coordinates(x, localData, numAtoms, 
00868                              lattice, myGrid,
00869                              xstart + order_1, xlen - order_1,
00870                              ystart + order_1, ylen - order_1,
00871                              zstart + order_1, zlen - order_1,
00872                              strayChargeErrors);
00873     natoms += numAtoms;
00874     
00875     if ( patchList[0].p->flags.doMolly ) { (*ap).avgPositionBox->close(&x); }
00876     else { (*ap).positionBox->close(&x); }
00877   }
00878 
00879   numLocalAtoms = natoms;  //Exclude all atoms out of range
00880 
00881   // calculate self energy
00882   BigReal ewaldcof = ComputeNonbondedUtil::ewaldcof;
00883   evir = 0;
00884   BigReal selfEnergy = 0;
00885   data_ptr = localData;
00886   int i;
00887   for(i=0; i<numLocalAtoms; ++i)
00888   {
00889     selfEnergy += data_ptr->cg * data_ptr->cg;
00890     ++data_ptr;
00891   }
00892   selfEnergy *= -1. * ewaldcof / SQRT_PI;
00893   evir[0] += selfEnergy;
00894 
00895 #if 0
00896   if (myMgr->_iter > many_to_many_start) {
00897     OptPmeSubComputeMsg *smsg = myMgr->subcompute_msgs[1]; //not self
00898     CProxy_OptPmeMgr pmeProxy (CkpvAccess(BOCclass_group).computePmeMgr);
00899     pmeProxy[CmiNodeFirst(CmiMyNode())+smsg->dest].doWorkOnPeer(smsg);
00900   }
00901   else
00902 #endif
00903     doWorkOnPeer();
00904 
00905 #ifdef TRACE_COMPUTE_OBJECTS
00906   traceUserBracketEvent(TRACE_COMPOBJ_IDOFFSET+this->cid, traceObjStartTime, CmiWallTimer());
00907 #endif
00908 
00909 }
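The self-energy loop above applies the standard Ewald self-interaction correction. With ewaldcof denoting the Ewald coefficient \alpha and cg the per-atom (scaled) charge, the quantity folded into evir[0] is

    E_{self} = -\frac{\alpha}{\sqrt{\pi}} \sum_i q_i^2

which is why the loop needs only the charges, not the positions.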

void OptPmeCompute::doWorkOnPeer (  ) 

Definition at line 911 of file OptPme.C.

References OptPmeMgr::_iter, OptPmeMgr::constant_pressure, OptPmeRealSpace::fill_charges(), OptPmeMgr::handle, many_to_many_start, ComputeHomePatches::patchList, PHASE_GR, pme_d2f(), sendPencils(), OptPmeMgr::xPencil, and zline_storage.

Referenced by OptPmeMgr::doWorkOnPeer().

00912 {
00913   Lattice &lattice = patchList[0].p->flags.lattice;
00914   double **q = q_arr;
00915   memset( (void*) zline_storage, 0, zlen * nzlines * sizeof(double) );
00916 
00917   myRealSpace = new OptPmeRealSpace(myGrid,numLocalAtoms);
00918   if (!strayChargeErrors)
00919     myRealSpace->fill_charges(q, localData, zstart, zlen);
00920 
00921   if (myMgr->constant_pressure && patchList[0].patchID == 0)
00922     myMgr->xPencil.recvLattice (lattice);
00923   
00924   if (myMgr->_iter <= many_to_many_start)
00925     sendPencils();
00926   else {
00927     pme_d2f (sp_zstorage, zline_storage, nzlines * zlen);
00928     CmiDirect_manytomany_start (myMgr->handle, PHASE_GR);
00929   }
00930 }
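When the many-to-many path is active, pme_d2f() repacks the accumulated double-precision charge grid into the float staging buffer sp_zstorage before CmiDirect_manytomany_start() ships it to the pencils; pme_f2d() performs the inverse widening when data comes back (see ungridForces_init()). A minimal sketch of the assumed conversion, which the real routine in OptPme.C may implement in an unrolled or vectorized form:

    // Assumed behavior of pme_d2f: narrow n grid values from double to
    // float so the transposed data crosses the network at half the width.
    static void pme_d2f(float *dst, const double *src, int n) {
      for (int i = 0; i < n; ++i)
        dst[i] = (float) src[i];
    }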

int OptPmeCompute::getNumLocalAtoms (  )  [inline]

Definition at line 66 of file OptPme.h.

Referenced by OptPmeMgr::ungridCalc().

00066 { return numLocalAtoms; }

void OptPmeCompute::sendPencils (  ) 

Definition at line 934 of file OptPme.C.

References PmeGrid::block1, PmeGrid::block2, PmeGrid::dim2, PmeGrid::dim3, PmeGrid::K1, PmeGrid::K2, OptPmeGridMsg::patchID, ComputeHomePatches::patchList, PRIORITY_SIZE, OptPmeGridMsg::qgrid, ResizeArray< Elem >::size(), OptPmeGridMsg::sourceNode, PmeGrid::xBlocks, OptPmeGridMsg::xlen, OptPmeGridMsg::xstart, PmeGrid::yBlocks, OptPmeGridMsg::ylen, OptPmeGridMsg::ystart, PmeGrid::zBlocks, OptPmeGridMsg::zlen, OptPmeMgr::zPencil, and OptPmeGridMsg::zstart.

Referenced by doWorkOnPeer().

00934                                 {  
00935 
00936   //iout << iPE << " Sending charge grid for " << numLocalAtoms << " atoms to FFT with " << myMgr->numPencilsActive << " messages" <<".\n" << endi;
00937 
00938   int xBlocks = myGrid.xBlocks;
00939   int yBlocks = myGrid.yBlocks;
00940   int zBlocks = myGrid.zBlocks;
00941 
00942   int K1 = myGrid.K1;
00943   int K2 = myGrid.K2;
00944   int dim2 = myGrid.dim2;
00945   int dim3 = myGrid.dim3;
00946   int block1 = myGrid.block1;
00947   int block2 = myGrid.block2;
00948 
00949   //Lattice lattice = patchList[0].p->flags.lattice;
00950 
00951   int nactive = 0;  
00952   for (int idx = 0; idx < pencilVec.size(); idx++) {
00953     int xstart = pencilVec[idx].xmin;
00954     int ystart = pencilVec[idx].ymin;
00955     int xlen   = pencilVec[idx].xmax - pencilVec[idx].xmin + 1;
00956     int ylen   = pencilVec[idx].ymax - pencilVec[idx].ymin + 1;
00957     int ib     = pencilVec[idx].ib;
00958     int jb     = pencilVec[idx].jb;
00959     double *data = pencilVec[idx].data;
00960     
00961     int fcount = xlen * ylen * zlen;
00962     OptPmeGridMsg *msg = new (fcount, PRIORITY_SIZE) OptPmeGridMsg;
00963     msg->zstart = zstart;
00964     msg->zlen   = zlen;
00965     msg->xstart = xstart;
00966     msg->xlen   = xlen;
00967     msg->ystart = ystart;
00968     msg->ylen   = ylen;
00969     msg->sourceNode = CkMyPe();
00970     msg->patchID    = patchList[0].patchID;
00971     
00972     float *qmsg = msg->qgrid;   
00973 #pragma disjoint (*data, *qmsg)
00974 #pragma unroll(8)
00975     for ( int k=0; k< fcount; ++k ) 
00976       *(qmsg++) = data[k];    
00977     
00978     myMgr->zPencil(ib,jb,0).recvGrid(msg);
00979   }
00980 }
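The expression new (fcount, PRIORITY_SIZE) OptPmeGridMsg uses Charm++ varsize-message allocation: the first argument sizes the variable-length qgrid array and the second reserves bits for the message priority field. A hedged sketch of the matching interface declaration (assumed; the real declaration lives in the module's .ci file):

    // Presumed .ci declaration: qgrid[] is the varsize payload whose
    // length is fixed by the first argument passed to operator new.
    message OptPmeGridMsg {
      float qgrid[];
    };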

void OptPmeCompute::setMgr ( OptPmeMgr mgr  )  [inline]

Definition at line 64 of file OptPme.h.

Referenced by OptPmeMgr::setCompute().

00064 { myMgr = mgr; }

void OptPmeCompute::ungridForces_compute ( int  istart, int  iend ) 

Definition at line 1013 of file OptPme.C.

References OptPmeRealSpace::compute_forces(), Node::Object(), ComputeHomePatches::patchList, scale_forces(), Node::simParameters, and simParams.

Referenced by OptPmeMgr::ungridCalc(), and OptPmeMgr::ungridCalc_subcompute().

01015 {
01016     Vector *gridResults;
01017     gridResults = localResults;
01018 
01019     if (iend == 0)
01020       iend = numLocalAtoms;
01021     
01022     SimParameters *simParams = Node::Object()->simParameters;
01023     Vector pairForce = 0.;
01024     Lattice &lattice = patchList[0].p->flags.lattice;
01025     if(!simParams->commOnly) {
01026 #ifdef NETWORK_PROGRESS
01027       CmiNetworkProgress();
01028 #endif      
01029       if (!strayChargeErrors) {
01030         myRealSpace->compute_forces(q_arr, localData, gridResults, 
01031                                     zstart, zlen, istart, iend);
01032         scale_forces(gridResults + istart, iend - istart, lattice);
01033       }
01034     }
01035 }
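Passing iend == 0 makes the call cover all numLocalAtoms atoms; otherwise callers can interpolate forces in slices, which is presumably how OptPmeMgr::ungridCalc_subcompute() overlaps the work. A sketch of the three-phase calling sequence (the chunk size here is hypothetical; the real loop lives in OptPmeMgr):

    // Illustrative driver for the init/compute/finalize split.
    compute->ungridForces_init();
    int n = compute->getNumLocalAtoms();
    const int chunk = 512;                    // hypothetical grain size
    for (int istart = 0; istart < n; istart += chunk) {
      int iend = (istart + chunk < n) ? (istart + chunk) : n;
      compute->ungridForces_compute(istart, iend);
    }
    compute->ungridForces_finalize();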

void OptPmeCompute::ungridForces_finalize (  ) 

Definition at line 1037 of file OptPme.C.

References ResizeArrayIter< T >::begin(), ResizeArrayIter< T >::end(), f, SubmitReduction::item(), Node::Object(), ComputeHomePatches::patchList, REDUCTION_ELECT_ENERGY_SLOW, REDUCTION_STRAY_CHARGE_ERRORS, Node::simParameters, simParams, Results::slow, SubmitReduction::submit(), Vector::x, x, Vector::y, and Vector::z.

Referenced by OptPmeMgr::ungridCalc(), and OptPmeMgr::ungridCalc_subcompute_done().

01037                                           {
01038     SimParameters *simParams = Node::Object()->simParameters;
01039     delete myRealSpace;
01040     
01041     delete [] localData;
01042     //    delete [] localPartition;
01043     
01044     Vector *results_ptr = localResults;
01045     ResizeArrayIter<PatchElem> ap(patchList);
01046     
01047     // add in forces
01048     for (ap = ap.begin(); ap != ap.end(); ap++) {
01049       Results *r = (*ap).forceBox->open();
01050       Force *f = r->f[Results::slow];
01051       int numAtoms = (*ap).p->getNumAtoms();
01052       
01053       if ( ! strayChargeErrors && ! simParams->commOnly ) {
01054         for(int i=0; i<numAtoms; ++i) {
01055           f[i].x += results_ptr->x;
01056           f[i].y += results_ptr->y;
01057           f[i].z += results_ptr->z;
01058           ++results_ptr;
01059         }
01060       }
01061   
01062       (*ap).forceBox->close(&r);
01063     }
01064 
01065     delete [] localResults;
01066    
01067     double scale = 1.;
01068 
01069     reduction->item(REDUCTION_ELECT_ENERGY_SLOW) += evir[0] * scale;
01070     reduction->item(REDUCTION_STRAY_CHARGE_ERRORS) += strayChargeErrors;
01071     strayChargeErrors = 0;
01072     reduction->submit();
01073 }

void OptPmeCompute::ungridForces_init (  ) 

Definition at line 1002 of file OptPme.C.

References OptPmeMgr::_iter, many_to_many_start, pme_f2d(), and zline_storage.

Referenced by OptPmeMgr::ungridCalc().

01002                                       {
01003 
01004     //printf ("%d: In OptPMECompute::ungridforces_init\n", CkMyPe());
01005 
01006     if (myMgr->_iter > many_to_many_start)
01007       pme_f2d (zline_storage, sp_zstorage, nzlines * zlen);
01008 
01009     localResults = new Vector[numLocalAtoms];
01010     //memset (localResults, 0, sizeof (Vector) * numLocalAtoms);
01011 }


Member Data Documentation

float* OptPmeCompute::sp_zstorage

Definition at line 69 of file OptPme.h.

double* OptPmeCompute::zline_storage

Definition at line 68 of file OptPme.h.

Referenced by doWorkOnPeer(), ungridForces_init(), and ~OptPmeCompute().


The documentation for this class was generated from the following files:

OptPme.h
OptPme.C

Generated on Sun Sep 24 01:17:19 2017 for NAMD by doxygen 1.4.7