NAMD
#include <CudaComputeNonbondedKernel.h>
Public Member Functions

CudaComputeNonbondedKernel (int deviceID, CudaNonbondedTables &cudaNonbondedTables, bool doStreaming)
~CudaComputeNonbondedKernel ()
void updateVdwTypesExcl (const int atomStorageSize, const int *h_vdwTypes, const int2 *h_exclIndexMaxDiff, const int *h_atomIndex, cudaStream_t stream)
void updateVdwTypesExclOnGPU (CudaTileListKernel &tlKernel, const int numPatches, const int atomStorageSize, const bool alchOn, CudaLocalRecord *localRecords, const int *d_vdwTypes, const int *d_id, const int *d_sortOrder, const int *d_partition, cudaStream_t stream)
void nonbondedForce (CudaTileListKernel &tlKernel, const int atomStorageSize, const bool atomsChanged, const bool doMinimize, const bool doPairlist, const bool doEnergy, const bool doVirial, const bool doSlow, const bool doAlch, const bool doAlchVdwForceSwitching, const bool doFEP, const bool doTI, const bool doNbThole, const bool doTable, const float3 lata, const float3 latb, const float3 latc, const float4 *h_xyzq, const float cutoff2, const CudaNBConstants nbConstants, float4 *d_forces, float4 *d_forcesSlow, float4 *h_forces, float4 *h_forcesSlow, AlchData *fepFlags, bool lambdaWindowUpdated, char *part, bool CUDASOAintegratorOn, bool useDeviceMigration, const float drudeNbtholeCut2, cudaStream_t stream)
void reduceVirialEnergy (CudaTileListKernel &tlKernel, const int atomStorageSize, const bool doEnergy, const bool doVirial, const bool doSlow, const bool doGBIS, float4 *d_forces, float4 *d_forcesSlow, VirialEnergy *d_virialEnergy, cudaStream_t stream)
void getVirialEnergy (VirialEnergy *h_virialEnergy, cudaStream_t stream)
void bindExclusions (int numExclusions, unsigned int *exclusion_bits)
int * getPatchReadyQueue ()
void reallocate_forceSOA (int atomStorageSize)
void setExclusionsByAtom (int2 *h_data, const int num_atoms)
void updateDrudeData (const int atomStorageSize, const float *h_drudeAtomAlpha, const int *h_isDrude, cudaStream_t stream)
Static Public Member Functions

static __device__ __host__ __forceinline__ int computeNumTiles (const int numAtoms, const int tilesize=WARPSIZE)
static __device__ __host__ __forceinline__ int computeAtomPad (const int numAtoms, const int tilesize=WARPSIZE)
Definition at line 52 of file CudaComputeNonbondedKernel.h.
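The member list above is only a declaration summary. As a rough, hedged illustration of how these members fit together, the sketch below assumes NAMD's CUDA build environment (the class is normally constructed and driven by CudaComputeNonbonded); the function setupNonbondedKernel and all of its parameters are hypothetical stand-ins for state that the owning object manages.

#include <CudaComputeNonbondedKernel.h>

// Hypothetical setup routine; every argument is assumed to be prepared
// elsewhere in NAMD (host arrays, exclusion bitmap, CUDA stream, ...).
void setupNonbondedKernel(int deviceID, CudaNonbondedTables &tables,
                          cudaStream_t stream,
                          int numExclusions, unsigned int *exclusionBits,
                          int2 *exclusionsByAtom, int numAtoms,
                          int atomStorageSize,
                          const int *h_vdwTypes,
                          const int2 *h_exclIndexMaxDiff,
                          const int *h_atomIndex)
{
  // Construct once per device; doStreaming selects the patch-streaming path.
  CudaComputeNonbondedKernel kernel(deviceID, tables, /*doStreaming=*/true);

  // One-time exclusion setup before any force evaluation (assumed ordering).
  kernel.setExclusionsByAtom(exclusionsByAtom, numAtoms);
  kernel.bindExclusions(numExclusions, exclusionBits);

  // Refresh per-atom VdW types and exclusion indices via the host path;
  // updateVdwTypesExclOnGPU is the device-resident alternative.
  kernel.updateVdwTypesExcl(atomStorageSize, h_vdwTypes, h_exclIndexMaxDiff,
                            h_atomIndex, stream);
}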
CudaComputeNonbondedKernel::CudaComputeNonbondedKernel (int deviceID, CudaNonbondedTables &cudaNonbondedTables, bool doStreaming)

CudaComputeNonbondedKernel::~CudaComputeNonbondedKernel ()
void CudaComputeNonbondedKernel::bindExclusions (int numExclusions, unsigned int *exclusion_bits)
static __device__ __host__ __forceinline__ int CudaComputeNonbondedKernel::computeAtomPad (const int numAtoms, const int tilesize = WARPSIZE)

inline static

Definition at line 101 of file CudaComputeNonbondedKernel.h.

References computeNumTiles().
static __device__ __host__ __forceinline__ int CudaComputeNonbondedKernel::computeNumTiles (const int numAtoms, const int tilesize = WARPSIZE)

inline static

Definition at line 96 of file CudaComputeNonbondedKernel.h.

Referenced by computeAtomPad().
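The header-only bodies of these two helpers are not reproduced in this reference. A minimal standalone sketch, assuming the usual ceiling division of the atom count into WARPSIZE-wide tiles (an assumption, not a copy of the NAMD source), would be:

// Sketch only: plausible reimplementation of the two inline helpers.
// computeNumTiles rounds the atom count up to a whole number of tiles;
// computeAtomPad converts that back to a padded atom count.
#include <cstdio>

#ifndef WARPSIZE
#define WARPSIZE 32   // assumed warp width; NAMD defines this per architecture
#endif

static inline int computeNumTiles(const int numAtoms, const int tilesize = WARPSIZE) {
  return (numAtoms + tilesize - 1) / tilesize;            // ceil(numAtoms / tilesize)
}

static inline int computeAtomPad(const int numAtoms, const int tilesize = WARPSIZE) {
  return computeNumTiles(numAtoms, tilesize) * tilesize;  // padded to a tile multiple
}

int main() {
  // With WARPSIZE = 32, 100 atoms occupy 4 tiles and pad to 128 slots.
  printf("%d tiles, %d padded atoms\n", computeNumTiles(100), computeAtomPad(100));
  return 0;
}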
int * CudaComputeNonbondedKernel::getPatchReadyQueue ()
Referenced by CudaComputeNonbonded::launchWork().
void CudaComputeNonbondedKernel::getVirialEnergy (VirialEnergy *h_virialEnergy, cudaStream_t stream)
void CudaComputeNonbondedKernel::nonbondedForce (CudaTileListKernel &tlKernel,
    const int atomStorageSize,
    const bool atomsChanged,
    const bool doMinimize,
    const bool doPairlist,
    const bool doEnergy,
    const bool doVirial,
    const bool doSlow,
    const bool doAlch,
    const bool doAlchVdwForceSwitching,
    const bool doFEP,
    const bool doTI,
    const bool doNbThole,
    const bool doTable,
    const float3 lata,
    const float3 latb,
    const float3 latc,
    const float4 *h_xyzq,
    const float cutoff2,
    const CudaNBConstants nbConstants,
    float4 *d_forces,
    float4 *d_forcesSlow,
    float4 *h_forces,
    float4 *h_forcesSlow,
    AlchData *fepFlags,
    bool lambdaWindowUpdated,
    char *part,
    bool CUDASOAintegratorOn,
    bool useDeviceMigration,
    const float drudeNbtholeCut2,
    cudaStream_t stream)
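As a hedged illustration only, a call for a plain (non-alchemical) step that wants energies, virials, and slow forces might look like the following; every variable is a hypothetical stand-in for state that the owning CudaComputeNonbonded object normally prepares, and the flag values are illustrative, not prescribed.

// Hypothetical call site; tlKernel, lattice vectors, coordinate and force
// buffers, and nbConstants are assumed to be set up elsewhere.
kernel.nonbondedForce(tlKernel, atomStorageSize,
                      /*atomsChanged=*/false, /*doMinimize=*/false,
                      /*doPairlist=*/false, /*doEnergy=*/true,
                      /*doVirial=*/true, /*doSlow=*/true,
                      /*doAlch=*/false, /*doAlchVdwForceSwitching=*/false,
                      /*doFEP=*/false, /*doTI=*/false,
                      /*doNbThole=*/false, /*doTable=*/true,
                      lata, latb, latc, h_xyzq, cutoff2, nbConstants,
                      d_forces, d_forcesSlow, h_forces, h_forcesSlow,
                      /*fepFlags=*/nullptr, /*lambdaWindowUpdated=*/false,
                      /*part=*/nullptr, /*CUDASOAintegratorOn=*/true,
                      /*useDeviceMigration=*/true, drudeNbtholeCut2, stream);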
void CudaComputeNonbondedKernel::reallocate_forceSOA (int atomStorageSize)
void CudaComputeNonbondedKernel::reduceVirialEnergy (CudaTileListKernel &tlKernel,
    const int atomStorageSize,
    const bool doEnergy,
    const bool doVirial,
    const bool doSlow,
    const bool doGBIS,
    float4 *d_forces,
    float4 *d_forcesSlow,
    VirialEnergy *d_virialEnergy,
    cudaStream_t stream)
Referenced by CudaComputeNonbonded::launchWork().
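Retrieving the reduced results on the host presumably follows the reduction on the same stream; a hedged sketch with hypothetical buffer names:

// Hypothetical follow-up to nonbondedForce on the same CUDA stream:
// reduce into a device-side VirialEnergy record, start the copy back to the
// host, and synchronize before reading the result.
VirialEnergy h_virialEnergy;
kernel.reduceVirialEnergy(tlKernel, atomStorageSize,
                          /*doEnergy=*/true, /*doVirial=*/true,
                          /*doSlow=*/true, /*doGBIS=*/false,
                          d_forces, d_forcesSlow, d_virialEnergy, stream);
kernel.getVirialEnergy(&h_virialEnergy, stream);
cudaStreamSynchronize(stream);  // ensure the asynchronous copy has completed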
void CudaComputeNonbondedKernel::setExclusionsByAtom (int2 *h_data, const int num_atoms)
void CudaComputeNonbondedKernel::updateDrudeData (const int atomStorageSize, const float *h_drudeAtomAlpha, const int *h_isDrude, cudaStream_t stream)
void CudaComputeNonbondedKernel::updateVdwTypesExcl (const int atomStorageSize, const int *h_vdwTypes, const int2 *h_exclIndexMaxDiff, const int *h_atomIndex, cudaStream_t stream)
void CudaComputeNonbondedKernel::updateVdwTypesExclOnGPU (CudaTileListKernel &tlKernel,
    const int numPatches,
    const int atomStorageSize,
    const bool alchOn,
    CudaLocalRecord *localRecords,
    const int *d_vdwTypes,
    const int *d_id,
    const int *d_sortOrder,
    const int *d_partition,
    cudaStream_t stream)