#ifndef CUDACOMPUTENONBONDEDKERNEL_H
#define CUDACOMPUTENONBONDEDKERNEL_H

// ...

class CudaComputeNonbondedKernel {
private:
  const bool doStreaming;

  // Per-atom exclusion index ranges and overflow storage for exclusion bits
  // that do not fit in the table bound by bindExclusions().
  int2* d_exclusionsByAtom;
  unsigned int* overflowExclusions;
  size_t overflowExclusionsSize;

  int2* exclIndexMaxDiff;
  size_t exclIndexMaxDiffSize;

  unsigned int* patchNumCount;
  size_t patchNumCountSize;

  size_t patchReadyQueueSize;

  // Force accumulation buffers in structure-of-arrays (SOA) layout.
  float *force_x, *force_y, *force_z, *force_w;
  float *forceSlow_x, *forceSlow_y, *forceSlow_z, *forceSlow_w;

  // Per-atom Drude polarizability data.
  float* drudeAtomAlpha;
  size_t drudeAtomAlphaSize;

public:
  CudaComputeNonbondedKernel(int deviceID, CudaNonbondedTables& cudaNonbondedTables,
    bool doStreaming);
  ~CudaComputeNonbondedKernel();
  // Number of tiles needed to cover numAtoms atoms (ceiling division).
  static __device__ __host__ __forceinline__
  int computeNumTiles(const int numAtoms, const int tilesize = WARPSIZE) {
    return (numAtoms + tilesize - 1) / tilesize;
  }
  // Atom count padded up to the next multiple of tilesize.
  static __device__ __host__ __forceinline__
  int computeAtomPad(const int numAtoms, const int tilesize = WARPSIZE) {
    return computeNumTiles(numAtoms, tilesize) * tilesize;
  }

  void updateVdwTypesExcl(const int atomStorageSize, const int* h_vdwTypes,
    const int2* h_exclIndexMaxDiff, const int* h_atomIndex, cudaStream_t stream);
  void updateVdwTypesExclOnGPU(CudaTileListKernel& tlKernel, const int numPatches,
    const int atomStorageSize, const bool alchOn, CudaLocalRecord* localRecords,
    const int* d_vdwTypes, const int* d_id, const int* d_sortOrder,
    const int* d_partition, cudaStream_t stream);
  void nonbondedForce(CudaTileListKernel& tlKernel, const int atomStorageSize,
    const bool atomsChanged, const bool doMinimize, const bool doPairlist,
    const bool doEnergy, const bool doVirial, const bool doSlow, const bool doAlch,
    const bool doAlchVdwForceSwitching, const bool doFEP, const bool doTI,
    const bool doNbThole, const bool doTable,
    const float3 lata, const float3 latb, const float3 latc,
    const float4* h_xyzq, const float cutoff2, const CudaNBConstants nbConstants,
    float4* d_forces, float4* d_forcesSlow,
    float4* h_forces, float4* h_forcesSlow,
    AlchData* fepFlags, bool lambdaWindowUpdated, char* part,
    bool CUDASOAintegratorOn, bool useDeviceMigration,
    const float drudeNbtholeCut2, cudaStream_t stream);
  void reduceVirialEnergy(CudaTileListKernel& tlKernel, const int atomStorageSize,
    const bool doEnergy, const bool doVirial, const bool doSlow, const bool doGBIS,
    float4* d_forces, float4* d_forcesSlow, VirialEnergy* d_virialEnergy,
    cudaStream_t stream);

  void getVirialEnergy(VirialEnergy* h_virialEnergy, cudaStream_t stream);
  void bindExclusions(int numExclusions, unsigned int* exclusion_bits);

  void setExclusionsByAtom(int2* h_data, const int num_atoms);
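  // Illustration only (not a member of this class): each exclusion is a single
  // bit in the packed unsigned int array passed to bindExclusions(), so a
  // hypothetical lookup resembles
  //   bool excluded = (exclusion_bits[i >> 5] >> (i & 31)) & 1u;
  // where the bit index i is derived from exclIndexMaxDiff and the atom indices.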
  int* getPatchReadyQueue();

  void reallocate_forceSOA(int atomStorageSize);

  void updateDrudeData(const int atomStorageSize, const float* h_drudeAtomAlpha,
    const int* h_isDrude, cudaStream_t stream);
};

#endif // CUDACOMPUTENONBONDEDKERNEL_H
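The inline helpers computeNumTiles and computeAtomPad carry the tile bookkeeping used throughout the nonbonded kernels: the first is a ceiling division of the atom count by the tile size, and the second rounds the atom count up to the next tile boundary so per-atom buffers can be allocated in whole tiles. A minimal host-side sketch, assuming a 32-thread warp (WARPSIZE = 32 here is an assumption, as is the padded-size body):

#include <cstdio>

// Stand-ins for the header's inline helpers, with WARPSIZE assumed to be 32.
static int computeNumTiles(const int numAtoms, const int tilesize = 32) {
  return (numAtoms + tilesize - 1) / tilesize;            // ceiling division
}
static int computeAtomPad(const int numAtoms, const int tilesize = 32) {
  return computeNumTiles(numAtoms, tilesize) * tilesize;  // round up to a tile boundary
}

int main() {
  // 1000 atoms in 32-atom tiles -> 32 tiles, storage padded to 1024 atoms.
  printf("tiles=%d pad=%d\n", computeNumTiles(1000), computeAtomPad(1000));
  return 0;
}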
AlchData is the alchemical data structure that holds the lambda-relevant parameters for FEP/TI; nonbondedForce receives it through the fepFlags argument.
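Taken together, the public interface suggests the per-step driver pattern below: refresh per-atom types and exclusions only when atoms have migrated, launch the force kernel, then reduce virial and energy terms on the steps that request them. This is a schematic sketch, not the actual NAMD driver code; kernel is an assumed CudaComputeNonbondedKernel instance and every argument value is a placeholder:

// Schematic call order inferred from the public interface above.
if (atomsChanged) {
  kernel.updateVdwTypesExcl(atomStorageSize, h_vdwTypes,
                            h_exclIndexMaxDiff, h_atomIndex, stream);
}
kernel.nonbondedForce(tlKernel, atomStorageSize, atomsChanged, doMinimize,
                      doPairlist, doEnergy, doVirial, doSlow, doAlch,
                      doAlchVdwForceSwitching, doFEP, doTI, doNbThole, doTable,
                      lata, latb, latc, h_xyzq, cutoff2, nbConstants,
                      d_forces, d_forcesSlow, h_forces, h_forcesSlow,
                      fepFlags, lambdaWindowUpdated, part, CUDASOAintegratorOn,
                      useDeviceMigration, drudeNbtholeCut2, stream);
if (doEnergy || doVirial) {
  kernel.reduceVirialEnergy(tlKernel, atomStorageSize, doEnergy, doVirial,
                            doSlow, doGBIS, d_forces, d_forcesSlow,
                            d_virialEnergy, stream);
  kernel.getVirialEnergy(h_virialEnergy, stream);
}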
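The force buffers (force_x/y/z/w and their forceSlow counterparts) are kept in structure-of-arrays form so that per-thread accumulation stays coalesced, while d_forces and d_forcesSlow are float4 arrays. A hypothetical gather kernel showing how such SOA buffers can be repacked into the float4 layout; the kernel name and launch details are illustrative, not taken from the implementation:

// Compile as a .cu file with nvcc. Hypothetical SOA -> float4 repacking.
__global__ void gatherForcesSOA(const int atomStorageSize,
                                const float* force_x, const float* force_y,
                                const float* force_z, const float* force_w,
                                float4* d_forces) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < atomStorageSize) {
    // Four coalesced float loads become one coalesced float4 store per atom.
    d_forces[i] = make_float4(force_x[i], force_y[i], force_z[i], force_w[i]);
  }
}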