// NOTE(review): this excerpt is garbled — original file line numbers are fused
// into the text and interior lines are missing. Code below is kept byte-identical.
// DeviceData constructor fragment: zero-initializes the "slow_" patch buffers so
// that size fields and pointers start in a known empty state (presumably so later
// allocation/free logic can test them safely — TODO confirm against full source).
4 #ifdef NODEGROUP_FORCE_REGISTER 7 DeviceData::DeviceData(){
// Buffer sizes start at zero: nothing allocated yet.
22 slow_patchPositionsSize = 0;
23 slow_pencilPatchIndexSize = 0;
// Matching pointers start NULL; ownership/allocation happens elsewhere — verify.
28 slow_patchPositions = NULL;
29 slow_pencilPatchIndex = NULL;
// DeviceData destructor — body not visible in this excerpt; do not assume it is
// empty (original lines after 42 are missing from this sample).
42 DeviceData::~DeviceData(){
// Fragment of an enclosing constructor-like body (its signature is not visible
// in this excerpt). Under NODEGROUP_FORCE_REGISTER it sets up node-shared state
// for the GPU bonded-force path.
// One ComputeBondedCUDA* slot per PE; raw array is freed elsewhere — TODO confirm.
57 #ifdef NODEGROUP_FORCE_REGISTER 58 cudaBondedList =
new ComputeBondedCUDA*[CkNumPes()];
// Charm++ node-level lock guarding shared state (paired with CmiDestroyLock below).
61 nodeLock = CmiCreateLock();
// suspendCounter seeded with the number of PEs on this node; updateCounter cleared.
62 suspendCounter.store(CmiMyNodeSize());
63 updateCounter.store(0);
// Separate fragment (original lines 71-72): resets updateCounter to the number of
// device-data entries — presumably from setDeviceKernelUpdateCounter(); verify.
71 #ifdef NODEGROUP_FORCE_REGISTER 72 updateCounter.store(devData.size());
// Destructor tail (enclosing signature not visible in this excerpt). Releases
// host-side buffers allocated for the GPU-resident tuple/migration machinery.
// NOTE(review): interior original lines (81-82, 84-85, 87-89, 91) are missing
// from this sample — additional free() calls likely exist there; verify against
// the full source before editing this region.
// Host SoA migration/sort scratch arrays.
80 #ifdef NODEGROUP_FORCE_REGISTER 83 free(h_soa_sortOrder);
86 free(h_soa_migrationDestination);
90 free(h_soa_partition);
// Per-tuple-type host counts…
92 free(h_tupleCount.bond);
93 free(h_tupleCount.angle);
94 free(h_tupleCount.dihedral);
95 free(h_tupleCount.improper);
96 free(h_tupleCount.modifiedExclusion);
97 free(h_tupleCount.exclusion);
98 free(h_tupleCount.crossterm);
// …matching per-tuple-type offsets…
99 free(h_tupleOffset.bond);
100 free(h_tupleOffset.angle);
101 free(h_tupleOffset.dihedral);
102 free(h_tupleOffset.improper);
103 free(h_tupleOffset.modifiedExclusion);
104 free(h_tupleOffset.exclusion);
105 free(h_tupleOffset.crossterm);
// …and the staged tuple data itself.
106 free(h_tupleDataStage.bond);
107 free(h_tupleDataStage.angle);
108 free(h_tupleDataStage.dihedral);
109 free(h_tupleDataStage.improper);
110 free(h_tupleDataStage.modifiedExclusion);
111 free(h_tupleDataStage.exclusion);
112 free(h_tupleDataStage.crossterm);
// Release the node lock created in the constructor (CmiCreateLock).
113 CmiDestroyLock(nodeLock);
118 #include "PatchData.def.h"
Object used to perform intra-node broadcasts in the GPU-resident code path.
NodeReduction * reductionBackend
NodeReduction * reductionBackendSave
CollectionMaster * ptrCollectionMaster
NodeBroadcast * nodeBroadcast
void setDeviceKernelUpdateCounter()