// ...
#include "ComputeMgr.decl.h"
// ...
#define MIN_DEBUG_LEVEL 3
// ...
#define USE_GLOBALMASTER_VIRIAL_KERNEL 1

// ComputeGlobal constructor: ComputeGlobal(ComputeID, ComputeMgr *)
// ...
  DebugM(3,"Constructing client\n");
  // ...
  isRequestedAllocSize = 0;
  // ...
  numGroupsRequested = 0;
  // ...
  if ( sp->colvarsOn ) forceSendEnabled = 1;
  // ...
  forcePtrs = new Force*[numPatches];
  atomPtrs = new FullAtom*[numPatches];
  for ( int i = 0; i < numPatches; ++i ) { forcePtrs[i] = 0; atomPtrs[i] = 0; }
  // ...
#ifdef NODEGROUP_FORCE_REGISTER
  CProxy_PatchData cpdata(CkpvAccess(BOCclass_group).patchData);
  PatchData *patchData = cpdata.ckLocalBranch();
  // ...
  mass_soa = new float*[numPatches];
  pos_soa_x = new double*[numPatches];
  pos_soa_y = new double*[numPatches];
  pos_soa_z = new double*[numPatches];
  force_soa_x = new double*[numPatches];
  force_soa_y = new double*[numPatches];
  force_soa_z = new double*[numPatches];
  transform_soa_i = new int*[numPatches];
  transform_soa_j = new int*[numPatches];
  transform_soa_k = new int*[numPatches];
  for ( int i = 0; i < numPatches; ++i ) {
    // ... (mass/pos entries nulled here as well)
    force_soa_x[i] = NULL;
    force_soa_y[i] = NULL;
    force_soa_z[i] = NULL;
    transform_soa_i[i] = NULL;
    transform_soa_j[i] = NULL;
    transform_soa_k[i] = NULL;
  }
  // ... (alternate branch: the SOA tables themselves are set to NULL)
  transform_soa_i = NULL;
  transform_soa_j = NULL;
  transform_soa_k = NULL;
  // ... (matching #endif elided)
  gridForcesPtrs = new ForceList **[numPatches];
  numGridObjects = numActiveGridObjects = 0;
  for ( int i = 0; i < numPatches; ++i ) {
    forcePtrs[i] = NULL; atomPtrs[i] = NULL;
    gridForcesPtrs[i] = NULL;
  }
}
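// The constructor only allocates the per-patch pointer tables; the entries
// are filled lazily (doWork() and recvResults() cache each patch's
// patchDataSOA arrays below) and are reset to NULL when the patch boxes are
// closed, so a stale pointer never outlives its patch data.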
// ComputeGlobal destructor
// ...
  delete[] isRequested;
  // ...
  delete[] gridForcesPtrs;
  // ...
  if (mass_soa) delete [] mass_soa;
  if (pos_soa_x) delete [] pos_soa_x;
  if (pos_soa_y) delete [] pos_soa_y;
  if (pos_soa_z) delete [] pos_soa_z;
  if (force_soa_x) delete [] force_soa_x;
  if (force_soa_y) delete [] force_soa_y;
  if (force_soa_z) delete [] force_soa_z;
  if (transform_soa_i) delete [] transform_soa_i;
  if (transform_soa_j) delete [] transform_soa_j;
  if (transform_soa_k) delete [] transform_soa_k;
  // ...
}
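// isRequested is one byte per atom id, used as a bitmask throughout this
// file: bit 0 (value 1) marks an atom whose individual total force was
// requested, bit 1 (value 2) marks an atom that belongs to a requested
// group; saveTotalForces() tests (reqflag & 1) and (reqflag & 2). An
// illustrative sketch of the convention (these names are not part of the
// source):
//
//   enum { REQ_ATOM_FORCE = 1, REQ_GROUP_MEMBER = 2 };
//   isRequested[id] |= REQ_GROUP_MEMBER;       // configure(), group pass
//   if ( reqflag & REQ_ATOM_FORCE ) { ... }    // saveTotalForces()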
// ComputeGlobal::configure(): receive the requested atom ids (newaid),
// group definitions (newgdef), and grid object ids (newgridobjid)
// ...
  DebugM(4,"Receiving configuration (" << newaid.size() << " atoms, "
         << newgdef.size() << " atoms/groups and "
         << newgridobjid.size() << " grid objects) on client\n" << endi);
  // ...
  if ( forceSendEnabled ) {
    // ...
    for (a=newaid.begin(),a_e=newaid.end(); a!=a_e; ++a) {
      if ( *a > max ) max = *a;
    }
    for (a=newgdef.begin(),a_e=newgdef.end(); a!=a_e; ++a) {
      if ( *a > max ) max = *a;
    }
    endRequested = max+1;
    if ( endRequested > isRequestedAllocSize ) {
      delete [] isRequested;
      isRequestedAllocSize = endRequested+10;
      isRequested = new char[isRequestedAllocSize];
      memset(isRequested, 0, isRequestedAllocSize);
    }
    for (a=aid.begin(),a_e=aid.end(); a!=a_e; ++a) {
      // ...
    }
    for (a=gdef.begin(),a_e=gdef.end(); a!=a_e; ++a) {
      if ( *a != -1 ) isRequested[*a] = 0;
    }
  }
  // ...
  if (newgridobjid.size()) configureGridObjects(newgridobjid);
  // ...
  if ( forceSendEnabled ) {
    // ...
    for (a=aid.begin(),a_e=aid.end(); a!=a_e; ++a) {
      // ...
    }
    for (a=gdef.begin(),a_e=gdef.end(); a!=a_e; ++a) {
      if ( *a == -1 ) ++newgcount;
      // ...
      isRequested[*a] |= 2;
      // ...
    }
    std::sort(gpair.begin(),gpair.end());
    numGroupsRequested = newgcount;
  }
  DebugM(3,"Done configure on client\n");
}
void ComputeGlobal::deleteGridObjects()
{
  if (numGridObjects == 0) return;
  // ...
  for (ap = ap.begin(); ap != ap.end(); ap++) {
    ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
    if (gridForces != NULL) {
      for (size_t ig = 0; ig < numGridObjects; ig++) {
        if (gridForces[ig] != NULL) {
          delete gridForces[ig];
          gridForces[ig] = NULL;
        }
      }
      delete [] gridForces;
      // ...
    }
  }
  numGridObjects = numActiveGridObjects = 0;
}
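// configureGridObjects() validates the grid indices requested by the master
// and flags them in gridObjActive; only active grids get a ForceList per
// patch, while the rest stay NULL and are left to ComputeGridForce.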
void ComputeGlobal::configureGridObjects(IntList &newgridobjid)
{
  // ...
  numActiveGridObjects = 0;
  // ...
  gridObjActive.resize(numGridObjects);
  // ...
  for ( ; goid_i != goid_e; goid_i++) {
    if ((*goid_i < 0) || (*goid_i >= numGridObjects)) {
      NAMD_bug("Requested illegal gridForceGrid index.");
    }
    DebugM(3,"Adding grid with index " << *goid_i << " to ComputeGlobal\n");
    gridObjActive[*goid_i] = 1;
    numActiveGridObjects++;
  }
  // ...
  for (size_t ig = 0; ig < numGridObjects; ig++) {
    DebugM(3,"Grid index " << ig << " is active or inactive? "
           << gridObjActive[ig] << "\n" << endi);
  }
  // ...
  for (ap = ap.begin(); ap != ap.end(); ap++) {
    gridForcesPtrs[ap->p->getPatchID()] = new ForceList *[numGridObjects];
    ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
    for (size_t ig = 0; ig < numGridObjects; ig++) {
      if (gridObjActive[ig]) {
        // ...
      } else {
        gridForces[ig] = NULL;
      }
    }
  }
}
void ComputeGlobal::recvConfig(ComputeGlobalConfigMsg *msg) {
  DebugM(3,"Receiving configure on client\n");
  configure(msg->aid,msg->gdef);
  // ...
}
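// Message flow as it appears in this file: the master sends the requested
// ids once (recvConfig() -> configure()); every step the client ships
// positions from sendData(), and the master's reply comes back through
// recvResults(), which applies the returned forces to the local patches.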
// ComputeGlobal::recvResults(ComputeGlobalResultsMsg *msg): apply the
// forces returned by the master to the local patches
// ...
  DebugM(3,"Receiving results (" << msg->aid.size() << " forces, "
         << msg->newgdef.size() << " new group atoms) on client thread "
         << CthGetToken(CthSelf())->serialNo
         << " msg->resendCoordinates " << msg->resendCoordinates
         << " msg->totalforces " << msg->totalforces << "\n");
  // ...
  if ( forceSendActive && ! forceSendEnabled )
    NAMD_bug("ComputeGlobal::recvResults forceSendActive without forceSendEnabled");
  // ...
  Force **f = forcePtrs;
  // ...
  for (ap = ap.begin(); ap != ap.end(); ap++) {
    (*ap).r = (*ap).forceBox->open();
    // ...
    t[(*ap).patchID] = (*ap).p->getAtomList().begin();
    // ...
    mass_soa[pId] = (*ap).p->patchDataSOA.mass;
    force_soa_x[pId] = (*ap).p->patchDataSOA.f_global_x;
    force_soa_y[pId] = (*ap).p->patchDataSOA.f_global_y;
    force_soa_z[pId] = (*ap).p->patchDataSOA.f_global_z;
    transform_soa_i[pId] = (*ap).p->patchDataSOA.transform_i;
    transform_soa_j[pId] = (*ap).p->patchDataSOA.transform_j;
    transform_soa_k[pId] = (*ap).p->patchDataSOA.transform_k;
    // ...
  }
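// Two paths follow: with the per-patch SOA arrays in use, forces accumulate
// directly into the cached f_global_{x,y,z} arrays; otherwise the classic
// path adds into the per-patch Force array f.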
  // ...
  for ( ; a != a_e; ++a, ++f2 ) {
    // ...
  }
  // ...
  for ( ; a != a_e; ++a, ++f2 ) {
    DebugM(1,"processing atom "<<(*a)<<", F="<<(*f2)<<"...\n");
    // ...
    localID = atomMap->localID(*a);
    // ...
    lidx = localID.index;
    if ( lpid == notUsed || ! f[lpid] ) continue;
    // ...
    force_soa_x[lpid][lidx] += f_atom.x;
    force_soa_y[lpid][lidx] += f_atom.y;
    force_soa_z[lpid][lidx] += f_atom.z;
#ifndef USE_GLOBALMASTER_VIRIAL_KERNEL
    x_orig.x = pos_soa_x[lpid][lidx];
    x_orig.y = pos_soa_y[lpid][lidx];
    x_orig.z = pos_soa_z[lpid][lidx];
    trans.i = transform_soa_i[lpid][lidx];
    trans.j = transform_soa_j[lpid][lidx];
    trans.k = transform_soa_k[lpid][lidx];
    // ...
    extVirial += outer(f_atom,x_atom);
#endif
  }
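// Each applied force contributes an outer product to the external virial,
//   extVirial(a,b) += f_atom(a) * x_atom(b),
// where x_atom is the unwrapped position rebuilt from x_orig and the
// periodic-image transform loaded above (reconstruction elided). With
// USE_GLOBALMASTER_VIRIAL_KERNEL defined, this accumulation is compiled out
// here and presumably handled by a dedicated kernel, as the macro name
// suggests.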
  // ...
  for ( ; a != a_e; ++a, ++f2 ) {
    DebugM(1,"processing atom "<<(*a)<<", F="<<(*f2)<<"...\n");
    // ...
    Force f_atom = (*f2);
    // ...
    f[localID.pid][localID.index] += f_atom;
    // ...
    extVirial += outer(f_atom,x_atom);
  }
  DebugM(1,"done with the loop\n");
  // ...
  g_i = gdef.begin(); g_e = gdef.end();
  // ...
  for ( ; g_i != g_e; ++g_i, ++gf_i ) {
    // ...
    for ( ; *g_i != -1; ++g_i ) {
      // ...
      localID = atomMap->localID(*g_i);
      // ...
      lidx = localID.index;
      if ( lpid == notUsed || ! f[lpid] ) continue;
      f_atom = accel * mass_soa[lpid][lidx];
      // ...
      CkPrintf("NAMD3-recv: group %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
               *g_i, force_soa_x[lpid][lidx], force_soa_y[lpid][lidx], force_soa_z[lpid][lidx]);
      CkPrintf("NAMD3-recv: group %d, Added Force (%8.6f, %8.6f, %8.6f) \n",
               *g_i, f_atom.x, f_atom.y, f_atom.z);
      // ...
      force_soa_x[lpid][lidx] += f_atom.x;
      force_soa_y[lpid][lidx] += f_atom.y;
      force_soa_z[lpid][lidx] += f_atom.z;
#ifndef USE_GLOBALMASTER_VIRIAL_KERNEL
      x_orig.x = pos_soa_x[lpid][lidx];
      x_orig.y = pos_soa_y[lpid][lidx];
      x_orig.z = pos_soa_z[lpid][lidx];
      trans.i = transform_soa_i[lpid][lidx];
      trans.j = transform_soa_j[lpid][lidx];
      trans.k = transform_soa_k[lpid][lidx];
      // ...
      extVirial += outer(f_atom,x_atom);
#endif
    }
  }
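// A group force is applied as a uniform acceleration: accel (computed from
// the group force in elided code, presumably F_group / M_total) is scaled by
// each member's mass, so the per-atom forces sum back to the group force.
// Illustrative sketch (names other than f_atom/mass_soa are not in the
// source):
//
//   Vector accel = F_group / totalMass;       // one acceleration per group
//   f_atom = accel * mass_soa[lpid][lidx];    // as in the loop above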
  // ...
  for ( ; g_i != g_e; ++g_i, ++gf_i ) {
    // ...
    for ( ; *g_i != -1; ++g_i ) {
      // ...
      CkPrintf("NAMD2-recv: group %d, Before Force (%8.6f, %8.6f, %8.6f) \n",
               /* ... */);
      CkPrintf("NAMD2-recv: group %d, Added Force (%8.6f, %8.6f, %8.6f) \n",
               *g_i, f_atom.x, f_atom.y, f_atom.z);
      // ...
      f[localID.pid][localID.index] += f_atom;
      // ...
      extVirial += outer(f_atom,x_atom);
    }
  }
  DebugM(1,"done with the groups\n");
  // ...
  if (numActiveGridObjects > 0) {
    applyGridObjectForces(msg, &extForce, &extVirial);
  }
  // ...
#ifdef NODEGROUP_FORCE_REGISTER
#ifndef USE_GLOBALMASTER_VIRIAL_KERNEL
  // ... (virial reduction; the #endif placement here is inferred)
#endif
#endif
  // ...
  DebugM(3,"Reconfiguring\n");
  // ...
  DebugM(3,"Sending requested data right away\n");
  // ...
  groupTotalForce.resize(numGroupsRequested);
  for ( int i=0; i<numGroupsRequested; ++i ) groupTotalForce[i] = 0;
  // ...
  DebugM(3,"setting forces\n");
  // ...
  Force **f = forcePtrs;
  // ...
  for (ap = ap.begin(); ap != ap.end(); ap++) {
    // ...
    (*ap).positionBox->close(&x);
    (*ap).forceBox->close(&((*ap).r));
    DebugM(1,"closing boxes\n");
    // ...
    mass_soa[pId] = NULL;
    pos_soa_x[pId] = NULL;
    pos_soa_y[pId] = NULL;
    pos_soa_z[pId] = NULL;
    force_soa_x[pId] = NULL;
    force_soa_y[pId] = NULL;
    force_soa_z[pId] = NULL;
    transform_soa_i[pId] = NULL;
    transform_soa_j[pId] = NULL;
    transform_soa_k[pId] = NULL;
    DebugM(2,"nulling ptrs\n");
    // ...
  }
  DebugM(3,"done setting forces\n");
  // ...
#ifdef NODEGROUP_FORCE_REGISTER
  // ...
#endif
  DebugM(3,"Done processing results\n");
}
// ComputeGlobal::doWork()
// ...
  DebugM(2,"doWork thread " << CthGetToken(CthSelf())->serialNo << "\n");
  // ...
  DebugM(3,"doWork for step " << step << "\n" << endi);
  // ...
  for (ap = ap.begin(); ap != ap.end(); ap++) {
    CompAtom *x = (*ap).positionBox->open();
    t[(*ap).patchID] = (*ap).p->getAtomList().begin();
    // ...
    mass_soa[pId] = (*ap).p->patchDataSOA.mass;
    pos_soa_x[pId] = (*ap).p->patchDataSOA.pos_x;
    pos_soa_y[pId] = (*ap).p->patchDataSOA.pos_y;
    pos_soa_z[pId] = (*ap).p->patchDataSOA.pos_z;
    transform_soa_i[pId] = (*ap).p->patchDataSOA.transform_i;
    transform_soa_j[pId] = (*ap).p->patchDataSOA.transform_j;
    transform_soa_k[pId] = (*ap).p->patchDataSOA.transform_k;
    // ...
  }
  // ...
#ifdef NODEGROUP_FORCE_REGISTER
  // ...
  comm->stowSuspendULT();
  // ...
  comm->stowSuspendULT();
  // ...
#endif // NODEGROUP_FORCE_REGISTER
  // ...
  DebugM(2,"skipping step "<< step <<"\n"<< endi);
  // ...
  DebugM(2,"done with doWork\n");
}
void ComputeGlobal::sendData()
{
  // ...
  for ( ; a != a_e; ++a ) {
    localID = atomMap->localID(*a);
    // ...
    lidx = localID.index;
    if ( lpid == notUsed || ! t[lpid] ) continue;
    // ...
    x_orig.x = pos_soa_x[lpid][lidx];
    x_orig.y = pos_soa_y[lpid][lidx];
    x_orig.z = pos_soa_z[lpid][lidx];
    trans.i = transform_soa_i[lpid][lidx];
    trans.j = transform_soa_j[lpid][lidx];
    trans.k = transform_soa_k[lpid][lidx];
    // ...
  }
  // ...
  for ( ; a != a_e; ++a ) {
    // ...
  }
  // ...
  g_i = gdef.begin(); g_e = gdef.end();
  // ...
  for ( ; g_i != g_e; ++g_i ) {
    // ...
    for ( ; *g_i != -1; ++g_i ) {
      localID = atomMap->localID(*g_i);
      // ...
      lidx = localID.index;
      if ( lpid == notUsed || ! t[lpid] ) continue;
      // ...
      x_orig.x = pos_soa_x[lpid][lidx];
      x_orig.y = pos_soa_y[lpid][lidx];
      x_orig.z = pos_soa_z[lpid][lidx];
      trans.i = transform_soa_i[lpid][lidx];
      trans.j = transform_soa_j[lpid][lidx];
      trans.k = transform_soa_k[lpid][lidx];
      // ...
      mass += mass_soa[lpid][lidx];
      // ...
      printf("NAMD3-send: step %d atom %d, POS (%8.6f, %8.6f, %8.6f) \n",
             patchList[0].p->flags.step, *g_i, x_orig.x, x_orig.y, x_orig.z);
      // ...
    }
    DebugM(1,"Adding center of mass "<<com<<"\n");
    // ...
  }
  // ...
  for ( ; g_i != g_e; ++g_i ) {
    // ...
    for ( ; *g_i != -1; ++g_i ) {
      // ...
      printf("NAMD2-send: step %d atom %d, POS (%8.6f, %8.6f, %8.6f) \n",
             patchList[0].p->flags.step, *g_i, x_orig.x, x_orig.y, x_orig.z);
      // ...
    }
    DebugM(1,"Adding center of mass "<<com<<"\n");
    // ...
  }
  // ...
  if (numActiveGridObjects > 0) {
    // ...
    computeGridObjects(msg);
  }
  // ...
  if ( gfcount ) msg->gtf.swap(groupTotalForce);
  // ...
  DebugM(3,"Sending data (" << msg->p.size() << " positions, "
         /* ... */ << " grid objects) on client\n");
  // ...
#ifdef NODEGROUP_FORCE_REGISTER
  // ...
  comm->stowSuspendULT();
  comm->stowSuspendULT();
  // ...
#endif
}
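// The per-atom grid interaction follows directly from the interpolated
// potential: compute_VdV() yields the value V and gradient dV at the atom's
// position, the applied force is -charge * scale * dV, and the grid object's
// collective value accumulates charge * scale * V, as the loop below shows.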
// ComputeGlobal::computeGridForceGrid(): per-atom potential and gradient
// for one grid
// ...
  for ( ; ai != ae; ai++, gfi++) {
    *gfi = Vector(0.0, 0.0, 0.0);
    // ...
    DebugM(1,"id = " << ai->id << ", scale = " << scale
           << ", charge = " << charge << ", position = " << pos << "\n");
    if (grid->compute_VdV(pos, V, dV)) {
      // ...
    }
    // ...
    *gfi = -charge * scale * dV;
    gridObjValue += charge * scale * V;
    DebugM(1,"id = " << ai->id << ", force = " << *gfi << "\n");
  }
  DebugM(3,"gridObjValue = " << gridObjValue << "\n" << endi);
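// computeGridObjects() sizes one ForceList per (patch, active grid) pair,
// runs computeGridForceGrid() on each, and accumulates the partial grid
// object values (gridobjvalue) that sendData() ships to the master.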
// ComputeGlobal::computeGridObjects(ComputeGlobalDataMsg *msg)
// ...
  NAMD_bug("No grids loaded in memory but ComputeGlobal has been requested to use them.");
  // ...
  size_t ig = 0, gridobjcount = 0;
  // ...
  for (ap = ap.begin(); ap != ap.end(); ap++) {
    // ...
    int const numAtoms = ap->p->getNumAtoms();
    ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
    // ...
    for (ig = 0; ig < numGridObjects; ig++) {
      // ...
      DebugM(2,"Processing grid index " << ig << "\n" << endi);
      // ...
      if (!gridObjActive[ig]) {
        DebugM(2,"Skipping grid index " << ig << "; it is handled by "
               "ComputeGridForce\n" << endi);
        continue;
      }
      // ...
      ForceList *gridForcesGrid = gridForces[ig];
      gridForcesGrid->resize(numAtoms);
      // ...
      DebugM(2,"computeGridObjects(): patch = " << ap->p->getPatchID()
             << ", grid index = " << ig << "\n" << endi);
      // ...
      computeGridForceGrid(ai, ae, gfi, ap->p->lattice, ig, grid, gridobjvalue);
      // ...
    }
  }
  // ...
  for (gridobjcount = 0; gridobjcount < numActiveGridObjects; gridobjcount++) {
    // ...
  }
  DebugM(2,"computeGridObjects done\n");
}
// ComputeGlobal::applyGridObjectForces(ComputeGlobalResultsMsg *msg,
//                                      Force *extForce_in, Tensor *extVirial_in)
// ...
  NAMD_bug("ComputeGlobal received a different number of grid forces than active grids.");
  // ...
  Force &extForce = *extForce_in;
  Tensor &extVirial = *extVirial_in;
  // ...
  gridObjForces.resize(numGridObjects);
  gridObjForces.setall(0.0);
  // ...
  for (ig = 0; gridobjforce_i != gridobjforce_e ;
       gridobjforce_i++, ig++) {
    if (!gridObjActive[ig]) continue;
    gridObjForces[ig] = *gridobjforce_i;
  }
  // ...
  for (ap = ap.begin(); ap != ap.end(); ap++) {
    // ...
    ForceList **gridForces = gridForcesPtrs[ap->p->getPatchID()];
    // ...
    for (ig = 0; ig < numGridObjects; ig++) {
      // ...
      if (!gridObjActive[ig]) continue;
      // ...
      DebugM(2,"gof = " << gridObjForces[ig] << "\n" << endi);
      // ...
      ForceList *gridForcesGrid = gridForces[ig];
      // ...
      for ( ; ai != ae; ai++, gfi++) {
        // ...
        *gfi = Vector(0.0, 0.0, 0.0);
        // ...
        Vector const gridforceatom(-1.0 * (*gfi) * gridObjForces[ig]);
        // ... (DebugM(2, ...) opening elided)
            << ", pid = " << localID.pid
            << ", index = " << localID.index
            << ", force = " << gridforceatom << "\n" << endi);
        f[localID.index] += gridforceatom;
        extForce += gridforceatom;
        // ...
        extVirial += outer(gridforceatom, x_virial);
      }
    }
  }
}
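// saveTotalForces() reports the total force on every requested atom after
// force computation: normal (ForceN) plus nonbonded (ForceNB), plus the slow
// (ForceSL) component when present; (reqflag & 1) appends the atom's own
// force to totalForce, (reqflag & 2) folds it into groupTotalForce through
// the sorted gpair index.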
// ComputeGlobal::saveTotalForces(HomePatch *homePatch)
// ...
  if ( ! forceSendEnabled ) NAMD_bug("ComputeGlobal::saveTotalForces called unexpectedly");
  if ( ! forceSendActive ) return;
  // ...
  for ( int i=0; i<num; ++i ) {
    int index = atoms[i].id;
    if (index < endRequested && isRequested[index] & 1) {
      // ...
      totalForce.add(af[i]);
    }
  }
  // ...
  double *f1_soa_x = homePatch->patchDataSOA.f_normal_x;
  double *f1_soa_y = homePatch->patchDataSOA.f_normal_y;
  double *f1_soa_z = homePatch->patchDataSOA.f_normal_z;
  // ...
  double f_sum_x, f_sum_y, f_sum_z;
  // ...
  for ( int i=0; i<num; ++i ) {
    int index = atoms[i].id;
    // ...
    CkPrintf("ForceSaved: atom %d, ForceN (%8.6f, %8.6f, %8.6f) \n", index, f1_soa_x[i], f1_soa_y[i], f1_soa_z[i]);
    CkPrintf("            atom %d, ForceNB (%8.6f, %8.6f, %8.6f) \n", index, f2_soa_x[i], f2_soa_y[i], f2_soa_z[i]);
    CkPrintf("            atom %d, ForceSL (%8.6f, %8.6f, %8.6f) \n", index, f3_soa_x[i], f3_soa_y[i], f3_soa_z[i]);
    // ...
    CkPrintf("ForceSaved: atom %d, ForceN (%8.6f, %8.6f, %8.6f) \n", index, f1[i].x, f1[i].y, f1[i].z);
    CkPrintf("            atom %d, ForceNB (%8.6f, %8.6f, %8.6f) \n", index, f2[i].x, f2[i].y, f2[i].z);
    // ...
  }
  // ...
  printf("PE, PId (%d, %d) Stop saving at step: %d ####################################################\n",
         /* ... */);
  // ...
  if ( ! forceSendActive ) return;
  for ( int i=0; i<num; ++i ) {
    int index = atoms[i].id;
    // ...
    if (index < endRequested && (reqflag = isRequested[index])) {
      // ...
      f_sum_x = f1_soa_x[i] + f2_soa_x[i];
      f_sum_y = f1_soa_y[i] + f2_soa_y[i];
      f_sum_z = f1_soa_z[i] + f2_soa_z[i];
      // ...
      f_sum_x += f3_soa_x[i];
      f_sum_y += f3_soa_y[i];
      f_sum_z += f3_soa_z[i];
      // ...
      f_sum = f1[i]+f2[i];
      // ...
      if ( fixedAtomsOn && atoms[i].atomFixed ) { /* ... */ }
      // ...
      if ( reqflag & 1 ) {
        // ...
        totalForce.add(f_sum);
      }
      if ( reqflag & 2 ) {
        // ...
        if ( gpi == gpend || gpi->first != index )
          NAMD_bug("ComputeGlobal::saveTotalForces gpair corrupted.");
        // ...
        do {
          groupTotalForce[gpi->second] += f_sum;
        } while ( ++gpi != gpend && gpi->first == index );
      }
    }
  }
}