21 #include "ParallelIOMgr.decl.h"    28 #define MIN_DEBUG_LEVEL 3    34 #if !(defined(__NVCC__) || defined(__HIPCC__))    51       if ( s == -10 ) 
NAMD_bug(
"seq == free in CollectionMgr");
    54       remaining = numClients;
    63       if ( msg->
status != vstatus ) {
    64         NAMD_bug(
"CollectProxyVectorInstance vstatus mismatch");
    66       if ( msg->
seq != seq ) {
    67         NAMD_bug(
"CollectProxyVectorInstance seq mismatch");
    70       for( 
int i = 0; i < size; ++i ) { aid.add(msg->
aid[i]); }
    73         for( 
int i = 0; i < size; ++i ) { data.add(msg->
data[i]); }
    77         for( 
int i = 0; i < size; ++i ) { fdata.add(msg->
fdata[i]); } 
    79       const int atoms_per_message_target = 100000;
    80       return ( ! --remaining || aid.size() > atoms_per_message_target );
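// Added note (not in the original source): append() signals "flush now" either when
// the final expected client message has arrived (! --remaining) or when the buffered
// atom count exceeds atoms_per_message_target, which bounds the size of any single
// forwarded collection message.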
      int numAtoms = aid.size();

        for(int j=0; j<numAtoms; j++) {
          msg->data[j] = data[j];

        for(int j=0; j<numAtoms; j++) {
          msg->fdata[j] = fdata[j];

        for(int j=0; j<numAtoms; j++) {
          msg->aid[j] = aid[j];
          msg->data[j] = data[j];
          msg->fdata[j] = fdata[j];

        msg->size = numAtoms;

      if ( remaining ) reset(seq,vstatus,remaining);

      for( ; c != c_e && (*c)->seq != msg->seq; ++c );

        for( ; c != c_e && (*c)->notfree(); ++c );

      if ( (*c)->append(msg) ) {

    CkpvAccess(BOCclass_group).ioMgr = thisgroup;

    inputProcArray = NULL;
    outputProcArray = NULL;

    totalMV.x = totalMV.y = totalMV.z = 0.0;

    numTotalExclusions = 0;
    numCalcExclusions = 0;
    numCalcFullExclusions = 0;

    isOKToRecvHPAtoms = false;

#ifdef MEM_OPT_VERSION
#if COLLECT_PERFORMANCE_DATA
    numFixedAtomLookup = 0;

    delete [] inputProcArray;
    delete [] outputProcArray;
    delete [] clusterSize;

#ifdef MEM_OPT_VERSION
#ifndef OUTPUT_SINGLE_FILE
#error OUTPUT_SINGLE_FILE not defined!

    numInputProcs = simParameters->numinputprocs;
    numOutputProcs = simParameters->numoutputprocs;
    numOutputWrts = simParameters->numoutputwrts;

    numProxiesPerOutputProc = std::min((int)sqrt(CkNumPes()),(CkNumPes()-1)/numOutputProcs-1);
    if ( numProxiesPerOutputProc < 2 ) numProxiesPerOutputProc = 0;
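// Added worked example (illustrative, not in the original source): on 1024 PEs with
// 32 output procs, sqrt(CkNumPes()) = 32 and (1024-1)/32 - 1 = 30, so
// numProxiesPerOutputProc = 30; any computed value below 2 is clamped to 0, i.e.
// proxies are disabled.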
        iout << iINFO << "Running with " << numInputProcs << " input processors.\n" << endi;
        #if OUTPUT_SINGLE_FILE
        iout << iINFO << "Running with " << numOutputProcs << " output processors ("
             << numOutputWrts << " of them will output simultaneously).\n" << endi;

        iout << iINFO << "Running with " << numOutputProcs
             << " output processors, and each of them will output to its own separate file.\n" << endi;

        if ( numProxiesPerOutputProc ) {
            iout << iINFO << "Running with " << numProxiesPerOutputProc
                 << " proxies per output processor.\n" << endi;

    inputProcArray = new int[numInputProcs];

    for(int i=0; i<numInputProcs; ++i) {

    std::sort(inputProcArray, inputProcArray+numInputProcs);
    for(int i=0; i<numInputProcs; ++i) {
      if ( CkMyPe() == inputProcArray[i] ) {
        if ( myInputRank != -1 ) NAMD_bug("Duplicate input proc");

      iout << iINFO << "INPUT PROC LOCATIONS:";

      for ( i=0; i<numInputProcs && i < 10; ++i ) {
        iout << " " << inputProcArray[i];

      if ( i<numInputProcs ) iout << " ... " << inputProcArray[numInputProcs-1];

    if(myInputRank!=-1) {

        int numMyAtoms = numInitMyAtomsOnInput();
        initAtoms.resize(numMyAtoms+100);
        initAtoms.resize(numMyAtoms);
        tmpRecvAtoms.resize(0);

        tmpRecvAtoms.resize(0);

    outputProcArray = new int[numOutputProcs];
    outputProcFlags = new char[CkNumPes()];
    outputProxyArray = new int[numOutputProcs*numProxiesPerOutputProc];
    myOutputProxies = new int[numOutputProcs];

    myOutputProxyRank = -1;
    for(int i=0; i<numOutputProcs; ++i) {

    std::sort(outputProcArray, outputProcArray+numOutputProcs);
    for(int i=0; i<numOutputProcs*numProxiesPerOutputProc; ++i) {

    std::sort(outputProxyArray, outputProxyArray+numOutputProcs*numProxiesPerOutputProc,

    for(int i=0; i<CkNumPes(); ++i) {
      outputProcFlags[i] = 0;

    for(int i=0; i<numOutputProcs; ++i) {
      outputProcFlags[outputProcArray[i]] = 1;
      if ( CkMyPe() == outputProcArray[i] ) {
        if ( myOutputRank != -1 ) NAMD_bug("Duplicate output proc");

    for(int i=0; i<numOutputProcs*numProxiesPerOutputProc; ++i) {
      if ( CkMyPe() == outputProxyArray[i] ) {
        if ( myOutputRank != -1 ) NAMD_bug("Output proxy is also output proc");
        if ( myOutputProxyRank != -1 ) NAMD_bug("Duplicate output proxy");
        myOutputProxyRank = i;

    for(int i=0; i<numOutputProcs; ++i) {
      if ( numProxiesPerOutputProc ) {
        myOutputProxies[i] = outputProxyArray[myProxySet*numOutputProcs+i];

        myOutputProxies[i] = outputProcArray[i];

    myOutputProxyPositions = 0;
    myOutputProxyVelocities = 0;
    myOutputProxyForces = 0;

      iout << iINFO << "OUTPUT PROC LOCATIONS:";

      for ( i=0; i<numOutputProcs && i < 10; ++i ) {
        iout << " " << outputProcArray[i];

      if ( i<numOutputProcs ) iout << " ... " << outputProcArray[numOutputProcs-1];

#ifdef MEM_OPT_VERSION
    if(myOutputRank!=-1) {
        midCM = new CollectionMidMaster(this);

    remoteClusters.clear();

   return outputProcFlags[pe];

  return CProxy_ParallelIOMgr::ckLocalBranch(CkpvAccess(BOCclass_group).ioMgr)->isOutputProcessor(pe);

#ifdef MEM_OPT_VERSION
    if(myInputRank!=-1) {
        int myAtomLIdx, myAtomUIdx;
        getMyAtomsInitRangeOnInput(myAtomLIdx, myAtomUIdx);

        molecule->read_binary_atom_info(myAtomLIdx, myAtomUIdx, initAtoms);

        readCoordinatesAndVelocity();

        int oRank=atomRankOnOutput(myAtomLIdx);
        for(int i=oRank; i<numOutputProcs; i++) {

            getAtomsRangeOnOutput(lIdx, uIdx, i);
            if(lIdx > myAtomUIdx) break;
            int fid = lIdx>myAtomLIdx?lIdx:myAtomLIdx;
            int tid = uIdx>myAtomUIdx?myAtomUIdx:uIdx;
            for(int j=fid; j<=tid; j++) initAtoms[j-myAtomLIdx].outputRank = i;

    if(myOutputRank!=-1) {

        if(!(simParameters->wrapAll || simParameters->wrapWater)) return;
        readInfoForParOutput();
void ParallelIOMgr::readCoordinatesAndVelocity()

#ifdef MEM_OPT_VERSION
    int myAtomLIdx, myAtomUIdx;
    getMyAtomsInitRangeOnInput(myAtomLIdx, myAtomUIdx);
    int myNumAtoms = myAtomUIdx-myAtomLIdx+1;

    FILE *ifp = fopen(simParameters->binCoorFile, "rb");

        sprintf(s, "The binary coordinate file %s cannot be opened on proc %d\n", simParameters->binCoorFile, CkMyPe());

    fread(&filelen, sizeof(int32), 1, ifp);
    char lenbuf[sizeof(int32)];
    memcpy(lenbuf, (const char *)&filelen, sizeof(int32));

    if(!memcmp(lenbuf, (const char *)&filelen, sizeof(int32))) {
        iout << iWARN << "Number of atoms in binary file " << simParameters->binCoorFile
             << " is palindromic, assuming same endian.\n" << endi;

    if(filelen!=molecule->numAtoms) {

        memcpy((void *)&filelen, lenbuf, sizeof(int32));

    if(filelen!=molecule->numAtoms) {

        sprintf(s, "Incorrect atom count in binary file %s", simParameters->binCoorFile);

    if ( _fseeki64(ifp, offsetPos, SEEK_CUR) )

    if ( fseeko(ifp, offsetPos, SEEK_CUR) )

        sprintf(s, "Error in seeking binary file %s on proc %d", simParameters->binCoorFile, CkMyPe());

    size_t totalRead = fread(tmpData, sizeof(Vector), myNumAtoms, ifp);
    if(totalRead!=myNumAtoms) {

        sprintf(s, "Error in reading binary file %s on proc %d", simParameters->binCoorFile, CkMyPe());

    if(needFlip) flipNum((char *)tmpData, sizeof(BigReal), myNumAtoms*3);

    for(int i=0; i<myNumAtoms; i++) initAtoms[i].position = tmpData[i];
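// Added illustration (not part of the original source): the checks above decide
// whether byte order needs flipping by comparing the on-disk atom count against a
// byte-swapped copy of itself, falling back to "palindromic, assuming same endian"
// when the two agree. A minimal standalone sketch of that test, assuming only
// NAMD's flipNum() helper:
static bool fileNeedsByteFlip(int32 diskCount, int32 expectedCount)
{
    if (diskCount == expectedCount) return false;         // counts agree: same endianness
    char buf[sizeof(int32)];
    memcpy(buf, (const char *)&diskCount, sizeof(int32));
    flipNum(buf, sizeof(int32), 1);                        // reverse the byte order in place
    int32 flipped;
    memcpy((void *)&flipped, buf, sizeof(int32));
    return flipped == expectedCount;                       // matches only after a swap: flip the file data
}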
    if(!simParameters->binVelFile) {

        ifp = fopen(simParameters->binVelFile, "rb");

            sprintf(s, "The binary velocity file %s cannot be opened on proc %d\n", simParameters->binVelFile, CkMyPe());

        fread(&filelen, sizeof(int32), 1, ifp);
        memcpy(lenbuf, (const char *)&filelen, sizeof(int32));

        if(!memcmp(lenbuf, (const char *)&filelen, sizeof(int32))) {
            iout << iWARN << "Number of atoms in binary file " << simParameters->binVelFile
                 << " is palindromic, assuming same endian.\n" << endi;

        if(filelen!=molecule->numAtoms) {

            memcpy((void *)&filelen, lenbuf, sizeof(int32));

        if(filelen!=molecule->numAtoms) {

            sprintf(s, "Incorrect atom count in binary file %s", simParameters->binVelFile);

        if ( _fseeki64(ifp, offsetPos, SEEK_CUR) )

        if ( fseeko(ifp, offsetPos, SEEK_CUR) )

            sprintf(s, "Error in seeking binary file %s on proc %d", simParameters->binVelFile, CkMyPe());

        totalRead = fread(tmpData, sizeof(Vector), myNumAtoms, ifp);
        if(totalRead!=myNumAtoms) {

            sprintf(s, "Error in reading binary file %s on proc %d", simParameters->binVelFile, CkMyPe());

        if(needFlip) flipNum((char *)tmpData, sizeof(BigReal), myNumAtoms*3);

        for(int i=0; i<myNumAtoms; i++) initAtoms[i].velocity = tmpData[i];

    if(!simParameters->binRefFile) {
        for(int i=0; i<myNumAtoms; i++) initAtoms[i].fixedPosition = initAtoms[i].position;

        ifp = fopen(simParameters->binRefFile, "rb");

            sprintf(s, "The binary reference coordinate file %s cannot be opened on proc %d\n", simParameters->binRefFile, CkMyPe());

        fread(&filelen, sizeof(int32), 1, ifp);
        memcpy(lenbuf, (const char *)&filelen, sizeof(int32));

        if(!memcmp(lenbuf, (const char *)&filelen, sizeof(int32))) {
            iout << iWARN << "Number of atoms in binary file " << simParameters->binRefFile
                 << " is palindromic, assuming same endian.\n" << endi;

        if(filelen!=molecule->numAtoms) {

            memcpy((void *)&filelen, lenbuf, sizeof(int32));

        if(filelen!=molecule->numAtoms) {

            sprintf(s, "Incorrect atom count in binary file %s", simParameters->binRefFile);

        if ( _fseeki64(ifp, offsetPos, SEEK_CUR) )

        if ( fseeko(ifp, offsetPos, SEEK_CUR) )

            sprintf(s, "Error in seeking binary file %s on proc %d", simParameters->binRefFile, CkMyPe());

        totalRead = fread(tmpData, sizeof(Vector), myNumAtoms, ifp);
        if(totalRead!=myNumAtoms) {

            sprintf(s, "Error in reading binary file %s on proc %d", simParameters->binRefFile, CkMyPe());

        if(needFlip) flipNum((char *)tmpData, sizeof(BigReal), myNumAtoms*3);

        for(int i=0; i<myNumAtoms; i++) initAtoms[i].fixedPosition = tmpData[i];
void ParallelIOMgr::readInfoForParOutput()

    getMyAtomsRangeOnOutput(fromIdx,toIdx);
    int numMyAtoms = toIdx-fromIdx+1;

    clusterID = new int[numMyAtoms];
    clusterSize = new int[numMyAtoms];

    FILE *ifp = fopen(simParameters->binAtomFile, "rb");

    fread(&magicNum, sizeof(int), 1, ifp);

    isWater = new char[numMyAtoms];

    int64 offset = sizeof(char)*((int64)(fromIdx-molecule->numAtoms));

    if ( _fseeki64(ifp, offset, SEEK_END) )

    if ( fseeko(ifp, offset, SEEK_END) )

        sprintf(s, "Error in seeking binary file %s on proc %d", simParameters->binAtomFile, CkMyPe());

    fread(isWater, sizeof(char), numMyAtoms, ifp);

    offset = sizeof(int)*((int64)(fromIdx-molecule->numAtoms))
                   - sizeof(char)*((int64)(molecule->numAtoms));

    if ( _fseeki64(ifp, offset, SEEK_END) )

    if ( fseeko(ifp, offset, SEEK_END) )

        sprintf(s, "Error in seeking binary file %s on proc %d", simParameters->binAtomFile, CkMyPe());

    fread(clusterID, sizeof(int), numMyAtoms, ifp);
    if(needFlip) flipNum((char *)clusterID, sizeof(int), numMyAtoms);

    for(int i=0; i<numMyAtoms; i++) {

        int cid = clusterID[i];

        CmiAssert(cid<=toIdx);

                remoteClusters.add(one);

            int lidx = cid-fromIdx;

    printf("output[%d]=%d: prepare to send %d remote msgs for cluster size\n",
           myOutputRank, CkMyPe(), remoteClusters.size());

    numRemoteClusters = remoteClusters.size();

    CProxy_ParallelIOMgr pIO(thisgroup);

    for(iter=iter.begin(); iter!=iter.end(); iter++) {

        int dstRank = atomRankOnOutput(iter->clusterId);
        pIO[outputProcArray[dstRank]].recvClusterSize(msg);
  std::vector<string> inputs;
  std::vector<string> outputs;
  std::vector<uint16> tags;
  std::vector<int> freqs;
  std::vector<OUTPUTFILETYPE> types;

    for(int index=0; index<16; ++index)

    CProxy_ParallelIOMgr pIO(thisgroup);
    pIO.recvDcdParams(tags,

                                  std::vector<std::string> inputFileNames,
                                  std::vector<std::string> outputFileNames,
                                  std::vector<int> freqs,
                                  std::vector<OUTPUTFILETYPE> types)

      for(int index=0; index < 16; ++index)

#ifdef MEM_OPT_VERSION
      int selFromIdx, selToIdx;
      getMyAtomsRangeOnOutput(selFromIdx, selToIdx);
      DebugM(3, "["<<CkMyPe()<<"]"<<" ParallelIOMgr::readInfoForParOutDcdSelection from "
                << selFromIdx << " to " << selToIdx << "\n");
      for(int index=0; index < 16; ++index)

          if(filesystem::path(dcdSelectionInputFile).extension() == ".idx")

              IndexFile dcdSelectionInputIdx(dcdSelectionInputFile);
              std::vector<uint32> indexVec = dcdSelectionInputIdx.getAllElements();

              auto start = std::lower_bound(indexVec.begin(), indexVec.end(), selFromIdx);
              off_t startOffset = std::distance(indexVec.begin(), start);
              auto end = std::lower_bound(start, indexVec.end(), selToIdx);
              size_t size = std::distance(start, end);

              std::vector<uint32> dcdSelectionIndexReverseMap(size);
              for(size_t offset=0; offset<size; ++offset)
                  dcdSelectionIndexReverseMap[offset] = indexVec[startOffset+offset];

              midCM->parOut->setDcdSelectionParams(index, startOffset, size, dcdSelectionIndexReverseMap);
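// Added worked example (illustrative, not in the original source): if the sorted
// selection index file yields indexVec = {3, 7, 12, 31} and this output rank owns
// atoms 5..20, then lower_bound(5) points at 7 and lower_bound(20) points at 31,
// so startOffset = 1 and size = 2; selected atoms 7 and 12 therefore map to local
// slots 0 and 1 of dcdSelectionIndexReverseMap.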
    if(myOutputRank==-1) return;
    if(!(simParameters->wrapAll || simParameters->wrapWater)) return;

    getMyAtomsRangeOnOutput(fromIdx,toIdx);

    for(int i=0; i<csmBuf.size(); i++) {

    CProxy_ParallelIOMgr pIO(thisgroup);
    for(int i=0; i<csmBuf.size(); i++) {

        pIO[outputProcArray[msg->srcRank]].recvFinalClusterSize(msg);

    numRemoteReqs = csmBuf.size();

    if(numRemoteClusters!=0){
        recvFinalClusterSize(NULL);

        int numMyAtoms = toIdx-fromIdx+1;
        for(int i=0; i<numMyAtoms; i++) {
            int lidx = clusterID[i]-fromIdx;
            clusterSize[i] = clusterSize[lidx];

        #if 0 //write out cluster debug info
        sprintf(fname, "cluster.par.%d", CkMyPe());
        FILE *ofp = fopen(fname, "w");
        for(int i=0; i<numMyAtoms; i++) {
            fprintf(ofp, "%d: %d: %d\n", i+fromIdx, clusterID[i], clusterSize[i]);

        CmiAssert(ret!=NULL);

    if(++numCSMAck == (numRemoteClusters+1)) {

        getMyAtomsRangeOnOutput(fromIdx,toIdx);
        int numMyAtoms = toIdx-fromIdx+1;

        for(int i=0; i<numMyAtoms; i++) {
            int cid = clusterID[i];
            int lidx = cid-fromIdx;

                clusterSize[i] = clusterSize[lidx];

        remoteClusters.clear();

#if 0 //write out cluster debug info
        sprintf(fname, "cluster.par.%d", CkMyPe());
        FILE *ofp = fopen(fname, "w");
        for(int i=0; i<numMyAtoms; i++) {
            fprintf(ofp, "%d: %d: %d\n", i+fromIdx, clusterID[i], clusterSize[i]);

    if(myInputRank==-1) return;

    for(int i=0; i<initAtoms.size(); i++) {

        int parentRank = atomInitRankOnInput(initAtoms[i].MPID);
        if(parentRank != myInputRank) {
            toMigrateList.add(i);
            initAtoms[i].isValid = false;
            int tmp = parentRank - myInputRank;
            tmp = tmp>0 ? tmp : -tmp;
            if(tmp > maxOffset) maxOffset = tmp;

    for(int i=0; i<toMigrateList.size(); i++) {
        int idx = toMigrateList[i];
        int parentRank = atomInitRankOnInput(initAtoms[idx].MPID);

        int offset = parentRank - myInputRank + maxOffset;
        migLists[offset].add(initAtoms[idx]);

    CProxy_ParallelIOMgr pIO(thisgroup);
    for(int i=0; i<2*maxOffset+1; i++) {
        int migLen = migLists[i].size();

            int destRank = i-maxOffset+myInputRank;
            pIO[inputProcArray[destRank]].recvAtomsMGrp(msg);

    toMigrateList.clear();

    for(int i=0; i<msg->length; i++) {
        tmpRecvAtoms.add((msg->atomList)[i]);

    if(myInputRank==-1) return;

    for(int i=0; i<tmpRecvAtoms.size(); i++) {
        tmpRecvAtoms[i].isValid = true;
        initAtoms.add(tmpRecvAtoms[i]);

    tmpRecvAtoms.clear();

    std::sort(initAtoms.begin(), initAtoms.end());

    int numFixedRigidBonds = 0;
    if(molecule->numRigidBonds){
        int parentIsFixed = 0;
        for(int i=0; i<initAtoms.size(); i++) {

                    numFixedRigidBonds++;

                    numFixedRigidBonds++;

    int numFixedGroups = 0;
    if(molecule->numFixedAtoms){
        for(int i=0; i<initAtoms.size();) {

                    if(!allFixed) break;

                if(allFixed) numFixedGroups++;

    CProxy_ParallelIOMgr pIO(thisgroup);

    pIO[0].recvHydroBasedCounter(msg);
#ifdef MEM_OPT_VERSION
    if(myInputRank==-1) return;

    CProxy_ParallelIOMgr pIO(thisgroup);

    for(int i=0; i<initAtoms.size(); i++) {

        if(initAtoms[i].rigidBondLength > 0.0) msg->numRigidBonds++;

    if(molecule->numFixedAtoms>0 && ! simParameters->fixedAtomsForces) {

        int sAId = initAtoms[0].id;

        for(int i=0; i<initAtoms.size(); i++) {

            int myAId = initAtoms[i].id;

            if(!initAtoms[i].atomFixed) {

            for(int j=0; j<thisSig->bondCnt; j++) {
                int a1 = myAId + bsig->offset[0];

            for(int j=0; j<thisSig->angleCnt; j++) {
                int a1 = myAId + bsig->offset[0];
                int a2 = myAId + bsig->offset[1];
                if(!isAtomFixed(sAId, a1) || !isAtomFixed(sAId, a2))

                int a1 = myAId + bsig->offset[0];
                int a2 = myAId + bsig->offset[1];
                int a3 = myAId + bsig->offset[2];
                if(!isAtomFixed(sAId, a1) ||
                   !isAtomFixed(sAId, a2) ||
                   !isAtomFixed(sAId, a3))

                int a1 = myAId + bsig->offset[0];
                int a2 = myAId + bsig->offset[1];
                int a3 = myAId + bsig->offset[2];
                if(!isAtomFixed(sAId, a1) ||
                   !isAtomFixed(sAId, a2) ||
                   !isAtomFixed(sAId, a3))

                int a1 = myAId + bsig->offset[0];
                int a2 = myAId + bsig->offset[1];
                int a3 = myAId + bsig->offset[2];
                int a4 = myAId + bsig->offset[3];
                int a5 = myAId + bsig->offset[4];
                int a6 = myAId + bsig->offset[5];
                int a7 = myAId + bsig->offset[6];

                if(!isAtomFixed(sAId, a1) ||
                   !isAtomFixed(sAId, a2) ||
                   !isAtomFixed(sAId, a3) ||
                   !isAtomFixed(sAId, a4) ||
                   !isAtomFixed(sAId, a5) ||
                   !isAtomFixed(sAId, a6) ||
                   !isAtomFixed(sAId, a7))

                int thisAId = exclSig->modOffset[j]+myAId;

                int a1 = myAId + bsig->offset[0];
                int a2 = myAId + bsig->offset[1];
                if(!isAtomFixed(sAId, a1) ||
                   !isAtomFixed(sAId, a2))

#if COLLECT_PERFORMANCE_DATA
        printf("Num fixedAtom lookup on proc %d is %d\n", CkMyPe(), numFixedAtomLookup);

    if(!simParameters->comMove) {
        for (int i=0; i<initAtoms.size(); i++) {
            msg->totalMV += initAtoms[i].mass * initAtoms[i].velocity;

    pIO[0].recvMolInfo(msg);

    molecule->numBonds += msg->numBonds;

    if(!simParameters->comMove) {

    if(++procsReceived == numInputProcs) {

        msg->numBonds = molecule->numBonds;

        if(!simParameters->comMove) {

        CProxy_ParallelIOMgr pIO(thisgroup);
        pIO.bcastMolInfo(msg);

#ifdef MEM_OPT_VERSION
    if(myInputRank!=-1) {
        if(!simParameters->comMove) {

            for (int i=0; i<initAtoms.size(); i++) initAtoms[i].velocity -= val;

        iout << iINFO << "LOADED " << molecule->numTotalExclusions << " TOTAL EXCLUSIONS\n" << endi;
        if(!simParameters->comMove) {
            iout << iINFO << "REMOVING COM VELOCITY "

    if(++hydroMsgRecved == numInputProcs){

        CProxy_ParallelIOMgr pIO(thisgroup);
        pIO.bcastHydroBasedCounter(msg);
#ifdef MEM_OPT_VERSION
        iout << iINFO << "****************************\n";
        iout << iINFO << "STRUCTURE SUMMARY:\n";
        iout << iINFO << molecule->numAtoms << " ATOMS\n";
        iout << iINFO << molecule->numBonds << " BONDS\n";
        iout << iINFO << molecule->numAngles << " ANGLES\n";
        iout << iINFO << molecule->numDihedrals << " DIHEDRALS\n";
        iout << iINFO << molecule->numImpropers << " IMPROPERS\n";
        iout << iINFO << molecule->numCrossterms << " CROSSTERMS\n";
        iout << iINFO << molecule->numExclusions << " EXCLUSIONS\n";

        if ((molecule->numMultipleDihedrals) && (simParameters->paraTypeXplorOn)){
            iout << iINFO << molecule->numMultipleDihedrals
                 << " DIHEDRALS WITH MULTIPLE PERIODICITY (BASED ON PSF FILE)\n";

        if ((molecule->numMultipleDihedrals) && (simParameters->paraTypeCharmmOn)){
            iout << iINFO << molecule->numMultipleDihedrals
                 << " DIHEDRALS WITH MULTIPLE PERIODICITY IGNORED (BASED ON PSF FILE) \n"
                 << " CHARMM MULTIPLICITIES BASED ON PARAMETER FILE INFO! \n";

        if (molecule->numMultipleImpropers){
            iout << iINFO << molecule->numMultipleImpropers
                 << " IMPROPERS WITH MULTIPLE PERIODICITY\n";

        if (simParameters->fixedAtomsOn)
           iout << iINFO << molecule->numFixedAtoms << " FIXED ATOMS\n";

        if (simParameters->rigidBonds)
           iout << iINFO << molecule->numRigidBonds << " RIGID BONDS\n";

        if (simParameters->fixedAtomsOn && simParameters->rigidBonds)
           iout << iINFO << molecule->numFixedRigidBonds
                << " RIGID BONDS BETWEEN FIXED ATOMS\n";

        iout << iINFO << molecule->num_deg_freedom(1)
             << " DEGREES OF FREEDOM\n";

        iout << iINFO << molecule->numHydrogenGroups << " HYDROGEN GROUPS\n";
        iout << iINFO << molecule->maxHydrogenGroupSize
             << " ATOMS IN LARGEST HYDROGEN GROUP\n";
        iout << iINFO << molecule->numMigrationGroups << " MIGRATION GROUPS\n";
        iout << iINFO << molecule->maxMigrationGroupSize
             << " ATOMS IN LARGEST MIGRATION GROUP\n";

        if (simParameters->fixedAtomsOn)
           iout << iINFO << molecule->numFixedGroups
                << " HYDROGEN GROUPS WITH ALL ATOMS FIXED\n";

        iout << iINFO << "TOTAL MASS = " << totalMass << " amu\n";
        iout << iINFO << "TOTAL CHARGE = " << totalCharge << " e\n";

        BigReal volume = simParameters->lattice.volume();

                << ((totalMass/volume) / 0.6022) << " g/cm^3\n";

                << (molecule->numAtoms/volume) << " atoms/A^3\n";

        iout << iINFO << "*****************************\n";
    if(myInputRank==-1) return;

    CProxy_PatchMgr pm(CkpvAccess(BOCclass_group).patchMgr);
    PatchMgr *patchMgr = pm.ckLocalBranch();

    const Lattice lattice = simParameters->lattice;
    for(int i=0; i<initAtoms.size(); i++) {

        eachPatchAtomList[pid].push_back(i);

    CProxy_ParallelIOMgr pIO(thisgroup);

    for(int i=0; i<numPatches; i++) {
        int cursize = eachPatchAtomList[i].size();
        if(cursize>0) patchCnt++;

    if(simParameters->fixedAtomsOn) {

    for(int i=0; i<numPatches; i++) {
        int cursize = eachPatchAtomList[i].size();

            if ( cursize > USHRT_MAX ) {

              sprintf(errstr, "Patch %d exceeds %d atoms.", i, USHRT_MAX);

    if(simParameters->fixedAtomsOn) {

        for(int i=0; i<numPatches; i++) {
            int cursize = eachPatchAtomList[i].size();

                for(int j=0; j<cursize; j++) {
                    int aid = eachPatchAtomList[i][j];

                    fixedCnt += initAtoms[aid].atomFixed;

    pIO[0].recvAtomsCntPerPatch(msg);

#ifdef MEM_OPT_VERSION
    for(int i=0; i<msg->length; i++) {

        int oldNum = patchMap->numAtoms(pid);

          sprintf(errstr, "Patch %d exceeds %d atoms.", pid, USHRT_MAX);

        patchMap->setNumAtoms(pid, oldNum+msg->atomsCntList[i]);
        if(simParameters->fixedAtomsOn) {
            oldNum = patchMap->numFixedAtoms(pid);

    if(++procsReceived == numInputProcs) {

        for(int i=0; i<patchMap->numPatches(); i++) {
            int cnt = patchMap->numAtoms(i);

        iout << iINFO << "LARGEST PATCH (" << maxPatch
             << ") HAS " << maxAtoms << " ATOMS\n" << endi;

        if ( totalAtoms != Node::Object()->molecule->numAtoms ) {

          sprintf(errstr, "Incorrect atom count in void ParallelIOMgr::recvAtomsCntPerPatch: %d vs %d", totalAtoms, Node::Object()->molecule->numAtoms);

#ifdef MEM_OPT_VERSION
    if(myInputRank==-1) return;

    if ( sendAtomsThread == 0 ) {

      CthAwaken(sendAtomsThread);

    sendAtomsThread = 0;
    numAcksOutstanding = 0;

    for(int i=0; i<numPatches; i++) {
        if(eachPatchAtomList[i].size()==0) continue;
        int onPE = patchMap->node(i);
        if ( procList[onPE].size() == 0 ) pesToSend.add(onPE);
        procList[onPE].add(i);

    CProxy_ParallelIOMgr pIO(thisgroup);
    for(int k=0; k<pesToSend.size(); k++) {
        const int i = pesToSend[k];
        int len = procList[i].size();
        if(len==0) continue;

        for(int j=0; j<len; j++) {
            int pid = procList[i][j];
            int atomCnt = eachPatchAtomList[pid].size();

            if ( numAcksOutstanding >= 10 ) {
              sendAtomsThread = CthSelf();

            ++numAcksOutstanding;

            msg->from = CkMyPe();

            for(int k=0; k<atomCnt; k++, atomIdx++) {
                int aid = eachPatchAtomList[pid][k];

                one.hydVal = initAtoms[aid].hydList;

            pIO[i].recvAtomsToHomePatchProcs(msg);

        procList[i].clear();

  --numAcksOutstanding;
  if ( sendAtomsThread ) {
    CthAwaken(sendAtomsThread);
    sendAtomsThread = 0;
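// Added note (not in the original source): numAcksOutstanding and sendAtomsThread
// implement a simple credit-based throttle for the send loop above. Once ten
// MovePatchAtomsMsg sends are unacknowledged, the sending thread records itself
// (CthSelf()) and suspends; each ack handled here decrements the count and awakens
// the recorded thread so sending can resume.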
    CProxy_ParallelIOMgr pIO(thisgroup);
    pIO[msg->from].ackAtomsToHomePatchProcs();

    if(!isOKToRecvHPAtoms) {
        prepareHomePatchAtomList();
        isOKToRecvHPAtoms = true;

    int numRecvPatches = msg->patchCnt;

    for(int i=0; i<numRecvPatches; i++) {

        int idx = binaryFindHPID(pid);
        for(int j=0; j<size; j++, aid++) {
            hpAtomsList[idx].add(msg->allAtoms[aid]);

void ParallelIOMgr::prepareHomePatchAtomList()

    for(int i=0; i<patchMap->numPatches(); i++) {
        if(patchMap->node(i)==CkMyPe()) {

    if(hpIDList.size()>0)

int ParallelIOMgr::binaryFindHPID(int pid)

    lIdx=hpIDList.size()-1;

    while(rIdx<=lIdx ) {
        int idx = (rIdx+lIdx)/2;
        int curPid = hpIDList[idx];

        } else if(pid<curPid) {

    CmiAssert(retIdx!=-1);
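// Added note (not in the original source): hpIDList is filled in increasing patch-ID
// order by prepareHomePatchAtomList() above, so binaryFindHPID() is a plain binary
// search over a sorted array; e.g. with hpIDList = {2, 5, 9}, binaryFindHPID(5)
// returns index 1.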
#ifdef MEM_OPT_VERSION
    int numPids = hpIDList.size();

        if(assignedPids == 0) return;

        CmiAssert(isOKToRecvHPAtoms == false);

        CProxy_PatchMgr pm(CkpvAccess(BOCclass_group).patchMgr);
        PatchMgr *patchMgr = pm.ckLocalBranch();
        for(int i=0; i<patchMap->numPatches(); i++) {
            if(patchMap->node(i)==CkMyPe()) {

    CProxy_PatchMgr pm(CkpvAccess(BOCclass_group).patchMgr);
    PatchMgr *patchMgr = pm.ckLocalBranch();

    for(int i=0; i<numPids; i++) {
        int pid = hpIDList[i];

        std::sort(hpAtomsList[i].begin(), hpAtomsList[i].end());

    delete [] hpAtomsList;

#ifdef MEM_OPT_VERSION
    molecule->delAtomNames();
    molecule->delChargeSpace();

    if(!CkMyPe() && !simParameters->freeEnergyOn)
        molecule->delMassSpace();

    molecule->delFixedAtoms();

int ParallelIOMgr::numMyAtoms(int rank, int numProcs)

    if(rank==-1) return -1;
    int avgNum = molecule->numAtoms/numProcs;
    int remainder = molecule->numAtoms%numProcs;
    if(rank<remainder) return avgNum+1;

int ParallelIOMgr::atomRank(int atomID, int numProcs)

    int avgNum = molecule->numAtoms/numProcs;
    int remainder = molecule->numAtoms%numProcs;
    int midLimit = remainder*(avgNum+1);

    if(atomID<midLimit) {
        idx = atomID/(avgNum+1);

        idx = remainder+(atomID-midLimit)/avgNum;

void ParallelIOMgr::getMyAtomsRange(int &lowerIdx, int &upperIdx, int rank, int numProcs)

    int avgNum = molecule->numAtoms/numProcs;
    int remainder = molecule->numAtoms%numProcs;
    if(rank<remainder) {
        lowerIdx = rank*(avgNum+1);
        upperIdx = lowerIdx+avgNum;

        int midLimit = remainder*(avgNum+1);
        lowerIdx = midLimit+(rank-remainder)*avgNum;
        upperIdx = lowerIdx+avgNum-1;
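// Added worked example (illustrative, not in the original source): with numAtoms = 10
// and numProcs = 3, avgNum = 3 and remainder = 1, so rank 0 owns atoms 0..3 (avgNum+1
// of them) while ranks 1 and 2 own 4..6 and 7..9; consistently, atomRank(5, 3) gives
// midLimit = 4 and idx = 1 + (5-4)/3 = 1.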
int ParallelIOMgr::calcMyOutputProxyClients() {

  int myOutputProxyClients = 0;
  int myset = myOutputProxyRank / numOutputProcs;
  for(int i=0; i<CkNumPes(); ++i) {
    if ( (i*numProxiesPerOutputProc)/CkNumPes() == myset &&

        ++myOutputProxyClients;

  return myOutputProxyClients;

#ifdef MEM_OPT_VERSION
  if ( myOutputRank != -1 ) {
    int ready = midCM->receivePositions(msg);

        CProxy_CollectionMaster cm(mainMaster);
        cm.receiveOutputPosReady(msg->seq);

  } else if ( myOutputProxyRank != -1 ) {
    if ( ! myOutputProxyPositions ) {

    if ( newmsg ) thisProxy[outputProcArray[myOutputProxyRank%numOutputProcs]].receivePositions(newmsg);

    NAMD_bug("ParallelIOMgr::receivePositions on bad pe");
#ifdef MEM_OPT_VERSION
  if ( myOutputRank != -1 ) {
    int ready = midCM->receiveVelocities(msg);

        CProxy_CollectionMaster cm(mainMaster);
        cm.receiveOutputVelReady(msg->seq);

  } else if ( myOutputProxyRank != -1 ) {
    if ( ! myOutputProxyVelocities ) {

    if ( newmsg ) thisProxy[outputProcArray[myOutputProxyRank%numOutputProcs]].receiveVelocities(newmsg);

    NAMD_bug("ParallelIOMgr::receiveVelocities on bad pe");

#ifdef MEM_OPT_VERSION
  if ( myOutputRank != -1 ) {
    int ready = midCM->receiveForces(msg);

        CProxy_CollectionMaster cm(mainMaster);
        cm.receiveOutputForceReady(msg->seq);

  } else if ( myOutputProxyRank != -1 ) {
    if ( ! myOutputProxyForces ) {

    if ( newmsg ) thisProxy[outputProcArray[myOutputProxyRank%numOutputProcs]].receiveForces(newmsg);

    NAMD_bug("ParallelIOMgr::receiveForces on bad pe");

#ifdef MEM_OPT_VERSION
  DebugM(3, "["<<CkMyPe()<<"]"<<"ParallelIOMgr::disposePositions"<<"\n");

  double iotime = CmiWallTimer();
  midCM->disposePositions(seq);
  iotime = CmiWallTimer()-iotime+prevT;

#if OUTPUT_SINGLE_FILE
    if(myOutputRank == getMyOutputGroupHighestRank()) {

        CProxy_CollectionMaster cm(mainMaster);
        cm.startNextRoundOutputPos(iotime);

        CProxy_ParallelIOMgr io(thisgroup);
        io[outputProcArray[myOutputRank+1]].disposePositions(seq, iotime);

        CProxy_CollectionMaster cm(mainMaster);
        cm.startNextRoundOutputPos(iotime);

#ifdef MEM_OPT_VERSION
    double iotime = CmiWallTimer();
    midCM->disposeVelocities(seq);
    iotime = CmiWallTimer()-iotime+prevT;

#if OUTPUT_SINGLE_FILE
    if(myOutputRank==getMyOutputGroupHighestRank()) {

        CProxy_CollectionMaster cm(mainMaster);
        cm.startNextRoundOutputVel(iotime);

        CProxy_ParallelIOMgr io(thisgroup);
        io[outputProcArray[myOutputRank+1]].disposeVelocities(seq, iotime);

        CProxy_CollectionMaster cm(mainMaster);
        cm.startNextRoundOutputVel(iotime);

#ifdef MEM_OPT_VERSION
    double iotime = CmiWallTimer();
    midCM->disposeForces(seq);
    iotime = CmiWallTimer()-iotime+prevT;

#if OUTPUT_SINGLE_FILE
    if(myOutputRank==getMyOutputGroupHighestRank()) {

        CProxy_CollectionMaster cm(mainMaster);
        cm.startNextRoundOutputForce(iotime);

        CProxy_ParallelIOMgr io(thisgroup);
        io[outputProcArray[myOutputRank+1]].disposeForces(seq, iotime);

        CProxy_CollectionMaster cm(mainMaster);
        cm.startNextRoundOutputForce(iotime);

#ifdef MEM_OPT_VERSION
    coorInstance = midCM->getReadyPositions(seq);

    coorInstance->lattice = lat;
    int fromAtomID = coorInstance->fromAtomID;
    int toAtomID = coorInstance->toAtomID;

    int dsize = data.size();
    int numMyAtoms = toAtomID-fromAtomID+1;
    tmpCoorCon = new Vector[numMyAtoms];

    for(int i=0; i<numMyAtoms; i++){
        tmpCoorCon[i] = 0.0;
        int cid = clusterID[i];

                    one.dsum = fdata[i];

                remoteCoors.add(one);

                    ret->dsum += fdata[i];

                    ret->dsum += data[i];

                tmpCoorCon[cid-fromAtomID] += fdata[i];

                tmpCoorCon[cid-fromAtomID] += data[i];

    CmiAssert(numRemoteClusters == remoteCoors.size());

    CProxy_ParallelIOMgr pIO(thisgroup);

    for(iter=iter.begin(); iter!=iter.end(); iter++){

        msg->dsum = iter->dsum;
        int dstRank = atomRankOnOutput(iter->clusterId);
        pIO[outputProcArray[dstRank]].recvClusterCoor(msg);

    recvClusterCoor(NULL);

    if(msg!=NULL) ccmBuf.add(msg);

    if(++numReqRecved == (numRemoteReqs+1)){

        integrateClusterCoor();

void ParallelIOMgr::integrateClusterCoor(){
#ifdef MEM_OPT_VERSION
    int fromIdx = coorInstance->fromAtomID;
    int toIdx = coorInstance->toAtomID;
    for(int i=0; i<ccmBuf.size(); i++){

        tmpCoorCon[lidx] += msg->dsum;

    CProxy_ParallelIOMgr pIO(thisgroup);
    for(int i=0; i<ccmBuf.size(); i++){

        if(simParameters->wrapAll || isWater[lidx]) {
            Lattice *lat = &(coorInstance->lattice);
            Vector coni = tmpCoorCon[lidx]/clusterSize[lidx];
            msg->dsum = (simParameters->wrapNearest ?

        pIO[outputProcArray[msg->srcRank]].recvFinalClusterCoor(msg);

    if(numRemoteClusters!=0){
        recvFinalClusterCoor(NULL);

        int numMyAtoms = toIdx-fromIdx+1;

        for(int i=0; i<numMyAtoms; i++){
            if(!simParameters->wrapAll && !isWater[i]) continue;
            int lidx = clusterID[i]-fromIdx;

                Lattice *lat = &(coorInstance->lattice);
                Vector coni = tmpCoorCon[lidx]/clusterSize[lidx];
                tmpCoorCon[lidx] = (simParameters->wrapNearest ?

            if(data.size()) data[i] += tmpCoorCon[lidx];

            if(fdata.size()) fdata[i] = fdata[i] + tmpCoorCon[lidx];

        delete [] tmpCoorCon;

        CProxy_CollectionMaster cm(mainMaster);
        cm.wrapCoorFinished();

#ifdef MEM_OPT_VERSION
    if(++numCSMAck == (numRemoteClusters+1)){

        int fromIdx = coorInstance->fromAtomID;
        int toIdx = coorInstance->toAtomID;
        int numMyAtoms = toIdx-fromIdx+1;

        for(int i=0; i<numMyAtoms; i++){
            if(!simParameters->wrapAll && !isWater[i]) continue;
            int cid = clusterID[i];
            int lidx = cid-fromIdx;

                if(data.size()) data[i] += fone->dsum;
                if(fdata.size()) fdata[i] = fdata[i] + fone->dsum;

                    Lattice *lat = &(coorInstance->lattice);
                    Vector coni = tmpCoorCon[lidx]/clusterSize[lidx];
                    tmpCoorCon[lidx] = (simParameters->wrapNearest ?

                if(data.size()) data[i] += tmpCoorCon[lidx];
                if(fdata.size()) fdata[i] = fdata[i] + tmpCoorCon[lidx];

        delete [] tmpCoorCon;

        CProxy_CollectionMaster cm(mainMaster);
        cm.wrapCoorFinished();

        remoteCoors.clear();

#include "ParallelIOMgr.def.h"