From 461fe2b87e40ec2508ca1217c9c5f0963f58487c Mon Sep 17 00:00:00 2001
From: Anna Wellmann <a.wellmann@tu-braunschweig.de>
Date: Wed, 6 Oct 2021 16:29:24 +0200
Subject: [PATCH] Remove unused communication

---
 .../Communication/ExchangeData27.cpp | 42 ------------------
 1 file changed, 42 deletions(-)

diff --git a/src/gpu/VirtualFluids_GPU/Communication/ExchangeData27.cpp b/src/gpu/VirtualFluids_GPU/Communication/ExchangeData27.cpp
index 5c4ee37b4..0bf106248 100644
--- a/src/gpu/VirtualFluids_GPU/Communication/ExchangeData27.cpp
+++ b/src/gpu/VirtualFluids_GPU/Communication/ExchangeData27.cpp
@@ -43,20 +43,6 @@ void exchangeCollDataXGPU27(Parameter *para, vf::gpu::Communicator *comm, CudaMe
                             para->getParH(level)->recvProcessNeighborX[i].rankNeighbor);
     }
     /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-    ////start non blocking MPI send
-    //for (unsigned int i = 0; i < (unsigned int)(para->getNumberOfProcessNeighborsX(level, "send")); i++)
-    //{
-    //    comm->nbSendDataGPU(para->getParH(level)->sendProcessNeighborX[i].f[0],
-    //                        para->getParH(level)->sendProcessNeighborX[i].numberOfFs,
-    //                        para->getParH(level)->sendProcessNeighborX[i].rankNeighbor);
-    //}
-    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-    ////Waitall
-    //if (0 < (unsigned int)(para->getNumberOfProcessNeighborsX(level, "send")))
-    //{
-    //    comm->waitallGPU();
-    //}
-    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
     // wait for memcopy device to host to finish before sending data
     if (para->getUseStreams()) cudaStreamSynchronize(stream);
@@ -152,20 +138,6 @@ void exchangeCollDataYGPU27(Parameter *para, vf::gpu::Communicator *comm, CudaMe
                             para->getParH(level)->recvProcessNeighborY[i].rankNeighbor);
     }
     /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-    ////start non blocking MPI send
-    //for (unsigned int i = 0; i < (unsigned int)(para->getNumberOfProcessNeighborsY(level, "send")); i++)
-    //{
-    //    comm->nbSendDataGPU(para->getParH(level)->sendProcessNeighborY[i].f[0],
-    //                        para->getParH(level)->sendProcessNeighborY[i].numberOfFs,
-    //                        para->getParH(level)->sendProcessNeighborY[i].rankNeighbor);
-    //}
-    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-    ////Waitall
-    //if (0 < (unsigned int)(para->getNumberOfProcessNeighborsY(level, "send")))
-    //{
-    //    comm->waitallGPU();
-    //}
-    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
     // wait for memcopy device to host to finish before sending data
     if (para->getUseStreams()) cudaStreamSynchronize(stream);
@@ -279,20 +251,6 @@ void exchangeCollDataZGPU27(Parameter *para, vf::gpu::Communicator *comm, CudaMe
                             para->getParH(level)->recvProcessNeighborZ[i].rankNeighbor);
     }
     /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-    ////start non blocking MPI send
-    //for (unsigned int i = 0; i < (unsigned int)(para->getNumberOfProcessNeighborsZ(level, "send")); i++)
-    //{
-    //    comm->nbSendDataGPU(para->getParH(level)->sendProcessNeighborZ[i].f[0],
-    //                        para->getParH(level)->sendProcessNeighborZ[i].numberOfFs,
-    //                        para->getParH(level)->sendProcessNeighborZ[i].rankNeighbor);
-    //}
-    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-    ////Waitall
-    //if (0 < (unsigned int)(para->getNumberOfProcessNeighborsZ(level, "send")))
-    //{
-    //    comm->waitallGPU();
-    //}
-    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
     // wait for memcopy device to host to finish before sending data
     if (para->getUseStreams()) cudaStreamSynchronize(stream);
--
GitLab
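
For context: the deleted lines were an already commented-out variant that issued the non-blocking MPI sends (nbSendDataGPU) and the matching Waitall (waitallGPU) before the device-to-host memcopy was synchronized, whereas the code kept by this patch synchronizes the CUDA stream first ("wait for memcopy device to host to finish before sending data"). A minimal sketch of that ordering in plain MPI + CUDA follows. It is an illustration only: it assumes the vf::gpu::Communicator wrappers map onto MPI_Irecv/MPI_Isend/MPI_Waitall and that the exchanged distributions are doubles; the function and parameter names are hypothetical, modeled on the fields visible in the diff (f[0], numberOfFs, rankNeighbor).

#include <mpi.h>
#include <cuda_runtime.h>

// Hypothetical sketch, not VirtualFluids code: one halo exchange with a
// single neighbor, assuming the Communicator wrappers are thin layers over
// MPI_Irecv/MPI_Isend/MPI_Waitall and that the f[0] buffers hold doubles.
void exchangeSketch(double *recvBuf, double *sendBuf, int numberOfFs,
                    int rankNeighbor, cudaStream_t stream)
{
    MPI_Request req[2];

    // 1) Post the non-blocking receive first (the receive call kept by the
    //    patch; only its closing argument over recvProcessNeighborX[i] is
    //    visible in the hunk context).
    MPI_Irecv(recvBuf, numberOfFs, MPI_DOUBLE, rankNeighbor, 0,
              MPI_COMM_WORLD, &req[0]);

    // 2) Wait until the device-to-host memcopy on this stream has finished,
    //    so the send buffer is valid (the surviving cudaStreamSynchronize).
    cudaStreamSynchronize(stream);

    // 3) Only now start the non-blocking send. The deleted, commented-out
    //    variant issued its sends before step 2.
    MPI_Isend(sendBuf, numberOfFs, MPI_DOUBLE, rankNeighbor, 0,
              MPI_COMM_WORLD, &req[1]);

    // 4) Wait for both transfers to complete (the role of the deleted
    //    waitallGPU; the live code presumably waits elsewhere).
    MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
}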