From 7e029eaf9f8e48c3c51e3bbf7c76df967ba32e16 Mon Sep 17 00:00:00 2001
From: Anna Wellmann <a.wellmann@tu-bs.de>
Date: Wed, 27 Apr 2022 08:36:47 +0200
Subject: [PATCH] Add conditional to copyEdgeNodes for reduced comm

---
 .../Communication/ExchangeData27.cpp          | 21 ++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/src/gpu/VirtualFluids_GPU/Communication/ExchangeData27.cpp b/src/gpu/VirtualFluids_GPU/Communication/ExchangeData27.cpp
index 69ac95e25..f6d20161b 100644
--- a/src/gpu/VirtualFluids_GPU/Communication/ExchangeData27.cpp
+++ b/src/gpu/VirtualFluids_GPU/Communication/ExchangeData27.cpp
@@ -250,7 +250,12 @@ void exchangeCollDataYGPU27(Parameter *para, vf::gpu::Communicator *comm, CudaMe
     ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
     // edge nodes: copy received node values from x
     if (para->getUseStreams() && para->getNumberOfProcessNeighborsX(level, "recv") > 0) {
-        copyEdgeNodes(para->getParH(level)->edgeNodesXtoY, para->getParH(level)->recvProcessNeighborX, *sendProcessNeighborHost);
+        if (para->getParH(level)->sendProcessNeighborY[0].numberOfNodes == (*sendProcessNeighborHost)[0].numberOfNodes) {
+            // communication of all nodes (as opposed to the reduced communication after fine to coarse)
+            copyEdgeNodes(para->getParH(level)->edgeNodesXtoY, para->getParH(level)->recvProcessNeighborX, *sendProcessNeighborHost);
+        } else {
+            copyEdgeNodes(para->getParH(level)->edgeNodesXtoY, para->getParH(level)->recvProcessNeighborsAfterFtoCX, *sendProcessNeighborHost);
+        }
     }
     ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
     startBlockingMpiSend((unsigned int)(*sendProcessNeighborHost).size(), comm, sendProcessNeighborHost);
@@ -338,12 +343,22 @@ void exchangeCollDataZGPU27(Parameter *para, vf::gpu::Communicator *comm, CudaMe
     ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
     // edge nodes: copy received node values from x
     if (para->getUseStreams() && para->getNumberOfProcessNeighborsX(level, "recv") > 0) {
-        copyEdgeNodes(para->getParH(level)->edgeNodesXtoZ, para->getParH(level)->recvProcessNeighborX, *sendProcessNeighborHost);
+        if (para->getParH(level)->sendProcessNeighborZ[0].numberOfNodes == (*sendProcessNeighborHost)[0].numberOfNodes) {
+            // communication of all nodes (as opposed to the reduced communication after fine to coarse)
+            copyEdgeNodes(para->getParH(level)->edgeNodesXtoZ, para->getParH(level)->recvProcessNeighborX, *sendProcessNeighborHost);
+        } else {
+            copyEdgeNodes(para->getParH(level)->edgeNodesXtoZ, para->getParH(level)->recvProcessNeighborsAfterFtoCX, *sendProcessNeighborHost);
+        }
     }
     ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
     // edge nodes: copy received node values from y
     if (para->getUseStreams() && para->getNumberOfProcessNeighborsY(level, "recv") > 0) {
-        copyEdgeNodes(para->getParH(level)->edgeNodesYtoZ, para->getParH(level)->recvProcessNeighborY, *sendProcessNeighborHost);
+        if (para->getParH(level)->sendProcessNeighborZ[0].numberOfNodes == (*sendProcessNeighborHost)[0].numberOfNodes) {
+            // communication of all nodes (as opposed to the reduced communication after fine to coarse)
+            copyEdgeNodes(para->getParH(level)->edgeNodesYtoZ, para->getParH(level)->recvProcessNeighborY, *sendProcessNeighborHost);
+        } else {
+            copyEdgeNodes(para->getParH(level)->edgeNodesYtoZ, para->getParH(level)->recvProcessNeighborsAfterFtoCY, *sendProcessNeighborHost);
+        }
     }
     /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
     startBlockingMpiSend((unsigned int)(*sendProcessNeighborHost).size(), comm, sendProcessNeighborHost);
--
GitLab
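Editorial note: the patch repeats the same size comparison for every direction pair (X-to-Y, X-to-Z, Y-to-Z). The sketch below is a minimal, hypothetical illustration (not part of the commit) of how the choice between the full receive neighbors and the reduced "after fine-to-coarse" receive neighbors could be pulled into one helper. The Neighbor struct and selectRecvNeighbors function are simplified stand-ins and do not exist in ExchangeData27.cpp; only the numberOfNodes comparison mirrors the check used in the patch.

    #include <iostream>
    #include <vector>

    // Simplified stand-in for a per-neighbor communication buffer
    // (only the field used by the check is modeled here).
    struct Neighbor {
        int numberOfNodes;
    };

    // Returns the receive-neighbor set that matches the current send buffers:
    // if the current send buffer holds all nodes, the full receive set is used;
    // otherwise the reduced set prepared for the communication after
    // fine-to-coarse interpolation is used.
    const std::vector<Neighbor> &selectRecvNeighbors(const std::vector<Neighbor> &sendAll,
                                                     const std::vector<Neighbor> &sendCurrent,
                                                     const std::vector<Neighbor> &recvAll,
                                                     const std::vector<Neighbor> &recvAfterFtoC)
    {
        // Same criterion as in the patch: compare the node count of the first
        // process neighbor of the full send set with the current send set.
        const bool fullCommunication =
            sendAll[0].numberOfNodes == sendCurrent[0].numberOfNodes;
        return fullCommunication ? recvAll : recvAfterFtoC;
    }

    int main()
    {
        std::vector<Neighbor> sendAll{ {100} }, sendReduced{ {40} };
        std::vector<Neighbor> recvAll{ {100} }, recvAfterFtoC{ {40} };

        // Reduced communication: the current send buffer is smaller than the
        // full one, so the reduced receive set is selected.
        const auto &chosen = selectRecvNeighbors(sendAll, sendReduced, recvAll, recvAfterFtoC);
        std::cout << "selected recv set holds " << chosen[0].numberOfNodes << " nodes\n";
        return 0;
    }

With such a helper, each copyEdgeNodes call site would pass the selected receive set directly instead of repeating the if/else block; this is only a refactoring suggestion under the stated assumptions, not the approach taken in the commit.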