Commit 461fe2b8 authored by Anna Wellmann

Remove unused communication

parent 1e29ce7a
Merge request !104: Add Communication Hiding to GPU version
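
The diff below deletes three identical blocks of commented-out code, one each from exchangeCollDataXGPU27, exchangeCollDataYGPU27, and exchangeCollDataZGPU27: a dead non-blocking MPI send loop and its matching wait. For reference, here is a minimal standalone sketch of that send-then-wait pattern in plain MPI, assuming comm->nbSendDataGPU and comm->waitallGPU wrap MPI_Isend and MPI_Waitall (the wrapper internals are not shown in this commit, so this is an illustration, not the project's actual implementation):

#include <cstddef>
#include <mpi.h>
#include <vector>

// Start one non-blocking send per process neighbor, then block until all
// sends have completed and the send buffers may safely be reused.
void sendToAllNeighbors(const std::vector<double*>& sendBuffers,
                        const std::vector<int>& numberOfFs,
                        const std::vector<int>& neighborRanks)
{
    std::vector<MPI_Request> requests(sendBuffers.size());
    for (std::size_t i = 0; i < sendBuffers.size(); ++i)
        MPI_Isend(sendBuffers[i], numberOfFs[i], MPI_DOUBLE, neighborRanks[i],
                  0, MPI_COMM_WORLD, &requests[i]);
    if (!requests.empty())
        MPI_Waitall(static_cast<int>(requests.size()), requests.data(),
                    MPI_STATUSES_IGNORE);
}

The code was already commented out in all three functions, so removing it does not change behavior.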
@@ -43,20 +43,6 @@ void exchangeCollDataXGPU27(Parameter *para, vf::gpu::Communicator *comm, CudaMe
                               para->getParH(level)->recvProcessNeighborX[i].rankNeighbor);
     }
     /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-    ////start non blocking MPI send
-    //for (unsigned int i = 0; i < (unsigned int)(para->getNumberOfProcessNeighborsX(level, "send")); i++)
-    //{
-    //    comm->nbSendDataGPU(para->getParH(level)->sendProcessNeighborX[i].f[0],
-    //                        para->getParH(level)->sendProcessNeighborX[i].numberOfFs,
-    //                        para->getParH(level)->sendProcessNeighborX[i].rankNeighbor);
-    //}
-    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-    ////Waitall
-    //if (0 < (unsigned int)(para->getNumberOfProcessNeighborsX(level, "send")))
-    //{
-    //    comm->waitallGPU();
-    //}
-    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
     // wait for memcopy device to host to finish before sending data
     if (para->getUseStreams())
         cudaStreamSynchronize(stream);
@@ -152,20 +138,6 @@ void exchangeCollDataYGPU27(Parameter *para, vf::gpu::Communicator *comm, CudaMe
                               para->getParH(level)->recvProcessNeighborY[i].rankNeighbor);
     }
     /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-    ////start non blocking MPI send
-    //for (unsigned int i = 0; i < (unsigned int)(para->getNumberOfProcessNeighborsY(level, "send")); i++)
-    //{
-    //    comm->nbSendDataGPU(para->getParH(level)->sendProcessNeighborY[i].f[0],
-    //                        para->getParH(level)->sendProcessNeighborY[i].numberOfFs,
-    //                        para->getParH(level)->sendProcessNeighborY[i].rankNeighbor);
-    //}
-    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-    ////Waitall
-    //if (0 < (unsigned int)(para->getNumberOfProcessNeighborsY(level, "send")))
-    //{
-    //    comm->waitallGPU();
-    //}
-    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
     // wait for memcopy device to host to finish before sending data
     if (para->getUseStreams())
         cudaStreamSynchronize(stream);
@@ -279,20 +251,6 @@ void exchangeCollDataZGPU27(Parameter *para, vf::gpu::Communicator *comm, CudaMe
                               para->getParH(level)->recvProcessNeighborZ[i].rankNeighbor);
     }
     /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-    ////start non blocking MPI send
-    //for (unsigned int i = 0; i < (unsigned int)(para->getNumberOfProcessNeighborsZ(level, "send")); i++)
-    //{
-    //    comm->nbSendDataGPU(para->getParH(level)->sendProcessNeighborZ[i].f[0],
-    //                        para->getParH(level)->sendProcessNeighborZ[i].numberOfFs,
-    //                        para->getParH(level)->sendProcessNeighborZ[i].rankNeighbor);
-    //}
-    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-    ////Waitall
-    //if (0 < (unsigned int)(para->getNumberOfProcessNeighborsZ(level, "send")))
-    //{
-    //    comm->waitallGPU();
-    //}
-    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
     // wait for memcopy device to host to finish before sending data
     if (para->getUseStreams())
         cudaStreamSynchronize(stream);
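
What remains in each function after the removal is the receive loop followed by a stream synchronization: when CUDA streams are in use, the asynchronous device-to-host copy of the distributions must finish before the host buffer is handed to MPI. A hedged sketch of that ordering, with hypothetical buffer and rank names (cudaMemcpyAsync, cudaStreamSynchronize, and MPI_Isend are the standard API calls; everything else is illustrative):

#include <cuda_runtime.h>
#include <mpi.h>

// Finish the asynchronous device-to-host copy issued on `stream` before MPI
// reads the host buffer; otherwise MPI could send stale or partially copied data.
void copyThenSend(double* hostBuffer, const double* deviceBuffer, int count,
                  int neighborRank, cudaStream_t stream, MPI_Request* request)
{
    cudaMemcpyAsync(hostBuffer, deviceBuffer, count * sizeof(double),
                    cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream); // corresponds to the synchronization kept above
    MPI_Isend(hostBuffer, count, MPI_DOUBLE, neighborRank, 0, MPI_COMM_WORLD,
              request);
}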