Skip to content
Snippets Groups Projects
Commit 712f30c2 authored by Anna Wellmann's avatar Anna Wellmann
Browse files

Remove unused gatherNUPS function and rename sumNups

parent 943ebdb1
No related branches found
No related tags found
1 merge request!233Add communicator interface
...@@ -27,8 +27,7 @@ public: ...@@ -27,8 +27,7 @@ public:
virtual void resetRequest() = 0; virtual void resetRequest() = 0;
////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////
virtual int mapCudaDevice(const int &rank, const int &size, const std::vector<unsigned int> &devices, const int &maxdev) = 0; virtual int mapCudaDevice(const int &rank, const int &size, const std::vector<unsigned int> &devices, const int &maxdev) = 0;
virtual std::vector<double> gatherNUPS(double processNups) = 0; virtual double reduceSum(double quantityPerProcess) = 0;
virtual double sumNups(double processNups) = 0;
////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////
virtual void receive_send(uint *buffer_receive, int size_buffer_recv, int neighbor_rank_recv, uint *buffer_send, virtual void receive_send(uint *buffer_receive, int size_buffer_recv, int neighbor_rank_recv, uint *buffer_send,
int size_buffer_send, int neighbor_rank_send) const override = 0; int size_buffer_send, int neighbor_rank_send) const override = 0;
......
...@@ -216,21 +216,9 @@ int MpiCommunicator::mapCudaDevice(const int &rank, const int &size, const std:: ...@@ -216,21 +216,9 @@ int MpiCommunicator::mapCudaDevice(const int &rank, const int &size, const std::
return device; return device;
} }
std::vector<double> MpiCommunicator::gatherNUPS(double processNups) double MpiCommunicator::reduceSum(double quantityPerProcess)
{ {
double *buffer_send = &processNups; double *buffer_send = &quantityPerProcess;
double *buffer_recv = (double *)malloc(sizeof(double) * this->numprocs);
MPI_Gather(buffer_send, 1, MPI_DOUBLE, buffer_recv, 1, MPI_DOUBLE, 0, commGPU);
if (this->PID == 0)
return std::vector<double>(buffer_recv, buffer_recv + this->numprocs);
return std::vector<double>();
}
double MpiCommunicator::sumNups(double processNups)
{
double *buffer_send = &processNups;
double *buffer_recv = (double *)malloc(sizeof(double)); double *buffer_recv = (double *)malloc(sizeof(double));
MPI_Reduce(buffer_send, buffer_recv, 1, MPI_DOUBLE, MPI_SUM, 0, commGPU); MPI_Reduce(buffer_send, buffer_recv, 1, MPI_DOUBLE, MPI_SUM, 0, commGPU);
......
...@@ -55,8 +55,7 @@ public: ...@@ -55,8 +55,7 @@ public:
void exchngDataGeo(int *sbuf_t, int *rbuf_t, int *sbuf_b, int *rbuf_b, int count); void exchngDataGeo(int *sbuf_t, int *rbuf_t, int *sbuf_b, int *rbuf_b, int count);
MPI_Comm getMpiCommunicator(); MPI_Comm getMpiCommunicator();
int mapCudaDevice(const int &rank, const int &size, const std::vector<unsigned int> &devices, const int &maxdev) override; int mapCudaDevice(const int &rank, const int &size, const std::vector<unsigned int> &devices, const int &maxdev) override;
std::vector<double> gatherNUPS(double processNups) override; double reduceSum(double quantityPerProcess) override;
double sumNups(double processNups) override;
////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////
void receive_send(uint *buffer_receive, int size_buffer_recv, int neighbor_rank_recv, uint *buffer_send, void receive_send(uint *buffer_receive, int size_buffer_recv, int neighbor_rank_recv, uint *buffer_send,
int size_buffer_send, int neighbor_rank_send) const override; int size_buffer_send, int neighbor_rank_send) const override;
......
...@@ -52,7 +52,7 @@ void Timer::outputPerformance(uint t, Parameter* para, vf::gpu::Communicator& co ...@@ -52,7 +52,7 @@ void Timer::outputPerformance(uint t, Parameter* para, vf::gpu::Communicator& co
// When using multiple GPUs, sum the nups of all processes // When using multiple GPUs, sum the nups of all processes
if (communicator.getNumberOfProcess() > 1) { if (communicator.getNumberOfProcess() > 1) {
double nupsSum = communicator.sumNups(fnups); double nupsSum = communicator.reduceSum(fnups);
if (communicator.getPID() == 0) if (communicator.getPID() == 0)
VF_LOG_INFO("Sum of all {} processes: Nups in Mio: {:.1f}", communicator.getNumberOfProcess(), nupsSum); VF_LOG_INFO("Sum of all {} processes: Nups in Mio: {:.1f}", communicator.getNumberOfProcess(), nupsSum);
} }
......
...@@ -276,11 +276,7 @@ public: ...@@ -276,11 +276,7 @@ public:
{ {
return 0; return 0;
}; };
std::vector<double> gatherNUPS(double processNups) override double reduceSum(double quantityPerProcess) override
{
return {};
};
double sumNups(double processNups) override
{ {
return 0; return 0;
}; };
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment