From 35ee3826a14b503f88e2d1549615e349180982fb Mon Sep 17 00:00:00 2001
From: Anna Wellmann <a.wellmann@tu-bs.de>
Date: Mon, 25 Apr 2022 09:20:59 +0200
Subject: [PATCH] Rename variables in copyEdgeNodes

---
 .../Communication/ExchangeData27.cpp          | 22 +++++++++----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/src/gpu/VirtualFluids_GPU/Communication/ExchangeData27.cpp b/src/gpu/VirtualFluids_GPU/Communication/ExchangeData27.cpp
index 2d2573417..d792c4b1e 100644
--- a/src/gpu/VirtualFluids_GPU/Communication/ExchangeData27.cpp
+++ b/src/gpu/VirtualFluids_GPU/Communication/ExchangeData27.cpp
@@ -306,25 +306,25 @@ void scatterNodesFromRecvBufferZGPU27AfterFtoC(Parameter *para, int level, int s
 void copyEdgeNodes(std::vector<LBMSimulationParameter::EdgeNodePositions> &edgeNodes, std::vector<ProcessNeighbor27> &recvProcessNeighborHostAllNodes, std::vector<ProcessNeighbor27> &sendProcessNeighborHostAllNodes,  
                    std::vector<ProcessNeighbor27> &sendProcessNeighborHost)
 {
-        uint indexInSubdomainX = 0;
-        uint indexInSubdomainZ = 0;
-        uint numNodesInBufferX = 0;
-        uint numNodesInBufferZ = 0;
+        uint indexInSubdomainRecv = 0;
+        uint indexInSubdomainSend = 0;
+        uint numNodesInBufferRecv = 0;
+        uint numNodesInBufferSend = 0;
 #pragma omp parallel for
         for (uint i = 0; i < edgeNodes.size(); i++) {
-            indexInSubdomainX = edgeNodes[i].indexOfProcessNeighborRecv;
-            indexInSubdomainZ = edgeNodes[i].indexOfProcessNeighborSend;
-            numNodesInBufferX = recvProcessNeighborHostAllNodes[indexInSubdomainX].numberOfNodes;
-            numNodesInBufferZ = sendProcessNeighborHostAllNodes[indexInSubdomainZ].numberOfNodes;
+            indexInSubdomainRecv = edgeNodes[i].indexOfProcessNeighborRecv;
+            indexInSubdomainSend = edgeNodes[i].indexOfProcessNeighborSend;
+            numNodesInBufferRecv = recvProcessNeighborHostAllNodes[indexInSubdomainRecv].numberOfNodes;
+            numNodesInBufferSend = sendProcessNeighborHostAllNodes[indexInSubdomainSend].numberOfNodes;
 
-            if(edgeNodes[i].indexInSendBuffer >= sendProcessNeighborHost[indexInSubdomainZ].numberOfNodes){
+            if(edgeNodes[i].indexInSendBuffer >= sendProcessNeighborHost[indexInSubdomainSend].numberOfNodes){
                 // for reduced communication after fine to coarse: only copy send nodes which are not part of the reduced comm
                 continue;
             }
 
             for (uint direction = 0; direction <= dirEND; direction++) {
-                (sendProcessNeighborHostAllNodes[indexInSubdomainZ].f[0] + (direction * numNodesInBufferZ))[edgeNodes[i].indexInSendBuffer] =
-                    (recvProcessNeighborHostAllNodes[indexInSubdomainX].f[0] + (direction * numNodesInBufferX))[edgeNodes[i].indexInRecvBuffer];
+                (sendProcessNeighborHostAllNodes[indexInSubdomainSend].f[0] + (direction * numNodesInBufferSend))[edgeNodes[i].indexInSendBuffer] =
+                    (recvProcessNeighborHostAllNodes[indexInSubdomainRecv].f[0] + (direction * numNodesInBufferRecv))[edgeNodes[i].indexInRecvBuffer];
             }
         }
 }
-- 
GitLab