diff --git a/.gitignore b/.gitignore index 4879d82bb2c5ce51f43be6ea9fb183cd6e28097c..df134f3d8f4289b954a8b3f7bfbc138b6be6f80e 100644 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,5 @@ output/ # MacOS .DS_Store +#Cluster +run/ diff --git a/CMake/cmake_config_files/CSE01.config.cmake b/CMake/cmake_config_files/CSE01.config.cmake index baa0f94981c2e9f9ac05b62468311f4bead32ff3..cad3f60ce31edac1069d1edce3fdd43b49a72b6e 100644 --- a/CMake/cmake_config_files/CSE01.config.cmake +++ b/CMake/cmake_config_files/CSE01.config.cmake @@ -2,22 +2,22 @@ ################################################################################# # BOOST ################################################################################# -SET(BOOST_VERSION "1.60.0") -SET(BOOST_ROOT "d:/boost/boost_1_60_0") -SET(BOOST_DIR ${BOOST_ROOT}) -SET(BOOST_LIBRARYDIR ${BOOST_ROOT}"/stageMSVC64/lib") +#SET(BOOST_VERSION "1.60.0") +#SET(BOOST_ROOT "d:/boost/boost_1_60_0") +#SET(BOOST_DIR ${BOOST_ROOT}) +#SET(BOOST_LIBRARYDIR ${BOOST_ROOT}"/stageMSVC64/lib") ################################################################################# ################################################################################# # METIS ################################################################################# -IF(${USE_METIS}) - SET(METIS_INCLUDEDIR "d:/metis-5.1.0/include") - SET(METIS_DEBUG_LIBRARY "d:/metis-5.1.0/build/libmetis/Debug/metis.lib") - SET(METIS_RELEASE_LIBRARY "d:/metis-5.1.0/build/libmetis/Release/metis.lib") -ENDIF() +#IF(${USE_METIS}) +# SET(METIS_INCLUDEDIR "d:/metis-5.1.0/include") +# SET(METIS_DEBUG_LIBRARY "d:/metis-5.1.0/build/libmetis/Debug/metis.lib") +# SET(METIS_RELEASE_LIBRARY "d:/metis-5.1.0/build/libmetis/Release/metis.lib") +#ENDIF() ################################################################################# # VTK ################################################################################# -set(VTK_DIR "d:/tools/VTK/build/VTK-8.2.0") +#set(VTK_DIR "d:/tools/VTK/build/VTK-8.2.0") ################################################################################# \ No newline at end of file diff --git a/apps/cpu/Applications.cmake b/apps/cpu/Applications.cmake index d8dff8b8c5f776d1d27e9217d7b050ab4022d8a4..e8902e5ffbb3720365476afef50a4f3fbd0ddf76 100644 --- a/apps/cpu/Applications.cmake +++ b/apps/cpu/Applications.cmake @@ -1,8 +1,19 @@ +add_subdirectory(${APPS_ROOT_CPU}/PoiseuilleFlow) +add_subdirectory(${APPS_ROOT_CPU}/HerschelBulkleySphere) +add_subdirectory(${APPS_ROOT_CPU}/HerschelBulkleyModel) +add_subdirectory(${APPS_ROOT_CPU}/rheometer) +add_subdirectory(${APPS_ROOT_CPU}/CouetteFlow) +add_subdirectory(${APPS_ROOT_CPU}/Multiphase) +add_subdirectory(${APPS_ROOT_CPU}/ViskomatXL) +add_subdirectory(${APPS_ROOT_CPU}/sphere) +add_subdirectory(${APPS_ROOT_CPU}/FlowAroundCylinder) +add_subdirectory(${APPS_ROOT_CPU}/LaminarTubeFlow) +add_subdirectory(${APPS_ROOT_CPU}/MultiphaseDropletTest) + #add_subdirectory(tests) #add_subdirectory(Applications/gridRf) #add_subdirectory(Applications/greenvortex) # add_subdirectory(Applications/micropart) -add_subdirectory(${APPS_ROOT_CPU}/sphere) #add_subdirectory(Applications/vfscript) #add_subdirectory(Applications/reefer) #add_subdirectory(Applications/bananas) @@ -11,8 +22,6 @@ add_subdirectory(${APPS_ROOT_CPU}/sphere) #add_subdirectory(Applications/bananas2) # add_subdirectory(Applications/plate) # add_subdirectory(Applications/plate2) -add_subdirectory(${APPS_ROOT_CPU}/FlowAroundCylinder) -add_subdirectory(${APPS_ROOT_CPU}/LaminarTubeFlow) # 
add_subdirectory(Applications/LaminarTubeFlowConv) #add_subdirectory(Applications/cylinderSt) #add_subdirectory(Applications/mpichTest) @@ -54,7 +63,6 @@ add_subdirectory(${APPS_ROOT_CPU}/LaminarTubeFlow) #add_subdirectory(Applications/levels) #add_subdirectory(Applications/AcousticPulse) #add_subdirectory(Applications/screw) -#add_subdirectory(Applications/PoiseuilleFlow) #add_subdirectory(Applications/InterfaceTest) #add_subdirectory(Applications/teperm) #add_subdirectory(Applications/Thermoplast) @@ -64,10 +72,5 @@ add_subdirectory(${APPS_ROOT_CPU}/LaminarTubeFlow) #add_subdirectory(Applications/bChannelVA) #add_subdirectory(Applications/OrganPipe) #add_subdirectory(Applications/LidDrivenCavity) -add_subdirectory(${APPS_ROOT_CPU}/HerschelBulkleySphere) -add_subdirectory(${APPS_ROOT_CPU}/HerschelBulkleyModel) -add_subdirectory(${APPS_ROOT_CPU}/rheometer) -add_subdirectory(${APPS_ROOT_CPU}/CouetteFlow) -add_subdirectory(${APPS_ROOT_CPU}/Multiphase) -add_subdirectory(${APPS_ROOT_CPU}/ViskomatXL) + diff --git a/apps/cpu/LaminarTubeFlow/ltf.cfg b/apps/cpu/LaminarTubeFlow/ltf.cfg index 94919cc3463c6a60dfd334e1c0505d60e56446d8..8b8e33e4998835da80d2121925acc7d95c3ccd20 100644 --- a/apps/cpu/LaminarTubeFlow/ltf.cfg +++ b/apps/cpu/LaminarTubeFlow/ltf.cfg @@ -1,5 +1,5 @@ pathname = d:/temp/LaminarTubeFlow -numOfThreads = 4 +numOfThreads = 1 availMem = 10e9 #Grid @@ -22,5 +22,5 @@ restartStep = 100000 cpStart = 100000 cpStep = 100000 -outTime = 1 -endTime = 100 \ No newline at end of file +outTime = 1000 +endTime = 1000 \ No newline at end of file diff --git a/apps/cpu/LaminarTubeFlow/ltf.cpp b/apps/cpu/LaminarTubeFlow/ltf.cpp index 71340ab656b6fca7d5bcc534e69a2e25ca10fa9c..e523dd2de7416ea5189dbceab200725d89f15424 100644 --- a/apps/cpu/LaminarTubeFlow/ltf.cpp +++ b/apps/cpu/LaminarTubeFlow/ltf.cpp @@ -315,7 +315,7 @@ void run(string configname) auto timeDepBC = make_shared<TimeDependentBCCoProcessor>(TimeDependentBCCoProcessor(grid, timeBCSch)); timeDepBC->addInteractor(inflowInt); - //omp_set_num_threads(numOfThreads); + omp_set_num_threads(numOfThreads); numOfThreads = 1; SPtr<UbScheduler> stepGhostLayer(visSch); SPtr<Calculator> calculator(new BasicCalculator(grid, stepGhostLayer, int(endTime))); diff --git a/apps/cpu/Multiphase/Multiphase.cfg b/apps/cpu/Multiphase/Multiphase.cfg index d52694ac838c8ae029ebf85476ab6db68de11223..c294ea68ce96c751030380d52d16eb35d06f9faa 100644 --- a/apps/cpu/Multiphase/Multiphase.cfg +++ b/apps/cpu/Multiphase/Multiphase.cfg @@ -1,11 +1,11 @@ -pathname = d:/temp/MultiphaseNew4 +pathname = d:/temp/MultiphaseNew5 #pathGeo = d:/Projects/VirtualFluids-Multiphase/source/Applications/Multiphase/backup pathGeo = d:/Projects/VirtualFluidsCombined/apps/cpu/Multiphase/backup #geoFile = JetBreakupR.ASCII.stl #geoFile = inlet1.stl geoFile = tubeTransformed.stl -numOfThreads = 1 +numOfThreads = 4 availMem = 10e9 #Grid @@ -20,21 +20,21 @@ availMem = 10e9 #boundingBox = -40 40 -1.0 -21.0 -40 40 #(Jet Breakup2) (Original without inlet length) #boundingBox = -40 40 1.0 11.0 -40 40 #(Jet Breakup2) (Original without inlet length) #boundingBox = -40e-3 40e-3 1.0e-3 11.0e-3 -403-3 40e-3 #(Jet Breakup2) (Original without inlet length) -blocknx = 20 20 20 +#blocknx = 20 20 20 -boundingBox = 6.0e-3 16.0e-3 -40e-3 40e-3 -40e-3 40e-3 +boundingBox = 6.0e-3 46.0e-3 -5e-3 5e-3 -5e-3 5e-3 blocknx = 20 20 20 -dx = 0.5e-3 +dx = 1.66666666667e-4 refineLevel = 0 #Simulation -uLB = 0.0005 #inlet velocity +uLB = 0.005 #inlet velocity #uF2 = 0.0001 Re = 10 nuL =1e-2# 1.0e-5 #!1e-2 nuG =1e-2# 1.16e-4 
#!1e-2 -densityRatio = 30 +densityRatio = 1000 sigma = 1e-5 #4.66e-3 #surface tension 1e-4 ./. 1e-5 interfaceThickness = 5 radius = 615.0 (Jet Breakup) @@ -55,5 +55,5 @@ restartStep = 100000 cpStart = 100000 cpStep = 100000 -outTime = 100 +outTime = 1 endTime = 10000 \ No newline at end of file diff --git a/apps/cpu/Multiphase/Multiphase.cpp b/apps/cpu/Multiphase/Multiphase.cpp index 10ff2b39618c64ef09edd902471387e7ebc90c70..deb2845f4278661bb970ea68b043e3cb435bffcc 100644 --- a/apps/cpu/Multiphase/Multiphase.cpp +++ b/apps/cpu/Multiphase/Multiphase.cpp @@ -111,10 +111,15 @@ void run(string configname) ////////////////////////////////////////////////////////////////////////// // restart SPtr<UbScheduler> rSch(new UbScheduler(cpStep, cpStart)); - SPtr<MPIIOMigrationBECoProcessor> rcp = make_shared<MPIIOMigrationBECoProcessor>(grid, rSch, pathname, comm); + //SPtr<MPIIORestartCoProcessor> rcp(new MPIIORestartCoProcessor(grid, rSch, pathname, comm)); + //SPtr<MPIIOMigrationCoProcessor> rcp(new MPIIOMigrationCoProcessor(grid, rSch, pathname, comm)); + SPtr<MPIIOMigrationBECoProcessor> rcp(new MPIIOMigrationBECoProcessor(grid, rSch, pathname, comm)); + rcp->setNu(nuLB); + rcp->setNuLG(nuL, nuG); + rcp->setDensityRatio(densityRatio); + rcp->setLBMKernel(kernel); rcp->setBCProcessor(bcProc); - rcp->setNu(nuLB); ////////////////////////////////////////////////////////////////////////// mu::Parser fctF1; @@ -140,7 +145,7 @@ void run(string configname) SPtr<D3Q27Interactor> cylInt; if (newStart) { - // if (newStart) { + // if (newStart) { // bounding box /*double g_minX1 = 0.0; @@ -163,7 +168,7 @@ void run(string configname) SPtr<GbObject3D> gridCube(new GbCuboid3D(g_minX1, g_minX2, g_minX3, g_maxX1, g_maxX2, g_maxX3)); if (myid == 0) GbSystem3D::writeGeoObject(gridCube.get(), pathname + "/geo/gridCube", - WbWriterVtkXmlBinary::getInstance()); + WbWriterVtkXmlBinary::getInstance()); if (myid == 0) UBLOG(logINFO, "Read geoFile:start"); SPtr<GbTriFaceMesh3D> cylinder = make_shared<GbTriFaceMesh3D>(); @@ -176,10 +181,10 @@ void run(string configname) new GbCuboid3D(g_minX1*0.5 - dx, g_minX2 - dx, g_minX3*0.5 - dx, g_maxX1*0.5 + dx, g_minX2, g_maxX3*0.5 + dx)); if (myid == 0) GbSystem3D::writeGeoObject(geoInflowF1.get(), pathname + "/geo/geoInflowF1", WbWriterVtkXmlASCII::getInstance()); - GbCylinder3DPtr cylinder1(new GbCylinder3D(g_minX1-dx, 0.0, 0.0, cylinder->getX1Maximum(), 0.0, 0.0, 3e-3)); + GbCylinder3DPtr cylinder1(new GbCylinder3D(g_minX1-dx, 0.0, 0.0, g_minX1+dx, 0.0, 0.0, 3e-3)); if (myid == 0) GbSystem3D::writeGeoObject(cylinder1.get(), pathname + "/geo/cylinder1", - WbWriterVtkXmlASCII::getInstance()); + WbWriterVtkXmlASCII::getInstance()); //GbCylinder3DPtr cylinder2( // new GbCylinder3D(0.0, g_minX2 - 2.0 * dx / 2.0, 0.0, 0.0, g_minX2 + 4.0 * dx, 0.0, 8.0+2.0*dx)); @@ -192,7 +197,7 @@ void run(string configname) GbCuboid3DPtr geoOutflow(new GbCuboid3D(g_minX1, g_maxX2, g_minX3, g_maxX1, g_maxX2 + dx, g_maxX3)); if (myid == 0) GbSystem3D::writeGeoObject(geoOutflow.get(), pathname + "/geo/geoOutflow", - WbWriterVtkXmlASCII::getInstance()); + WbWriterVtkXmlASCII::getInstance()); // double blockLength = blocknx[0] * dx; @@ -254,32 +259,32 @@ void run(string configname) SPtr<D3Q27Interactor> outflowInt(new D3Q27Interactor(geoOutflow, grid, denBCAdapter, Interactor3D::SOLID)); - // Create boundary conditions geometry - GbCuboid3DPtr wallXmin(new GbCuboid3D(g_minX1 - dx, g_minX2 - dx, g_minX3 - dx, g_minX1, g_maxX2 + dx, g_maxX3)); - GbSystem3D::writeGeoObject(wallXmin.get(), pathname + 
"/geo/wallXmin", WbWriterVtkXmlASCII::getInstance()); - GbCuboid3DPtr wallXmax(new GbCuboid3D(g_maxX1, g_minX2 - dx, g_minX3 - dx, g_maxX1 + dx, g_maxX2 + dx, g_maxX3)); - GbSystem3D::writeGeoObject(wallXmax.get(), pathname + "/geo/wallXmax", WbWriterVtkXmlASCII::getInstance()); - GbCuboid3DPtr wallZmin(new GbCuboid3D(g_minX1 - dx, g_minX2 - dx, g_minX3 - dx, g_maxX1 + dx, g_maxX2 + dx, g_minX3)); - GbSystem3D::writeGeoObject(wallZmin.get(), pathname + "/geo/wallZmin", WbWriterVtkXmlASCII::getInstance()); - GbCuboid3DPtr wallZmax(new GbCuboid3D(g_minX1 - dx, g_minX2 - dx, g_maxX3, g_maxX1 + dx, g_maxX2 + dx, g_maxX3 + dx)); - GbSystem3D::writeGeoObject(wallZmax.get(), pathname + "/geo/wallZmax", WbWriterVtkXmlASCII::getInstance()); - GbCuboid3DPtr wallYmin(new GbCuboid3D(g_minX1 - dx, g_minX2 - dx, g_minX3 - dx, g_maxX1 + dx, g_minX2, g_maxX3)); - GbSystem3D::writeGeoObject(wallYmin.get(), pathname + "/geo/wallYmin", WbWriterVtkXmlASCII::getInstance()); - GbCuboid3DPtr wallYmax(new GbCuboid3D(g_minX1 - dx, g_maxX2, g_minX3 - dx, g_maxX1 + dx, g_maxX2 + dx, g_maxX3)); - GbSystem3D::writeGeoObject(wallYmax.get(), pathname + "/geo/wallYmax", WbWriterVtkXmlASCII::getInstance()); - - // Add boundary conditions to grid generator - SPtr<D3Q27Interactor> wallXminInt(new D3Q27Interactor(wallXmin, grid, noSlipBCAdapter, Interactor3D::SOLID)); - SPtr<D3Q27Interactor> wallXmaxInt(new D3Q27Interactor(wallXmax, grid, noSlipBCAdapter, Interactor3D::SOLID)); - SPtr<D3Q27Interactor> wallZminInt(new D3Q27Interactor(wallZmin, grid, noSlipBCAdapter, Interactor3D::SOLID)); - SPtr<D3Q27Interactor> wallZmaxInt(new D3Q27Interactor(wallZmax, grid, noSlipBCAdapter, Interactor3D::SOLID)); - SPtr<D3Q27Interactor> wallYminInt(new D3Q27Interactor(wallYmin, grid, noSlipBCAdapter, Interactor3D::SOLID)); - SPtr<D3Q27Interactor> wallYmaxInt(new D3Q27Interactor(wallYmax, grid, noSlipBCAdapter, Interactor3D::SOLID)); - - - cylInt = SPtr<D3Q27Interactor>(new D3Q27Interactor(cylinder1, grid, velBCAdapterF1, Interactor3D::SOLID)); - cylInt->addBCAdapter(velBCAdapterF2); - //SPtr<D3Q27Interactor> cyl2Int(new D3Q27Interactor(cylinder2, grid, noSlipBCAdapter, Interactor3D::SOLID)); + // Create boundary conditions geometry + GbCuboid3DPtr wallXmin(new GbCuboid3D(g_minX1 - dx, g_minX2 - dx, g_minX3 - dx, g_minX1, g_maxX2 + dx, g_maxX3)); + GbSystem3D::writeGeoObject(wallXmin.get(), pathname + "/geo/wallXmin", WbWriterVtkXmlASCII::getInstance()); + GbCuboid3DPtr wallXmax(new GbCuboid3D(g_maxX1, g_minX2 - dx, g_minX3 - dx, g_maxX1 + dx, g_maxX2 + dx, g_maxX3)); + GbSystem3D::writeGeoObject(wallXmax.get(), pathname + "/geo/wallXmax", WbWriterVtkXmlASCII::getInstance()); + GbCuboid3DPtr wallZmin(new GbCuboid3D(g_minX1 - dx, g_minX2 - dx, g_minX3 - dx, g_maxX1 + dx, g_maxX2 + dx, g_minX3)); + GbSystem3D::writeGeoObject(wallZmin.get(), pathname + "/geo/wallZmin", WbWriterVtkXmlASCII::getInstance()); + GbCuboid3DPtr wallZmax(new GbCuboid3D(g_minX1 - dx, g_minX2 - dx, g_maxX3, g_maxX1 + dx, g_maxX2 + dx, g_maxX3 + dx)); + GbSystem3D::writeGeoObject(wallZmax.get(), pathname + "/geo/wallZmax", WbWriterVtkXmlASCII::getInstance()); + GbCuboid3DPtr wallYmin(new GbCuboid3D(g_minX1 - dx, g_minX2 - dx, g_minX3 - dx, g_maxX1 + dx, g_minX2, g_maxX3)); + GbSystem3D::writeGeoObject(wallYmin.get(), pathname + "/geo/wallYmin", WbWriterVtkXmlASCII::getInstance()); + GbCuboid3DPtr wallYmax(new GbCuboid3D(g_minX1 - dx, g_maxX2, g_minX3 - dx, g_maxX1 + dx, g_maxX2 + dx, g_maxX3)); + GbSystem3D::writeGeoObject(wallYmax.get(), pathname + "/geo/wallYmax", 
WbWriterVtkXmlASCII::getInstance()); + + // Add boundary conditions to grid generator + SPtr<D3Q27Interactor> wallXminInt(new D3Q27Interactor(wallXmin, grid, noSlipBCAdapter, Interactor3D::SOLID)); + SPtr<D3Q27Interactor> wallXmaxInt(new D3Q27Interactor(wallXmax, grid, noSlipBCAdapter, Interactor3D::SOLID)); + SPtr<D3Q27Interactor> wallZminInt(new D3Q27Interactor(wallZmin, grid, noSlipBCAdapter, Interactor3D::SOLID)); + SPtr<D3Q27Interactor> wallZmaxInt(new D3Q27Interactor(wallZmax, grid, noSlipBCAdapter, Interactor3D::SOLID)); + SPtr<D3Q27Interactor> wallYminInt(new D3Q27Interactor(wallYmin, grid, noSlipBCAdapter, Interactor3D::SOLID)); + SPtr<D3Q27Interactor> wallYmaxInt(new D3Q27Interactor(wallYmax, grid, noSlipBCAdapter, Interactor3D::SOLID)); + + + cylInt = SPtr<D3Q27Interactor>(new D3Q27Interactor(cylinder1, grid, velBCAdapterF1, Interactor3D::SOLID)); + cylInt->addBCAdapter(velBCAdapterF2); + //SPtr<D3Q27Interactor> cyl2Int(new D3Q27Interactor(cylinder2, grid, noSlipBCAdapter, Interactor3D::SOLID)); SPtr<Grid3DVisitor> metisVisitor( new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::BSW)); @@ -288,8 +293,8 @@ void run(string configname) intHelper.addInteractor(tubes); //intHelper.addInteractor(outflowInt); //intHelper.addInteractor(cyl2Int); - - + + intHelper.addInteractor(wallXminInt); intHelper.addInteractor(wallXmaxInt); intHelper.addInteractor(wallZminInt); @@ -297,7 +302,7 @@ void run(string configname) intHelper.addInteractor(wallYminInt); intHelper.addInteractor(wallYmaxInt); //intHelper.addInteractor(inflowF1Int); - + intHelper.selectBlocks(); @@ -331,7 +336,7 @@ void run(string configname) } MultiphaseSetKernelBlockVisitor kernelVisitor(kernel, nuL, nuG, densityRatio, beta, kappa, theta, availMem, - needMem); + needMem); grid->accept(kernelVisitor); @@ -382,16 +387,16 @@ void run(string configname) if (myid == 0) UBLOG(logINFO, "Restart - end"); } - + TwoDistributionsSetConnectorsBlockVisitor setConnsVisitor(comm); grid->accept(setConnsVisitor); - + //ThreeDistributionsSetConnectorsBlockVisitor setConnsVisitor(comm); //grid->accept(setConnsVisitor); SPtr<UbScheduler> visSch(new UbScheduler(outTime)); SPtr<WriteMultiphaseQuantitiesCoProcessor> pp(new WriteMultiphaseQuantitiesCoProcessor( - //SPtr<WriteMacroscopicQuantitiesCoProcessor> pp(new WriteMacroscopicQuantitiesCoProcessor( + //SPtr<WriteMacroscopicQuantitiesCoProcessor> pp(new WriteMacroscopicQuantitiesCoProcessor( grid, visSch, pathname, WbWriterVtkXmlBinary::getInstance(), conv, comm)); pp->process(0); @@ -414,7 +419,7 @@ void run(string configname) calculator->addCoProcessor(rcp); - + if (myid == 0) UBLOG(logINFO, "Simulation-start"); diff --git a/apps/cpu/MultiphaseDropletTest.zip b/apps/cpu/MultiphaseDropletTest.zip new file mode 100644 index 0000000000000000000000000000000000000000..5eb13a6c0bacfbf392deb00c6b388ba282c038e0 Binary files /dev/null and b/apps/cpu/MultiphaseDropletTest.zip differ diff --git a/apps/cpu/MultiphaseDropletTest/CMakeLists.txt b/apps/cpu/MultiphaseDropletTest/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..3f01ad73a0b28f7050667dde7b9408ac00b76e42 --- /dev/null +++ b/apps/cpu/MultiphaseDropletTest/CMakeLists.txt @@ -0,0 +1,3 @@ +PROJECT(MultiphaseDropletTest) + +vf_add_library(BUILDTYPE binary PRIVATE_LINK VirtualFluidsCore basics ${MPI_CXX_LIBRARIES} FILES droplet.cpp ) diff --git a/apps/cpu/MultiphaseDropletTest/DropletTest.cfg b/apps/cpu/MultiphaseDropletTest/DropletTest.cfg new file mode 100644 index 
0000000000000000000000000000000000000000..72c0144890c2fd8ba25fa0dfb7528fdbd1b889d8 --- /dev/null +++ b/apps/cpu/MultiphaseDropletTest/DropletTest.cfg @@ -0,0 +1,41 @@ +pathname = d:/temp/MultiphaseDropletTest + +numOfThreads = 4 +availMem = 10e9 + +#Grid + +boundingBox = 0 128 0 64 0 64 +blocknx = 8 8 8 + +dx = 1 +refineLevel = 0 + +#Simulation +uLB = 0.005 +Re = 10 +nuL =1e-2# 1.0e-5 #!1e-2 +nuG =1e-2# 1.16e-4 #!1e-2 +densityRatio = 1000 +sigma = 1e-5 #4.66e-3 #surface tension 1e-4 ./. 1e-5 +interfaceThickness = 5 +radius = 16 +contactAngle = 110.0 +#gravity = 0.0 +gravity = -5.04e-6 +phi_L = 0.0 +phi_H = 1.0 +Phase-field Relaxation = 0.6 +Mobility = 0.02 # 0.01 ./. 0.08, fine correction of Phase-field Relaxation parameter, to activate it need to change in kernel tauH to tauH1 + + +logToFile = false + +newStart = true +restartStep = 100000 + +cpStart = 100000 +cpStep = 100000 + +outTime = 1 +endTime = 10000 \ No newline at end of file diff --git a/apps/cpu/MultiphaseDropletTest/droplet.cpp b/apps/cpu/MultiphaseDropletTest/droplet.cpp new file mode 100644 index 0000000000000000000000000000000000000000..092d5a16a36b47c726f0f85463484f0c97fdecd0 --- /dev/null +++ b/apps/cpu/MultiphaseDropletTest/droplet.cpp @@ -0,0 +1,306 @@ +#include <iostream> +#include <string> +#include <memory> + +#include "VirtualFluids.h" + +using namespace std; + +void run(string configname) +{ + try { + ConfigurationFile config; + config.load(configname); + + string pathname = config.getValue<string>("pathname"); + int numOfThreads = config.getValue<int>("numOfThreads"); + vector<int> blocknx = config.getVector<int>("blocknx"); + vector<double> boundingBox = config.getVector<double>("boundingBox"); + double uLB = config.getValue<double>("uLB"); + double nuL = config.getValue<double>("nuL"); + double nuG = config.getValue<double>("nuG"); + double densityRatio = config.getValue<double>("densityRatio"); + double sigma = config.getValue<double>("sigma"); + int interfaceThickness = config.getValue<int>("interfaceThickness"); + double radius = config.getValue<double>("radius"); + double theta = config.getValue<double>("contactAngle"); + double gr = config.getValue<double>("gravity"); + double phiL = config.getValue<double>("phi_L"); + double phiH = config.getValue<double>("phi_H"); + double tauH = config.getValue<double>("Phase-field Relaxation"); + double mob = config.getValue<double>("Mobility"); + + double endTime = config.getValue<double>("endTime"); + double outTime = config.getValue<double>("outTime"); + double availMem = config.getValue<double>("availMem"); + int refineLevel = config.getValue<int>("refineLevel"); + double Re = config.getValue<double>("Re"); + double dx = config.getValue<double>("dx"); + bool logToFile = config.getValue<bool>("logToFile"); + //double restartStep = config.getValue<double>("restartStep"); + //double cpStart = config.getValue<double>("cpStart"); + //double cpStep = config.getValue<double>("cpStep"); + bool newStart = config.getValue<bool>("newStart"); + + double beta = 12 * sigma / interfaceThickness; + double kappa = 1.5 * interfaceThickness * sigma; + + SPtr<Communicator> comm = MPICommunicator::getInstance(); + int myid = comm->getProcessID(); + + if (myid == 0) + UBLOG(logINFO, "Droplet Test: Start!"); + + if (logToFile) { +#if defined(__unix__) + if (myid == 0) { + const char *str = pathname.c_str(); + mkdir(str, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); + } +#endif + + if (myid == 0) { + stringstream logFilename; + logFilename << pathname + "/logfile" + 
UbSystem::toString(UbSystem::getTimeStamp()) + ".txt"; + UbLog::output_policy::setStream(logFilename.str()); + } + } + + //Sleep(30000); + + // LBMReal dLB = 0; // = length[1] / dx; + LBMReal rhoLB = 0.0; + LBMReal nuLB = nuL; //(uLB*dLB) / Re; + + SPtr<LBMUnitConverter> conv(new LBMUnitConverter()); + + //const int baseLevel = 0; + + SPtr<LBMKernel> kernel; + + //kernel = SPtr<LBMKernel>(new MultiphaseScratchCumulantLBMKernel()); + kernel = SPtr<LBMKernel>(new MultiphaseCumulantLBMKernel()); + //kernel = SPtr<LBMKernel>(new MultiphaseTwoPhaseFieldsCumulantLBMKernel()); + + kernel->setWithForcing(true); + kernel->setForcingX1(gr); + kernel->setForcingX2(0.0); + kernel->setForcingX3(0.0); + + kernel->setPhiL(phiL); + kernel->setPhiH(phiH); + kernel->setPhaseFieldRelaxation(tauH); + kernel->setMobility(mob); + + SPtr<BCProcessor> bcProc(new BCProcessor()); + // BCProcessorPtr bcProc(new ThinWallBCProcessor()); + + kernel->setBCProcessor(bcProc); + + SPtr<Grid3D> grid(new Grid3D(comm)); + grid->setDeltaX(dx); + grid->setBlockNX(blocknx[0], blocknx[1], blocknx[2]); + grid->setPeriodicX1(true); + grid->setPeriodicX2(true); + grid->setPeriodicX3(true); + + ////////////////////////////////////////////////////////////////////////// + // restart + //SPtr<UbScheduler> rSch(new UbScheduler(cpStep, cpStart)); + ////SPtr<MPIIORestartCoProcessor> rcp(new MPIIORestartCoProcessor(grid, rSch, pathname, comm)); + ////SPtr<MPIIOMigrationCoProcessor> rcp(new MPIIOMigrationCoProcessor(grid, rSch, pathname, comm)); + //SPtr<MPIIOMigrationBECoProcessor> rcp(new MPIIOMigrationBECoProcessor(grid, rSch, pathname, comm)); + //rcp->setNu(nuLB); + //rcp->setNuLG(nuL, nuG); + //rcp->setDensityRatio(densityRatio); + + //rcp->setLBMKernel(kernel); + //rcp->setBCProcessor(bcProc); + ////////////////////////////////////////////////////////////////////////// + + if (newStart) { + + // bounding box + double g_minX1 = boundingBox[0]; + double g_minX2 = boundingBox[2]; + double g_minX3 = boundingBox[4]; + + double g_maxX1 = boundingBox[1]; + double g_maxX2 = boundingBox[3]; + double g_maxX3 = boundingBox[5]; + + // geometry + SPtr<GbObject3D> gridCube(new GbCuboid3D(g_minX1, g_minX2, g_minX3, g_maxX1, g_maxX2, g_maxX3)); + if (myid == 0) + GbSystem3D::writeGeoObject(gridCube.get(), pathname + "/geo/gridCube", + WbWriterVtkXmlBinary::getInstance()); + + if (myid == 0) { + UBLOG(logINFO, "uLb = " << uLB); + UBLOG(logINFO, "rho = " << rhoLB); + UBLOG(logINFO, "nuLb = " << nuLB); + UBLOG(logINFO, "Re = " << Re); + UBLOG(logINFO, "dx = " << dx); + UBLOG(logINFO, "Preprocess - start"); + } + + GenBlocksGridVisitor genBlocks(gridCube); + grid->accept(genBlocks); + + + SPtr<WriteBlocksCoProcessor> ppblocks(new WriteBlocksCoProcessor( + grid, SPtr<UbScheduler>(new UbScheduler(1)), pathname, WbWriterVtkXmlBinary::getInstance(), comm)); + + //SPtr<Grid3DVisitor> metisVisitor( + // new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::BSW)); + //InteractorsHelper intHelper(grid, metisVisitor); + //intHelper.selectBlocks(); + + ppblocks->process(0); + ppblocks.reset(); + + unsigned long long numberOfBlocks = (unsigned long long)grid->getNumberOfBlocks(); + int ghostLayer = 3; + unsigned long long numberOfNodesPerBlock = + (unsigned long long)(blocknx[0]) * (unsigned long long)(blocknx[1]) * (unsigned long long)(blocknx[2]); + unsigned long long numberOfNodes = numberOfBlocks * numberOfNodesPerBlock; + unsigned long long numberOfNodesPerBlockWithGhostLayer = + numberOfBlocks * (blocknx[0] + 
ghostLayer) * (blocknx[1] + ghostLayer) * (blocknx[2] + ghostLayer); + double needMemAll = + double(numberOfNodesPerBlockWithGhostLayer * (27 * sizeof(double) + sizeof(int) + sizeof(float) * 4)); + double needMem = needMemAll / double(comm->getNumberOfProcesses()); + + if (myid == 0) { + UBLOG(logINFO, "Number of blocks = " << numberOfBlocks); + UBLOG(logINFO, "Number of nodes = " << numberOfNodes); + int minInitLevel = grid->getCoarsestInitializedLevel(); + int maxInitLevel = grid->getFinestInitializedLevel(); + for (int level = minInitLevel; level <= maxInitLevel; level++) { + int nobl = grid->getNumberOfBlocks(level); + UBLOG(logINFO, "Number of blocks for level " << level << " = " << nobl); + UBLOG(logINFO, "Number of nodes for level " << level << " = " << nobl * numberOfNodesPerBlock); + } + UBLOG(logINFO, "Necessary memory = " << needMemAll << " bytes"); + UBLOG(logINFO, "Necessary memory per process = " << needMem << " bytes"); + UBLOG(logINFO, "Available memory per process = " << availMem << " bytes"); + } + + MultiphaseSetKernelBlockVisitor kernelVisitor(kernel, nuL, nuG, densityRatio, beta, kappa, theta, availMem, + needMem); + + grid->accept(kernelVisitor); + + if (refineLevel > 0) { + SetUndefinedNodesBlockVisitor undefNodesVisitor; + grid->accept(undefNodesVisitor); + } + + + //intHelper.setBC(); + + //grid->accept(bcVisitor); + + // initialization of distributions + LBMReal x1c = (g_maxX1 - g_minX1-1)/2; + LBMReal x2c = (g_maxX2 - g_minX2-1)/2; + LBMReal x3c = (g_maxX3 - g_minX3-1)/2; + mu::Parser fct1; + fct1.SetExpr("0.5-0.5*tanh(2*(sqrt((x1-x1c)^2+(x2-x2c)^2+(x3-x3c)^2)-radius)/interfaceThickness)"); + fct1.DefineConst("x1c", x1c); + fct1.DefineConst("x2c", x2c); + fct1.DefineConst("x3c", x3c); + fct1.DefineConst("radius", radius); + fct1.DefineConst("interfaceThickness", interfaceThickness); + + mu::Parser fct2; + fct2.SetExpr("0.5*uLB-uLB*0.5*tanh(2*(sqrt((x1-x1c)^2+(x2-x2c)^2+(x3-x3c)^2)-radius)/interfaceThickness)"); + fct2.DefineConst("uLB", uLB); + fct2.DefineConst("x1c", x1c); + fct2.DefineConst("x2c", x2c); + fct2.DefineConst("x3c", x3c); + fct2.DefineConst("radius", radius); + fct2.DefineConst("interfaceThickness", interfaceThickness); + + MultiphaseInitDistributionsBlockVisitor initVisitor(densityRatio, interfaceThickness, radius); + initVisitor.setPhi(fct1); + initVisitor.setVx1(fct2); + grid->accept(initVisitor); + + // boundary conditions grid + { + SPtr<UbScheduler> geoSch(new UbScheduler(1)); + SPtr<WriteBoundaryConditionsCoProcessor> ppgeo(new WriteBoundaryConditionsCoProcessor( + grid, geoSch, pathname, WbWriterVtkXmlBinary::getInstance(), comm)); + ppgeo->process(0); + ppgeo.reset(); + } + + if (myid == 0) + UBLOG(logINFO, "Preprocess - end"); + } else { + if (myid == 0) { + UBLOG(logINFO, "Parameters:"); + UBLOG(logINFO, "uLb = " << uLB); + UBLOG(logINFO, "rho = " << rhoLB); + UBLOG(logINFO, "nuLb = " << nuLB); + UBLOG(logINFO, "Re = " << Re); + UBLOG(logINFO, "dx = " << dx); + UBLOG(logINFO, "number of levels = " << refineLevel + 1); + UBLOG(logINFO, "numOfThreads = " << numOfThreads); + UBLOG(logINFO, "path = " << pathname); + } + + //rcp->restart((int)restartStep); + //grid->setTimeStep(restartStep); + + if (myid == 0) + UBLOG(logINFO, "Restart - end"); + } + + TwoDistributionsSetConnectorsBlockVisitor setConnsVisitor(comm); + grid->accept(setConnsVisitor); + + //ThreeDistributionsSetConnectorsBlockVisitor setConnsVisitor(comm); + //grid->accept(setConnsVisitor); + + SPtr<UbScheduler> visSch(new UbScheduler(outTime)); + 
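// Note: the muParser expressions fct1/fct2 above encode the standard
// diffuse-interface droplet initialization: the phase field follows a tanh
// profile across the interface, and the initial velocity is uLB weighted by
// the phase field (the liquid drop moves, the gas is at rest). A minimal
// standalone sketch of the same formulas (plain C++; the function names are
// illustrative, not part of VirtualFluids):
#include <cmath>
// phi = 0.5 - 0.5 * tanh(2 * (d - R) / W), where d is the distance to the
// droplet centre, R the radius and W the interface thickness;
// phi -> 1 inside the droplet, phi -> 0 outside.
double phaseField(double x1, double x2, double x3, double x1c, double x2c, double x3c,
                  double radius, double interfaceThickness)
{
    const double d = std::sqrt((x1 - x1c) * (x1 - x1c) + (x2 - x2c) * (x2 - x2c) +
                               (x3 - x3c) * (x3 - x3c));
    return 0.5 - 0.5 * std::tanh(2.0 * (d - radius) / interfaceThickness);
}
// fct2 is algebraically uLB * phi: 0.5*uLB - 0.5*uLB*tanh(...) == uLB * phi.
double initialVx1(double x1, double x2, double x3, double x1c, double x2c, double x3c,
                  double radius, double interfaceThickness, double uLB)
{
    return uLB * phaseField(x1, x2, x3, x1c, x2c, x3c, radius, interfaceThickness);
}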
SPtr<WriteMultiphaseQuantitiesCoProcessor> pp(new WriteMultiphaseQuantitiesCoProcessor( + grid, visSch, pathname, WbWriterVtkXmlBinary::getInstance(), conv, comm)); + //SPtr<WriteMacroscopicQuantitiesCoProcessor> pp(new WriteMacroscopicQuantitiesCoProcessor( + // grid, visSch, pathname, WbWriterVtkXmlBinary::getInstance(), conv, comm)); + + SPtr<UbScheduler> nupsSch(new UbScheduler(10, 30, 100)); + SPtr<NUPSCounterCoProcessor> npr(new NUPSCounterCoProcessor(grid, nupsSch, numOfThreads, comm)); + + SPtr<UbScheduler> stepGhostLayer(new UbScheduler(1)); + SPtr<Calculator> calculator(new BasicCalculator(grid, stepGhostLayer, endTime)); + calculator->addCoProcessor(npr); + calculator->addCoProcessor(pp); + //calculator->addCoProcessor(rcp); + + + + if (myid == 0) + UBLOG(logINFO, "Simulation-start"); + calculator->calculate(); + if (myid == 0) + UBLOG(logINFO, "Simulation-end"); + } catch (std::exception &e) { + cerr << e.what() << endl << flush; + } catch (std::string &s) { + cerr << s << endl; + } catch (...) { + cerr << "unknown exception" << endl; + } +} +int main(int argc, char *argv[]) +{ + // Sleep(30000); + if (argv != NULL) { + if (argv[1] != NULL) { + run(string(argv[1])); + } else { + cout << "Configuration file is missing!" << endl; + } + } +} diff --git a/apps/cpu/PoiseuilleFlow/CMakeLists.txt b/apps/cpu/PoiseuilleFlow/CMakeLists.txt index 43ea7697a846d3453bcdf8e53f6b5a9622ee9e71..1959719d81013762d37f655b342f755135c9ef85 100644 --- a/apps/cpu/PoiseuilleFlow/CMakeLists.txt +++ b/apps/cpu/PoiseuilleFlow/CMakeLists.txt @@ -1,25 +1,6 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) - -######################################################## -## C++ PROJECT ### -######################################################## PROJECT(pf) -INCLUDE(${APPS_ROOT}/IncludsList.cmake) - -################################################################# -### LOCAL FILES ### -################################################################# -FILE(GLOB SPECIFIC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.h - ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp ) - -SET(ALL_SOURCES ${ALL_SOURCES} ${SPECIFIC_FILES}) -SOURCE_GROUP(src FILES ${SPECIFIC_FILES}) - -SET(CAB_ADDITIONAL_LINK_LIBRARIES VirtualFluids) +vf_add_library(BUILDTYPE binary PRIVATE_LINK VirtualFluidsCore muparser basics ${MPI_CXX_LIBRARIES} FILES pf.cpp pf1.cpp ) -################################################################# -### CREATE PROJECT ### -################################################################# -CREATE_CAB_PROJECT(pf BINARY) +vf_get_library_name (library_name) +target_include_directories(${library_name} PRIVATE ${APPS_ROOT_CPU}) \ No newline at end of file diff --git a/apps/cpu/PoiseuilleFlow/pf1.cpp b/apps/cpu/PoiseuilleFlow/pf1.cpp index e7f4bbf1baa03e235a4263c8c9a1293c1d51d7f3..3880e9583dd07bdad7fcd11272f0a372155ef654 100644 --- a/apps/cpu/PoiseuilleFlow/pf1.cpp +++ b/apps/cpu/PoiseuilleFlow/pf1.cpp @@ -137,8 +137,7 @@ void pf1() grid->accept(initVisitor); //set connectors - InterpolationProcessorPtr iProcessor(new IncompressibleOffsetInterpolationProcessor()); - SetConnectorsBlockVisitor setConnsVisitor(comm, true, D3Q27System::ENDDIR, nuLB, iProcessor); + OneDistributionSetConnectorsBlockVisitor setConnsVisitor(comm); grid->accept(setConnsVisitor); SPtr<UbScheduler> mSch(new UbScheduler(cpStep, cpStart)); diff --git a/apps/cpu/ViskomatXL/viskomat.cfg b/apps/cpu/ViskomatXL/viskomat.cfg index 71228231e36810fca21ebe752be2488b5c56264d..4227ba9f821cfa9336a69c14b828829c6ff2ccb1 100644 --- 
a/apps/cpu/ViskomatXL/viskomat.cfg +++ b/apps/cpu/ViskomatXL/viskomat.cfg @@ -1,4 +1,4 @@ -outputPath = d:/temp/viskomat +outputPath = d:/temp/viskomatCylinderRestartTest3_Migration geoPath = d:/Projects/TRR277/Project/WP1/Rheometer/Aileen geoFile = fishbone.stl @@ -7,24 +7,44 @@ availMem = 8e9 logToFile = false blocknx = 16 16 16 +#blocknx = 1 8 8 #boundingBox = -4 171 -9.5 155.5 -76.5 82.5 -boundingBox = -4 166 -9.5 155.5 -76.5 82.5 +#boundingBox = -4 166 -9.5 155.5 -76.5 82.5 + +#boundingBox = -4 166 0 165 0 165 + +#boundingBox = -4 166 -82.5 82.5 -82.5 82.5 + +boundingBox = 0 140 -82.5 82.5 -82.5 82.5 + +# around X +#blocknx = 1 16 16 +#boundingBox = 0 1 0 165 0 165 + +# around Y +#blocknx = 16 1 16 +#boundingBox = 0 165 0 1 0 165 + +#zero test +#blocknx = 8 8 8 +#boundingBox = 0 8 0 8 0 8 + deltax = 1 refineLevel = 0 #nuLB = 1.5e-4 -OmegaLB = 1e-5 +OmegaLB = 1e-4 tau0 = 20e-7 resolution = 32 scaleFactor = 1 newStart = true -restartStep = 100000 +restartStep = 10000 cpStart = 10000 cpStep = 10000 -outTime = 10000 -endTime = 100000 \ No newline at end of file +outTime = 1000 +endTime = 1000000 \ No newline at end of file diff --git a/apps/cpu/ViskomatXL/viskomat.cpp b/apps/cpu/ViskomatXL/viskomat.cpp index 113d9c6da16bff21267bb723d48adb2a3c9d5619..91dfb050901571b5cf37e02cfca5ab8c7a8eb8cb 100644 --- a/apps/cpu/ViskomatXL/viskomat.cpp +++ b/apps/cpu/ViskomatXL/viskomat.cpp @@ -81,11 +81,11 @@ void bflow(string configname) //cpStep = endTime; //double Re = 1.38230076758; - double N = 80; - double Omega = 2 * UbMath::PI / 60.0 * N; - double mu = 1; - double R = 0.165 / 2.0; - double rho = 970; + double N = 80; //rpm + double Omega = 2 * UbMath::PI / 60.0 * N; //rad/s + double mu = 1; //Pa s + double R = 0.165 / 2.0; //m + double rho = 970; //kg/m^3 double Re = Omega * R * R * rho / mu; double nuLB = OmegaLB * R * 1e3 * R * 1e3 / Re; @@ -151,21 +151,36 @@ void bflow(string configname) SPtr<BCAdapter> slipBCAdapter(new SlipBCAdapter()); slipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new SimpleSlipBCAlgorithm())); - mu::Parser fctVx; - //fctVx.SetExpr("omega*(r-x2)"); - fctVx.SetExpr("-Omega*(x2-r)"); - fctVx.DefineConst("Omega", OmegaLB); - //fctVx.DefineConst("r", R0); - fctVx.DefineConst("r", 0.5 * (g_maxX2 - g_minX2)); - + //// rotation around X-axis mu::Parser fctVy; - fctVy.SetExpr("Omega*(x3-r)"); + fctVy.SetExpr("-Omega*(x3-z0-r)"); fctVy.DefineConst("Omega", OmegaLB); - //fctVy.DefineConst("r", R0); - fctVy.DefineConst("r", 0.5 * (g_maxX2 - g_minX2)); + fctVy.DefineConst("r", 0.5 * (g_maxX3 - g_minX3)); + fctVy.DefineConst("z0", g_minX3); mu::Parser fctVz; - fctVz.SetExpr("0.0"); + fctVz.SetExpr("Omega*(x2-y0-r)"); + fctVz.DefineConst("Omega", OmegaLB); + fctVz.DefineConst("r", 0.5 * (g_maxX2 - g_minX2)); + fctVz.DefineConst("y0", g_minX2); + + mu::Parser fctVx; + fctVx.SetExpr("0.0"); + + // rotation around Y-axis + //mu::Parser fctVz; + //// fctVx.SetExpr("omega*(r-x2)"); + //fctVz.SetExpr("Omega*(x1-r)"); + //fctVz.DefineConst("Omega", OmegaLB); + //fctVz.DefineConst("r", 0.5 * (g_maxX1 - g_minX1)); + + //mu::Parser fctVx; + //fctVx.SetExpr("-Omega*(x3-r)"); + //fctVx.DefineConst("Omega", OmegaLB); + //fctVx.DefineConst("r", 0.5 * (g_maxX1 - g_minX1)); + + //mu::Parser fctVy; + //fctVy.SetExpr("0.0"); SPtr<BCAdapter> velocityBCAdapter(new VelocityBCAdapter(true, true, true, fctVx, fctVy, fctVz, 0, BCFunction::INFCONST)); velocityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new VelocityBCAlgorithm())); @@ -188,8 +203,11 @@ void bflow(string configname) SPtr<BCProcessor> bcProc; 
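// Note: the new fctVy/fctVz expressions above are the rigid-body rotation of
// the rotor wall about the x-axis, i.e. v = omega x (p - c) with
// omega = (OmegaLB, 0, 0) and the cylinder axis at c = (*, y0 + r, z0 + r).
// The same cross product as a standalone sketch (plain C++; names are
// illustrative):
#include <array>
std::array<double, 3> rotationAboutXAxis(double x2, double x3, double x2centre,
                                         double x3centre, double omega)
{
    // (omega, 0, 0) x (*, x2 - x2c, x3 - x3c)
    //   = (0, -omega*(x3 - x3c), omega*(x2 - x2c))
    return { 0.0, -omega * (x3 - x3centre), omega * (x2 - x2centre) };
}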
bcProc = SPtr<BCProcessor>(new BCProcessor()); + //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new BGKLBMKernel()); //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new CumulantLBMKernel()); - SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new CompressibleCumulant4thOrderViscosityLBMKernel()); + //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new CompressibleCumulant4thOrderViscosityLBMKernel()); + //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new IncompressibleCumulantLBMKernel()); + SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new CumulantK17LBMKernel()); //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new RheologyBinghamModelLBMKernel()); //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new HerschelBulkleyModelLBMKernel()); //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new BinghamModelLBMKernel()); @@ -211,28 +229,64 @@ void bflow(string configname) //restart SPtr<UbScheduler> mSch(new UbScheduler(cpStep, cpStart)); SPtr<MPIIOMigrationCoProcessor> restartCoProcessor(new MPIIOMigrationCoProcessor(grid, mSch, outputPath, comm)); + //SPtr<MPIIORestartCoProcessor> restartCoProcessor(new MPIIORestartCoProcessor(grid, mSch, outputPath, comm)); restartCoProcessor->setLBMKernel(kernel); restartCoProcessor->setBCProcessor(bcProc); //restartCoProcessor->setNu(k); ////////////////////////////////////////////////////////////////////////// ////stator - //SPtr<GbObject3D> stator(new GbCylinder3D(0.5 * g_maxX1, 0.5 * g_maxX2, g_minX3-2.0*deltax, 0.5 * g_maxX1, 0.5 * g_maxX2, g_maxX3+ 2.0 * deltax, 0.5 * g_maxX1)); - SPtr<GbTriFaceMesh3D> stator = make_shared<GbTriFaceMesh3D>(); - stator->readMeshFromSTLFileBinary(geoPath + "/" + geoFile, false); + // rotation around X-axis + SPtr<GbObject3D> stator(new GbCylinder3D(g_minX1 - 3.0 * deltax, g_minX2 + 0.5 * (g_maxX2 - g_minX2), + g_minX3 + 0.5 * (g_maxX3 - g_minX3), g_maxX1 + 3.0 * deltax, + g_minX2 + 0.5 * (g_maxX2 - g_minX2), g_minX3 + 0.5 * (g_maxX3 - g_minX3), 0.5 * (g_maxX3 - g_minX3) * 0.5)); + + // rotation around Y-axis + //SPtr<GbObject3D> stator(new GbCylinder3D(g_minX1 + 0.5 * (g_maxX1 - g_minX1), g_minX2 - 3.0 * deltax, + // g_minX3 + 0.5 * (g_maxX3 - g_minX3), g_minX1 + 0.5 * (g_maxX1 - g_minX1), + // g_maxX2 + 3.0 * deltax, g_minX3 + 0.5 * (g_maxX3 - g_minX3), + // 0.5 * (g_maxX3 - g_minX3) * 0.5)); + + SPtr<D3Q27Interactor> statorInt = + SPtr<D3Q27Interactor>(new D3Q27Interactor(stator, grid, noSlipBCAdapter, Interactor3D::SOLID)); + + //SPtr<GbTriFaceMesh3D> stator = make_shared<GbTriFaceMesh3D>(); + //stator->readMeshFromSTLFileBinary(geoPath + "/" + geoFile, false); + //stator->translate(4.0, -73.0, -6.0); GbSystem3D::writeGeoObject(stator.get(), outputPath + "/geo/stator", WbWriterVtkXmlBinary::getInstance()); - - SPtr<D3Q27Interactor> statorInt = SPtr<D3Q27TriFaceMeshInteractor>( - new D3Q27TriFaceMeshInteractor(stator, grid, velocityBCAdapter, Interactor3D::SOLID, Interactor3D::EDGES)); + + //SPtr<D3Q27Interactor> statorInt = SPtr<D3Q27TriFaceMeshInteractor>( + // new D3Q27TriFaceMeshInteractor(stator, grid, noSlipBCAdapter, Interactor3D::SOLID, Interactor3D::EDGES)); ////rotor (cylinder) - SPtr<GbObject3D> rotor(new GbCylinder3D(g_minX1, g_minX2 + 0.5 * (g_maxX2 - g_minX2), - g_minX3 + 0.5 * (g_maxX3 - g_minX3), - g_maxX1, + // rotation around X-axis + SPtr<GbObject3D> rotor(new GbCylinder3D( + g_minX1 - 3.0 * deltax, g_minX2 + 0.5 * (g_maxX2 - g_minX2), + g_minX3 + 0.5 * (g_maxX3 - g_minX3), g_maxX1 + 3.0 * deltax, g_minX2 + 0.5 * (g_maxX2 - g_minX2), g_minX3 + 0.5 * (g_maxX3 - g_minX3), 0.5 * (g_maxX3 - g_minX3))); + // rotation around Y-axis + //SPtr<GbObject3D> 
rotor(new GbCylinder3D(g_minX1 + 0.5 * (g_maxX1 - g_minX1), g_minX2 - 3.0 * deltax, + // g_minX3 + 0.5 * (g_maxX3 - g_minX3), g_minX1 + 0.5 * (g_maxX1 - g_minX1), + // g_maxX2 + 3.0 * deltax, g_minX3 + 0.5 * (g_maxX3 - g_minX3), + // 0.5 * (g_maxX3 - g_minX3))); + GbSystem3D::writeGeoObject(rotor.get(), outputPath + "/geo/rotor", WbWriterVtkXmlBinary::getInstance()); - SPtr<D3Q27Interactor> rotorInt = SPtr<D3Q27Interactor>(new D3Q27Interactor(rotor, grid, noSlipBCAdapter, Interactor3D::INVERSESOLID)); + SPtr<D3Q27Interactor> rotorInt = + SPtr<D3Q27Interactor>(new D3Q27Interactor(rotor, grid, velocityBCAdapter, Interactor3D::INVERSESOLID)); + + //walls + GbCuboid3DPtr wallXmin(new GbCuboid3D(g_minX1 - deltax, g_minX2 - deltax, g_minX3 - deltax, g_minX1, + g_maxX2 + deltax, g_maxX3 + deltax)); + if (myid == 0) GbSystem3D::writeGeoObject(wallXmin.get(), outputPath + "/geo/wallXmin", WbWriterVtkXmlASCII::getInstance()); + + GbCuboid3DPtr wallXmax(new GbCuboid3D(g_maxX1, g_minX2 - deltax, g_minX3 - deltax, g_maxX1 + (double)blocknx[0]*deltax, + g_maxX2 + deltax, g_maxX3 + deltax)); + if (myid == 0) GbSystem3D::writeGeoObject(wallXmax.get(), outputPath + "/geo/wallXmax", WbWriterVtkXmlASCII::getInstance()); + + //wall interactors + SPtr<D3Q27Interactor> wallXminInt(new D3Q27Interactor(wallXmin, grid, slipBCAdapter, Interactor3D::SOLID)); + SPtr<D3Q27Interactor> wallXmaxInt(new D3Q27Interactor(wallXmax, grid, slipBCAdapter, Interactor3D::SOLID)); if (myid == 0) { @@ -281,19 +335,6 @@ void bflow(string configname) } - //walls - GbCuboid3DPtr wallXmin(new GbCuboid3D(g_minX1 - deltax, g_minX2 - deltax, g_minX3 - deltax, g_minX1, - g_maxX2 + deltax, g_maxX3 + deltax)); - if (myid == 0) GbSystem3D::writeGeoObject(wallXmin.get(), outputPath + "/geo/wallXmin", WbWriterVtkXmlASCII::getInstance()); - - GbCuboid3DPtr wallXmax(new GbCuboid3D(g_maxX1, g_minX2 - deltax, g_minX3 - deltax, g_maxX1 + deltax, - g_maxX2 + deltax, g_maxX3 + deltax)); - if (myid == 0) GbSystem3D::writeGeoObject(wallXmax.get(), outputPath + "/geo/wallXmax", WbWriterVtkXmlASCII::getInstance()); - - //wall interactors - SPtr<D3Q27Interactor> wallXminInt(new D3Q27Interactor(wallXmin, grid, noSlipBCAdapter, Interactor3D::SOLID)); - SPtr<D3Q27Interactor> wallXmaxInt(new D3Q27Interactor(wallXmax, grid, slipBCAdapter, Interactor3D::SOLID)); - //////////////////////////////////////////// //METIS SPtr<Grid3DVisitor> metisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::BSW, MetisPartitioner::KWAY)); @@ -353,6 +394,9 @@ void bflow(string configname) InitDistributionsBlockVisitor initVisitor; grid->accept(initVisitor); + SPtr<UbScheduler> geoSch(new UbScheduler(1)); + WriteBoundaryConditionsCoProcessor ppgeo = WriteBoundaryConditionsCoProcessor(grid, geoSch, outputPath, WbWriterVtkXmlBinary::getInstance(), comm); + ppgeo.process(0); if (myid == 0) UBLOG(logINFO, "Preprozess - end"); } @@ -360,12 +404,24 @@ void bflow(string configname) { restartCoProcessor->restart((int)restartStep); grid->setTimeStep(restartStep); - SetBcBlocksBlockVisitor v1(rotorInt); - grid->accept(v1); - rotorInt->initInteractor(); - SetBcBlocksBlockVisitor v2(statorInt); - grid->accept(v2); + + //SetBcBlocksBlockVisitor v1(wallXminInt); + //grid->accept(v1); + //wallXminInt->initInteractor(); + // + //SetBcBlocksBlockVisitor v2(wallXmaxInt); + //grid->accept(v2); + //wallXmaxInt->initInteractor(); + + SetBcBlocksBlockVisitor v3(statorInt); + grid->accept(v3); statorInt->initInteractor(); + + SetBcBlocksBlockVisitor 
v4(rotorInt); + grid->accept(v4); + rotorInt->initInteractor(); + + } omp_set_num_threads(numOfThreads); @@ -381,10 +437,6 @@ void bflow(string configname) grid->accept(bcVisitor); - SPtr<UbScheduler> geoSch(new UbScheduler(1)); - WriteBoundaryConditionsCoProcessor ppgeo = WriteBoundaryConditionsCoProcessor(grid, geoSch, outputPath, WbWriterVtkXmlBinary::getInstance(), comm); - ppgeo.process(0); - SPtr<UbScheduler> nupsSch(new UbScheduler(10, 30, 100)); SPtr<CoProcessor> npr(new NUPSCounterCoProcessor(grid, nupsSch, numOfThreads, comm)); @@ -392,13 +444,13 @@ void bflow(string configname) SPtr<UbScheduler> visSch(new UbScheduler(outTime)); //SPtr<UbScheduler> visSch(new UbScheduler(10,1)); SPtr<WriteMacroscopicQuantitiesCoProcessor> writeMQCoProcessor(new WriteMacroscopicQuantitiesCoProcessor(grid, visSch, outputPath, WbWriterVtkXmlBinary::getInstance(), SPtr<LBMUnitConverter>(new LBMUnitConverter()), comm)); - //writeMQCoProcessor->process(0); + //writeMQCoProcessor->process(100); SPtr<UbScheduler> forceSch(new UbScheduler(100)); - SPtr<CalculateTorqueCoProcessor> fp = make_shared<CalculateTorqueCoProcessor>(grid, forceSch, outputPath + "/torque/TorqueRotor.txt", comm); + SPtr<CalculateTorqueCoProcessor> fp = make_shared<CalculateTorqueCoProcessor>(grid, forceSch, outputPath + "/torque/TorqueRotor.csv", comm); fp->addInteractor(rotorInt); - //SPtr<CalculateTorqueCoProcessor> fp2 = make_shared<CalculateTorqueCoProcessor>(grid, forceSch, outputPath + "/torque/TorqueStator.txt", comm); - //fp2->addInteractor(statorInt); + SPtr<CalculateTorqueCoProcessor> fp2 = make_shared<CalculateTorqueCoProcessor>(grid, forceSch, outputPath + "/torque/TorqueStator.csv", comm); + fp2->addInteractor(statorInt); //SPtr<WriteThixotropyQuantitiesCoProcessor> writeThixotropicMQCoProcessor(new WriteThixotropyQuantitiesCoProcessor(grid, visSch, outputPath, WbWriterVtkXmlBinary::getInstance(), SPtr<LBMUnitConverter>(new LBMUnitConverter()), comm)); @@ -406,7 +458,7 @@ void bflow(string configname) SPtr<Calculator> calculator(new BasicCalculator(grid, stepGhostLayer, endTime)); calculator->addCoProcessor(npr); calculator->addCoProcessor(fp); - //calculator->addCoProcessor(fp2); + calculator->addCoProcessor(fp2); calculator->addCoProcessor(writeMQCoProcessor); //calculator->addCoProcessor(writeThixotropicMQCoProcessor); calculator->addCoProcessor(restartCoProcessor); diff --git a/apps/cpu/rheometer/rheometer.cfg b/apps/cpu/rheometer/rheometer.cfg index 344062fb9f086cf163542285e5d9d745df3d0d80..9eec8c6ded9b7a5ab8d1e6177c43354a4514ccc3 100644 --- a/apps/cpu/rheometer/rheometer.cfg +++ b/apps/cpu/rheometer/rheometer.cfg @@ -1,12 +1,12 @@ -outputPath = d:/temp/rheometer/rheometerBinghamqQBB/rheometerBingham_tau_20e-7_nu_1.5e-3_new_lim_test - +#outputPath = d:/temp/rheometer/rheometerBinghamqQBB/rheometerBingham_tau_20e-7_nu_1.5e-3_new_lim_test +outputPath = d:/temp/Taylor-CouetteFlowIncompCum viscosityPath = d:/Projects/VirtualFluidsCombined/apps/cpu/rheometer -numOfThreads = 4 +numOfThreads = 1 availMem = 8e9 logToFile = false -blocknx = 8 8 1 +blocknx = 16 16 1 #8 8 1 #boundingBox = 32 32 1 deltax = 1 @@ -15,17 +15,20 @@ deltax = 1 refineLevel = 0 -OmegaLB = 4e-5 +#OmegaLB = 4e-5 +#tau0 = 20e-7 + +OmegaLB = 1e-4 tau0 = 20e-7 -resolution = 32 +resolution = 165 scaleFactor = 1 newStart = true restartStep = 100000 -cpStart = 10000 -cpStep = 10000 +cpStart = 10000000 +cpStep = 10000000 -outTime = 10000 -endTime = 100000 \ No newline at end of file +outTime = 1 +endTime = 10 \ No newline at end of file diff --git 
a/apps/cpu/rheometer/rheometer.cpp b/apps/cpu/rheometer/rheometer.cpp index 3f87dee3451f98fa47a26a1b032414cab5a513c2..f6f98c122c3197f1a080fd335f8edfc9ee1f4e33 100644 --- a/apps/cpu/rheometer/rheometer.cpp +++ b/apps/cpu/rheometer/rheometer.cpp @@ -38,7 +38,7 @@ void bflow(string configname) viscosity.load(viscosityPath + "/viscosity.cfg"); double nuLB = viscosity.getValue<double>("nuLB"); - outputPath = outputPath + "/rheometerBingham_" + config.getValue<string>("resolution") + "_" + config.getValue<string>("OmegaLB"); + //outputPath = outputPath + "/rheometerBingham_" + config.getValue<string>("resolution") + "_" + config.getValue<string>("OmegaLB"); SPtr<Communicator> comm = MPICommunicator::getInstance(); int myid = comm->getProcessID(); @@ -89,12 +89,20 @@ void bflow(string configname) //bounding box + //double g_minX1 = 0; + //double g_minX2 = 0; + //double g_minX3 = 0; + + //double g_maxX1 = resolution;// boundingBox[0]; + //double g_maxX2 = resolution;// boundingBox[1]; + //double g_maxX3 = 1.0; // boundingBox[2]; + double g_minX1 = 0; double g_minX2 = 0; double g_minX3 = 0; - double g_maxX1 = resolution;// boundingBox[0]; - double g_maxX2 = resolution;// boundingBox[1]; + double g_maxX1 = resolution; // boundingBox[0]; + double g_maxX2 = resolution; // boundingBox[1]; double g_maxX3 = 1.0; // boundingBox[2]; //double g_minX1 = -boundingBox[0]/2.0; @@ -132,9 +140,9 @@ void bflow(string configname) //thix->setOmegaMin(omegaMin); SPtr<BCAdapter> noSlipBCAdapter(new NoSlipBCAdapter()); - //noSlipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new NoSlipBCAlgorithm())); + noSlipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new NoSlipBCAlgorithm())); //noSlipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new RheologyHerschelBulkleyModelNoSlipBCAlgorithm())); - noSlipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new RheologyBinghamModelNoSlipBCAlgorithm())); + //noSlipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new RheologyBinghamModelNoSlipBCAlgorithm())); //SPtr<BCAdapter> slipBCAdapter(new SlipBCAdapter()); //slipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new SimpleSlipBCAlgorithm())); @@ -155,11 +163,26 @@ void bflow(string configname) mu::Parser fctVz; fctVz.SetExpr("0.0"); + + //// rotation around X-axis + //mu::Parser fctVy; + //fctVy.SetExpr("-Omega*(x3-r)"); + //fctVy.DefineConst("Omega", OmegaLB); + //fctVy.DefineConst("r", 0.5 * (g_maxX2 - g_minX2)); + + //mu::Parser fctVz; + //fctVz.SetExpr("Omega*(x2-r)"); + //fctVz.DefineConst("Omega", OmegaLB); + //fctVz.DefineConst("r", 0.5 * (g_maxX2 - g_minX2)); + + //mu::Parser fctVx; + //fctVx.SetExpr("0.0"); + SPtr<BCAdapter> velocityBCAdapter(new VelocityBCAdapter(true, true, true, fctVx, fctVy, fctVz, 0, BCFunction::INFCONST)); - //velocityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new VelocityBCAlgorithm())); + velocityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new VelocityBCAlgorithm())); //velocityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new SimpleVelocityBCAlgorithm())); //velocityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new VelocityWithDensityBCAlgorithm())); - velocityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new RheologyBinghamModelVelocityBCAlgorithm())); + //velocityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new RheologyBinghamModelVelocityBCAlgorithm())); //SPtr<BCAdapter> densityBCAdapter(new DensityBCAdapter()); //densityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new NonEqDensityBCAlgorithm())); @@ -176,9 +199,12 @@ void bflow(string configname) SPtr<BCProcessor> bcProc; bcProc = SPtr<BCProcessor>(new BCProcessor()); + 
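// Note: judging by the new outputPath (Taylor-CouetteFlowIncompCum) and the
// rotor/stator roles below (velocity BC on the outer cylinder, no-slip on the
// inner one), this run appears to be set up as a Taylor-Couette validation of
// the IncompressibleCumulantLBMKernel. Assuming steady, laminar, purely
// azimuthal flow with u_theta(Ri) = 0 and u_theta(Ro) = Omega*Ro, the analytic
// reference profile is u_theta(r) = A*r + B/r (sketch, plain C++):
double couetteUTheta(double r, double Ri, double Ro, double omega)
{
    const double A = omega * Ro * Ro / (Ro * Ro - Ri * Ri);
    const double B = -A * Ri * Ri; // enforces u_theta(Ri) = 0
    return A * r + B / r;
}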
//SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new BGKLBMKernel()); + SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new IncompressibleCumulantLBMKernel()); //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new CumulantLBMKernel()); //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new CompressibleCumulant4thOrderViscosityLBMKernel()); - SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new RheologyBinghamModelLBMKernel()); + //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new CumulantK17LBMKernel()); + //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new RheologyBinghamModelLBMKernel()); //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new HerschelBulkleyModelLBMKernel()); //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new BinghamModelLBMKernel()); kernel->setBCProcessor(bcProc); @@ -205,16 +231,26 @@ void bflow(string configname) ////////////////////////////////////////////////////////////////////////// ////stator - SPtr<GbObject3D> stator(new GbCylinder3D(0.5 * g_maxX1, 0.5 * g_maxX2, g_minX3-2.0*deltax, 0.5 * g_maxX1, 0.5 * g_maxX2, g_maxX3+ 2.0 * deltax, 0.5 * g_maxX1)); - GbSystem3D::writeGeoObject(stator.get(), outputPath + "/geo/stator", WbWriterVtkXmlBinary::getInstance()); + SPtr<GbObject3D> rotor(new GbCylinder3D(0.5 * g_maxX1, 0.5 * g_maxX2, g_minX3 - 2.0 * deltax, 0.5 * g_maxX1, + 0.5 * g_maxX2, g_maxX3 + 2.0 * deltax, 0.5 * g_maxX1)); + + //around x + //SPtr<GbObject3D> stator(new GbCylinder3D(g_minX1 - 3.0 * deltax, 0.5 * g_maxX2, 0.5 * g_maxX3, g_maxX1 + 3.0 * deltax, 0.5 * g_maxX2, 0.5 * g_maxX3, 0.5 * g_maxX3)); + + GbSystem3D::writeGeoObject(rotor.get(), outputPath + "/geo/stator", WbWriterVtkXmlBinary::getInstance()); - SPtr<D3Q27Interactor> statorInt = SPtr<D3Q27Interactor>(new D3Q27Interactor(stator, grid, velocityBCAdapter, Interactor3D::INVERSESOLID)); + SPtr<D3Q27Interactor> rotorInt = + SPtr<D3Q27Interactor>(new D3Q27Interactor(rotor, grid, velocityBCAdapter, Interactor3D::INVERSESOLID)); ////rotor (cylinder) - SPtr<GbObject3D> rotor(new GbCylinder3D(0.5 * g_maxX1, 0.5 * g_maxX2, g_minX3- 2.0 * deltax, 0.5 * g_maxX1, 0.5 * g_maxX2, g_maxX3+ 2.0 * deltax, 0.25 * g_maxX1)); - GbSystem3D::writeGeoObject(rotor.get(), outputPath + "/geo/rotor", WbWriterVtkXmlBinary::getInstance()); + SPtr<GbObject3D> stator(new GbCylinder3D(0.5 * g_maxX1, 0.5 * g_maxX2, g_minX3- 2.0 * deltax, 0.5 * g_maxX1, 0.5 * g_maxX2, g_maxX3+ 2.0 * deltax, 0.25 * g_maxX1)); + + //around x + //SPtr<GbObject3D> rotor(new GbCylinder3D(g_minX1 - 3.0 * deltax, 0.5 * g_maxX2, 0.5 * g_maxX3, g_maxX1 + 3.0 * deltax, 0.5 * g_maxX2, 0.5 * g_maxX3, 0.25 * g_maxX3)); + + GbSystem3D::writeGeoObject(stator.get(), outputPath + "/geo/rotor", WbWriterVtkXmlBinary::getInstance()); - SPtr<D3Q27Interactor> rotorInt = SPtr<D3Q27Interactor>(new D3Q27Interactor(rotor, grid, noSlipBCAdapter, Interactor3D::SOLID)); + SPtr<D3Q27Interactor> statorInt = SPtr<D3Q27Interactor>(new D3Q27Interactor(stator, grid, noSlipBCAdapter, Interactor3D::SOLID)); if (myid == 0) { @@ -385,8 +421,8 @@ void bflow(string configname) calculator->addCoProcessor(fp); calculator->addCoProcessor(fp2); calculator->addCoProcessor(writeMQCoProcessor); - calculator->addCoProcessor(writeThixotropicMQCoProcessor); - calculator->addCoProcessor(restartCoProcessor); + //calculator->addCoProcessor(writeThixotropicMQCoProcessor); + //calculator->addCoProcessor(restartCoProcessor); if (myid == 0) UBLOG(logINFO, "Simulation-start"); calculator->calculate(); diff --git a/apps/cpu/rheometer/viscosity.cfg b/apps/cpu/rheometer/viscosity.cfg index 
bf2822c7b1d6dd42fdcbc513a4e6b39264ab4180..065757939d9313c7caaf46ba34f7b989b61914f2 100644 --- a/apps/cpu/rheometer/viscosity.cfg +++ b/apps/cpu/rheometer/viscosity.cfg @@ -1 +1 @@ -nuLB = 1.5e-3 \ No newline at end of file +nuLB = 0.0123058 #1.5e-3 \ No newline at end of file diff --git a/src/basics/basics/container/CbVectorPool.h b/src/basics/basics/container/CbVectorPool.h index e1a061d1ac3016fbe3b116243473ee609b24eb8c..0272056b51d65aacafad78cbae03971a24879a7e 100644 --- a/src/basics/basics/container/CbVectorPool.h +++ b/src/basics/basics/container/CbVectorPool.h @@ -118,7 +118,7 @@ public: return this->allocData(allocator, vec, dataSize, value); } - UB_THROW(UbException(UB_EXARGS, "vector-key=" + UbSystem::toString(allocator.key) + " bereits vergeben!")); + UB_THROW(UbException(UB_EXARGS, "vector-key=" + UbSystem::toString(allocator.key) + " already taken! (e.g. SetConnectorBlockVisitor was called several times)")); } /*==================================================================*/ bool resizeVectorData(CbVector<value_type> &vec, const size_type &dataSize, const value_type &value = value_type()) diff --git a/src/basics/geometry3d/GbCylinder3D.cpp b/src/basics/geometry3d/GbCylinder3D.cpp index f6d1ed6e701fb7e3209a6e67720dc3d41d8f1aeb..2b90ca0fae9a7e22245961b8d713a35d72fa7df8 100644 --- a/src/basics/geometry3d/GbCylinder3D.cpp +++ b/src/basics/geometry3d/GbCylinder3D.cpp @@ -1162,12 +1162,10 @@ double GbCylinder3D::getIntersectionRaytraceFactor(const double &x1, const doubl } else return -1.0; } else { - // if (UbMath::negative(rx1)) d = -1.0 * (x1 - minX1) / rx1; - // else if(UbMath::positive(rx1)) d = -1.0 * (x1 - maxX1) / rx1; if (UbMath::negative(rx1)) - d = -1.0 * (x1 - maxX1) / rx1; - else if (UbMath::positive(rx1)) d = -1.0 * (x1 - minX1) / rx1; + else if (UbMath::positive(rx1)) + d = -1.0 * (x1 - maxX1) / rx1; } } else if (this->isParallelToX2Axis()) { if (UbMath::equal(x2, minX2) && UbMath::negative(rx2)) diff --git a/src/cpu/VirtualFluids.h b/src/cpu/VirtualFluids.h index 59dee98b1ec59ce85d270fafbe96ca7cd6b85473..363c9c046b37a45d800ea142e79617f71a8499d3 100644 --- a/src/cpu/VirtualFluids.h +++ b/src/cpu/VirtualFluids.h @@ -228,7 +228,7 @@ #include <LBM/LBMKernelETD3Q27BGK.h> #include <LBM/LBMSystem.h> #include <LBM/LBMUnitConverter.h> -//#include <LBM/BGKLBMKernel.h> +#include <LBM/BGKLBMKernel.h> #include <LBM/ThixotropyLBMKernel.h> #include <LBM/ThixotropyExpLBMKernel.h> #include <LBM/CumulantLBMKernel.h> diff --git a/src/cpu/VirtualFluidsCore/BoundaryConditions/BCAlgorithm.cpp b/src/cpu/VirtualFluidsCore/BoundaryConditions/BCAlgorithm.cpp index 69543bc6dce034c9c878711013d0297180c3d99f..5c852528a2abe2bf8de06753f9aaa78bf7f8a565 100644 --- a/src/cpu/VirtualFluidsCore/BoundaryConditions/BCAlgorithm.cpp +++ b/src/cpu/VirtualFluidsCore/BoundaryConditions/BCAlgorithm.cpp @@ -36,7 +36,12 @@ #include "BCArray3D.h" #include "BoundaryConditions.h" #include "EsoTwist3D.h" +#include "Block3D.h" +void BCAlgorithm::setBlock(SPtr<Block3D> block) +{ + this->block = block; +} ////////////////////////////////////////////////////////////////////////// void BCAlgorithm::setNodeIndex(int x1, int x2, int x3) { diff --git a/src/cpu/VirtualFluidsCore/BoundaryConditions/BCAlgorithm.h b/src/cpu/VirtualFluidsCore/BoundaryConditions/BCAlgorithm.h index 6b60da224ecaac9f56412cbd38b65d758816a2b2..67a3620c0a37c623c697bf8ec6a3f70f2ba00247 100644 --- a/src/cpu/VirtualFluidsCore/BoundaryConditions/BCAlgorithm.h +++ b/src/cpu/VirtualFluidsCore/BoundaryConditions/BCAlgorithm.h @@ -41,6 +41,7 @@ class
DistributionArray3D; class BCArray3D; class BoundaryConditions; +class Block3D; //! \brief Abstract class of baundary conditions algorithm //! \details BCAlgorithm provides interface for implementation of diferent boundary conditions @@ -77,6 +78,7 @@ public: virtual void addDistributions(SPtr<DistributionArray3D> distributions) = 0; virtual void addDistributionsH(SPtr<DistributionArray3D> distributionsH) {} virtual void addDistributionsH2(SPtr<DistributionArray3D> distributionsH2) {} + void setBlock(SPtr<Block3D> block); void setNodeIndex(int x1, int x2, int x3); void setBcPointer(SPtr<BoundaryConditions> bcPtr); void setCompressible(bool c); @@ -107,6 +109,7 @@ protected: SPtr<DistributionArray3D> distributionsH; SPtr<DistributionArray3D> distributionsH2; SPtr<BCArray3D> bcArray; + SPtr<Block3D> block; LBMReal collFactor; LBMReal collFactorL, collFactorG, collFactorPh; diff --git a/src/cpu/VirtualFluidsCore/BoundaryConditions/RheologyBinghamModelVelocityBCAlgorithm.h b/src/cpu/VirtualFluidsCore/BoundaryConditions/RheologyBinghamModelVelocityBCAlgorithm.h index 263119401c1d2c437e8ae3962edd33bfcd3b554c..9673a009f75bccd71924985ec9a27187d9e1e12e 100644 --- a/src/cpu/VirtualFluidsCore/BoundaryConditions/RheologyBinghamModelVelocityBCAlgorithm.h +++ b/src/cpu/VirtualFluidsCore/BoundaryConditions/RheologyBinghamModelVelocityBCAlgorithm.h @@ -43,7 +43,7 @@ public: RheologyBinghamModelVelocityBCAlgorithm() { BCAlgorithm::type = BCAlgorithm::RheologyBinghamModelVelocityBCAlgorithm; - BCAlgorithm::preCollision = false; + BCAlgorithm::preCollision = true; } ~RheologyBinghamModelVelocityBCAlgorithm() {} SPtr<BCAlgorithm> clone() override diff --git a/src/cpu/VirtualFluidsCore/BoundaryConditions/VelocityBCAlgorithm.cpp b/src/cpu/VirtualFluidsCore/BoundaryConditions/VelocityBCAlgorithm.cpp index 4905e76a9f7a3e46848b8d8dbaf81095fe9a5b14..15768aeeb043620aece86194319eafe00ea1df60 100644 --- a/src/cpu/VirtualFluidsCore/BoundaryConditions/VelocityBCAlgorithm.cpp +++ b/src/cpu/VirtualFluidsCore/BoundaryConditions/VelocityBCAlgorithm.cpp @@ -34,6 +34,7 @@ #include "VelocityBCAlgorithm.h" #include "BoundaryConditions.h" #include "DistributionArray3D.h" +#include "Block3D.h" VelocityBCAlgorithm::VelocityBCAlgorithm() { @@ -61,6 +62,9 @@ void VelocityBCAlgorithm::applyBC() calcMacrosFct(f, drho, vx1, vx2, vx3); calcFeqFct(feq, drho, vx1, vx2, vx3); + //DEBUG + //int blockID = block->getGlobalID(); + rho = 1.0 + drho * compressibleFactor; for (int fdir = D3Q27System::FSTARTDIR; fdir <= D3Q27System::FENDDIR; fdir++) { diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/CalculateTorqueCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/CalculateTorqueCoProcessor.cpp index 37e593d868fa76d5c5cedada99f256c0fa8c74c4..9fd6e8c28aeb1bdb8120c98f0a338aa21b38cc57 100644 --- a/src/cpu/VirtualFluidsCore/CoProcessors/CalculateTorqueCoProcessor.cpp +++ b/src/cpu/VirtualFluidsCore/CoProcessors/CalculateTorqueCoProcessor.cpp @@ -27,13 +27,10 @@ CalculateTorqueCoProcessor::CalculateTorqueCoProcessor( SPtr<Grid3D> grid, SPtr< if(path.size()>0){ UbSystem::makeDirectory(path); ostr.open(fname.c_str(), std::ios_base::out | std::ios_base::app);} if(!ostr) throw UbException(UB_EXARGS,"couldn't open file "+fname); } - ostr.width(12); - ostr << "step" << "\t"; - ostr.width(10); - ostr << "Tx" << "\t"; - ostr.width(18); - ostr << "Ty" << "\t"; - ostr.width(18); + + ostr << "step;"; + ostr << "Tx;"; + ostr << "Ty;"; ostr << "Tz" << std::endl; ostr.close(); } @@ -70,12 +67,10 @@ void CalculateTorqueCoProcessor::collectData( double step ) 
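// Note: the calculateForces() changes in this file (below) rewrite the torque
// accumulation as an explicit cross product, T += r x F, where r is the node
// position relative to the rotation centre. One accumulation step as a
// standalone sketch (plain C++; Vec3/addTorque are illustrative names):
#include <array>
using Vec3 = std::array<double, 3>;
void addTorque(const Vec3 &node, const Vec3 &centre, const Vec3 &F, Vec3 &T)
{
    const double rx = node[0] - centre[0];
    const double ry = node[1] - centre[1];
    const double rz = node[2] - centre[2];
    T[0] += ry * F[2] - rz * F[1]; // Tx = ry*Fz - rz*Fy
    T[1] += rz * F[0] - rx * F[2]; // Ty = rz*Fx - rx*Fz
    T[2] += rx * F[1] - ry * F[0]; // Tz = rx*Fy - ry*Fx
}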
if(!ostr) throw UbException(UB_EXARGS,"couldn't open file "+fname); } - ostr.width(12); - ostr.setf(std::ios::fixed); - ostr << istep << "\t"; - write(&ostr, forceX1global, (char*)"\t"); - write(&ostr, forceX2global, (char*)"\t"); - write(&ostr, forceX3global, (char*)"\t"); + ostr << istep << ";"; + ostr << forceX1global << ";"; + ostr << forceX2global << ";"; + ostr << forceX3global; ostr << std::endl; ostr.close(); } @@ -134,6 +129,9 @@ void CalculateTorqueCoProcessor::calculateForces() int x3 = node[2]; Vector3D worldCoordinates = grid->getNodeCoordinates(block, x1, x2, x3); + double rx = worldCoordinates[0] - x1Centre; + double ry = worldCoordinates[1] - x2Centre; + double rz = worldCoordinates[2] - x3Centre; //without ghost nodes if (x1 < minX1 || x1 > maxX1 || x2 < minX2 || x2 > maxX2 ||x3 < minX3 || x3 > maxX3 ) continue; @@ -141,10 +139,14 @@ void CalculateTorqueCoProcessor::calculateForces() if(bcArray->isFluid(x1,x2,x3)) //the node may already have been marked as solid by another interactor! { SPtr<BoundaryConditions> bc = bcArray->getBC(x1,x2,x3); - UbTupleDouble3 forceVec = getForces(x1,x2,x3,distributions,bc); - torqueX1 += (worldCoordinates[1] - x2Centre) * val<3>(forceVec) - (worldCoordinates[2] - x3Centre) * val<2>(forceVec); - torqueX2 += (worldCoordinates[2] - x3Centre) * val<1>(forceVec) - (worldCoordinates[0] - x1Centre) * val<3>(forceVec); - torqueX3 += (worldCoordinates[0] - x1Centre) * val<2>(forceVec) - (worldCoordinates[1] - x2Centre) * val<1>(forceVec); + UbTupleDouble3 forceVec = getForces(x1,x2,x3,distributions,bc); + double Fx = val<1>(forceVec); + double Fy = val<2>(forceVec); + double Fz = val<3>(forceVec); + + torqueX1 += ry * Fz - rz * Fy; + torqueX2 += rz * Fx - rx * Fz; + torqueX3 += rx * Fy - ry * Fx; //counter++; //UBLOG(logINFO, "x1="<<(worldCoordinates[1] - x2Centre)<<",x2=" << (worldCoordinates[2] - x3Centre)<< ",x3=" << (worldCoordinates[0] - x1Centre) <<" forceX3 = " << forceX3); } @@ -241,14 +243,6 @@ void CalculateTorqueCoProcessor::addInteractor( SPtr<D3Q27Interactor> interactor { interactors.push_back(interactor); } -////////////////////////////////////////////////////////////////////////// -void CalculateTorqueCoProcessor::write(std::ofstream *fileObject, double value, char *separator) -{ - (*fileObject).width(12); - (*fileObject).precision(16); - (*fileObject).setf(std::ios::fixed); - (*fileObject) << value; - (*fileObject) << separator; -} + diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/CalculateTorqueCoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/CalculateTorqueCoProcessor.h index 2eda4a063a8e4a6649380e05e2684d3f1a8f68ee..43e1e75acaf4ab115ac9c6dc40b449cf98f97e79 100644 --- a/src/cpu/VirtualFluidsCore/CoProcessors/CalculateTorqueCoProcessor.h +++ b/src/cpu/VirtualFluidsCore/CoProcessors/CalculateTorqueCoProcessor.h @@ -36,7 +36,6 @@ protected: void collectData(double step); void calculateForces(); UbTupleDouble3 getForces(int x1, int x2, int x3, SPtr<DistributionArray3D> distributions, SPtr<BoundaryConditions> bc); - void write(std::ofstream *fileObject, double value, char *separator); private: std::string path; SPtr<Communicator> comm; diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp index 028cb68ca562771b263b801c48c5f3b3791c723f..a3572c8c40ed63144080c1803d728393eaf30547 100644 --- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp +++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp @@ -70,7 +70,7
@@ MPIIOCoProcessor::MPIIOCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const //--------------------------------------- - MPI_Type_contiguous(7, MPI_CHAR, &arrayPresenceType); + MPI_Type_contiguous(8, MPI_CHAR, &arrayPresenceType); MPI_Type_commit(&arrayPresenceType); } @@ -378,10 +378,24 @@ void MPIIOCoProcessor::clearAllFiles(int step) MPI_File_set_size(file_handler, new_size); MPI_File_close(&file_handler); - std::string filename2 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin"; - int rc2 = MPI_File_open(MPI_COMM_WORLD, filename2.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); - if (rc2 != MPI_SUCCESS) - throw UbException(UB_EXARGS, "couldn't open file " + filename2); + std::string filename21 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin"; + int rc21 = MPI_File_open(MPI_COMM_WORLD, filename21.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); + if (rc21 != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename21); + MPI_File_set_size(file_handler, new_size); + MPI_File_close(&file_handler); + + std::string filename22 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin"; + int rc22 = MPI_File_open(MPI_COMM_WORLD, filename22.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); + if (rc22 != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename22); + MPI_File_set_size(file_handler, new_size); + MPI_File_close(&file_handler); + + std::string filename23 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin"; + int rc23 = MPI_File_open(MPI_COMM_WORLD, filename23.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); + if (rc23 != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename23); MPI_File_set_size(file_handler, new_size); MPI_File_close(&file_handler); @@ -441,25 +455,20 @@ void MPIIOCoProcessor::clearAllFiles(int step) MPI_File_set_size(file_handler, new_size); MPI_File_close(&file_handler); - std::string filename10 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField.bin"; + std::string filename10 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField1.bin"; int rc10 = MPI_File_open(MPI_COMM_WORLD, filename10.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); if (rc10 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename10); MPI_File_set_size(file_handler, new_size); MPI_File_close(&file_handler); - - /*std::string filename10 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC1.bin"; - int rc10 = MPI_File_open(MPI_COMM_WORLD, filename10.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, - &file_handler); if (rc10 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename10); + std::string filename11 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField2.bin"; + int rc11 = MPI_File_open(MPI_COMM_WORLD, filename11.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); + if (rc11 != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename11); MPI_File_set_size(file_handler, new_size); MPI_File_close(&file_handler); - std::string filename11 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC2.bin"; - int rc11 = MPI_File_open(MPI_COMM_WORLD, filename11.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, - &file_handler); if (rc11 != MPI_SUCCESS) throw 
UbException(UB_EXARGS, "couldn't open file " + filename11); - MPI_File_set_size(file_handler, new_size); - MPI_File_close(&file_handler);*/ } void MPIIOCoProcessor::writeCpTimeStep(int step) diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp index 39b32b6aac7110eb873a52351d46560f8843820e..d82c594e6b127f2ade7979f8945b057e7ef6db6c 100644 --- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp +++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp @@ -25,9 +25,8 @@ using namespace MPIIODataStructures; #define MESSAGE_TAG 80 #define SEND_BLOCK_SIZE 100000 -MPIIOMigrationBECoProcessor::MPIIOMigrationBECoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, - const std::string &path, SPtr<Communicator> comm) - : MPIIOCoProcessor(grid, s, path, comm), nue(-999.999) +MPIIOMigrationBECoProcessor::MPIIOMigrationBECoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, SPtr<Communicator> comm) + : MPIIOCoProcessor(grid, s, path, comm), nue(-999.999), nuL(-999.999), nuG(-999.999), densityRatio(-999.999) { memset(&boundCondParamStr, 0, sizeof(boundCondParamStr)); @@ -107,44 +106,60 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3; int firstGlobalID; - std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks + std::vector<double> doubleValuesArrayF; // double-values (arrays of f's) in all blocks Fdistribution + std::vector<double> doubleValuesArrayH1; // double-values (arrays of f's) in all blocks H1distribution + // std::vector<double> doubleValuesArrayH2; // double-values (arrays of f's) in all blocks H2distribution - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeDataSet start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } + bool multiPhase = false; DSArraysPresence arrPresence; bool firstBlock = true; int doubleCountInBlock = 0; int ic = 0; - SPtr<D3Q27EsoTwist3DSplittedVector> D3Q27EsoTwist3DSplittedVectorPtrF, D3Q27EsoTwist3DSplittedVectorPtrH; - CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributionsF, localDistributionsH; - CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributionsF, nonLocalDistributionsH; - CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributionsF, zeroDistributionsH; + SPtr<D3Q27EsoTwist3DSplittedVector> D3Q27EsoTwist3DSplittedVectorPtrF = 0, D3Q27EsoTwist3DSplittedVectorPtrH1 = 0, D3Q27EsoTwist3DSplittedVectorPtrH2 = 0; + CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributionsF = 0, localDistributionsH1 = 0, localDistributionsH2 = 0; + CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributionsF = 0, nonLocalDistributionsH1 = 0, 
nonLocalDistributionsH2 = 0; + CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributionsF = 0, zeroDistributionsH1 = 0, zeroDistributionsH2 = 0; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { - D3Q27EsoTwist3DSplittedVectorPtrF = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>( - block->getKernel()->getDataSet()->getFdistributions()); + D3Q27EsoTwist3DSplittedVectorPtrF = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getFdistributions()); localDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getLocalDistributions(); nonLocalDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getNonLocalDistributions(); zeroDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getZeroDistributions(); - D3Q27EsoTwist3DSplittedVectorPtrH = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>( - block->getKernel()->getDataSet()->getHdistributions()); - localDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getLocalDistributions(); - nonLocalDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getNonLocalDistributions(); - zeroDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getZeroDistributions(); + D3Q27EsoTwist3DSplittedVectorPtrH1 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getHdistributions()); + if (D3Q27EsoTwist3DSplittedVectorPtrH1 != 0) + { + multiPhase = true; + localDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getLocalDistributions(); + nonLocalDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getNonLocalDistributions(); + zeroDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getZeroDistributions(); + } + + /*D3Q27EsoTwist3DSplittedVectorPtrH2 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getH2distributions()); + if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0) + { + localDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getLocalDistributions(); + nonLocalDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getNonLocalDistributions(); + zeroDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getZeroDistributions(); + }*/ + if (firstBlock) // && block->getKernel()) // when first (any) valid block... { @@ -171,94 +186,92 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step) } // ... 
then save some parameters that are equal in all blocks - dataSetParamStr1.nx1 = dataSetParamStr2.nx1 = dataSetParamStr3.nx1 = - static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX1()); - dataSetParamStr1.nx2 = dataSetParamStr2.nx2 = dataSetParamStr3.nx2 = - static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX2()); - dataSetParamStr1.nx3 = dataSetParamStr2.nx3 = dataSetParamStr3.nx3 = - static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX3()); - - // Fdistributions + Hdistributions - doubleCountInBlock = - (dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] + + dataSetParamStr1.nx1 = dataSetParamStr2.nx1 = dataSetParamStr3.nx1 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX1()); + dataSetParamStr1.nx2 = dataSetParamStr2.nx2 = dataSetParamStr3.nx2 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX2()); + dataSetParamStr1.nx3 = dataSetParamStr2.nx3 = dataSetParamStr3.nx3 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX3()); + + doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] + dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] + - dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]) * 2; + dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]; - SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray = - block->getKernel()->getDataSet()->getAverageDensity(); + SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray = block->getKernel()->getDataSet()->getAverageDensity(); if (averageDensityArray) arrPresence.isAverageDensityArrayPresent = true; else arrPresence.isAverageDensityArrayPresent = false; - SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr = - block->getKernel()->getDataSet()->getAverageVelocity(); + SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr = block->getKernel()->getDataSet()->getAverageVelocity(); if (AverageVelocityArray3DPtr) arrPresence.isAverageVelocityArrayPresent = true; else arrPresence.isAverageVelocityArrayPresent = false; - SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr = - block->getKernel()->getDataSet()->getAverageFluctuations(); + SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr = block->getKernel()->getDataSet()->getAverageFluctuations(); if (AverageFluctArray3DPtr) arrPresence.isAverageFluktuationsArrayPresent = true; else arrPresence.isAverageFluktuationsArrayPresent = false; - SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr = - block->getKernel()->getDataSet()->getAverageTriplecorrelations(); + SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr = block->getKernel()->getDataSet()->getAverageTriplecorrelations(); if (AverageTripleArray3DPtr) arrPresence.isAverageTripleArrayPresent = true; else arrPresence.isAverageTripleArrayPresent = false; - SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ShearStressValArray3DPtr = - block->getKernel()->getDataSet()->getShearStressValues(); + SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ShearStressValArray3DPtr = block->getKernel()->getDataSet()->getShearStressValues(); if (ShearStressValArray3DPtr) arrPresence.isShearStressValArrayPresent = true; else arrPresence.isShearStressValArrayPresent = false; - 
SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> relaxationFactor3DPtr = - block->getKernel()->getDataSet()->getRelaxationFactor(); + SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> relaxationFactor3DPtr = block->getKernel()->getDataSet()->getRelaxationFactor(); if (relaxationFactor3DPtr) arrPresence.isRelaxationFactorPresent = true; else arrPresence.isRelaxationFactorPresent = false; - SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr = - block->getKernel()->getDataSet()->getPhaseField(); - if (phaseField3DPtr) - arrPresence.isPhaseFieldPresent = true; + SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr1 = block->getKernel()->getDataSet()->getPhaseField(); + if (phaseField3DPtr1) + arrPresence.isPhaseField1Present = true; else - arrPresence.isPhaseFieldPresent = false; + arrPresence.isPhaseField1Present = false; + + SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr2 = block->getKernel()->getDataSet()->getPhaseField2(); + if (phaseField3DPtr2) + arrPresence.isPhaseField2Present = true; + else + arrPresence.isPhaseField2Present = false; + firstBlock = false; } - if (localDistributionsF && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && - (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), localDistributionsF->getDataVector().begin(), - localDistributionsF->getDataVector().end()); - if (nonLocalDistributionsF && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && - (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), nonLocalDistributionsF->getDataVector().begin(), - nonLocalDistributionsF->getDataVector().end()); + if (localDistributionsF && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) + doubleValuesArrayF.insert(doubleValuesArrayF.end(), localDistributionsF->getDataVector().begin(), localDistributionsF->getDataVector().end()); + if (nonLocalDistributionsF && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) + doubleValuesArrayF.insert(doubleValuesArrayF.end(), nonLocalDistributionsF->getDataVector().begin(), nonLocalDistributionsF->getDataVector().end()); if (zeroDistributionsF && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), zeroDistributionsF->getDataVector().begin(), - zeroDistributionsF->getDataVector().end()); - - if (localDistributionsH && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && - (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), localDistributionsH->getDataVector().begin(), - localDistributionsH->getDataVector().end()); - if (nonLocalDistributionsH && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && - (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), nonLocalDistributionsH->getDataVector().begin(), - nonLocalDistributionsH->getDataVector().end()); - if (zeroDistributionsH && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), zeroDistributionsH->getDataVector().begin(), - zeroDistributionsH->getDataVector().end()); + doubleValuesArrayF.insert(doubleValuesArrayF.end(), zeroDistributionsF->getDataVector().begin(), 
zeroDistributionsF->getDataVector().end()); + + if (multiPhase) + { + if (localDistributionsH1 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) + doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), localDistributionsH1->getDataVector().begin(), localDistributionsH1->getDataVector().end()); + if (nonLocalDistributionsH1 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) + doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), nonLocalDistributionsH1->getDataVector().begin(), nonLocalDistributionsH1->getDataVector().end()); + if (zeroDistributionsH1 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0)) + doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), zeroDistributionsH1->getDataVector().begin(), zeroDistributionsH1->getDataVector().end()); + } + + /*if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0) + { + if (localDistributionsH2 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) + doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), localDistributionsH2->getDataVector().begin(), localDistributionsH2->getDataVector().end()); + if (nonLocalDistributionsH2 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) + doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), nonLocalDistributionsH2->getDataVector().begin(), nonLocalDistributionsH2->getDataVector().end()); + if (zeroDistributionsH2 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0)) + doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), zeroDistributionsH2->getDataVector().begin(), zeroDistributionsH2->getDataVector().end()); + }*/ ic++; } @@ -267,10 +280,10 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step) MPI_Type_contiguous(doubleCountInBlock , MPI_DOUBLE, &dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeDataSet start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; @@ -287,28 +300,57 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step) // write to the file MPI_File file_handler; - std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin"; + std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin"; int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename); - MPI_Offset write_offset = (MPI_Offset)(3 * sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) * - (MPI_Offset)(doubleCountInBlock) * - (MPI_Offset)(sizeof(double)); + MPI_Offset write_offset = (MPI_Offset)(3 * sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double)); MPI_File_write_at(file_handler, (MPI_Offset)0, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE); - MPI_File_write_at(file_handler, 
(MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, - MPI_STATUS_IGNORE); - MPI_File_write_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, - MPI_STATUS_IGNORE); - MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount, dataSetDoubleType, - MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayF[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_sync(file_handler); MPI_File_close(&file_handler); + + //-------------------------------- H1 ------------------------------------------------ + if (multiPhase) + { + filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin"; + rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); + if (rc != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename); + + write_offset = (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double)); + MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH1[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); + + MPI_File_sync(file_handler); + MPI_File_close(&file_handler); + } + + //-------------------------------- H2 -------------------------------------------------- + /*if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0) + { + filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin"; + rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); + if (rc != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename); + + write_offset = (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double)); + MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH2[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); + + MPI_File_sync(file_handler); + MPI_File_close(&file_handler); + } */ + + //-------------------------------- + MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeDataSet time: " << finish - start << " s"); } @@ -346,9 +388,12 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step) write3DArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin")); // writeRelaxationFactor(step); - if (arrPresence.isPhaseFieldPresent) - write3DArray(step, PhaseField, std::string("/cpPhaseField.bin")); -} + if (arrPresence.isPhaseField1Present) + write3DArray(step, PhaseField1, std::string("/cpPhaseField1.bin")); + + if (arrPresence.isPhaseField2Present) + write3DArray(step, PhaseField2, std::string("/cpPhaseField2.bin")); + } void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std::string fname) { @@ -361,7 +406,8 @@ void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std:: std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + {
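// Editor's note (illustrative sketch, assumptions flagged): the checkpoint files written by
// this co-processor are laid out as a small fixed header followed by equally sized per-block
// payloads, so every rank can seek to its own region without coordination:
//
//   // hypothetical values, for illustration only
//   MPI_Offset headerBytes  = (MPI_Offset)sizeof(dataSetParam);                        // one header struct here
//   MPI_Offset payloadBytes = (MPI_Offset)doubleCountInBlock * (MPI_Offset)sizeof(double);
//   MPI_Offset off          = headerBytes + (MPI_Offset)firstGlobalID * payloadBytes;
//   // e.g. doubleCountInBlock = 27000 doubles, firstGlobalID = 5 -> off = headerBytes + 1080000
//
// This is the same write_offset formula used above for cpDataSetF.bin (with a three-struct
// header; cpDataSetH1.bin carries no header) and below for the 4D/3D array files.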
grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -374,16 +420,18 @@ void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std:: int ic = 0; SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___Array; - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write4DArray start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { - switch (arrayType) { + switch (arrayType) + { case AverageDensity: ___Array = block->getKernel()->getDataSet()->getAverageDensity(); break; @@ -400,8 +448,7 @@ void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std:: ___Array = block->getKernel()->getDataSet()->getShearStressValues(); break; default: - UB_THROW(UbException(UB_EXARGS, - "MPIIOMigrationBECoProcessor::write4DArray : 4D array type does not exist!")); + UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationBECoProcessor::write4DArray : 4D array type does not exist!")); break; } @@ -410,29 +457,26 @@ void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std:: firstGlobalID = block->getGlobalID(); dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0; - dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1()); - dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2()); - dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3()); - dataSetParamStr.nx[3] = static_cast<int>(___Array->getNX4()); - doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1()); + dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2()); + dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3()); + dataSetParamStr.nx[3] = static_cast<int>(___Array->getNX4()); + doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; firstBlock = false; } - if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && - (dataSetParamStr.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), - ___Array->getDataVector().end()); + if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0)) + doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end()); ic++; } } - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write4DArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } // register new MPI-type depending on the block-specific information @@ -451,20 +495,18 @@ void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std:: if 
(rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename); - MPI_Offset write_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) * - (MPI_Offset)(doubleCountInBlock) * - (MPI_Offset)(sizeof(double)); + MPI_Offset write_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double)); // each process writes common parameters of a dataSet MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE); - MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount, dataSetDoubleType, - MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_sync(file_handler); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write4DArray time: " << finish - start << " s"); } @@ -481,7 +523,8 @@ void MPIIOMigrationBECoProcessor::write3DArray(int step, Arrays arrayType, std:: std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -494,25 +537,30 @@ void MPIIOMigrationBECoProcessor::write3DArray(int step, Arrays arrayType, std:: int ic = 0; SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___Array; - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write3DArray start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { - switch (arrayType) { + switch (arrayType) + { case RelaxationFactor: ___Array = block->getKernel()->getDataSet()->getRelaxationFactor(); break; - case PhaseField: + case PhaseField1: ___Array = block->getKernel()->getDataSet()->getPhaseField(); break; + case PhaseField2: + ___Array = block->getKernel()->getDataSet()->getPhaseField2(); + break; default: UB_THROW(UbException(UB_EXARGS, - "MPIIOMigrationBECoProcessor::write3DArray : 3D array type does not exist!")); + "MPIIOMigrationBECoProcessor::write3DArray : 3D array type does not exist!")); break; } @@ -531,17 +579,16 @@ void MPIIOMigrationBECoProcessor::write3DArray(int step, Arrays arrayType, std:: } if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), - ___Array->getDataVector().end()); + doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end()); ic++; } } - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write3DArray start MPI IO rank = " << rank); - UBLOG(logINFO, 
"Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } // register new MPI-type depending on the block-specific information @@ -560,37 +607,33 @@ void MPIIOMigrationBECoProcessor::write3DArray(int step, Arrays arrayType, std:: if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename); - MPI_Offset write_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) * - (MPI_Offset)(doubleCountInBlock) * - (MPI_Offset)(sizeof(double)); + MPI_Offset write_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double)); // each process writes common parameters of a dataSet MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE); - MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount, dataSetDoubleType, - MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_sync(file_handler); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write3DArray time: " << finish - start << " s"); } } -//--------------------------------------------------------------------------------- - void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step) { int rank, size; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBoundaryConds start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } int blocksCount = 0; // quantity of blocks, that belong to this process @@ -601,7 +644,8 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -616,24 +660,26 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step) int ic = 0; SPtr<BCArray3D> bcArr; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // all the blocks of the current level { bcArr = block->getKernel()->getBCProcessor()->getBCArray(); - bcAddArray[ic].globalID = - block->getGlobalID(); // id of the block needed to find it while regenerating the grid + bcAddArray[ic].globalID = block->getGlobalID(); // id of the block needed to find it while regenerating the grid bcAddArray[ic].boundCond_count = 0; // how many BoundaryConditions in this block bcAddArray[ic].indexContainer_count = 0; // how many indexContainer-values in this block bytesCount[ic] = 
sizeof(BCAddMigration); bcVector[ic].resize(0); indexContainerVector[ic].resize(0); - for (std::size_t bc = 0; bc < bcArr->getBCVectorSize(); bc++) { + for (std::size_t bc = 0; bc < bcArr->getBCVectorSize(); bc++) + { BoundaryCondition *bouCond = new BoundaryCondition(); if (bcArr->bcvector[bc] == NULL) memset(bouCond, 0, sizeof(BoundaryCondition)); - else { + else + { bouCond->noslipBoundaryFlags = bcArr->bcvector[bc]->getNoSlipBoundary(); bouCond->slipBoundaryFlags = bcArr->bcvector[bc]->getSlipBoundary(); bouCond->velocityBoundaryFlags = bcArr->bcvector[bc]->getVelocityBoundary(); @@ -662,7 +708,8 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step) bytesCount[ic] += sizeof(BoundaryCondition); } - if (bcindexmatrixCountNotInit) { + if (bcindexmatrixCountNotInit) + { boundCondParamStr.nx1 = static_cast<int>(bcArr->bcindexmatrix.getNX1()); boundCondParamStr.nx2 = static_cast<int>(bcArr->bcindexmatrix.getNX2()); boundCondParamStr.nx3 = static_cast<int>(bcArr->bcindexmatrix.getNX3()); @@ -670,11 +717,9 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step) bcindexmatrixCountNotInit = false; } - bcindexmatrixVector.insert(bcindexmatrixVector.end(), bcArr->bcindexmatrix.getDataVector().begin(), - bcArr->bcindexmatrix.getDataVector().end()); + bcindexmatrixVector.insert(bcindexmatrixVector.end(), bcArr->bcindexmatrix.getDataVector().begin(), bcArr->bcindexmatrix.getDataVector().end()); - indexContainerVector[ic].insert(indexContainerVector[ic].begin(), bcArr->indexContainer.begin(), - bcArr->indexContainer.end()); + indexContainerVector[ic].insert(indexContainerVector[ic].begin(), bcArr->indexContainer.begin(), bcArr->indexContainer.end()); bcAddArray[ic].indexContainer_count = static_cast<int>(bcArr->indexContainer.size()); count_indexContainer += bcAddArray[ic].indexContainer_count; bytesCount[ic] += bcAddArray[ic].indexContainer_count * sizeof(int); @@ -688,10 +733,10 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step) MPI_Type_contiguous(boundCondParamStr.bcindexmatrixCount, MPI_INT, &bcindexmatrixType); MPI_Type_commit(&bcindexmatrixType); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBoundaryConds start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; @@ -712,13 +757,10 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step) if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename); - MPI_Offset write_offset = (MPI_Offset)(sizeof(int)) + (MPI_Offset)(bcAddArray[0].globalID) * - (MPI_Offset)(boundCondParamStr.bcindexmatrixCount) * - (MPI_Offset)(sizeof(int)); + MPI_Offset write_offset = (MPI_Offset)(sizeof(int)) + (MPI_Offset)(bcAddArray[0].globalID) * (MPI_Offset)(boundCondParamStr.bcindexmatrixCount) * (MPI_Offset)(sizeof(int)); MPI_File_write_at(file_handler, 0, &boundCondParamStr.bcindexmatrixCount, 1, MPI_INT, MPI_STATUS_IGNORE); - MPI_File_write_at(file_handler, write_offset, &bcindexmatrixVector[0], blocksCount, bcindexmatrixType, - MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, write_offset, &bcindexmatrixVector[0], blocksCount, bcindexmatrixType, MPI_STATUS_IGNORE); MPI_File_sync(file_handler); MPI_File_close(&file_handler); @@ -732,14 +774,17 @@ void 
MPIIOMigrationBECoProcessor::writeBoundaryConds(int step) MPI_File_write_at(file_handler, 0, &boundCondParamStr, 4, MPI_INT, MPI_STATUS_IGNORE); - write_offset = - (MPI_Offset)(sizeof(boundCondParam)) + (MPI_Offset)(grid->getNumberOfBlocks()) * (MPI_Offset)(sizeof(size_t)); + write_offset = (MPI_Offset)(sizeof(boundCondParam)) + (MPI_Offset)(grid->getNumberOfBlocks()) * (MPI_Offset)(sizeof(size_t)); size_t next_file_offset = 0; - if (size > 1) { - if (rank == 0) { + if (size > 1) + { + if (rank == 0) + { next_file_offset = write_offset + allBytesCount; MPI_Send(&next_file_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); next_file_offset = write_offset + allBytesCount; if (rank < size - 1) @@ -749,21 +794,17 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step) MPI_Offset write_offsetIndex; - for (int nb = 0; nb < blocksCount; nb++) { - write_offsetIndex = - (MPI_Offset)(sizeof(boundCondParam)) + (MPI_Offset)(bcAddArray[nb].globalID) * (MPI_Offset)(sizeof(size_t)); + for (int nb = 0; nb < blocksCount; nb++) + { + write_offsetIndex = (MPI_Offset)(sizeof(boundCondParam)) + (MPI_Offset)(bcAddArray[nb].globalID) * (MPI_Offset)(sizeof(size_t)); MPI_File_write_at(file_handler, write_offsetIndex, &write_offset, 1, MPI_LONG_LONG_INT, MPI_STATUS_IGNORE); MPI_File_write_at(file_handler, write_offset, &bcAddArray[nb], 3, MPI_INT, MPI_STATUS_IGNORE); if (bcVector[nb].size() > 0) - MPI_File_write_at(file_handler, write_offset + (MPI_Offset)(sizeof(BCAddMigration)), &bcVector[nb][0], - bcAddArray[nb].boundCond_count, boundCondType, MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, write_offset + (MPI_Offset)(sizeof(BCAddMigration)), &bcVector[nb][0], bcAddArray[nb].boundCond_count, boundCondType, MPI_STATUS_IGNORE); if (indexContainerVector[nb].size() > 0) - MPI_File_write_at( - file_handler, - write_offset + (MPI_Offset)(sizeof(BCAddMigration)) + - (MPI_Offset)(bcAddArray[nb].boundCond_count) * (MPI_Offset)(sizeof(BoundaryCondition)), + MPI_File_write_at(file_handler, write_offset + (MPI_Offset)(sizeof(BCAddMigration)) + (MPI_Offset)(bcAddArray[nb].boundCond_count) * (MPI_Offset)(sizeof(BoundaryCondition)), &indexContainerVector[nb][0], bcAddArray[nb].indexContainer_count, MPI_INT, MPI_STATUS_IGNORE); write_offset += bytesCount[nb]; @@ -772,7 +813,8 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step) MPI_File_sync(file_handler); MPI_File_close(&file_handler); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBoundaryConds time: " << finish - start << " s"); } @@ -792,8 +834,7 @@ void MPIIOMigrationBECoProcessor::restart(int step) UBLOG(logINFO, "Load check point - start"); readBlocks(step); - SPtr<Grid3DVisitor> newMetisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, - D3Q27System::BSW, MetisPartitioner::KWAY)); + SPtr<Grid3DVisitor> newMetisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::BSW, MetisPartitioner::KWAY)); grid->accept(newMetisVisitor); readDataSet(step); @@ -806,8 +847,7 @@ void MPIIOMigrationBECoProcessor::restart(int step) void MPIIOMigrationBECoProcessor::readBlocks(int step) { MPIIOCoProcessor::readBlocks(step); } -void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, int doubleCountInBlock, - std::vector<double> &pV, std::vector<double> 
*rawDataReceive) +void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, int doubleCountInBlock, std::vector<double> &pV, std::vector<double> *rawDataReceive) { int rank, size; MPI_Comm_rank(MPI_COMM_WORLD, &rank); @@ -821,7 +861,8 @@ void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, i int *blocksCounterRec = new int[size]; std::vector<double> *rawDataSend = new std::vector<double>[size]; - for (int r = 0; r < size; r++) { + for (int r = 0; r < size; r++) + { rawDataSend[r].resize(0); blocksCounterSend[r] = 0; blocksCounterRec[r] = 0; @@ -842,8 +883,7 @@ void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, i { blocksCounterRec[tempRank]++; rawDataReceive[tempRank].push_back(double(indexB + ind)); - rawDataReceive[tempRank].insert(rawDataReceive[tempRank].end(), - pV.begin() + ind * size_t(doubleCountInBlock), + rawDataReceive[tempRank].insert(rawDataReceive[tempRank].end(), pV.begin() + ind * size_t(doubleCountInBlock), pV.begin() + ind * size_t(doubleCountInBlock) + size_t(doubleCountInBlock)); } else // we must send data to other processes { @@ -856,17 +896,20 @@ void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, i MPI_Request *requests = new MPI_Request[size * 2]; // send + receive int requestCount = 0; - // MPI_Status status; - for (int r = 0; r < size; r++) { - if (r != rank) { + for (int r = 0; r < size; r++) + { + if (r != rank) + { MPI_Irecv(&blocksCounterRec[r], 1, MPI_INT, r, tagN, MPI_COMM_WORLD, &requests[requestCount]); requestCount++; } } - for (int r = 0; r < size; r++) { - if (r != rank) { + for (int r = 0; r < size; r++) + { + if (r != rank) + { MPI_Isend(&blocksCounterSend[r], 1, MPI_INT, r, tagN, MPI_COMM_WORLD, &requests[requestCount]); requestCount++; } @@ -877,7 +920,8 @@ void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, i MPI_Type_contiguous(doubleCountInBlock + 1, MPI_DOUBLE, &sendBlockDoubleType); MPI_Type_commit(&sendBlockDoubleType); - for (int r = 0; r < size; r++) { + for (int r = 0; r < size; r++) + { if (r != rank) rawDataReceive[r].resize(size_t(blocksCounterRec[r]) * size_t(doubleCountInBlock + 1)); } @@ -888,35 +932,39 @@ void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, i const int maxQuant = 400; int restQuant; - for (int r = 0; r < size; r++) { - if (r != rank) { + for (int r = 0; r < size; r++) + { + if (r != rank) + { sendRecCount = int(blocksCounterRec[r] / maxQuant); if (sendRecCount * maxQuant < blocksCounterRec[r]) sendRecCount++; requests = (MPI_Request *)realloc(requests, (requestCount + sendRecCount) * sizeof(MPI_Request)); - for (int sc = 0; sc < sendRecCount; sc++) { + for (int sc = 0; sc < sendRecCount; sc++) + { restQuant = (sc < sendRecCount - 1) ? 
maxQuant : blocksCounterRec[r] - sc * maxQuant; sendRecOffset = size_t(sc) * size_t(maxQuant) * size_t((doubleCountInBlock + 1)); - MPI_Irecv(&rawDataReceive[r][sendRecOffset], restQuant, sendBlockDoubleType, r, tagN, MPI_COMM_WORLD, - &requests[requestCount]); + MPI_Irecv(&rawDataReceive[r][sendRecOffset], restQuant, sendBlockDoubleType, r, tagN, MPI_COMM_WORLD, &requests[requestCount]); requestCount++; } } } - for (int r = 0; r < size; r++) { - if (r != rank) { + for (int r = 0; r < size; r++) + { + if (r != rank) + { sendRecCount = int(blocksCounterSend[r] / maxQuant); if (sendRecCount * maxQuant < blocksCounterSend[r]) sendRecCount++; requests = (MPI_Request *)realloc(requests, (requestCount + sendRecCount) * sizeof(MPI_Request)); - for (int sc = 0; sc < sendRecCount; sc++) { + for (int sc = 0; sc < sendRecCount; sc++) + { restQuant = (sc < sendRecCount - 1) ? maxQuant : blocksCounterSend[r] - sc * maxQuant; sendRecOffset = size_t(sc) * size_t(maxQuant) * size_t((doubleCountInBlock + 1)); - MPI_Isend(&rawDataSend[r][sendRecOffset], restQuant, sendBlockDoubleType, r, tagN, MPI_COMM_WORLD, - &requests[requestCount]); + MPI_Isend(&rawDataSend[r][sendRecOffset], restQuant, sendBlockDoubleType, r, tagN, MPI_COMM_WORLD, &requests[requestCount]); requestCount++; } } @@ -942,13 +990,20 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step) UB_THROW(UbException(UB_EXARGS, "bcProcessor does not exist!")); if (nue == -999.999) UB_THROW(UbException(UB_EXARGS, "nue is not initialised!")); + if (nuL == -999.999 ) + UB_THROW(UbException(UB_EXARGS, "nuL is not initialised!")); + if (nuG == -999.999) + UB_THROW(UbException(UB_EXARGS, "nuG is not initialised!")); + if (densityRatio == -999.999) + UB_THROW(UbException(UB_EXARGS, "densityRatio is not initialised!")); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } + bool multiPhase = false; dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3; int blocksCountAll = grid->getNumberOfBlocks(); // quantity of all blocks in the grid @@ -968,150 +1023,183 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step) start = MPI_Wtime(); MPI_File file_handler; - std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin"; + std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin"; int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler); if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename); MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE); - MPI_File_read_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, - MPI_STATUS_IGNORE); - MPI_File_read_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, - MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE); - size_t 
doubleCountInBlock = - (dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] + + size_t doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] + dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] + - dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]) * 2; - std::vector<double> doubleValuesArray(myBlocksCount * doubleCountInBlock); // double-values in all blocks + dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]; + std::vector<double> doubleValuesArrayF(size_t(myBlocksCount * doubleCountInBlock)); // double-values in all blocks Fdistributions + std::vector<double> doubleValuesArrayH1; // double-values in all blocks H1distributions + //std::vector<double> doubleValuesArrayH2; // double-values in all blocks H2distributions MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - MPI_Offset read_offset = - (MPI_Offset)(3 * sizeof(dataSetParam)) + (MPI_Offset)(indexB * doubleCountInBlock * sizeof(double)); - MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], int(myBlocksCount), dataSetDoubleType, - MPI_STATUS_IGNORE); + //--------------------------------- F --------------------------------------------------------- + MPI_Offset read_offset = (MPI_Offset)(3 * sizeof(dataSetParam)) + (MPI_Offset)(indexB * doubleCountInBlock * sizeof(double)); + MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayF[0], int(myBlocksCount), dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_close(&file_handler); + + //--------------------------------- H1 --------------------------------------------------------- + MPI_Offset fsize; + filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin"; + rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler); + if (rc != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename); + MPI_File_get_size(file_handler, &fsize); + if (fsize > 0) + { + multiPhase = true; + doubleValuesArrayH1.resize(myBlocksCount * doubleCountInBlock); + + read_offset = (MPI_Offset)(indexB * doubleCountInBlock * sizeof(double)) ; + MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH1[0], int(myBlocksCount), dataSetDoubleType, MPI_STATUS_IGNORE); + } + MPI_File_close(&file_handler); + MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet time: " << finish - start << " s"); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet start of exchange of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } - std::vector<double> *rawDataReceive = new std::vector<double>[size]; + std::vector<double>* rawDataReceiveF = new std::vector<double>[size]; for (int r = 0; r < size; r++) - rawDataReceive[r].resize(0); + rawDataReceiveF[r].resize(0); + blocksExchange(MESSAGE_TAG, indexB, indexE, int(doubleCountInBlock), doubleValuesArrayF, rawDataReceiveF); - blocksExchange(MESSAGE_TAG, indexB, indexE, int(doubleCountInBlock), 
doubleValuesArray, rawDataReceive); + std::vector<double>* rawDataReceiveH1 = new std::vector<double>[size]; + for (int r = 0; r < size; r++) + rawDataReceiveH1[r].resize(0); + if (multiPhase) + blocksExchange(MESSAGE_TAG, indexB, indexE, int(doubleCountInBlock), doubleValuesArrayH1, rawDataReceiveH1); - if (comm->isRoot()) { + /* std::vector<double>* rawDataReceiveH2 = new std::vector<double>[size]; + for (int r = 0; r < size; r++) + rawDataReceiveH2[r].resize(0); + blocksExchange(MESSAGE_TAG, indexB, indexE, int(doubleCountInBlock), doubleValuesArrayH2, rawDataReceiveH2);*/ + + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet time: " << finish - start << " s"); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } - + //-------------------------------------- restore blocks --------------------------------- int blockID; std::vector<double> vectorsOfValuesF1, vectorsOfValuesF2, vectorsOfValuesF3; - std::vector<double> vectorsOfValuesH1, vectorsOfValuesH2, vectorsOfValuesH3; + std::vector<double> vectorsOfValuesH11, vectorsOfValuesH12, vectorsOfValuesH13; + //std::vector<double> vectorsOfValuesH21, vectorsOfValuesH22, vectorsOfValuesH23; - size_t vectorSize1 = - dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3]; - size_t vectorSize2 = - dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3]; - size_t vectorSize3 = - dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]; + size_t vectorSize1 = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3]; + size_t vectorSize2 = dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3]; + size_t vectorSize3 = dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]; size_t index; - for (int r = 0; r < size; r++) { + for (int r = 0; r < size; r++) + { index = 0; - for (int ii = 0; ii < int(rawDataReceive[r].size() / doubleCountInBlock); ii++) { - blockID = (int)(rawDataReceive[r][index]); + for (int ii = 0; ii < int(rawDataReceiveF[r].size() / doubleCountInBlock); ii++) + { + blockID = (int)(rawDataReceiveF[r][index]); index += 1; - vectorsOfValuesF1.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize1); - index += vectorSize1; - - vectorsOfValuesF2.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize2); - index += vectorSize2; - - vectorsOfValuesF3.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize3); - index += vectorSize3; - - vectorsOfValuesH1.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize1); + vectorsOfValuesF1.assign(rawDataReceiveF[r].data() + index, rawDataReceiveF[r].data() + index + vectorSize1); + if(multiPhase) + vectorsOfValuesH11.assign(rawDataReceiveH1[r].data() + index, rawDataReceiveH1[r].data() + index + vectorSize1); + //vectorsOfValuesH21.assign(rawDataReceiveH2[r].data() + index, rawDataReceiveH2[r].data() + index + vectorSize1); index += vectorSize1; - 
vectorsOfValuesH2.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize2);
+ vectorsOfValuesF2.assign(rawDataReceiveF[r].data() + index, rawDataReceiveF[r].data() + index + vectorSize2);
+ if (multiPhase)
+ vectorsOfValuesH12.assign(rawDataReceiveH1[r].data() + index, rawDataReceiveH1[r].data() + index + vectorSize2);
+ //vectorsOfValuesH22.assign(rawDataReceiveH2[r].data() + index, rawDataReceiveH2[r].data() + index + vectorSize2);
index += vectorSize2;
- vectorsOfValuesH3.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize3);
+ vectorsOfValuesF3.assign(rawDataReceiveF[r].data() + index, rawDataReceiveF[r].data() + index + vectorSize3);
+ if (multiPhase)
+ vectorsOfValuesH13.assign(rawDataReceiveH1[r].data() + index, rawDataReceiveH1[r].data() + index + vectorSize3);
+ //vectorsOfValuesH23.assign(rawDataReceiveH2[r].data() + index, rawDataReceiveH2[r].data() + index + vectorSize3);
index += vectorSize3;
SPtr<DistributionArray3D> mFdistributions(new D3Q27EsoTwist3DSplittedVector());
- dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions) - ->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0], - dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], - dataSetParamStr1.nx[3])));
- dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions) - ->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0], - dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], - dataSetParamStr2.nx[3])));
- dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions) - ->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+ new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+ new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
vectorsOfValuesF3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX1(dataSetParamStr1.nx1);
dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX2(dataSetParamStr1.nx2);
dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX3(dataSetParamStr1.nx3);
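+ // F distributions are rebuilt for every block; the H (phase-field) distributions
+ // below are only rebuilt when the cpDataSetH1.bin file was found non-empty
+ // (multiPhase was set while reading it).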
- SPtr<DistributionArray3D> mHdistributions(new D3Q27EsoTwist3DSplittedVector());
- dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions) - ->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH1, dataSetParamStr1.nx[0], - dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], - dataSetParamStr1.nx[3])));
- dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions) - ->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH2, dataSetParamStr2.nx[0], - dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], - dataSetParamStr2.nx[3])));
- dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions) - ->setZeroDistributions( CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( - vectorsOfValuesH3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
-
- dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX1(dataSetParamStr1.nx1);
- dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX2(dataSetParamStr1.nx2);
- dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX3(dataSetParamStr1.nx3);
+ SPtr<DistributionArray3D> mH1distributions(new D3Q27EsoTwist3DSplittedVector());
+ if (multiPhase)
+ {
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+ new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH11, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+ new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH12, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+ vectorsOfValuesH13, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
+
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX1(dataSetParamStr1.nx1);
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX2(dataSetParamStr1.nx2);
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX3(dataSetParamStr1.nx3);
+ }
+
+ /*SPtr<DistributionArray3D> mH2distributions(new D3Q27EsoTwist3DSplittedVector());
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+ new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH21, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+ new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH22, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+ vectorsOfValuesH23, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
+
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX1(dataSetParamStr1.nx1);
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX2(dataSetParamStr1.nx2);
+ dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX3(dataSetParamStr1.nx3);*/
+
// find the necessary block and fill it
SPtr<Block3D> block = grid->getBlock(blockID);
this->lbmKernel->setBlock(block);
SPtr<LBMKernel> kernel = this->lbmKernel->clone();
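+ // Besides the single-phase collision factor, the cloned kernel now also receives
+ // the liquid/gas collision factors derived from nuL and nuG plus the density
+ // ratio used by the multiphase kernels.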
- LBMReal collFactor = LBMSystem::calcCollisionFactor(this->nue, block->getLevel());
+ LBMReal collFactor = LBMSystem::calcCollisionFactor(this->nue, block->getLevel());
+ LBMReal collFactorL = LBMSystem::calcCollisionFactor(this->nuL, block->getLevel());
+ LBMReal collFactorG = LBMSystem::calcCollisionFactor(this->nuG, block->getLevel());
kernel->setCollisionFactor(collFactor);
kernel->setIndex(block->getX1(), block->getX2(), block->getX3());
kernel->setDeltaT(LBMSystem::getDeltaT(block->getLevel()));
+ kernel->setCollisionFactorMultiphase(collFactorL, collFactorG);
+ kernel->setDensityRatio(this->densityRatio);
SPtr<DataSet3D> dataSetPtr = SPtr<DataSet3D>(new DataSet3D());
dataSetPtr->setFdistributions(mFdistributions);
- dataSetPtr->setHdistributions(mHdistributions);
+ if (multiPhase)
+ dataSetPtr->setHdistributions(mH1distributions);
+// dataSetPtr->setHdistributions(mH2distributions);
kernel->setDataSet(dataSetPtr);
block->setKernel(kernel);
}
}
- if (comm->isRoot()) {
+ //if (comm->isRoot())
+ {
UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet end of restore of data, rank = " << rank);
- UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+ UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
}
//-------------------------------------------------------------
@@ -1149,11 +1237,16 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
if (arrPresence.isRelaxationFactorPresent)
readArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin")); // readRelaxationFactor(step);
+
+ if (arrPresence.isPhaseField1Present)
+ readArray(step, PhaseField1, std::string("/cpPhaseField1.bin"));
- if (arrPresence.isPhaseFieldPresent)
- readArray(step, PhaseField, std::string("/cpPhaseField.bin"));
+ if (arrPresence.isPhaseField2Present)
+ readArray(step, PhaseField2, std::string("/cpPhaseField2.bin"));
- delete[] rawDataReceive;
+ delete[] rawDataReceiveF;
+ delete[] rawDataReceiveH1; // allocated unconditionally above, so it must be freed here
+// delete[] rawDataReceiveH2;
}
void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::string fname)
@@ -1162,11 +1255,12 @@ void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::strin
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
- if (comm->isRoot()) {
+ if (comm->isRoot())
+ {
UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray start MPI IO rank = " << rank);
- UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+ UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
}
+
double start, finish;
if (comm->isRoot())
start = MPI_Wtime();
@@ -1194,56 +1288,53 @@ void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::strin
MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
- size_t doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+ size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
std::vector<double> doubleValuesArray(myBlocksCount * doubleCountInBlock); // double-values in all blocks
MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
MPI_Type_commit(&dataSetDoubleType);
- MPI_Offset
read_offset = (MPI_Offset)(sizeof(dataSetParam)) + - (MPI_Offset)(indexB) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double)); - MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], int(myBlocksCount), dataSetDoubleType, - MPI_STATUS_IGNORE); + MPI_Offset read_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(indexB) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double)); + MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], int(myBlocksCount), dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray time: " << finish - start << " s"); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray start of exchange of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } std::vector<double> *rawDataReceive = new std::vector<double>[size]; for (int r = 0; r < size; r++) rawDataReceive[r].resize(0); - blocksExchange(MESSAGE_TAG + int(arrType), indexB, indexE, int(doubleCountInBlock), doubleValuesArray, - rawDataReceive); + blocksExchange(MESSAGE_TAG + int(arrType), indexB, indexE, int(doubleCountInBlock), doubleValuesArray, rawDataReceive); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray end of exchange of data, rank = " << rank); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray time: " << finish - start << " s"); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } //----------------------------- restore data --------------------------------- int blockID; std::vector<double> vectorsOfValues; size_t index; - size_t nextVectorSize = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; - for (int r = 0; r < size; r++) { + for (int r = 0; r < size; r++) + { index = 0; - for (int ii = 0; ii < int(rawDataReceive[r].size() / doubleCountInBlock); ii++) { - blockID = (int)(rawDataReceive[r][index]); + for (int ii = 0; ii < int(rawDataReceive[r].size() / doubleCountInBlock); ii++) + { + blockID = (int)(rawDataReceive[r][index]); SPtr<Block3D> block = grid->getBlock(blockID); index += 1; @@ -1254,40 +1345,31 @@ void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::strin SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___4DArray; SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___3DArray; - switch (arrType) { + switch (arrType) + { case AverageDensity: - ___4DArray = - CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( - vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], - dataSetParamStr.nx[3])); + ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( + 
vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); block->getKernel()->getDataSet()->setAverageDensity(___4DArray); break; case AverageVelocity: - ___4DArray = - CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( - vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], - dataSetParamStr.nx[3])); + ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( + vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); block->getKernel()->getDataSet()->setAverageVelocity(___4DArray); break; case AverageFluktuations: - ___4DArray = - CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( - vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], - dataSetParamStr.nx[3])); + ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( + vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); block->getKernel()->getDataSet()->setAverageFluctuations(___4DArray); break; case AverageTriple: - ___4DArray = - CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( - vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], - dataSetParamStr.nx[3])); + ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( + vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); block->getKernel()->getDataSet()->setAverageTriplecorrelations(___4DArray); break; case ShearStressVal: - ___4DArray = - CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( - vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], - dataSetParamStr.nx[3])); + ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( + vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); block->getKernel()->getDataSet()->setShearStressValues(___4DArray); break; case RelaxationFactor: @@ -1295,14 +1377,18 @@ void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::strin vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2])); block->getKernel()->getDataSet()->setRelaxationFactor(___3DArray); break; - case PhaseField: + case PhaseField1: ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2])); block->getKernel()->getDataSet()->setPhaseField(___3DArray); break; + case PhaseField2: + ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( + vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2])); + block->getKernel()->getDataSet()->setPhaseField2(___3DArray); + break; default: - UB_THROW( - UbException(UB_EXARGS, "MPIIOMigrationBECoProcessor::readArray : array type does not exist!")); + UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationBECoProcessor::readArray : array type does not exist!")); break; } } @@ -1310,10 +1396,10 @@ void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, 
std::strin delete[] rawDataReceive; - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray end of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } } @@ -1323,10 +1409,10 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step) MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; @@ -1360,25 +1446,24 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step) MPI_Type_contiguous(sizeOfBIM, MPI_INT, &bcindexmatrixType); MPI_Type_commit(&bcindexmatrixType); - MPI_Offset read_offset = - (MPI_Offset)(sizeof(int)) + (MPI_Offset)(indexB) * (MPI_Offset)(sizeOfBIM) * (MPI_Offset)(sizeof(int)); - MPI_File_read_at(file_handler, read_offset, &bcindexmatrixVAll[0], int(myBlocksCount), bcindexmatrixType, - MPI_STATUS_IGNORE); + MPI_Offset read_offset = (MPI_Offset)(sizeof(int)) + (MPI_Offset)(indexB) * (MPI_Offset)(sizeOfBIM) * (MPI_Offset)(sizeof(int)); + MPI_File_read_at(file_handler, read_offset, &bcindexmatrixVAll[0], int(myBlocksCount), bcindexmatrixType, MPI_STATUS_IGNORE); MPI_File_close(&file_handler); MPI_Type_free(&bcindexmatrixType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds time: " << finish - start << " s"); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds start of exchange of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } std::vector<int> *rawDataReceive = new std::vector<int>[size]; std::vector<int> *rawDataSend = new std::vector<int>[size]; - for (int r = 0; r < size; r++) { + for (int r = 0; r < size; r++) + { rawDataReceive[r].resize(0); rawDataSend[r].resize(0); rawDataReceive[r].push_back(0); @@ -1396,14 +1481,12 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step) { rawDataReceive[tempRank][0]++; rawDataReceive[tempRank].push_back(indexB + ind); - rawDataReceive[tempRank].insert(rawDataReceive[tempRank].end(), bcindexmatrixVAll.begin() + ind * sizeOfBIM, - bcindexmatrixVAll.begin() + ind * sizeOfBIM + sizeOfBIM); + rawDataReceive[tempRank].insert(rawDataReceive[tempRank].end(), bcindexmatrixVAll.begin() + ind * sizeOfBIM, bcindexmatrixVAll.begin() + ind * sizeOfBIM + sizeOfBIM); } else // we must send data to other processes { rawDataSend[tempRank][0]++; rawDataSend[tempRank].push_back(indexB + ind); - rawDataSend[tempRank].insert(rawDataSend[tempRank].end(), bcindexmatrixVAll.begin() + ind * sizeOfBIM, - bcindexmatrixVAll.begin() + ind * sizeOfBIM + sizeOfBIM); + rawDataSend[tempRank].insert(rawDataSend[tempRank].end(), bcindexmatrixVAll.begin() + ind * 
sizeOfBIM, bcindexmatrixVAll.begin() + ind * sizeOfBIM + sizeOfBIM); } } @@ -1414,9 +1497,11 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step) int intBlockCount; int rds; - for (int r = 0; r < size; r++) { - if (r != rank) { - rds = int(rawDataSend[r].size()); + for (int r = 0; r < size; r++) + { + if (r != rank) + { + rds = int(rawDataSend[r].size()); intBlockCount = (int)(rds / SEND_BLOCK_SIZE); if (intBlockCount * SEND_BLOCK_SIZE < rds) intBlockCount += 1; @@ -1424,21 +1509,21 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step) for (int i = rds; i < intBlockCount * SEND_BLOCK_SIZE; i++) rawDataSend[r].push_back(0); - MPI_Isend(&rawDataSend[r][0], intBlockCount, sendBlockIntType, r, MESSAGE_TAG + 7, MPI_COMM_WORLD, - &requests[requestCount]); + MPI_Isend(&rawDataSend[r][0], intBlockCount, sendBlockIntType, r, MESSAGE_TAG + 7, MPI_COMM_WORLD, &requests[requestCount]); // MPI_Isend(&rawDataSend[r][0], rawDataSend[r].size(), MPI_INT, r, MESSAGE_TAG + 7, MPI_COMM_WORLD, // &requests[requestCount]); requestCount++; } } - for (int r = 0; r < size; r++) { - if (r != rank) { + for (int r = 0; r < size; r++) + { + if (r != rank) + { MPI_Probe(r, MESSAGE_TAG + 7, MPI_COMM_WORLD, &status); MPI_Get_count(&status, sendBlockIntType, &quant); rawDataReceive[r].resize(quant * SEND_BLOCK_SIZE); - MPI_Irecv(&rawDataReceive[r][0], quant, sendBlockIntType, r, MESSAGE_TAG + 7, MPI_COMM_WORLD, - &requests[requestCount]); + MPI_Irecv(&rawDataReceive[r][0], quant, sendBlockIntType, r, MESSAGE_TAG + 7, MPI_COMM_WORLD, &requests[requestCount]); requestCount++; } } @@ -1447,17 +1532,17 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step) //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds end of exchange of data, rank = " << rank); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds time: " << finish - start << " s"); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC2.bin"; - rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler); + rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler); if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename); @@ -1475,10 +1560,12 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step) std::vector<int> indexContainerV; std::vector<int> bcindexmatrixV; - for (int r = 0; r < size; r++) { + for (int r = 0; r < size; r++) + { index = 1; - for (int ii = 0; ii < rawDataReceive[r][0]; ii++) { + for (int ii = 0; ii < rawDataReceive[r][0]; ii++) + { blockID = (int)(rawDataReceive[r][index]); index += 1; @@ -1498,18 +1585,19 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step) bcAddArray.boundCond_count, boundCondType, MPI_STATUS_IGNORE); if (bcAddArray.indexContainer_count > 0) - MPI_File_read_at(file_handler, - read_offset2 + (MPI_Offset)(sizeof(BCAddMigration)) + + MPI_File_read_at(file_handler, read_offset2 + (MPI_Offset)(sizeof(BCAddMigration)) + 
(MPI_Offset)(bcAddArray.boundCond_count) * (MPI_Offset)(sizeof(BoundaryCondition)), &indexContainerV[0], bcAddArray.indexContainer_count, MPI_INT, MPI_STATUS_IGNORE); bcVector.resize(0); - for (int ibc = 0; ibc < bcAddArray.boundCond_count; ibc++) { + for (int ibc = 0; ibc < bcAddArray.boundCond_count; ibc++) + { SPtr<BoundaryConditions> bc; if (memcmp(&bcArray[ibc], nullBouCond, sizeof(BoundaryCondition)) == 0) bc = SPtr<BoundaryConditions>(); - else { + else + { bc = SPtr<BoundaryConditions>(new BoundaryConditions); bc->noslipBoundaryFlags = bcArray[ibc].noslipBoundaryFlags; bc->slipBoundaryFlags = bcArray[ibc].slipBoundaryFlags; @@ -1537,8 +1625,7 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step) bcVector.push_back(bc); } - CbArray3D<int, IndexerX3X2X1> bcim(bcindexmatrixV, boundCondParamStr.nx1, boundCondParamStr.nx2, - boundCondParamStr.nx3); + CbArray3D<int, IndexerX3X2X1> bcim(bcindexmatrixV, boundCondParamStr.nx1, boundCondParamStr.nx2, boundCondParamStr.nx3); SPtr<Block3D> block1 = grid->getBlock(blockID); SPtr<BCProcessor> bcProc = bcProcessor->clone(block1->getKernel()); @@ -1562,12 +1649,12 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step) delete[] rawDataSend; delete[] requests; - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds end of restore of data, rank = " << rank); UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds time: " << finish - start << " s"); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } } @@ -1577,3 +1664,8 @@ void MPIIOMigrationBECoProcessor::setLBMKernel(SPtr<LBMKernel> kernel) { this->l void MPIIOMigrationBECoProcessor::setBCProcessor(SPtr<BCProcessor> bcProcessor) { this->bcProcessor = bcProcessor; } ////////////////////////////////////////////////////////////////////////// void MPIIOMigrationBECoProcessor::setNu(double nu) { this->nue = nu; } + +void MPIIOMigrationBECoProcessor::setNuLG(double cfL, double cfG) { this->nuL = cfL; this->nuG = cfG; } + +void MPIIOMigrationBECoProcessor::setDensityRatio(double dr) { this->densityRatio = dr; } + diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h index 4d825fde3956dcbe711f49b18b57cd929ba986d9..9a89ada1ae039d10cd53b06b189e5709398911c8 100644 --- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h +++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h @@ -25,7 +25,8 @@ class MPIIOMigrationBECoProcessor : public MPIIOCoProcessor AverageTriple = 4, ShearStressVal = 5, RelaxationFactor = 6, - PhaseField = 7 + PhaseField1 = 7, + PhaseField2 = 8 }; public: @@ -71,6 +72,8 @@ public: //! 
The function truncates the data files void clearAllFiles(int step); void setNu(double nu); + void setNuLG(double cfL, double cfG); + void setDensityRatio(double dr); void blocksExchange(int tagN, int ind1, int ind2, int doubleCountInBlock, std::vector<double> &pV, std::vector<double> *rawDataReceive); @@ -91,6 +94,10 @@ private: SPtr<LBMKernel> lbmKernel; SPtr<BCProcessor> bcProcessor; double nue; + double nuL; + double nuG; + double densityRatio; + }; #endif diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp index 69cc8795bb7fdb4791420473a242a0f1ff96a06d..2c5a547c4cca531fe50e7255b0aba6a6a4b5c6e9 100644 --- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp +++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp @@ -22,8 +22,7 @@ using namespace MPIIODataStructures; -MPIIOMigrationCoProcessor::MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, - SPtr<Communicator> comm) +MPIIOMigrationCoProcessor::MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, SPtr<Communicator> comm) : MPIIOCoProcessor(grid, s, path, comm) { memset(&boundCondParamStr, 0, sizeof(boundCondParamStr)); @@ -31,7 +30,7 @@ MPIIOMigrationCoProcessor::MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbS //------------------------- define MPI types --------------------------------- MPI_Datatype typesDataSet[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR }; - int blocksDataSet[3] = { 2, 2, 2 }; + int blocksDataSet[3] = { 5, 2, 2 }; MPI_Aint offsetsDatatSet[3], lbDataSet, extentDataSet; offsetsDatatSet[0] = 0; @@ -71,7 +70,8 @@ MPIIOMigrationCoProcessor::~MPIIOMigrationCoProcessor() ////////////////////////////////////////////////////////////////////////// void MPIIOMigrationCoProcessor::process(double step) { - if (scheduler->isDue(step)) { + if (scheduler->isDue(step)) + { if (comm->isRoot()) UBLOG(logINFO, "MPIIOMigrationCoProcessor save step: " << step); if (comm->isRoot()) @@ -100,8 +100,7 @@ void MPIIOMigrationCoProcessor::clearAllFiles(int step) UbSystem::makeDirectory(path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step)); std::string filename10 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC.bin"; - int rc10 = - MPI_File_open(MPI_COMM_WORLD, filename10.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); + int rc10 = MPI_File_open(MPI_COMM_WORLD, filename10.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); if (rc10 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename10); MPI_File_set_size(file_handler, new_size); @@ -125,69 +124,93 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3; DataSetMigration *dataSetArray = new DataSetMigration[blocksCount]; - std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks + std::vector<double> doubleValuesArrayF; // double-values (arrays of f's) in all blocks Fdistribution + 
std::vector<double> doubleValuesArrayH1; // double-values (arrays of f's) in all blocks H1distribution + // std::vector<double> doubleValuesArrayH2; // double-values (arrays of f's) in all blocks H2distribution - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeDataSet start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } + bool multiPhase = false; DSArraysPresence arrPresence; bool firstBlock = true; size_t doubleCountInBlock = 0; int ic = 0; - SPtr<D3Q27EsoTwist3DSplittedVector> D3Q27EsoTwist3DSplittedVectorPtrF, D3Q27EsoTwist3DSplittedVectorPtrH; - CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributionsF, localDistributionsH; - CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributionsF, nonLocalDistributionsH; - CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributionsF, zeroDistributionsH; + SPtr<D3Q27EsoTwist3DSplittedVector> D3Q27EsoTwist3DSplittedVectorPtrF = 0, D3Q27EsoTwist3DSplittedVectorPtrH1 = 0, D3Q27EsoTwist3DSplittedVectorPtrH2 = 0; + CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributionsF = 0, localDistributionsH1 = 0, localDistributionsH2 = 0; + CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributionsF = 0, nonLocalDistributionsH1 = 0, nonLocalDistributionsH2 = 0; + CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributionsF = 0, zeroDistributionsH1 = 0, zeroDistributionsH2 = 0; + + SPtr<LBMKernel> kernel; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { - dataSetArray[ic].globalID = - block->getGlobalID(); // id of the block needed to find it while regenerating the grid - dataSetArray[ic].ghostLayerWidth = block->getKernel()->getGhostLayerWidth(); - dataSetArray[ic].collFactor = block->getKernel()->getCollisionFactor(); - dataSetArray[ic].deltaT = block->getKernel()->getDeltaT(); - dataSetArray[ic].compressible = block->getKernel()->getCompressible(); - dataSetArray[ic].withForcing = block->getKernel()->getWithForcing(); - - D3Q27EsoTwist3DSplittedVectorPtrF = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>( - block->getKernel()->getDataSet()->getFdistributions()); + kernel = dynamicPointerCast<LBMKernel>(block->getKernel()); + + dataSetArray[ic].globalID = block->getGlobalID(); // id of the block needed to find it while regenerating the grid + dataSetArray[ic].ghostLayerWidth = kernel->getGhostLayerWidth(); + dataSetArray[ic].collFactor = kernel->getCollisionFactor(); + dataSetArray[ic].deltaT = kernel->getDeltaT(); + dataSetArray[ic].compressible = kernel->getCompressible(); + dataSetArray[ic].withForcing = kernel->getWithForcing(); + dataSetArray[ic].collFactorL = kernel->getCollisionFactorL(); + dataSetArray[ic].collFactorG = kernel->getCollisionFactorG(); + dataSetArray[ic].densityRatio = kernel->getDensityRatio(); + + D3Q27EsoTwist3DSplittedVectorPtrF = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getFdistributions()); localDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getLocalDistributions(); nonLocalDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getNonLocalDistributions(); 
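+ // F is stored in its three EsoTwist parts: the local and non-local 4D arrays
+ // plus the 3D zero-distribution array fetched below; their combined extents
+ // later define doubleCountInBlock, so F and H1 share one MPI datatype.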
zeroDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getZeroDistributions();
- D3Q27EsoTwist3DSplittedVectorPtrH = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>( - block->getKernel()->getDataSet()->getHdistributions());
- localDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getLocalDistributions();
- nonLocalDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getNonLocalDistributions();
- zeroDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getZeroDistributions();
+ D3Q27EsoTwist3DSplittedVectorPtrH1 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getHdistributions());
+ if (D3Q27EsoTwist3DSplittedVectorPtrH1 != 0)
+ {
+ multiPhase = true;
+ localDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getLocalDistributions();
+ nonLocalDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getNonLocalDistributions();
+ zeroDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getZeroDistributions();
+ }
+
+ /*D3Q27EsoTwist3DSplittedVectorPtrH2 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getH2distributions());
+ if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
+ {
+ localDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getLocalDistributions();
+ nonLocalDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getNonLocalDistributions();
+ zeroDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getZeroDistributions();
+ }*/
if (firstBlock) // && block->getKernel()) // when first (any) valid block...
{
- if (localDistributionsF) {
+ if (localDistributionsF)
+ {
dataSetParamStr1.nx[0] = static_cast<int>(localDistributionsF->getNX1());
dataSetParamStr1.nx[1] = static_cast<int>(localDistributionsF->getNX2());
dataSetParamStr1.nx[2] = static_cast<int>(localDistributionsF->getNX3());
dataSetParamStr1.nx[3] = static_cast<int>(localDistributionsF->getNX4());
}
- if (nonLocalDistributionsF) {
+ if (nonLocalDistributionsF)
+ {
dataSetParamStr2.nx[0] = static_cast<int>(nonLocalDistributionsF->getNX1());
dataSetParamStr2.nx[1] = static_cast<int>(nonLocalDistributionsF->getNX2());
dataSetParamStr2.nx[2] = static_cast<int>(nonLocalDistributionsF->getNX3());
dataSetParamStr2.nx[3] = static_cast<int>(nonLocalDistributionsF->getNX4());
}
- if (zeroDistributionsF) {
+ if (zeroDistributionsF)
+ {
dataSetParamStr3.nx[0] = static_cast<int>(zeroDistributionsF->getNX1());
dataSetParamStr3.nx[1] = static_cast<int>(zeroDistributionsF->getNX2());
dataSetParamStr3.nx[2] = static_cast<int>(zeroDistributionsF->getNX3());
@@ -195,94 +218,91 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
}
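+ // The extents captured from this first block are taken to be equal in all
+ // blocks, which is what allows one fixed-size MPI datatype per checkpoint file.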
// ... then save some parameters that are equal in all blocks
- dataSetParamStr1.nx1 = dataSetParamStr2.nx1 = dataSetParamStr3.nx1 = - static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX1());
- dataSetParamStr1.nx2 = dataSetParamStr2.nx2 = dataSetParamStr3.nx2 = - static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX2());
- dataSetParamStr1.nx3 = dataSetParamStr2.nx3 = dataSetParamStr3.nx3 = - static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX3());
-
- // Fdistributions + Hdistributions
- doubleCountInBlock = - (dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] + - dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] + - dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]) * 2;
-
- SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray = - block->getKernel()->getDataSet()->getAverageDensity();
+ dataSetParamStr1.nx1 = dataSetParamStr2.nx1 = dataSetParamStr3.nx1 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX1());
+ dataSetParamStr1.nx2 = dataSetParamStr2.nx2 = dataSetParamStr3.nx2 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX2());
+ dataSetParamStr1.nx3 = dataSetParamStr2.nx3 = dataSetParamStr3.nx3 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX3());
+
+ doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
+ dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] +
+ dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
+
+ SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray = block->getKernel()->getDataSet()->getAverageDensity();
if (averageDensityArray)
arrPresence.isAverageDensityArrayPresent = true;
else
arrPresence.isAverageDensityArrayPresent = false;
- SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr = - block->getKernel()->getDataSet()->getAverageVelocity();
+ SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr = block->getKernel()->getDataSet()->getAverageVelocity();
if (AverageVelocityArray3DPtr)
arrPresence.isAverageVelocityArrayPresent = true;
else
arrPresence.isAverageVelocityArrayPresent = false;
- SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr = - block->getKernel()->getDataSet()->getAverageFluctuations();
+ SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr = block->getKernel()->getDataSet()->getAverageFluctuations();
if (AverageFluctArray3DPtr)
arrPresence.isAverageFluktuationsArrayPresent = true;
else
arrPresence.isAverageFluktuationsArrayPresent = false;
- SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr = - block->getKernel()->getDataSet()->getAverageTriplecorrelations();
+ SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr = block->getKernel()->getDataSet()->getAverageTriplecorrelations();
if (AverageTripleArray3DPtr)
arrPresence.isAverageTripleArrayPresent = true;
else
arrPresence.isAverageTripleArrayPresent = false;
- SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ShearStressValArray3DPtr = - block->getKernel()->getDataSet()->getShearStressValues();
+ SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ShearStressValArray3DPtr = block->getKernel()->getDataSet()->getShearStressValues();
if (ShearStressValArray3DPtr)
arrPresence.isShearStressValArrayPresent = true; else arrPresence.isShearStressValArrayPresent = false; - SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> relaxationFactor3DPtr = - block->getKernel()->getDataSet()->getRelaxationFactor(); + SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> relaxationFactor3DPtr = block->getKernel()->getDataSet()->getRelaxationFactor(); if (relaxationFactor3DPtr) arrPresence.isRelaxationFactorPresent = true; else arrPresence.isRelaxationFactorPresent = false; - SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr = - block->getKernel()->getDataSet()->getPhaseField(); - if (phaseField3DPtr) - arrPresence.isPhaseFieldPresent = true; + SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr1 = block->getKernel()->getDataSet()->getPhaseField(); + if (phaseField3DPtr1) + arrPresence.isPhaseField1Present = true; + else + arrPresence.isPhaseField1Present = false; + + SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr2 = block->getKernel()->getDataSet()->getPhaseField2(); + if (phaseField3DPtr2) + arrPresence.isPhaseField2Present = true; else - arrPresence.isPhaseFieldPresent = false; + arrPresence.isPhaseField2Present = false; firstBlock = false; } - if (localDistributionsF && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && - (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), localDistributionsF->getDataVector().begin(), - localDistributionsF->getDataVector().end()); - if (nonLocalDistributionsF && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && - (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), nonLocalDistributionsF->getDataVector().begin(), - nonLocalDistributionsF->getDataVector().end()); + if (localDistributionsF && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) + doubleValuesArrayF.insert(doubleValuesArrayF.end(), localDistributionsF->getDataVector().begin(), localDistributionsF->getDataVector().end()); + if (nonLocalDistributionsF && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) + doubleValuesArrayF.insert(doubleValuesArrayF.end(), nonLocalDistributionsF->getDataVector().begin(), nonLocalDistributionsF->getDataVector().end()); if (zeroDistributionsF && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), zeroDistributionsF->getDataVector().begin(), - zeroDistributionsF->getDataVector().end()); - - if (localDistributionsH && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && - (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), localDistributionsH->getDataVector().begin(), - localDistributionsH->getDataVector().end()); - if (nonLocalDistributionsH && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && - (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), nonLocalDistributionsH->getDataVector().begin(), - nonLocalDistributionsH->getDataVector().end()); - if (zeroDistributionsH && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), zeroDistributionsH->getDataVector().begin(), - zeroDistributionsH->getDataVector().end()); + 
doubleValuesArrayF.insert(doubleValuesArrayF.end(), zeroDistributionsF->getDataVector().begin(), zeroDistributionsF->getDataVector().end()); + + if (multiPhase) + { + if (localDistributionsH1 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) + doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), localDistributionsH1->getDataVector().begin(), localDistributionsH1->getDataVector().end()); + if (nonLocalDistributionsH1 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) + doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), nonLocalDistributionsH1->getDataVector().begin(), nonLocalDistributionsH1->getDataVector().end()); + if (zeroDistributionsH1 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0)) + doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), zeroDistributionsH1->getDataVector().begin(), zeroDistributionsH1->getDataVector().end()); + } + + /*if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0) + { + if (localDistributionsH2 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) + doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), localDistributionsH2->getDataVector().begin(), localDistributionsH2->getDataVector().end()); + if (nonLocalDistributionsH2 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) + doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), nonLocalDistributionsH2->getDataVector().begin(), nonLocalDistributionsH2->getDataVector().end()); + if (zeroDistributionsH2 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0)) + doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), zeroDistributionsH2->getDataVector().begin(), zeroDistributionsH2->getDataVector().end()); + }*/ ic++; } @@ -292,10 +312,10 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step) MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeDataSet start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; @@ -311,32 +331,73 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step) // write to the file MPI_File file_handler; - std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin"; + std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin"; int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename); MPI_File_write_at(file_handler, (MPI_Offset)0, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE); - MPI_File_write_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, - MPI_STATUS_IGNORE); - MPI_File_write_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, - MPI_STATUS_IGNORE); - + 
MPI_File_write_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE); + MPI_Offset write_offset; size_t sizeofOneDataSet = sizeof(DataSetMigration) + doubleCountInBlock * sizeof(double); - for (int nb = 0; nb < blocksCount; nb++) { + for (int nb = 0; nb < blocksCount; nb++) + { write_offset = (MPI_Offset)(3 * sizeof(dataSetParam) + dataSetArray[nb].globalID * sizeofOneDataSet); MPI_File_write_at(file_handler, write_offset, &dataSetArray[nb], 1, dataSetType, MPI_STATUS_IGNORE); - MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetMigration)), - &doubleValuesArray[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetMigration)), &doubleValuesArrayF[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE); } MPI_File_sync(file_handler); MPI_File_close(&file_handler); + + //-------------------------------- H1 ---------------------------------------------------- + if (multiPhase) + { + filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin"; + rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); + if (rc != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename); + + sizeofOneDataSet = doubleCountInBlock * sizeof(double); + + for (int nb = 0; nb < blocksCount; nb++) + { + write_offset = (MPI_Offset)(dataSetArray[nb].globalID * sizeofOneDataSet); + MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH1[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE); + } + + MPI_File_sync(file_handler); + MPI_File_close(&file_handler); + } + + //-------------------------------- H2 ---------------------------------------------------- + /*if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0) + { + filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin"; + rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); + if (rc != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename); + + sizeofOneDataSet = doubleCountInBlock * sizeof(double); + + for (int nb = 0; nb < blocksCount; nb++) + { + write_offset = (MPI_Offset)(dataSetArray[nb].globalID * sizeofOneDataSet); + MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH2[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE); + } + + MPI_File_sync(file_handler); + MPI_File_close(&file_handler); + }*/ + //-------------------------------- + MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeDataSet time: " << finish - start << " s"); } @@ -375,8 +436,11 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step) if (arrPresence.isRelaxationFactorPresent) write3DArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin")); - if (arrPresence.isPhaseFieldPresent) - write3DArray(step, PhaseField, std::string("/cpPhaseField.bin")); + if (arrPresence.isPhaseField1Present) + write3DArray(step, PhaseField1, std::string("/cpPhaseField1.bin")); + + if (arrPresence.isPhaseField2Present) + write3DArray(step, PhaseField2, std::string("/cpPhaseField2.bin")); } @@ -391,7 +455,8 @@ void 
MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -400,10 +465,10 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st std::vector<double> doubleValuesArray; // double-values of the AverageDensityArray in all blocks dataSetParam dataSetParamStr; - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageDensityArray start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } bool firstBlock = true; @@ -411,13 +476,14 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st int ic = 0; SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___Array; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { - dataSetSmallArray[ic].globalID = - block->getGlobalID(); // id of the block needed to find it while regenerating the grid + dataSetSmallArray[ic].globalID = block->getGlobalID(); // id of the block needed to find it while regenerating the grid - switch (arrayType) { + switch (arrayType) + { case AverageDensity: ___Array = block->getKernel()->getDataSet()->getAverageDensity(); break; @@ -434,28 +500,24 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st ___Array = block->getKernel()->getDataSet()->getShearStressValues(); break; default: - UB_THROW(UbException(UB_EXARGS, - "MPIIOMigrationCoProcessor::write4DArray : 4D array type does not exist!")); + UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationCoProcessor::write4DArray : 4D array type does not exist!")); break; } if (firstBlock) // when first (any) valid block... 
{ dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0; - dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1()); - dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2()); - dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3()); - dataSetParamStr.nx[3] = static_cast<int>(___Array->getNX4()); - doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1()); + dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2()); + dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3()); + dataSetParamStr.nx[3] = static_cast<int>(___Array->getNX4()); + doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; firstBlock = false; } - if (___Array && (dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && - (dataSetParamStr.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), - ___Array->getDataVector().end()); + if (___Array && (dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0)) + doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end()); ic++; } @@ -465,10 +527,10 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::write4DArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; @@ -489,7 +551,8 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st MPI_Offset write_offset; size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double); - for (int nb = 0; nb < blocksCount; nb++) { + for (int nb = 0; nb < blocksCount; nb++) + { write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet); MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE); MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetSmallMigration)), @@ -500,7 +563,8 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationCoProcessor::write4DArray time: " << finish - start << " s"); } @@ -519,7 +583,8 @@ void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -528,10 +593,10 @@ void 
MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks dataSetParam dataSetParamStr; - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::write3DArray start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } bool firstBlock = true; @@ -539,41 +604,42 @@ void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st int ic = 0; SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___Array; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { - dataSetSmallArray[ic].globalID = - block->getGlobalID(); // id of the block needed to find it while regenerating the grid + dataSetSmallArray[ic].globalID = block->getGlobalID(); // id of the block needed to find it while regenerating the grid - switch (arrayType) { + switch (arrayType) + { case RelaxationFactor: ___Array = block->getKernel()->getDataSet()->getRelaxationFactor(); break; - case PhaseField: + case PhaseField1: ___Array = block->getKernel()->getDataSet()->getPhaseField(); break; + case PhaseField2: + ___Array = block->getKernel()->getDataSet()->getPhaseField2(); + break; default: - UB_THROW(UbException(UB_EXARGS, - "MPIIOMigrationCoProcessor::write3DArray : 3D array type does not exist!")); + UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationCoProcessor::write3DArray : 3D array type does not exist!")); break; } if (firstBlock) // when first (any) valid block... 
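// write3DArray reuses the fixed-size record scheme of write4DArray with the
// fourth dimension pinned to 1 (nx[3] = 1 below), so for a 3D field such as
// PhaseField1 or PhaseField2 the per-block payload is nx[0] * nx[1] * nx[2]
// doubles.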
{ dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0; - dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1()); - dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2()); - dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3()); - dataSetParamStr.nx[3] = 1; - doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1()); + dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2()); + dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3()); + dataSetParamStr.nx[3] = 1; + doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; firstBlock = false; } if (___Array && (dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), - ___Array->getDataVector().end()); + doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end()); ic++; } @@ -583,10 +649,10 @@ void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::write3DArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; @@ -613,7 +679,8 @@ void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double); MPI_Offset write_offset; - for (int nb = 0; nb < blocksCount; nb++) { + for (int nb = 0; nb < blocksCount; nb++) + { write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet); MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE); MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetSmallMigration)), @@ -624,7 +691,8 @@ void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationCoProcessor::write3DArray time: " << finish - start << " s"); } @@ -1352,10 +1420,10 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step) MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeBoundaryConds start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } int blocksCount = 0; // quantity of blocks, that belong to this process @@ -1366,7 +1434,8 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = 
this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -1381,25 +1450,27 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step) int ic = 0; SPtr<BCArray3D> bcArr; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // all the blocks of the current level { bcArr = block->getKernel()->getBCProcessor()->getBCArray(); - bcAddArray[ic].globalID = - block->getGlobalID(); // id of the block needed to find it while regenerating the grid + bcAddArray[ic].globalID = block->getGlobalID(); // id of the block needed to find it while regenerating the grid bcAddArray[ic].boundCond_count = 0; // how many BoundaryConditions in this block bcAddArray[ic].indexContainer_count = 0; // how many indexContainer-values in this block - bytesCount[ic] = sizeof(BCAddMigration); + bytesCount[ic] = sizeof(BCAddMigration); bcVector[ic].resize(0); bcindexmatrixVector[ic].resize(0); indexContainerVector[ic].resize(0); - for (std::size_t bc = 0; bc < bcArr->getBCVectorSize(); bc++) { + for (std::size_t bc = 0; bc < bcArr->getBCVectorSize(); bc++) + { BoundaryCondition *bouCond = new BoundaryCondition(); - if (bcArr->bcvector[bc] == NULL) { + if (bcArr->bcvector[bc] == NULL) memset(bouCond, 0, sizeof(BoundaryCondition)); - } else { + else + { bouCond->noslipBoundaryFlags = bcArr->bcvector[bc]->getNoSlipBoundary(); bouCond->slipBoundaryFlags = bcArr->bcvector[bc]->getSlipBoundary(); bouCond->velocityBoundaryFlags = bcArr->bcvector[bc]->getVelocityBoundary(); @@ -1428,20 +1499,18 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step) bytesCount[ic] += sizeof(BoundaryCondition); } - if (bcindexmatrixCountNotInit) { + if (bcindexmatrixCountNotInit) + { boundCondParamStr.nx1 = static_cast<int>(bcArr->bcindexmatrix.getNX1()); boundCondParamStr.nx2 = static_cast<int>(bcArr->bcindexmatrix.getNX2()); boundCondParamStr.nx3 = static_cast<int>(bcArr->bcindexmatrix.getNX3()); boundCondParamStr.bcindexmatrixCount = static_cast<int>(bcArr->bcindexmatrix.getDataVector().size()); bcindexmatrixCountNotInit = false; } - bcindexmatrixVector[ic].insert(bcindexmatrixVector[ic].begin(), - bcArr->bcindexmatrix.getDataVector().begin(), - bcArr->bcindexmatrix.getDataVector().end()); + bcindexmatrixVector[ic].insert(bcindexmatrixVector[ic].begin(), bcArr->bcindexmatrix.getDataVector().begin(), bcArr->bcindexmatrix.getDataVector().end()); bytesCount[ic] += boundCondParamStr.bcindexmatrixCount * sizeof(int); - indexContainerVector[ic].insert(indexContainerVector[ic].begin(), bcArr->indexContainer.begin(), - bcArr->indexContainer.end()); + indexContainerVector[ic].insert(indexContainerVector[ic].begin(), bcArr->indexContainer.begin(), bcArr->indexContainer.end()); bcAddArray[ic].indexContainer_count = static_cast<int>(bcArr->indexContainer.size()); count_indexContainer += bcAddArray[ic].indexContainer_count; bytesCount[ic] += bcAddArray[ic].indexContainer_count * sizeof(int); @@ -1455,10 +1524,10 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step) MPI_Type_contiguous(boundCondParamStr.bcindexmatrixCount, MPI_INT, &bcindexmatrixType); MPI_Type_commit(&bcindexmatrixType); - if (comm->isRoot()) { + if 
(comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeBoundaryConds start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: "<< Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; @@ -1479,11 +1548,15 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step) MPI_Offset write_offset = (MPI_Offset)(sizeof(boundCondParam) + grid->getNumberOfBlocks() * sizeof(size_t)); size_t next_file_offset = 0; - if (size > 1) { - if (rank == 0) { + if (size > 1) + { + if (rank == 0) + { next_file_offset = write_offset + allBytesCount; MPI_Send(&next_file_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); next_file_offset = write_offset + allBytesCount; if (rank < size - 1) @@ -1495,7 +1568,8 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step) MPI_Offset write_offsetIndex; - for (int nb = 0; nb < blocksCount; nb++) { + for (int nb = 0; nb < blocksCount; nb++) + { write_offsetIndex = (MPI_Offset)(sizeof(boundCondParam) + bcAddArray[nb].globalID * sizeof(size_t)); MPI_File_write_at(file_handler, write_offsetIndex, &write_offset, 1, MPI_LONG_LONG_INT, MPI_STATUS_IGNORE); @@ -1505,17 +1579,12 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step) bcAddArray[nb].boundCond_count, boundCondType, MPI_STATUS_IGNORE); if (bcindexmatrixVector[nb].size() > 0) - MPI_File_write_at(file_handler, - (MPI_Offset)(write_offset + sizeof(BCAddMigration) + - bcAddArray[nb].boundCond_count * sizeof(BoundaryCondition)), + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(BCAddMigration) + bcAddArray[nb].boundCond_count * sizeof(BoundaryCondition)), &bcindexmatrixVector[nb][0], 1, bcindexmatrixType, MPI_STATUS_IGNORE); if (indexContainerVector[nb].size() > 0) - MPI_File_write_at(file_handler, - (MPI_Offset)(write_offset + sizeof(BCAddMigration) + - bcAddArray[nb].boundCond_count * sizeof(BoundaryCondition) + - boundCondParamStr.bcindexmatrixCount * sizeof(int)), - &indexContainerVector[nb][0], bcAddArray[nb].indexContainer_count, MPI_INT, + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(BCAddMigration) + bcAddArray[nb].boundCond_count * sizeof(BoundaryCondition) + + boundCondParamStr.bcindexmatrixCount * sizeof(int)), &indexContainerVector[nb][0], bcAddArray[nb].indexContainer_count, MPI_INT, MPI_STATUS_IGNORE); write_offset += bytesCount[nb]; @@ -1525,7 +1594,8 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step) MPI_File_close(&file_handler); MPI_Type_free(&bcindexmatrixType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeBoundaryConds time: " << finish - start << " s"); } @@ -1547,8 +1617,7 @@ void MPIIOMigrationCoProcessor::restart(int step) readBlocks(step); - SPtr<Grid3DVisitor> metisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, - D3Q27System::BSW, MetisPartitioner::KWAY)); + SPtr<Grid3DVisitor> metisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::BSW, MetisPartitioner::KWAY)); grid->accept(metisVisitor); readDataSet(step); @@ -1568,15 +1637,16 @@ void MPIIOMigrationCoProcessor::readDataSet(int step) MPI_Comm_rank(MPI_COMM_WORLD, &rank); 
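// File layout assumed by the reads below (sketch):
//   cpDataSetF.bin  : three dataSetParam headers, then one record per block,
//                     addressed by globalID:
//                     { DataSetMigration, doubleCountInBlock doubles }
//   cpDataSetH1.bin : no header; one record of doubleCountInBlock doubles per
//                     block, again addressed by globalID. An empty file leaves
//                     multiPhase == false, and the H1 restore is skipped.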
MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::readDataSet start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; if (comm->isRoot()) start = MPI_Wtime(); + bool multiPhase = false; size_t blocksCount = 0; // quantity of the blocks, that belong to this process dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3; @@ -1584,7 +1654,8 @@ void MPIIOMigrationCoProcessor::readDataSet(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -1592,22 +1663,21 @@ void MPIIOMigrationCoProcessor::readDataSet(int step) DataSetMigration *dataSetArray = new DataSetMigration[blocksCount]; MPI_File file_handler; - std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin"; + std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin"; int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler); if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename); MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE); - MPI_File_read_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, - MPI_STATUS_IGNORE); - MPI_File_read_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, - MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE); - size_t doubleCountInBlock = - (dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] + + size_t doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] + dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] + - dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3] * 2); - std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks + dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]; + std::vector<double> doubleValuesArrayF(size_t(blocksCount * doubleCountInBlock)); // double-values in all blocks Fdistributions + std::vector<double> doubleValuesArrayH1; // double-values in all blocks H1distributions + //std::vector<double> doubleValuesArrayH2; // double-values in all blocks H2distributions // define MPI_types depending on the block-specific information MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType); @@ -1617,95 +1687,145 @@ void 
MPIIOMigrationCoProcessor::readDataSet(int step) MPI_Offset read_offset; size_t sizeofOneDataSet = size_t(sizeof(DataSetMigration) + doubleCountInBlock * sizeof(double)); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { read_offset = (MPI_Offset)(3 * sizeof(dataSetParam) + block->getGlobalID() * sizeofOneDataSet); MPI_File_read_at(file_handler, read_offset, &dataSetArray[ic], 1, dataSetType, MPI_STATUS_IGNORE); MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(DataSetMigration)), - &doubleValuesArray[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE); + &doubleValuesArrayF[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE); ic++; } } MPI_File_close(&file_handler); + + //----------------------------------------- H1 ---------------------------------------------------- + MPI_Offset fsize; + filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin"; + rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler); + if (rc != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename); + MPI_File_get_size(file_handler, &fsize); + if (fsize > 0) + { + multiPhase = true; + doubleValuesArrayH1.resize(blocksCount * doubleCountInBlock); + + sizeofOneDataSet = size_t(doubleCountInBlock * sizeof(double)); + + for (int level = minInitLevel; level <= maxInitLevel; level++) + { + for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level + { + read_offset = (MPI_Offset)(block->getGlobalID() * sizeofOneDataSet); + MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH1[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE); + ic++; + } + } + + } + MPI_File_close(&file_handler); + + //----------------------------------------- H2 ---------------------------------------------------- + /*filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin"; + rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler); + if (rc != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename); + + sizeofOneDataSet = size_t(doubleCountInBlock * sizeof(double)); + doubleValuesArrayH2.resize(blocksCount * doubleCountInBlock); + + for (int level = minInitLevel; level <= maxInitLevel; level++) + { + for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level + { + read_offset = (MPI_Offset)(block->getGlobalID() * sizeofOneDataSet); + MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH2[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE); + ic++; + } + } + + MPI_File_close(&file_handler);*/ + MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationCoProcessor::readDataSet time: " << finish - start << " s"); UBLOG(logINFO, "MPIIOMigrationCoProcessor::readDataSet start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } size_t index = 0; std::vector<double> vectorsOfValuesF1, vectorsOfValuesF2, vectorsOfValuesF3; - 
std::vector<double> vectorsOfValuesH1, vectorsOfValuesH2, vectorsOfValuesH3; - - size_t vectorSize1 = - dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3]; - size_t vectorSize2 = - dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3]; - size_t vectorSize3 = - dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]; - - for (std::size_t n = 0; n < blocksCount; n++) { - vectorsOfValuesF1.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize1); + std::vector<double> vectorsOfValuesH11, vectorsOfValuesH12, vectorsOfValuesH13; + //std::vector<double> vectorsOfValuesH21, vectorsOfValuesH22, vectorsOfValuesH23; + + size_t vectorSize1 = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3]; + size_t vectorSize2 = dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3]; + size_t vectorSize3 = dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]; + + for (std::size_t n = 0; n < blocksCount; n++) + { + vectorsOfValuesF1.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize1); + if(multiPhase) + vectorsOfValuesH11.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize1); + //vectorsOfValuesH21.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize1); index += vectorSize1; - vectorsOfValuesF2.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize2); + vectorsOfValuesF2.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize2); + if (multiPhase) + vectorsOfValuesH12.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize2); + //vectorsOfValuesH22.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize2); index += vectorSize2; - vectorsOfValuesF3.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize3); - index += vectorSize3; - - vectorsOfValuesH1.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize1); - index += vectorSize1; - - vectorsOfValuesH2.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize2); - index += vectorSize2; - - vectorsOfValuesH3.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize3); + vectorsOfValuesF3.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize3); + if (multiPhase) + vectorsOfValuesH13.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize3); + //vectorsOfValuesH23.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize3); index += vectorSize3; SPtr<DistributionArray3D> mFdistributions(new D3Q27EsoTwist3DSplittedVector()); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions) - ->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0], - dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], - dataSetParamStr1.nx[3]))); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions) - ->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, 
IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0], - dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], - dataSetParamStr2.nx[3]))); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions) - ->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( + new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3]))); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( + new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3]))); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( vectorsOfValuesF3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2]))); dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX1(dataSetParamStr1.nx1); dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX2(dataSetParamStr1.nx2); dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX3(dataSetParamStr1.nx3); - SPtr<DistributionArray3D> mHdistributions(new D3Q27EsoTwist3DSplittedVector()); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions) - ->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH1, dataSetParamStr1.nx[0], - dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], - dataSetParamStr1.nx[3]))); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions) - ->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH2, dataSetParamStr2.nx[0], - dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], - dataSetParamStr2.nx[3]))); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions) - ->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( - vectorsOfValuesH3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2]))); - - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX1(dataSetParamStr1.nx1); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX2(dataSetParamStr1.nx2); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX3(dataSetParamStr1.nx3); + SPtr<DistributionArray3D> mH1distributions(new D3Q27EsoTwist3DSplittedVector()); + if (multiPhase) + { + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( + new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH11, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3]))); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( + new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH12, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3]))); + 
dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( + vectorsOfValuesH13, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2]))); + + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX1(dataSetParamStr1.nx1); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX2(dataSetParamStr1.nx2); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX3(dataSetParamStr1.nx3); + } + + /*SPtr<DistributionArray3D> mH2distributions(new D3Q27EsoTwist3DSplittedVector()); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( + new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH21, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3]))); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( + new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH22, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3]))); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( + vectorsOfValuesH23, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2]))); + + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX1(dataSetParamStr1.nx1); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX2(dataSetParamStr1.nx2); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX3(dataSetParamStr1.nx3);*/ // find the necessary block and fill it SPtr<Block3D> block = grid->getBlock(dataSetArray[n].globalID); @@ -1716,17 +1836,22 @@ void MPIIOMigrationCoProcessor::readDataSet(int step) kernel->setDeltaT(dataSetArray[n].deltaT); kernel->setCompressible(dataSetArray[n].compressible); kernel->setWithForcing(dataSetArray[n].withForcing); + kernel->setCollisionFactorMultiphase(dataSetArray[n].collFactorL, dataSetArray[n].collFactorG); + kernel->setDensityRatio(dataSetArray[n].densityRatio); + SPtr<DataSet3D> dataSetPtr = SPtr<DataSet3D>(new DataSet3D()); dataSetPtr->setFdistributions(mFdistributions); - dataSetPtr->setHdistributions(mHdistributions); + if (multiPhase) + dataSetPtr->setHdistributions(mH1distributions); + //dataSetPtr->setH2distributions(mH2distributions); kernel->setDataSet(dataSetPtr); block->setKernel(kernel); } - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::readDataSet end of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } delete[] dataSetArray; @@ -1766,8 +1891,12 @@ void MPIIOMigrationCoProcessor::readDataSet(int step) readArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin")); // readRelaxationFactor(step); - if (arrPresence.isPhaseFieldPresent) - readArray(step, PhaseField, std::string("/cpPhaseField.bin")); + if (arrPresence.isPhaseField1Present) + readArray(step, PhaseField1, std::string("/cpPhaseField1.bin")); + + if
(arrPresence.isPhaseField2Present) + readArray(step, PhaseField2, std::string("/cpPhaseField2.bin")); + } void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string fname) @@ -1776,10 +1905,10 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; if (comm->isRoot()) @@ -1799,7 +1928,8 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -1807,8 +1937,7 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE); DataSetSmallMigration *dataSetSmallArray = new DataSetSmallMigration[blocksCount]; - size_t doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks // define MPI_types depending on the block-specific information @@ -1819,7 +1948,8 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string MPI_Offset read_offset; size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double)); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { read_offset = (MPI_Offset)(sizeof(dataSetParam) + block->getGlobalID() * sizeofOneDataSet); @@ -1833,20 +1963,20 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray readArray: " << finish - start << " s"); UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } //----------------------------- restore data --------------------------------- size_t index = 0; - size_t nextVectorSize = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + size_t nextVectorSize = dataSetParamStr.nx[0] 
* dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> vectorsOfValues; - for (std::size_t n = 0; n < blocksCount; n++) { + for (std::size_t n = 0; n < blocksCount; n++) + { SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID); vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize); @@ -1856,35 +1986,31 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___4DArray; SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___3DArray; - switch (arrType) { + switch (arrType) + { case AverageDensity: ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( - vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], - dataSetParamStr.nx[3])); + vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); block->getKernel()->getDataSet()->setAverageDensity(___4DArray); break; case AverageVelocity: ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( - vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], - dataSetParamStr.nx[3])); + vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); block->getKernel()->getDataSet()->setAverageVelocity(___4DArray); break; case AverageFluktuations: ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( - vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], - dataSetParamStr.nx[3])); + vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); block->getKernel()->getDataSet()->setAverageFluctuations(___4DArray); break; case AverageTriple: ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( - vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], - dataSetParamStr.nx[3])); + vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); block->getKernel()->getDataSet()->setAverageTriplecorrelations(___4DArray); break; case ShearStressVal: ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>( - vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], - dataSetParamStr.nx[3])); + vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); block->getKernel()->getDataSet()->setShearStressValues(___4DArray); break; case RelaxationFactor: @@ -1892,21 +2018,26 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2])); block->getKernel()->getDataSet()->setRelaxationFactor(___3DArray); break; - case PhaseField: + case PhaseField1: ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2])); block->getKernel()->getDataSet()->setPhaseField(___3DArray); break; + case PhaseField2: + ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( + vectorsOfValues, dataSetParamStr.nx[0], 
dataSetParamStr.nx[1], dataSetParamStr.nx[2])); + block->getKernel()->getDataSet()->setPhaseField2(___3DArray); + break; default: UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationCoProcessor::readArray : array type does not exist!")); break; } } - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray end of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } delete[] dataSetSmallArray; @@ -2548,10 +2679,10 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step) MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::readBoundaryConds start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; @@ -2570,7 +2701,8 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -2585,12 +2717,12 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step) std::vector<int> bcindexmatrixV; std::vector<int> indexContainerV; - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIOMigrationCoProcessor::readBoundaryConds time: " << finish - start << " s"); UBLOG(logINFO, "MPIIOMigrationCoProcessor::readBoundaryConds start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } MPI_File_read_at(file_handler, (MPI_Offset)0, &boundCondParamStr, 1, boundCondParamType, MPI_STATUS_IGNORE); @@ -2599,7 +2731,8 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step) int ic = 0; MPI_Offset read_offset1, read_offset2; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { read_offset1 = (MPI_Offset)(sizeof(boundCondParam) + block->getGlobalID() * sizeof(size_t)); @@ -2611,31 +2744,30 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step) intArray1 = new int[boundCondParamStr.bcindexmatrixCount]; intArray2 = new int[bcAddArray[ic].indexContainer_count]; - if (bcAddArray[ic].boundCond_count > 0) { + if (bcAddArray[ic].boundCond_count > 0) + { MPI_File_read_at(file_handler, (MPI_Offset)(read_offset2 + sizeof(BCAddMigration)), &bcArray[0], bcAddArray[ic].boundCond_count, boundCondType, MPI_STATUS_IGNORE); } - MPI_File_read_at(file_handler, - (MPI_Offset)(read_offset2 + 
sizeof(BCAddMigration) + - bcAddArray[ic].boundCond_count * sizeof(BoundaryCondition)), + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset2 + sizeof(BCAddMigration) + bcAddArray[ic].boundCond_count * sizeof(BoundaryCondition)), &intArray1[0], 1, bcindexmatrixType, MPI_STATUS_IGNORE); - if (bcAddArray[ic].indexContainer_count > 0) { - MPI_File_read_at(file_handler, - (MPI_Offset)(read_offset2 + sizeof(BCAddMigration) + - bcAddArray[ic].boundCond_count * sizeof(BoundaryCondition) + - boundCondParamStr.bcindexmatrixCount * sizeof(int)), - &intArray2[0], bcAddArray[ic].indexContainer_count, MPI_INT, MPI_STATUS_IGNORE); + if (bcAddArray[ic].indexContainer_count > 0) + { + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset2 + sizeof(BCAddMigration) + bcAddArray[ic].boundCond_count * sizeof(BoundaryCondition) + + boundCondParamStr.bcindexmatrixCount * sizeof(int)), &intArray2[0], bcAddArray[ic].indexContainer_count, MPI_INT, MPI_STATUS_IGNORE); } bcindexmatrixV.resize(0); indexContainerV.resize(0); bcVector.resize(0); - for (int ibc = 0; ibc < bcAddArray[ic].boundCond_count; ibc++) { + for (int ibc = 0; ibc < bcAddArray[ic].boundCond_count; ibc++) + { SPtr<BoundaryConditions> bc; if (memcmp(&bcArray[ibc], nullBouCond, sizeof(BoundaryCondition)) == 0) bc = SPtr<BoundaryConditions>(); - else { + else + { bc = SPtr<BoundaryConditions>(new BoundaryConditions); bc->noslipBoundaryFlags = bcArray[ibc].noslipBoundaryFlags; bc->slipBoundaryFlags = bcArray[ibc].slipBoundaryFlags; @@ -2669,8 +2801,7 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step) for (int b2 = 0; b2 < bcAddArray[ic].indexContainer_count; b2++) indexContainerV.push_back(intArray2[b2]); - CbArray3D<int, IndexerX3X2X1> bcim(bcindexmatrixV, boundCondParamStr.nx1, boundCondParamStr.nx2, - boundCondParamStr.nx3); + CbArray3D<int, IndexerX3X2X1> bcim(bcindexmatrixV, boundCondParamStr.nx1, boundCondParamStr.nx2, boundCondParamStr.nx3); SPtr<Block3D> block1 = grid->getBlock(bcAddArray[ic].globalID); SPtr<BCProcessor> bcProc = bcProcessor->clone(block1->getKernel()); @@ -2694,10 +2825,10 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step) delete nullBouCond; - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIOMigrationCoProcessor::readBoundaryConds end of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } } diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h index ad7a93086afa379822fc7909a68fd39748dd607f..ca0de8f3e7ba315bc8a870f89063ea9f38d7b59f 100644 --- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h +++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h @@ -25,7 +25,8 @@ public: AverageTriple = 4, ShearStressVal = 5, RelaxationFactor = 6, - PhaseField = 7 + PhaseField1 = 7, + PhaseField2 = 8 }; MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, SPtr<Communicator> comm); diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp index ed5d3b275c006700d29c43f16928d2ddc08827f0..036fea0c780bf2a74b22789ee6e0cb605ddbd065 100644 --- 
a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp +++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp @@ -25,8 +25,7 @@ using namespace MPIIODataStructures; -MPIIORestartCoProcessor::MPIIORestartCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, - SPtr<Communicator> comm) +MPIIORestartCoProcessor::MPIIORestartCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, SPtr<Communicator> comm) : MPIIOCoProcessor(grid, s, path, comm) { memset(&boundCondParamStr, 0, sizeof(boundCondParamStr)); @@ -34,7 +33,7 @@ MPIIORestartCoProcessor::MPIIORestartCoProcessor(SPtr<Grid3D> grid, SPtr<UbSched //------------------------- define MPI types --------------------------------- MPI_Datatype typesDataSet[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR }; - int blocksDataSet[3] = { 2, 5, 2 }; + int blocksDataSet[3] = { 5, 5, 2 }; MPI_Aint offsetsDatatSet[3], lbDataSet, extentDataSet; offsetsDatatSet[0] = 0; @@ -80,7 +79,8 @@ MPIIORestartCoProcessor::~MPIIORestartCoProcessor() ////////////////////////////////////////////////////////////////////////// void MPIIORestartCoProcessor::process(double step) { - if (scheduler->isDue(step)) { + if (scheduler->isDue(step)) + { if (comm->isRoot()) UBLOG(logINFO, "MPIIORestartCoProcessor save step: " << step); if (comm->isRoot()) @@ -130,56 +130,77 @@ void MPIIORestartCoProcessor::writeDataSet(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3; DataSetRestart *dataSetArray = new DataSetRestart[blocksCount]; - std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks + std::vector<double> doubleValuesArrayF; // double-values (arrays of f's) in all blocks Fdistribution + std::vector<double> doubleValuesArrayH1; // double-values (arrays of f's) in all blocks H1distribution + // std::vector<double> doubleValuesArrayH2; // double-values (arrays of f's) in all blocks H2distribution - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeDataSet start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } + bool multiPhase = false; DSArraysPresence arrPresence; bool firstBlock = true; int doubleCountInBlock = 0; int ic = 0; - SPtr<D3Q27EsoTwist3DSplittedVector> D3Q27EsoTwist3DSplittedVectorPtrF, D3Q27EsoTwist3DSplittedVectorPtrH; - CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributionsF, localDistributionsH; - CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributionsF, nonLocalDistributionsH; - CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributionsF, zeroDistributionsH; + SPtr<D3Q27EsoTwist3DSplittedVector> D3Q27EsoTwist3DSplittedVectorPtrF, D3Q27EsoTwist3DSplittedVectorPtrH1, D3Q27EsoTwist3DSplittedVectorPtrH2; + CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributionsF, localDistributionsH1, 
localDistributionsH2; + CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributionsF, nonLocalDistributionsH1, nonLocalDistributionsH2; + CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributionsF, zeroDistributionsH1, zeroDistributionsH2; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + SPtr<LBMKernel> kernel; + + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { - dataSetArray[ic].x1 = - block->getX1(); // coordinates of the block needed to find it while regenerating the grid - dataSetArray[ic].x2 = block->getX2(); - dataSetArray[ic].x3 = block->getX3(); - dataSetArray[ic].level = block->getLevel(); - dataSetArray[ic].ghostLayerWidth = block->getKernel()->getGhostLayerWidth(); - dataSetArray[ic].collFactor = block->getKernel()->getCollisionFactor(); - dataSetArray[ic].deltaT = block->getKernel()->getDeltaT(); - dataSetArray[ic].compressible = block->getKernel()->getCompressible(); - dataSetArray[ic].withForcing = block->getKernel()->getWithForcing(); - - D3Q27EsoTwist3DSplittedVectorPtrF = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>( - block->getKernel()->getDataSet()->getFdistributions()); + kernel = dynamicPointerCast<LBMKernel>(block->getKernel()); + + dataSetArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid + dataSetArray[ic].x2 = block->getX2(); + dataSetArray[ic].x3 = block->getX3(); + dataSetArray[ic].level = block->getLevel(); + dataSetArray[ic].ghostLayerWidth = kernel->getGhostLayerWidth(); + dataSetArray[ic].collFactor = kernel->getCollisionFactor(); + dataSetArray[ic].deltaT = kernel->getDeltaT(); + dataSetArray[ic].compressible = kernel->getCompressible(); + dataSetArray[ic].withForcing = kernel->getWithForcing(); + dataSetArray[ic].collFactorL = kernel->getCollisionFactorL(); + dataSetArray[ic].collFactorG = kernel->getCollisionFactorG(); + dataSetArray[ic].densityRatio = kernel->getDensityRatio(); + + D3Q27EsoTwist3DSplittedVectorPtrF = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getFdistributions()); localDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getLocalDistributions(); nonLocalDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getNonLocalDistributions(); zeroDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getZeroDistributions(); - D3Q27EsoTwist3DSplittedVectorPtrH = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>( - block->getKernel()->getDataSet()->getHdistributions()); - localDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getLocalDistributions(); - nonLocalDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getNonLocalDistributions(); - zeroDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getZeroDistributions(); + D3Q27EsoTwist3DSplittedVectorPtrH1 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getHdistributions()); + if (D3Q27EsoTwist3DSplittedVectorPtrH1 != 0) + { + multiPhase = true; + localDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getLocalDistributions(); + nonLocalDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getNonLocalDistributions(); + zeroDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getZeroDistributions(); + } + + /*D3Q27EsoTwist3DSplittedVectorPtrH2 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getH2distributions()); + if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0) + { + localDistributionsH2 = 
D3Q27EsoTwist3DSplittedVectorPtrH2->getLocalDistributions(); + nonLocalDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getNonLocalDistributions(); + zeroDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getZeroDistributions(); + }*/ if (firstBlock) // when first (any) valid block... { @@ -204,96 +225,88 @@ void MPIIORestartCoProcessor::writeDataSet(int step) } // ... then save some parameters that are equal in all dataSets - dataSetParamStr1.nx1 = dataSetParamStr2.nx1 = dataSetParamStr3.nx1 = - static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX1()); - dataSetParamStr1.nx2 = dataSetParamStr2.nx2 = dataSetParamStr3.nx2 = - static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX2()); - dataSetParamStr1.nx3 = dataSetParamStr2.nx3 = dataSetParamStr3.nx3 = - static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX3()); - - // Fdistributions + Hdistributions - doubleCountInBlock = - (dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] + + dataSetParamStr1.nx1 = dataSetParamStr2.nx1 = dataSetParamStr3.nx1 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX1()); + dataSetParamStr1.nx2 = dataSetParamStr2.nx2 = dataSetParamStr3.nx2 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX2()); + dataSetParamStr1.nx3 = dataSetParamStr2.nx3 = dataSetParamStr3.nx3 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX3()); + + doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] + dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] + - dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]) * 2; + dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]; - SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray = - block->getKernel()->getDataSet()->getAverageDensity(); + SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray = kernel->getDataSet()->getAverageDensity(); if (averageDensityArray) arrPresence.isAverageDensityArrayPresent = true; else arrPresence.isAverageDensityArrayPresent = false; - SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr = - block->getKernel()->getDataSet()->getAverageVelocity(); + SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr = kernel->getDataSet()->getAverageVelocity(); if (AverageVelocityArray3DPtr) arrPresence.isAverageVelocityArrayPresent = true; else arrPresence.isAverageVelocityArrayPresent = false; - SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr = - block->getKernel()->getDataSet()->getAverageFluctuations(); + SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr = kernel->getDataSet()->getAverageFluctuations(); if (AverageFluctArray3DPtr) arrPresence.isAverageFluktuationsArrayPresent = true; else arrPresence.isAverageFluktuationsArrayPresent = false; - SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr = - block->getKernel()->getDataSet()->getAverageTriplecorrelations(); + SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr = kernel->getDataSet()->getAverageTriplecorrelations(); if (AverageTripleArray3DPtr) arrPresence.isAverageTripleArrayPresent = true; else arrPresence.isAverageTripleArrayPresent = false; - SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>>
ShearStressValArray3DPtr = - block->getKernel()->getDataSet()->getShearStressValues(); + SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ShearStressValArray3DPtr = kernel->getDataSet()->getShearStressValues(); if (ShearStressValArray3DPtr) arrPresence.isShearStressValArrayPresent = true; else arrPresence.isShearStressValArrayPresent = false; - SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> relaxationFactor3DPtr = - block->getKernel()->getDataSet()->getRelaxationFactor(); + SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> relaxationFactor3DPtr = kernel->getDataSet()->getRelaxationFactor(); if (relaxationFactor3DPtr) arrPresence.isRelaxationFactorPresent = true; else arrPresence.isRelaxationFactorPresent = false; - SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr = - block->getKernel()->getDataSet()->getPhaseField(); - if (phaseField3DPtr) - arrPresence.isPhaseFieldPresent = true; + SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr1 = kernel->getDataSet()->getPhaseField(); + if (phaseField3DPtr1) + arrPresence.isPhaseField1Present = true; + else + arrPresence.isPhaseField1Present = false; + + SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr2 = kernel->getDataSet()->getPhaseField2(); + if (phaseField3DPtr2) + arrPresence.isPhaseField2Present = true; else - arrPresence.isPhaseFieldPresent = false; + arrPresence.isPhaseField2Present = false; firstBlock = false; } - if (localDistributionsF && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && - (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), localDistributionsF->getDataVector().begin(), - localDistributionsF->getDataVector().end()); - if (nonLocalDistributionsF && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && - (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), nonLocalDistributionsF->getDataVector().begin(), - nonLocalDistributionsF->getDataVector().end()); - if (zeroDistributionsF && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && - (dataSetParamStr3.nx[2] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), zeroDistributionsF->getDataVector().begin(), - zeroDistributionsF->getDataVector().end()); - - if (localDistributionsH && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && - (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), localDistributionsH->getDataVector().begin(), - localDistributionsH->getDataVector().end()); - if (nonLocalDistributionsH && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && - (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), nonLocalDistributionsH->getDataVector().begin(), - nonLocalDistributionsH->getDataVector().end()); - if (zeroDistributionsH && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && - (dataSetParamStr3.nx[2] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), zeroDistributionsH->getDataVector().begin(), - zeroDistributionsH->getDataVector().end()); + if (localDistributionsF && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) + doubleValuesArrayF.insert(doubleValuesArrayF.end(), localDistributionsF->getDataVector().begin(), localDistributionsF->getDataVector().end()); + if (nonLocalDistributionsF && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && 
(dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) + doubleValuesArrayF.insert(doubleValuesArrayF.end(), nonLocalDistributionsF->getDataVector().begin(), nonLocalDistributionsF->getDataVector().end()); + if (zeroDistributionsF && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0)) + doubleValuesArrayF.insert(doubleValuesArrayF.end(), zeroDistributionsF->getDataVector().begin(), zeroDistributionsF->getDataVector().end()); + + if (multiPhase) + { + if (localDistributionsH1 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) + doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), localDistributionsH1->getDataVector().begin(), localDistributionsH1->getDataVector().end()); + if (nonLocalDistributionsH1 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) + doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), nonLocalDistributionsH1->getDataVector().begin(), nonLocalDistributionsH1->getDataVector().end()); + if (zeroDistributionsH1 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0)) + doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), zeroDistributionsH1->getDataVector().begin(), zeroDistributionsH1->getDataVector().end()); + } + + /*if (localDistributionsH2 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0)) + doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), localDistributionsH2->getDataVector().begin(), localDistributionsH2->getDataVector().end()); + if (nonLocalDistributionsH2 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0)) + doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), nonLocalDistributionsH2->getDataVector().begin(), nonLocalDistributionsH2->getDataVector().end()); + if (zeroDistributionsH2 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0)) + doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), zeroDistributionsH2->getDataVector().begin(), zeroDistributionsH2->getDataVector().end());*/ ic++; } @@ -303,10 +316,10 @@ void MPIIORestartCoProcessor::writeDataSet(int step) MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeDataSet start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } // write to the file @@ -315,15 +328,17 @@ void MPIIORestartCoProcessor::writeDataSet(int step) MPI_Offset write_offset = (MPI_Offset)(size * sizeof(int)); size_t next_write_offset = 0; - if (size > 1) { - if (rank == 0) { - next_write_offset = write_offset + 3 * sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_write_offset = write_offset + 3 * sizeof(dataSetParam) + blocksCount * (sizeof(DataSetRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { 
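Aside on the send/recv ladder here (it recurs in every writer in this file): ranks agree on disjoint file regions by passing a running offset down the line, so the file can be written with plain MPI_File_write_at and no shared file pointer. A minimal sketch of the pattern, assuming each rank already knows its payload size in bytes; chainedWriteOffset is an illustrative name, and the real code seeds the chain after a per-rank header region and ships the offset as MPI_LONG_LONG_INT:

    // Offset chain: rank r starts writing where rank r-1 ends.
    #include <mpi.h>

    MPI_Offset chainedWriteOffset(MPI_Offset headerBytes, MPI_Offset myBytes,
                                  int rank, int size)
    {
        MPI_Offset writeOffset = headerBytes;    // rank 0 starts after the header
        if (rank > 0)                            // later ranks wait for their start
            MPI_Recv(&writeOffset, 1, MPI_OFFSET, rank - 1, 5,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Offset nextOffset = writeOffset + myBytes;
        if (rank < size - 1)                     // pass the token downstream
            MPI_Send(&nextOffset, 1, MPI_OFFSET, rank + 1, 5, MPI_COMM_WORLD);
        return writeOffset;                      // where this rank may write
    }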
MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_write_offset = write_offset + 3 * sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetRestart) + doubleCountInBlock * sizeof(double)); + next_write_offset = write_offset + 3 * sizeof(dataSetParam) + blocksCount * (sizeof(DataSetRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } @@ -342,7 +357,7 @@ void MPIIORestartCoProcessor::writeDataSet(int step) #endif MPI_File file_handler; - std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin"; + std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin"; int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename); @@ -351,27 +366,54 @@ void MPIIORestartCoProcessor::writeDataSet(int step) MPI_File_write_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE); // each process writes common parameters of a dataSet MPI_File_write_at(file_handler, write_offset, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE); - MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), &dataSetParamStr2, 1, - dataSetParamType, MPI_STATUS_IGNORE); - MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + 2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, - dataSetParamType, MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + 2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE); // each process writes data identifying blocks - MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + 3 * sizeof(dataSetParam)), dataSetArray, blocksCount, - dataSetType, MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + 3 * sizeof(dataSetParam)), dataSetArray, blocksCount, dataSetType, MPI_STATUS_IGNORE); // each process writes the dataSet arrays - if (doubleValuesArray.size() > 0) - MPI_File_write_at(file_handler, - (MPI_Offset)(write_offset + 3 * sizeof(dataSetParam) + blocksCount * sizeof(DataSetRestart)), - &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); + if (doubleValuesArrayF.size() > 0) + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + 3 * sizeof(dataSetParam) + blocksCount * sizeof(DataSetRestart)), + &doubleValuesArrayF[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_sync(file_handler); MPI_File_close(&file_handler); + //------------------------------------------------------------------------------------------------------------------ + if (multiPhase) + { + filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin"; + rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); + if (rc != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename); + + // each process writes the dataSet arrays + if (doubleValuesArrayH1.size() > 0) + MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH1[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); + + MPI_File_sync(file_handler); + 
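For reference, the collection loop earlier in this hunk funnels every optional array through the same guard: append to the flat double vector only if the pointer is non-null and all extents are positive, so absent arrays simply contribute nothing to the file. A condensed sketch of that guard-and-append pattern; FlatArray and appendIfPresent are illustrative stand-ins, not VirtualFluids types:

    // Guard-and-append: a block's values reach the flat buffer only if the
    // array exists and none of its four extents is zero.
    #include <memory>
    #include <vector>

    struct FlatArray {                       // stand-in for CbArray4D access
        std::vector<double> data;
        const std::vector<double>& getDataVector() const { return data; }
    };

    void appendIfPresent(std::vector<double>& out,
                         const std::shared_ptr<FlatArray>& arr, const int nx[4])
    {
        if (arr && nx[0] > 0 && nx[1] > 0 && nx[2] > 0 && nx[3] > 0)
            out.insert(out.end(), arr->getDataVector().begin(),
                       arr->getDataVector().end());
    }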
MPI_File_close(&file_handler); + } + + //-------------------------------------------------------------------------------------------------------------------- + /*filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin"; + rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); + if (rc != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename); + + // each process writes the dataSet arrays + if (doubleValuesArrayH2.size() > 0) + MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH2[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); + + MPI_File_sync(file_handler); + MPI_File_close(&file_handler);*/ + + //-------------------------------- MPI_Type_free(&dataSetDoubleType); delete[] dataSetArray; - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::writeDataSet time: " << finish - start << " s"); } @@ -403,8 +445,11 @@ void MPIIORestartCoProcessor::writeDataSet(int step) if (arrPresence.isRelaxationFactorPresent) writeRelaxationFactor(step); - if (arrPresence.isPhaseFieldPresent) - writePhaseField(step); + if (arrPresence.isPhaseField1Present) + writePhaseField(step, 1); + + if (arrPresence.isPhaseField2Present) + writePhaseField(step, 2); } void MPIIORestartCoProcessor::writeAverageDensityArray(int step) @@ -418,7 +463,8 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -427,10 +473,10 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step) std::vector<double> doubleValuesArray; // double-values of the AverageDensityArray in all blocks dataSetParam dataSetParamStr; - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageDensityArray start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } bool firstBlock = true; @@ -438,13 +484,13 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step) int ic = 0; SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { - dataSetSmallArray[ic].x1 = - block->getX1(); // coordinates of the block needed to find it while regenerating the grid - dataSetSmallArray[ic].x2 = block->getX2(); - dataSetSmallArray[ic].x3 = block->getX3(); + dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid + dataSetSmallArray[ic].x2 = block->getX2(); + dataSetSmallArray[ic].x3 = block->getX3(); dataSetSmallArray[ic].level = block->getLevel(); averageDensityArray = block->getKernel()->getDataSet()->getAverageDensity(); @@ -462,10 +508,8 @@ void
MPIIORestartCoProcessor::writeAverageDensityArray(int step) firstBlock = false; } - if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && - (dataSetParamStr.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), averageDensityArray->getDataVector().begin(), - averageDensityArray->getDataVector().end()); + if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0)) + doubleValuesArray.insert(doubleValuesArray.end(), averageDensityArray->getDataVector().begin(), averageDensityArray->getDataVector().end()); ic++; } @@ -475,10 +519,10 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step) MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageDensityArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } // write to the file @@ -487,15 +531,17 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step) MPI_Offset write_offset = (MPI_Offset)(size * sizeof(int)); size_t next_write_offset = 0; - if (size > 1) { - if (rank == 0) { - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } @@ -524,19 +570,18 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step) // each process writes common parameters of a dataSet MPI_File_write_at(file_handler, write_offset, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE); // each process writes data identifying blocks - MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, - dataSetSmallType, MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE); // each process writes the dataSet arrays if (doubleValuesArray.size() > 0) - MPI_File_write_at(file_handler, - (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_sync(file_handler); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if 
(comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageDensityArray time: " << finish - start << " s"); } @@ -555,7 +600,8 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -564,10 +610,10 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step) std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks dataSetParam dataSetParamStr; - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageVelocityArray start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } bool firstBlock = true; @@ -575,13 +621,13 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step) int ic = 0; SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { - dataSetSmallArray[ic].x1 = - block->getX1(); // coordinates of the block needed to find it while regenerating the grid - dataSetSmallArray[ic].x2 = block->getX2(); - dataSetSmallArray[ic].x3 = block->getX3(); + dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid + dataSetSmallArray[ic].x2 = block->getX2(); + dataSetSmallArray[ic].x3 = block->getX3(); dataSetSmallArray[ic].level = block->getLevel(); AverageVelocityArray3DPtr = block->getKernel()->getDataSet()->getAverageVelocity(); @@ -593,16 +639,13 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step) dataSetParamStr.nx[1] = static_cast<int>(AverageVelocityArray3DPtr->getNX2()); dataSetParamStr.nx[2] = static_cast<int>(AverageVelocityArray3DPtr->getNX3()); dataSetParamStr.nx[3] = static_cast<int>(AverageVelocityArray3DPtr->getNX4()); - doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; firstBlock = false; } - if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && - (dataSetParamStr.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), AverageVelocityArray3DPtr->getDataVector().begin(), - AverageVelocityArray3DPtr->getDataVector().end()); + if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0)) + doubleValuesArray.insert(doubleValuesArray.end(), AverageVelocityArray3DPtr->getDataVector().begin(), AverageVelocityArray3DPtr->getDataVector().end()); ic++; } @@ -612,10 +655,10 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step) MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, 
&dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageVelocityArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } // write to the file @@ -624,15 +667,16 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step) MPI_Offset write_offset = (MPI_Offset)(size * sizeof(int)); size_t next_write_offset = 0; - if (size > 1) { - if (rank == 0) { - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } else + { MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } @@ -661,19 +705,18 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step) // each process writes common parameters of a dataSet MPI_File_write_at(file_handler, write_offset, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE); // each process writes data identifying blocks - MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, - dataSetSmallType, MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE); // each process writes the dataSet arrays if (doubleValuesArray.size() > 0) - MPI_File_write_at(file_handler, - (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_sync(file_handler); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageVelocityArray time: " << finish - start << " s"); } @@ -692,7 +735,8 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -701,10 +745,10 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step) std::vector<double> doubleValuesArray; // 
double-values (arrays of f's) in all blocks dataSetParam dataSetParamStr; - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageFluktuationsArray start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } bool firstBlock = true; @@ -712,11 +756,11 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step) int ic = 0; SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { - dataSetSmallArray[ic].x1 = - block->getX1(); // coordinates of the block needed to find it while regenerating the grid + dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid dataSetSmallArray[ic].x2 = block->getX2(); dataSetSmallArray[ic].x3 = block->getX3(); dataSetSmallArray[ic].level = block->getLevel(); @@ -730,14 +774,12 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step) dataSetParamStr.nx[1] = static_cast<int>(AverageFluctArray3DPtr->getNX2()); dataSetParamStr.nx[2] = static_cast<int>(AverageFluctArray3DPtr->getNX3()); dataSetParamStr.nx[3] = static_cast<int>(AverageFluctArray3DPtr->getNX4()); - doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; firstBlock = false; } - if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && - (dataSetParamStr.nx[3] > 0)) + if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) &&(dataSetParamStr.nx[3] > 0)) doubleValuesArray.insert(doubleValuesArray.end(), AverageFluctArray3DPtr->getDataVector().begin(), AverageFluctArray3DPtr->getDataVector().end()); @@ -749,10 +791,10 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step) MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageFluktuationsArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } // write to the file @@ -761,15 +803,16 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step) MPI_Offset write_offset = (MPI_Offset)(size * sizeof(int)); size_t next_write_offset = 0; - if (size > 1) { - if (rank == 0) { - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } 
else + { MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } @@ -788,8 +831,7 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step) #endif MPI_File file_handler; - std::string filename = - path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageFluktuationsArray.bin"; + std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageFluktuationsArray.bin"; int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename); @@ -803,15 +845,15 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step) dataSetSmallType, MPI_STATUS_IGNORE); // each process writes the dataSet arrays if (doubleValuesArray.size() > 0) - MPI_File_write_at(file_handler, - (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_sync(file_handler); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageFluktuationsArray time: " << finish - start << " s"); } @@ -830,7 +872,8 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -839,10 +882,10 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step) std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks dataSetParam dataSetParamStr; - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageTripleArray start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } bool firstBlock = true; @@ -850,11 +893,11 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step) int ic = 0; SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { - dataSetSmallArray[ic].x1 = - block->getX1(); // coordinates of the block needed to find it while regenerating the grid + dataSetSmallArray[ic].x1 
= block->getX1(); // coordinates of the block needed to find it while regenerating the grid dataSetSmallArray[ic].x2 = block->getX2(); dataSetSmallArray[ic].x3 = block->getX3(); dataSetSmallArray[ic].level = block->getLevel(); @@ -868,16 +911,13 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step) dataSetParamStr.nx[1] = static_cast<int>(AverageTripleArray3DPtr->getNX2()); dataSetParamStr.nx[2] = static_cast<int>(AverageTripleArray3DPtr->getNX3()); dataSetParamStr.nx[3] = static_cast<int>(AverageTripleArray3DPtr->getNX4()); - doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; firstBlock = false; } - if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && - (dataSetParamStr.nx[3] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), AverageTripleArray3DPtr->getDataVector().begin(), - AverageTripleArray3DPtr->getDataVector().end()); + if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0)) + doubleValuesArray.insert(doubleValuesArray.end(), AverageTripleArray3DPtr->getDataVector().begin(), AverageTripleArray3DPtr->getDataVector().end()); ic++; } @@ -887,10 +927,10 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step) MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageTripleArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } // write to the file @@ -899,15 +939,17 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step) MPI_Offset write_offset = (MPI_Offset)(size * sizeof(int)); size_t next_write_offset = 0; - if (size > 1) { - if (rank == 0) { - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } @@ -940,8 +982,7 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step) dataSetSmallType, MPI_STATUS_IGNORE); // each process writes the dataSet arrays if (doubleValuesArray.size() > 0) - MPI_File_write_at(file_handler, - (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + 
blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_sync(file_handler); @@ -967,7 +1008,8 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -976,10 +1018,10 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step) std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks dataSetParam dataSetParamStr; - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeShearStressValArray start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } bool firstBlock = true; @@ -987,11 +1029,11 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step) int ic = 0; SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ShearStressValArray3DPtr; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { - dataSetSmallArray[ic].x1 = - block->getX1(); // coordinates of the block needed to find it while regenerating the grid + dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid dataSetSmallArray[ic].x2 = block->getX2(); dataSetSmallArray[ic].x3 = block->getX3(); dataSetSmallArray[ic].level = block->getLevel(); @@ -1005,14 +1047,12 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step) dataSetParamStr.nx[1] = static_cast<int>(ShearStressValArray3DPtr->getNX2()); dataSetParamStr.nx[2] = static_cast<int>(ShearStressValArray3DPtr->getNX3()); dataSetParamStr.nx[3] = static_cast<int>(ShearStressValArray3DPtr->getNX4()); - doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; firstBlock = false; } - if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && - (dataSetParamStr.nx[3] > 0)) + if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0)) doubleValuesArray.insert(doubleValuesArray.end(), ShearStressValArray3DPtr->getDataVector().begin(), ShearStressValArray3DPtr->getDataVector().end()); @@ -1024,10 +1064,10 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step) MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeShearStressValArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, 
"Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } // write to the file @@ -1036,15 +1076,17 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step) MPI_Offset write_offset = (MPI_Offset)(size * sizeof(int)); size_t next_write_offset = 0; - if (size > 1) { - if (rank == 0) { - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } @@ -1077,15 +1119,15 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step) dataSetSmallType, MPI_STATUS_IGNORE); // each process writes the dataSet arrays if (doubleValuesArray.size() > 0) - MPI_File_write_at(file_handler, - (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_sync(file_handler); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::writeShearStressValArray time: " << finish - start << " s"); } @@ -1104,7 +1146,8 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -1113,10 +1156,10 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step) std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks dataSetParam dataSetParamStr; - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeRelaxationFactor start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } bool firstBlock = true; @@ -1124,11 +1167,11 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step) int ic = 0; SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> RelaxationFactor3DPtr; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : 
blocksVector[level]) // blocks of the current level { - dataSetSmallArray[ic].x1 = - block->getX1(); // coordinates of the block needed to find it while regenerating the grid + dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid dataSetSmallArray[ic].x2 = block->getX2(); dataSetSmallArray[ic].x3 = block->getX3(); dataSetSmallArray[ic].level = block->getLevel(); @@ -1142,8 +1185,7 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step) dataSetParamStr.nx[1] = static_cast<int>(RelaxationFactor3DPtr->getNX2()); dataSetParamStr.nx[2] = static_cast<int>(RelaxationFactor3DPtr->getNX3()); dataSetParamStr.nx[3] = 1; - doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; firstBlock = false; } @@ -1160,10 +1202,10 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step) MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeRelaxationFactor start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } // write to the file @@ -1172,15 +1214,17 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step) MPI_Offset write_offset = (MPI_Offset)(size * sizeof(int)); size_t next_write_offset = 0; - if (size > 1) { - if (rank == 0) { - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } @@ -1213,15 +1257,15 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step) dataSetSmallType, MPI_STATUS_IGNORE); // each process writes the dataSet arrays if (doubleValuesArray.size() > 0) - MPI_File_write_at(file_handler, - (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_sync(file_handler); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::writeRelaxationFactor time: " << finish - start << " s"); } @@ -1229,7 +1273,7 @@ void 
MPIIORestartCoProcessor::writeRelaxationFactor(int step) delete[] dataSetSmallArray; } -void MPIIORestartCoProcessor::writePhaseField(int step) +void MPIIORestartCoProcessor::writePhaseField(int step, int fieldN) { int rank, size; MPI_Comm_rank(MPI_COMM_WORLD, &rank); @@ -1240,7 +1284,8 @@ void MPIIORestartCoProcessor::writePhaseField(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -1249,10 +1294,10 @@ void MPIIORestartCoProcessor::writePhaseField(int step) std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks dataSetParam dataSetParamStr; - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writePhaseField start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } bool firstBlock = true; @@ -1260,7 +1305,8 @@ void MPIIORestartCoProcessor::writePhaseField(int step) int ic = 0; SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> PhaseField3DPtr; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // blocks of the current level { dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid @@ -1268,7 +1314,10 @@ void MPIIORestartCoProcessor::writePhaseField(int step) dataSetSmallArray[ic].x3 = block->getX3(); dataSetSmallArray[ic].level = block->getLevel(); - PhaseField3DPtr = block->getKernel()->getDataSet()->getPhaseField(); + if(fieldN == 1) + PhaseField3DPtr = block->getKernel()->getDataSet()->getPhaseField(); + else + PhaseField3DPtr = block->getKernel()->getDataSet()->getPhaseField2(); if (firstBlock) // when first (any) valid block... 
{ @@ -1277,28 +1326,25 @@ void MPIIORestartCoProcessor::writePhaseField(int step) dataSetParamStr.nx[1] = static_cast<int>(PhaseField3DPtr->getNX2()); dataSetParamStr.nx[2] = static_cast<int>(PhaseField3DPtr->getNX3()); dataSetParamStr.nx[3] = 1; - doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; firstBlock = false; } - if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0)) - doubleValuesArray.insert(doubleValuesArray.end(), PhaseField3DPtr->getDataVector().begin(), - PhaseField3DPtr->getDataVector().end()); + doubleValuesArray.insert(doubleValuesArray.end(), PhaseField3DPtr->getDataVector().begin(), PhaseField3DPtr->getDataVector().end()); ic++; } } - + // register new MPI-types depending on the block-specific information MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writePhaseField start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } // write to the file @@ -1307,15 +1353,17 @@ void MPIIORestartCoProcessor::writePhaseField(int step) MPI_Offset write_offset = (MPI_Offset)(size * sizeof(int)); size_t next_write_offset = 0; - if (size > 1) { - if (rank == 0) { - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_write_offset = write_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } @@ -1334,7 +1382,9 @@ void MPIIORestartCoProcessor::writePhaseField(int step) #endif MPI_File file_handler; - std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField.bin"; + std::string filename; + if(fieldN == 1) filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField1.bin"; + else filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField2.bin"; int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler); if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename); @@ -1348,15 +1398,15 @@ void MPIIORestartCoProcessor::writePhaseField(int step) dataSetSmallType, MPI_STATUS_IGNORE); // each process writes the dataSet arrays if (doubleValuesArray.size() > 0) - MPI_File_write_at(file_handler, - (MPI_Offset)(write_offset + sizeof(dataSetParam) + 
blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_sync(file_handler); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::writePhaseField time: " << finish - start << " s"); } @@ -1370,10 +1420,10 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step) MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeBoundaryConds start collect data rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks! @@ -1384,7 +1434,8 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step) std::vector<SPtr<Block3D>> blocksVector[25]; int minInitLevel = this->grid->getCoarsestInitializedLevel(); int maxInitLevel = this->grid->getFinestInitializedLevel(); - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { grid->getBlocks(level, rank, blocksVector[level]); blocksCount += static_cast<int>(blocksVector[level].size()); } @@ -1394,27 +1445,31 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step) std::vector<int> bcindexmatrixV; std::vector<int> indexContainerV; bool bcindexmatrixCountNotInit = true; - int ic = 0; + int ic = 0; SPtr<BCArray3D> bcArr; - for (int level = minInitLevel; level <= maxInitLevel; level++) { + for (int level = minInitLevel; level <= maxInitLevel; level++) + { for (SPtr<Block3D> block : blocksVector[level]) // all the blocks of the current level { bcArr = block->getKernel()->getBCProcessor()->getBCArray(); - bcAddArray[ic].x1 = - block->getX1(); // coordinates of the block needed to find it while regenerating the grid + bcAddArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid bcAddArray[ic].x2 = block->getX2(); bcAddArray[ic].x3 = block->getX3(); bcAddArray[ic].level = block->getLevel(); bcAddArray[ic].boundCond_count = 0; // how many BoundaryConditions in this block bcAddArray[ic].indexContainer_count = 0; // how many indexContainer-values in this block - for (std::size_t bc = 0; bc < bcArr->getBCVectorSize(); bc++) { + for (std::size_t bc = 0; bc < bcArr->getBCVectorSize(); bc++) + { BoundaryCondition *bouCond = new BoundaryCondition(); - if (bcArr->bcvector[bc] == NULL) { + if (bcArr->bcvector[bc] == NULL) + { memset(bouCond, 0, sizeof(BoundaryCondition)); - } else { + } + else + { bouCond->noslipBoundaryFlags = bcArr->bcvector[bc]->getNoSlipBoundary(); bouCond->slipBoundaryFlags = bcArr->bcvector[bc]->getSlipBoundary(); bouCond->velocityBoundaryFlags = bcArr->bcvector[bc]->getVelocityBoundary(); @@ -1445,15 +1500,15 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step) // the quantity of elements in the bcindexmatrix array (CbArray3D<int, IndexerX3X2X1>) in bcArray(BCArray3D) // is always equal, this will be the size of the "write-read-block" in MPI_write_.../MPI_read-functions when 
// writing/reading BoundConds - if (bcindexmatrixCountNotInit) { + if (bcindexmatrixCountNotInit) + { boundCondParamStr.nx1 = static_cast<int>(bcArr->bcindexmatrix.getNX1()); boundCondParamStr.nx2 = static_cast<int>(bcArr->bcindexmatrix.getNX2()); boundCondParamStr.nx3 = static_cast<int>(bcArr->bcindexmatrix.getNX3()); boundCondParamStr.bcindexmatrixCount = static_cast<int>(bcArr->bcindexmatrix.getDataVector().size()); bcindexmatrixCountNotInit = false; } - bcindexmatrixV.insert(bcindexmatrixV.end(), bcArr->bcindexmatrix.getDataVector().begin(), - bcArr->bcindexmatrix.getDataVector().end()); + bcindexmatrixV.insert(bcindexmatrixV.end(), bcArr->bcindexmatrix.getDataVector().begin(), bcArr->bcindexmatrix.getDataVector().end()); indexContainerV.insert(indexContainerV.end(), bcArr->indexContainer.begin(), bcArr->indexContainer.end()); bcAddArray[ic].indexContainer_count = static_cast<int>(bcArr->indexContainer.size()); @@ -1470,7 +1525,8 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step) int bcBlockCount = (int)(count_boundCond / BLOCK_SIZE); if (bcBlockCount * BLOCK_SIZE < (int)count_boundCond) bcBlockCount += 1; - for (int i = (int)count_boundCond; i < bcBlockCount * BLOCK_SIZE; i++) { + for (int i = (int)count_boundCond; i < bcBlockCount * BLOCK_SIZE; i++) + { BoundaryCondition *bouCond = new BoundaryCondition(); memset(bouCond, 0, sizeof(BoundaryCondition)); bcVector.push_back(*bouCond); @@ -1485,11 +1541,15 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step) MPI_Offset write_offset = (MPI_Offset)(size * (3 * sizeof(int) + sizeof(boundCondParam))); size_t next_write_offset = 0; - if (size > 1) { - if (rank == 0) { + if (size > 1) + { + if (rank == 0) + { next_write_offset = write_offset + byteCount; MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); next_write_offset = write_offset + byteCount; if (rank < size - 1) @@ -1497,10 +1557,10 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step) } } - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::writeBoundaryConds start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; @@ -1526,14 +1586,11 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step) // each process writes the quantity of it's blocks MPI_File_write_at(file_handler, write_offset1, &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE); // each process writes the quantity of "big blocks" of BLOCK_SIZE of boundary conditions - MPI_File_write_at(file_handler, (MPI_Offset)(write_offset1 + sizeof(int)), &bcBlockCount, 1, MPI_INT, - MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset1 + sizeof(int)), &bcBlockCount, 1, MPI_INT, MPI_STATUS_IGNORE); // each process writes the quantity of indexContainer elements in all blocks - MPI_File_write_at(file_handler, (MPI_Offset)(write_offset1 + 2 * sizeof(int)), &count_indexContainer, 1, MPI_INT, - MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset1 + 2 * sizeof(int)), &count_indexContainer, 1, MPI_INT, MPI_STATUS_IGNORE); // each process writes the quantity of bcindexmatrix elements in every block - 
MPI_File_write_at(file_handler, (MPI_Offset)(write_offset1 + 3 * sizeof(int)), &boundCondParamStr, 1, - boundCondParamType, MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset1 + 3 * sizeof(int)), &boundCondParamStr, 1, boundCondParamType, MPI_STATUS_IGNORE); // each process writes data identifying the blocks MPI_File_write_at(file_handler, write_offset, bcAddArray, blocksCount, boundCondTypeAdd, MPI_STATUS_IGNORE); @@ -1543,23 +1600,19 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step) bcBlockCount, boundCondType1000, MPI_STATUS_IGNORE); // each process writes bcindexmatrix values if (bcindexmatrixV.size() > 0) - MPI_File_write_at(file_handler, - (MPI_Offset)(write_offset + blocksCount * sizeof(BCAddRestart) + - bcBlockCount * BLOCK_SIZE * sizeof(BoundaryCondition)), + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + blocksCount * sizeof(BCAddRestart) + bcBlockCount * BLOCK_SIZE * sizeof(BoundaryCondition)), &bcindexmatrixV[0], blocksCount, bcindexmatrixType, MPI_STATUS_IGNORE); // each process writes indexContainer values if (indexContainerV.size() > 0) - MPI_File_write_at(file_handler, - (MPI_Offset)(write_offset + blocksCount * sizeof(BCAddRestart) + - bcBlockCount * BLOCK_SIZE * sizeof(BoundaryCondition) + - blocksCount * boundCondParamStr.bcindexmatrixCount * sizeof(int)), - &indexContainerV[0], count_indexContainer, MPI_INT, MPI_STATUS_IGNORE); + MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + blocksCount * sizeof(BCAddRestart) + bcBlockCount * BLOCK_SIZE * sizeof(BoundaryCondition) + + blocksCount * boundCondParamStr.bcindexmatrixCount * sizeof(int)), &indexContainerV[0], count_indexContainer, MPI_INT, MPI_STATUS_IGNORE); MPI_File_sync(file_handler); MPI_File_close(&file_handler); MPI_Type_free(&bcindexmatrixType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::writeBoundaryConds time: " << finish - start << " s"); } @@ -1593,17 +1646,18 @@ void MPIIORestartCoProcessor::readDataSet(int step) MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readDataSet start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } + double start, finish; if (comm->isRoot()) start = MPI_Wtime(); MPI_File file_handler; - std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin"; + std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin"; int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler); if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename); @@ -1611,6 +1665,7 @@ void MPIIORestartCoProcessor::readDataSet(int step) // calculate the read offset MPI_Offset read_offset = (MPI_Offset)(size * sizeof(int)); size_t next_read_offset = 0; + bool multiPhase = false; // read count of blocks int blocksCount = 0; @@ -1618,122 +1673,146 @@ void MPIIORestartCoProcessor::readDataSet(int step) MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE); MPI_File_read_at(file_handler, read_offset, &dataSetParamStr1, 1, 
dataSetParamType, MPI_STATUS_IGNORE); - MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), &dataSetParamStr2, 1, - dataSetParamType, MPI_STATUS_IGNORE); - MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + 2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, - dataSetParamType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + 2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE); DataSetRestart *dataSetArray = new DataSetRestart[blocksCount]; - double doubleCountInBlock = - (dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] + + double doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] + dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] + - dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]) * 2; - std::vector<double> doubleValuesArray(size_t(blocksCount * doubleCountInBlock)); // double-values in all blocks + dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]; + std::vector<double> doubleValuesArrayF(size_t(blocksCount * doubleCountInBlock)); // double-values in all blocks Fdistributions + std::vector<double> doubleValuesArrayH1; // double-values in all blocks H1distributions + //std::vector<double> doubleValuesArrayH2; // double-values in all blocks H2distributions // define MPI_types depending on the block-specific information MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType); MPI_Type_commit(&dataSetDoubleType); - if (size > 1) { - if (rank == 0) { - next_read_offset = read_offset + 3 * sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetRestart) + size_t(doubleCountInBlock) * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_read_offset = read_offset + 3 * sizeof(dataSetParam) + blocksCount * (sizeof(DataSetRestart) + size_t(doubleCountInBlock) * sizeof(double)); MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_read_offset = read_offset + 3 * sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetRestart) + size_t(doubleCountInBlock) * sizeof(double)); + next_read_offset = read_offset + 3 * sizeof(dataSetParam) + blocksCount * (sizeof(DataSetRestart) + size_t(doubleCountInBlock) * sizeof(double)); if (rank < size - 1) MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } } - MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam)), dataSetArray, blocksCount, - dataSetType, MPI_STATUS_IGNORE); - MPI_File_read_at(file_handler, - (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam) + blocksCount * sizeof(DataSetRestart)), - &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam)), dataSetArray, blocksCount, dataSetType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam) + blocksCount * sizeof(DataSetRestart)), + &doubleValuesArrayF[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); + 
MPI_File_close(&file_handler); + + //-------------------------------------- H1 ----------------------------- + MPI_Offset fsize; + filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin"; + rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler); + if (rc != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename); + MPI_File_get_size(file_handler, &fsize); + if (fsize > 0) + { + multiPhase = true; + doubleValuesArrayH1.resize(blocksCount * doubleCountInBlock); + MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH1[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); + } MPI_File_close(&file_handler); + + //-------------------------------------- H2 ----------------------------- + /*filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin"; + rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler); + if (rc != MPI_SUCCESS) + throw UbException(UB_EXARGS, "couldn't open file " + filename); + + doubleValuesArrayH2.resize(blocksCount * doubleCountInBlock); + MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH2[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); + MPI_File_close(&file_handler);*/ + MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::readDataSet time: " << finish - start << " s"); UBLOG(logINFO, "MPIIORestartCoProcessor::readDataSet start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } - + size_t index = 0; std::vector<double> vectorsOfValuesF1, vectorsOfValuesF2, vectorsOfValuesF3; - std::vector<double> vectorsOfValuesH1, vectorsOfValuesH2, vectorsOfValuesH3; - size_t vectorSize1 = - dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3]; - size_t vectorSize2 = - dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3]; - size_t vectorSize3 = - dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]; - - for (int n = 0; n < blocksCount; n++) { - vectorsOfValuesF1.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize1); - index += vectorSize1; - - vectorsOfValuesF2.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize2); - index += vectorSize2; - - vectorsOfValuesF3.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize3); - index += vectorSize3; - - vectorsOfValuesH1.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize1); + std::vector<double> vectorsOfValuesH11, vectorsOfValuesH12, vectorsOfValuesH13; + //std::vector<double> vectorsOfValuesH21, vectorsOfValuesH22, vectorsOfValuesH23; + size_t vectorSize1 = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3]; + size_t vectorSize2 = dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3]; + size_t vectorSize3 = dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]; + + for (int n = 0; n < 
blocksCount; n++) + { + vectorsOfValuesF1.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize1); + if (multiPhase) + vectorsOfValuesH11.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize1); + //vectorsOfValuesH21.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize1); index += vectorSize1; - vectorsOfValuesH2.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize2); + vectorsOfValuesF2.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize2); + if (multiPhase) + vectorsOfValuesH12.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize2); + //vectorsOfValuesH22.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize2); index += vectorSize2; - vectorsOfValuesH3.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize3); + vectorsOfValuesF3.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize3); + if (multiPhase) + vectorsOfValuesH13.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize3); + //vectorsOfValuesH23.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize3); index += vectorSize3; SPtr<DistributionArray3D> mFdistributions(new D3Q27EsoTwist3DSplittedVector()); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions) - ->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0], - dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], - dataSetParamStr1.nx[3]))); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions) - ->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0], - dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], - dataSetParamStr2.nx[3]))); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions) - ->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( + new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3]))); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( + new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3]))); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( vectorsOfValuesF3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2]))); dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX1(dataSetParamStr1.nx1); dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX2(dataSetParamStr1.nx2); dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX3(dataSetParamStr1.nx3); - SPtr<DistributionArray3D> mHdistributions(new D3Q27EsoTwist3DSplittedVector()); - 
dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions) - ->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH1, dataSetParamStr1.nx[0], - dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], - dataSetParamStr1.nx[3]))); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions) - ->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH2, dataSetParamStr2.nx[0], - dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], - dataSetParamStr2.nx[3]))); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions) - ->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( - vectorsOfValuesH3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2]))); - - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX1(dataSetParamStr1.nx1); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX2(dataSetParamStr1.nx2); - dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX3(dataSetParamStr1.nx3); - + SPtr<DistributionArray3D> mH1distributions(new D3Q27EsoTwist3DSplittedVector()); + if (multiPhase) + { + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( + new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH11, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3]))); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( + new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH12, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3]))); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( + vectorsOfValuesH13, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2]))); + + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX1(dataSetParamStr1.nx1); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX2(dataSetParamStr1.nx2); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX3(dataSetParamStr1.nx3); + } + /*SPtr<DistributionArray3D> mH2distributions(new D3Q27EsoTwist3DSplittedVector()); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( + new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH21, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3]))); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( + new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH22, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3]))); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>( + vectorsOfValuesH23, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2]))); + + 
dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX1(dataSetParamStr1.nx1); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX2(dataSetParamStr1.nx2); + dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX3(dataSetParamStr1.nx3);*/ + // find the nesessary block and fill it - SPtr<Block3D> block = - grid->getBlock(dataSetArray[n].x1, dataSetArray[n].x2, dataSetArray[n].x3, dataSetArray[n].level); + SPtr<Block3D> block = grid->getBlock(dataSetArray[n].x1, dataSetArray[n].x2, dataSetArray[n].x3, dataSetArray[n].level); + this->lbmKernel->setBlock(block); SPtr<LBMKernel> kernel = this->lbmKernel->clone(); kernel->setGhostLayerWidth(dataSetArray[n].ghostLayerWidth); @@ -1741,17 +1820,22 @@ void MPIIORestartCoProcessor::readDataSet(int step) kernel->setDeltaT(dataSetArray[n].deltaT); kernel->setCompressible(dataSetArray[n].compressible); kernel->setWithForcing(dataSetArray[n].withForcing); + kernel->setCollisionFactorMultiphase(dataSetArray[n].collFactorL, dataSetArray[n].collFactorG); + kernel->setDensityRatio(dataSetArray[n].densityRatio); + SPtr<DataSet3D> dataSetPtr = SPtr<DataSet3D>(new DataSet3D()); dataSetPtr->setFdistributions(mFdistributions); - dataSetPtr->setHdistributions(mHdistributions); + if (multiPhase) + dataSetPtr->setHdistributions(mH1distributions); + //dataSetPtr->setH2distributions(mH2distributions); kernel->setDataSet(dataSetPtr); block->setKernel(kernel); } - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readDataSet end of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } delete[] dataSetArray; @@ -1785,8 +1869,11 @@ void MPIIORestartCoProcessor::readDataSet(int step) if (arrPresence.isRelaxationFactorPresent) readRelaxationFactor(step); - if (arrPresence.isPhaseFieldPresent) - readPhaseField(step); + if (arrPresence.isPhaseField1Present) + readPhaseField(step, 1); + + if (arrPresence.isPhaseField2Present) + readPhaseField(step, 2); } void MPIIORestartCoProcessor::readAverageDensityArray(int step) @@ -1795,10 +1882,10 @@ void MPIIORestartCoProcessor::readAverageDensityArray(int step) MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageDensityArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; if (comm->isRoot()) @@ -1816,12 +1903,10 @@ void MPIIORestartCoProcessor::readAverageDensityArray(int step) memset(&dataSetParamStr, 0, sizeof(dataSetParam)); MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE); - MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, - MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE); DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount]; - int doubleCountInBlock = - 
dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks // define MPI_types depending on the block-specific information @@ -1832,61 +1917,59 @@ void MPIIORestartCoProcessor::readAverageDensityArray(int step) MPI_Offset read_offset = (MPI_Offset)(size * sizeof(int)); size_t next_read_offset = 0; - if (size > 1) { - if (rank == 0) { - next_read_offset = read_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_read_offset = read_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } } - MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, - dataSetSmallType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE); if (doubleCountInBlock > 0) - MPI_File_read_at(file_handler, - (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageDensityArray time: " << finish - start << " s"); UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageDensityArray start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } size_t index = 0; - size_t nextVectorSize = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> vectorsOfValues; - for (int n = 0; n < blocksCount; n++) { + for (int n = 0; n < blocksCount; n++) + { vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize); index += nextVectorSize; // fill mAverageDensity arrays SPtr<AverageValuesArray3D> mAverageDensity; - mAverageDensity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], 
dataSetParamStr.nx[1], - dataSetParamStr.nx[2], dataSetParamStr.nx[3])); + mAverageDensity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, + dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); // find the necessary block and fill it - SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, - dataSetSmallArray[n].level); + SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level); block->getKernel()->getDataSet()->setAverageDensity(mAverageDensity); } - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageDensityArray end of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } delete[] dataSetSmallArray; @@ -1898,10 +1981,10 @@ void MPIIORestartCoProcessor::readAverageVelocityArray(int step) MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageVelocityArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; if (comm->isRoot()) @@ -1917,12 +2000,10 @@ void MPIIORestartCoProcessor::readAverageVelocityArray(int step) int blocksCount = 0; dataSetParam dataSetParamStr; MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE); - MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, - MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE); DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount]; - int doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks // define MPI_types depending on the block-specific information @@ -1933,61 +2014,59 @@ void MPIIORestartCoProcessor::readAverageVelocityArray(int step) MPI_Offset read_offset = (MPI_Offset)(size * sizeof(int)); size_t next_read_offset = 0; - if (size > 1) { - if (rank == 0) { - next_read_offset = read_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_read_offset = read_offset + 
sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } } - MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, - dataSetSmallType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE); if (doubleCountInBlock > 0) - MPI_File_read_at(file_handler, - (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageVelocityArray time: " << finish - start << " s"); UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageVelocityArray start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } size_t index = 0; - size_t nextVectorSize = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> vectorsOfValues; - for (int n = 0; n < blocksCount; n++) { + for (int n = 0; n < blocksCount; n++) + { vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize); index += nextVectorSize; // fill mAverageVelocity array SPtr<AverageValuesArray3D> mAverageVelocity; - mAverageVelocity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], - dataSetParamStr.nx[2], dataSetParamStr.nx[3])); + mAverageVelocity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], + dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); // find the necessary block and fill it - SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, - dataSetSmallArray[n].level); + SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level); block->getKernel()->getDataSet()->setAverageVelocity(mAverageVelocity); } - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageVelocityArray end of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } delete[] dataSetSmallArray; @@ -1999,10 
+2078,10 @@ void MPIIORestartCoProcessor::readAverageFluktuationsArray(int step) MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageFluktuationsArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; if (comm->isRoot()) @@ -2019,12 +2098,10 @@ void MPIIORestartCoProcessor::readAverageFluktuationsArray(int step) int blocksCount = 0; dataSetParam dataSetParamStr; MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE); - MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, - MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE); DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount]; - int doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks // define MPI_types depending on the block-specific information @@ -2035,62 +2112,59 @@ void MPIIORestartCoProcessor::readAverageFluktuationsArray(int step) MPI_Offset read_offset = (MPI_Offset)(size * sizeof(int)); size_t next_read_offset = 0; - if (size > 1) { - if (rank == 0) { - next_read_offset = read_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_read_offset = read_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } } - MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, - dataSetSmallType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE); if (doubleCountInBlock > 0) - MPI_File_read_at(file_handler, - (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); 
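
Every read and write routine in this file repeats the same offset-relay idiom: rank 0 starts right behind the per-rank header, each later rank receives the start of its slice from rank - 1, and every rank except the last forwards its end offset to rank + 1, so the ranks' slices end up back to back in the checkpoint file. A minimal sketch of the pattern, assuming only MPI; the names are illustrative, and the tag 5 and MPI_LONG_LONG_INT transfer mirror the code above:

#include <mpi.h>
#include <cstddef>

// Relay per-rank file offsets through the communicator: rank 0 starts at
// baseOffset, every other rank receives its offset from rank - 1, and all
// ranks but the last forward offset + localBytes to rank + 1.
MPI_Offset chainOffset(MPI_Offset baseOffset, size_t localBytes)
{
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    MPI_Offset myOffset = baseOffset;
    if (rank > 0)
        MPI_Recv(&myOffset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    if (rank < size - 1) {
        MPI_Offset next = myOffset + (MPI_Offset)localBytes;
        MPI_Send(&next, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
    }
    return myOffset;
}

A collective MPI_Exscan over the per-rank byte counts would compute the same offsets without serializing the ranks; the point-to-point chain is shown here only because it is what the surrounding code does.
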
UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageFluktuationsArray time: " << finish - start << " s"); - UBLOG(logINFO, - "MPIIORestartCoProcessor::readAverageFluktuationsArray start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageFluktuationsArray start of restore of data, rank = " << rank); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } size_t index = 0; - size_t nextVectorSize = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> vectorsOfValues; - for (int n = 0; n < blocksCount; n++) { + for (int n = 0; n < blocksCount; n++) + { vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize); index += nextVectorSize; // fill AverageFluktuations array SPtr<AverageValuesArray3D> mAverageFluktuations; - mAverageFluktuations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], - dataSetParamStr.nx[2], dataSetParamStr.nx[3])); + mAverageFluktuations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, + dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); // find the necessary block and fill it - SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, - dataSetSmallArray[n].level); + SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level); block->getKernel()->getDataSet()->setAverageFluctuations(mAverageFluktuations); } - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageFluktuationsArray end of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } delete[] dataSetSmallArray; @@ -2102,10 +2176,10 @@ void MPIIORestartCoProcessor::readAverageTripleArray(int step) MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageTripleArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; if (comm->isRoot()) @@ -2121,12 +2195,10 @@ void MPIIORestartCoProcessor::readAverageTripleArray(int step) int blocksCount = 0; dataSetParam dataSetParamStr; MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE); - MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, - MPI_STATUS_IGNORE); + 
MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE); DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount]; - int doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks // define MPI_types depending on the block-specific information @@ -2137,61 +2209,59 @@ void MPIIORestartCoProcessor::readAverageTripleArray(int step) MPI_Offset read_offset = (MPI_Offset)(size * sizeof(int)); size_t next_read_offset = 0; - if (size > 1) { - if (rank == 0) { - next_read_offset = read_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_read_offset = read_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } } - MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, - dataSetSmallType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE); if (doubleCountInBlock > 0) - MPI_File_read_at(file_handler, - (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageTripleArray time: " << finish - start << " s"); UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageTripleArray start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } size_t index = 0; - size_t nextVectorSize = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> vectorsOfValues; - for (int n = 0; n < blocksCount; n++) { + for (int n = 0; n < blocksCount; n++) + { vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize); index += nextVectorSize; // fill 
AverageTriplecorrelations array SPtr<AverageValuesArray3D> mAverageTriplecorrelations; - mAverageTriplecorrelations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], - dataSetParamStr.nx[2], dataSetParamStr.nx[3])); + mAverageTriplecorrelations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, + dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); // find the necessary block and fill it - SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, - dataSetSmallArray[n].level); + SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level); block->getKernel()->getDataSet()->setAverageTriplecorrelations(mAverageTriplecorrelations); } - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageTripleArray end of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } delete[] dataSetSmallArray; @@ -2203,10 +2273,10 @@ void MPIIORestartCoProcessor::readShearStressValArray(int step) MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readShearStressValArray start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; if (comm->isRoot()) @@ -2222,12 +2292,10 @@ void MPIIORestartCoProcessor::readShearStressValArray(int step) int blocksCount = 0; dataSetParam dataSetParamStr; MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE); - MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, - MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE); DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount]; - int doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks // define MPI_types depending on the block-specific information @@ -2238,61 +2306,59 @@ void MPIIORestartCoProcessor::readShearStressValArray(int step) MPI_Offset read_offset = (MPI_Offset)(size * sizeof(int)); size_t next_read_offset = 0; - if (size > 1) { - if (rank == 0) { - next_read_offset = read_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + 
doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_read_offset = read_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } } - MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, - dataSetSmallType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE); if (doubleCountInBlock > 0) - MPI_File_read_at(file_handler, - (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::readShearStressValArray time: " << finish - start << " s"); UBLOG(logINFO, "MPIIORestartCoProcessor::readShearStressValArray start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } size_t index = 0; - size_t nextVectorSize = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> vectorsOfValues; - for (int n = 0; n < blocksCount; n++) { + for (int n = 0; n < blocksCount; n++) + { vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize); index += nextVectorSize; // fill ShearStressValuesArray array SPtr<ShearStressValuesArray3D> mShearStressValues; - mShearStressValues = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr( - new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], - dataSetParamStr.nx[2], dataSetParamStr.nx[3])); + mShearStressValues = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, + dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3])); // find the necessary block and fill it - SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, - dataSetSmallArray[n].level); + SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level); block->getKernel()->getDataSet()->setShearStressValues(mShearStressValues); } - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readShearStressValArray end of restore of data, rank = " << rank); - UBLOG(logINFO, 
"Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } delete[] dataSetSmallArray; @@ -2304,10 +2370,10 @@ void MPIIORestartCoProcessor::readRelaxationFactor(int step) MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readRelaxationFactor start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; if (comm->isRoot()) @@ -2323,12 +2389,10 @@ void MPIIORestartCoProcessor::readRelaxationFactor(int step) int blocksCount = 0; dataSetParam dataSetParamStr; MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE); - MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, - MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE); DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount]; - int doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks // define MPI_types depending on the block-specific information @@ -2339,42 +2403,42 @@ void MPIIORestartCoProcessor::readRelaxationFactor(int step) MPI_Offset read_offset = (MPI_Offset)(size * sizeof(int)); size_t next_read_offset = 0; - if (size > 1) { - if (rank == 0) { - next_read_offset = read_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_read_offset = read_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } } - MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, - dataSetSmallType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE); if (doubleCountInBlock > 0) - MPI_File_read_at(file_handler, - (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + 
blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::readRelaxationFactor time: " << finish - start << " s"); UBLOG(logINFO, "MPIIORestartCoProcessor::readRelaxationFactor start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } size_t index = 0; - size_t nextVectorSize = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> vectorsOfValues; - for (int n = 0; n < blocksCount; n++) { + for (int n = 0; n < blocksCount; n++) + { vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize); index += nextVectorSize; @@ -2384,37 +2448,38 @@ void MPIIORestartCoProcessor::readRelaxationFactor(int step) vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2])); // find the necessary block and fill it - SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, - dataSetSmallArray[n].level); + SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level); block->getKernel()->getDataSet()->setRelaxationFactor(mRelaxationFactor); } - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readRelaxationFactor end of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } delete[] dataSetSmallArray; } -void MPIIORestartCoProcessor::readPhaseField(int step) +void MPIIORestartCoProcessor::readPhaseField(int step, int fieldN) { int rank, size; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readPhaseField start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; if (comm->isRoot()) start = MPI_Wtime(); MPI_File file_handler; - std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField.bin"; + std::string filename; + if(fieldN == 1) filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField1.bin"; + else filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField2.bin"; int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler); if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename); @@ -2427,8 +2492,7 @@ void 
MPIIORestartCoProcessor::readPhaseField(int step) MPI_STATUS_IGNORE); DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount]; - int doubleCountInBlock = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks // define MPI_types depending on the block-specific information @@ -2439,42 +2503,43 @@ void MPIIORestartCoProcessor::readPhaseField(int step) MPI_Offset read_offset = (MPI_Offset)(size * sizeof(int)); size_t next_read_offset = 0; - if (size > 1) { - if (rank == 0) { - next_read_offset = read_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + if (size > 1) + { + if (rank == 0) + { + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD); - } else { + } + else + { MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - next_read_offset = read_offset + sizeof(dataSetParam) + - blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); + next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double)); if (rank < size - 1) MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD); } } - MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, - dataSetSmallType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE); if (doubleCountInBlock > 0) - MPI_File_read_at(file_handler, - (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE); MPI_File_close(&file_handler); MPI_Type_free(&dataSetDoubleType); - if (comm->isRoot()) { + if (comm->isRoot()) + { finish = MPI_Wtime(); UBLOG(logINFO, "MPIIORestartCoProcessor::readPhaseField time: " << finish - start << " s"); UBLOG(logINFO, "MPIIORestartCoProcessor::readPhaseField start of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } size_t index = 0; - size_t nextVectorSize = - dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; + size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3]; std::vector<double> vectorsOfValues; - for (int n = 0; n < blocksCount; n++) { + + for (int n = 0; n < blocksCount; n++) + { vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize); index += nextVectorSize; @@ -2484,15 +2549,18 @@ void MPIIORestartCoProcessor::readPhaseField(int step) 
vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2])); // find the necessary block and fill it - SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, - dataSetSmallArray[n].level); - block->getKernel()->getDataSet()->setPhaseField(mPhaseField); + SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level); + if(fieldN == 1) + block->getKernel()->getDataSet()->setPhaseField(mPhaseField); + else + block->getKernel()->getDataSet()->setPhaseField2(mPhaseField); + } - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readPhaseField end of restore of data, rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } delete[] dataSetSmallArray; @@ -2504,10 +2572,10 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step) MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); - if (comm->isRoot()) { + if (comm->isRoot()) + { UBLOG(logINFO, "MPIIORestartCoProcessor::readBoundaryConds start MPI IO rank = " << rank); - UBLOG(logINFO, "Physical Memory currently used by current process: " - << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); + UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB"); } double start, finish; if (comm->isRoot()) @@ -2527,14 +2595,11 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step) // read count of blocks MPI_File_read_at(file_handler, read_offset1, &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE); // read count of big BoundaryCondition blocks - MPI_File_read_at(file_handler, (MPI_Offset)(read_offset1 + sizeof(int)), &dataCount1000, 1, MPI_INT, - MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset1 + sizeof(int)), &dataCount1000, 1, MPI_INT, MPI_STATUS_IGNORE); // read count of indexContainer values in all blocks - MPI_File_read_at(file_handler, (MPI_Offset)(read_offset1 + 2 * sizeof(int)), &dataCount2, 1, MPI_INT, - MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset1 + 2 * sizeof(int)), &dataCount2, 1, MPI_INT, MPI_STATUS_IGNORE); // read count of bcindexmatrix values in every block - MPI_File_read_at(file_handler, (MPI_Offset)(read_offset1 + 3 * sizeof(int)), &boundCondParamStr, 1, - boundCondParamType, MPI_STATUS_IGNORE); + MPI_File_read_at(file_handler, (MPI_Offset)(read_offset1 + 3 * sizeof(int)), &boundCondParamStr, 1, boundCondParamType, MPI_STATUS_IGNORE); MPI_Type_contiguous(boundCondParamStr.bcindexmatrixCount, MPI_INT, &bcindexmatrixType); MPI_Type_commit(&bcindexmatrixType); @@ -2550,16 +2615,18 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step) MPI_Offset read_offset = (MPI_Offset)(size * (3 * sizeof(int) + sizeof(boundCondParam))); size_t next_read_offset = 0; - if (size > 1) { - if (rank == 0) { - next_read_offset = read_offset + blocksCount * sizeof(BCAddRestart) + - dataCount * sizeof(BoundaryCondition) + + if (size > 1) + { + if (rank == 0) + { + next_read_offset = read_offset + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition) + (blocksCount * boundCondParamStr.bcindexmatrixCount + dataCount2) * sizeof(int); 
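
The offset arithmetic just above encodes the layout of one rank's slice of cpBC.bin: first the BCAddRestart block descriptors, then the BLOCK_SIZE-padded BoundaryCondition records, then the bcindexmatrix ints of all blocks, and finally the indexContainer ints. A hypothetical helper that mirrors this formula; the record sizes are passed in as parameters, since BCAddRestart and BoundaryCondition are defined elsewhere in MPIIODataStructures.h:

#include <cstddef>

// Byte count of one rank's slice of cpBC.bin, matching the next_read_offset
// formula above: blocksCount descriptors, dataCount (padded) boundary
// condition records, then the bcindexmatrix and indexContainer ints.
size_t bcSliceBytes(size_t blocksCount, size_t dataCount, size_t dataCount2,
                    size_t bcindexmatrixCount, size_t bcAddBytes, size_t bouCondBytes)
{
    return blocksCount * bcAddBytes                      // BCAddRestart descriptors
         + dataCount * bouCondBytes                      // BoundaryCondition records
         + (blocksCount * bcindexmatrixCount + dataCount2) * sizeof(int);
}
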
@@ -2550,16 +2615,18 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
     MPI_Offset read_offset = (MPI_Offset)(size * (3 * sizeof(int) + sizeof(boundCondParam)));
     size_t next_read_offset = 0;

-    if (size > 1) {
-        if (rank == 0) {
-            next_read_offset = read_offset + blocksCount * sizeof(BCAddRestart) +
-                               dataCount * sizeof(BoundaryCondition) +
+    if (size > 1)
+    {
+        if (rank == 0)
+        {
+            next_read_offset = read_offset + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition) +
                                (blocksCount * boundCondParamStr.bcindexmatrixCount + dataCount2) * sizeof(int);
             MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        }
+        else
+        {
             MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_read_offset = read_offset + blocksCount * sizeof(BCAddRestart) +
-                               dataCount * sizeof(BoundaryCondition) +
+            next_read_offset = read_offset + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition) +
                                (blocksCount * boundCondParamStr.bcindexmatrixCount + dataCount2) * sizeof(int);
             if (rank < size - 1)
                 MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
@@ -2567,27 +2634,21 @@
     }

     MPI_File_read_at(file_handler, read_offset, bcAddArray, blocksCount, boundCondTypeAdd, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + blocksCount * sizeof(BCAddRestart)), &bcArray[0],
-                     dataCount1000, boundCondType1000, MPI_STATUS_IGNORE);
-    MPI_File_read_at(
-        file_handler,
-        (MPI_Offset)(read_offset + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition)),
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + blocksCount * sizeof(BCAddRestart)), &bcArray[0], dataCount1000, boundCondType1000, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition)),
                      &intArray1[0], blocksCount, bcindexmatrixType, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler,
-                     (MPI_Offset)(read_offset + blocksCount * sizeof(BCAddRestart) +
-                                  dataCount * sizeof(BoundaryCondition) +
-                                  blocksCount * boundCondParamStr.bcindexmatrixCount * sizeof(int)),
-                     &intArray2[0], dataCount2, MPI_INT, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition) +
+                     blocksCount * boundCondParamStr.bcindexmatrixCount * sizeof(int)), &intArray2[0], dataCount2, MPI_INT, MPI_STATUS_IGNORE);
     MPI_File_close(&file_handler);
     MPI_Type_free(&bcindexmatrixType);

-    if (comm->isRoot()) {
+    if (comm->isRoot())
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::readBoundaryConds time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIORestartCoProcessor::readBoundaryConds start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }

     int index = 0, index1 = 0, index2 = 0;
@@ -2595,16 +2656,19 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
     std::vector<int> bcindexmatrixV;
     std::vector<int> indexContainerV;

-    for (int n = 0; n < blocksCount; n++) {
+    for (int n = 0; n < blocksCount; n++)
+    {
         bcVector.resize(0);
         bcindexmatrixV.resize(0);
         indexContainerV.resize(0);

-        for (int ibc = 0; ibc < bcAddArray[n].boundCond_count; ibc++) {
+        for (int ibc = 0; ibc < bcAddArray[n].boundCond_count; ibc++)
+        {
             SPtr<BoundaryConditions> bc;
             if (memcmp(&bcArray[index], nullBouCond, sizeof(BoundaryCondition)) == 0)
                 bc = SPtr<BoundaryConditions>();
-            else {
+            else
+            {
                 bc = SPtr<BoundaryConditions>(new BoundaryConditions);
                 bc->noslipBoundaryFlags = bcArray[index].noslipBoundaryFlags;
                 bc->slipBoundaryFlags = bcArray[index].slipBoundaryFlags;
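The restore loop above treats an all-zero BoundaryCondition record as "no boundary condition stored for this slot": it keeps one zero-initialized sentinel (nullBouCond) and memcmp-compares every record against it before allocating anything. A reduced sketch of that idiom; Record is a hypothetical POD stand-in for BoundaryCondition:

// Sketch: detect "empty" fixed-size records with a zeroed sentinel and memcmp.
// This relies on the writer having produced empty records from the same
// zero-initialized sentinel, so padding bytes match on both sides.
#include <cstring>
#include <memory>
#include <vector>

struct Record { long flags; double q[27]; };

std::vector<std::shared_ptr<Record>> restore(const Record *raw, int count)
{
    Record sentinel;
    std::memset(&sentinel, 0, sizeof(Record)); // all-zero reference record

    std::vector<std::shared_ptr<Record>> out;
    for (int i = 0; i < count; i++) {
        if (std::memcmp(&raw[i], &sentinel, sizeof(Record)) == 0)
            out.push_back(nullptr); // hole: nothing was stored for this node
        else
            out.push_back(std::make_shared<Record>(raw[i]));
    }
    return out;
}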
@@ -2639,8 +2703,7 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
         for (int b2 = 0; b2 < bcAddArray[n].indexContainer_count; b2++)
             indexContainerV.push_back(intArray2[index2++]);

-        CbArray3D<int, IndexerX3X2X1> bcim(bcindexmatrixV, boundCondParamStr.nx1, boundCondParamStr.nx2,
-                                           boundCondParamStr.nx3);
+        CbArray3D<int, IndexerX3X2X1> bcim(bcindexmatrixV, boundCondParamStr.nx1, boundCondParamStr.nx2, boundCondParamStr.nx3);
         SPtr<Block3D> block = grid->getBlock(bcAddArray[n].x1, bcAddArray[n].x2, bcAddArray[n].x3, bcAddArray[n].level);

         SPtr<BCProcessor> bcProc = bcProcessor->clone(block->getKernel());
@@ -2661,8 +2724,7 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)

     if (comm->isRoot()) {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readBoundaryConds end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 }
 //////////////////////////////////////////////////////////////////////////
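After slicing each block's integers out of the flat read buffer, the code rewraps them as a 3D matrix via CbArray3D with an X3X2X1 indexer. A generic sketch of that flat-to-3D view, assuming (as the indexer name suggests) that x3 varies slowest and x1 fastest; Array3DView is illustrative, the real container lives in basics/container:

// Sketch: view a flat std::vector<int> as an nx1 x nx2 x nx3 array.
#include <cassert>
#include <utility>
#include <vector>

struct Array3DView {
    std::vector<int> data;
    int nx1, nx2, nx3;

    Array3DView(std::vector<int> d, int n1, int n2, int n3)
        : data(std::move(d)), nx1(n1), nx2(n2), nx3(n3)
    {
        assert((int)data.size() == nx1 * nx2 * nx3);
    }
    // x3 slowest, x1 fastest: index = (x3 * nx2 + x2) * nx1 + x1
    int &operator()(int x1, int x2, int x3) { return data[(x3 * nx2 + x2) * nx1 + x1]; }
};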
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h
index cbcf8c553943aa325f415cd123ae1fbe0bf4dcf3..57f559769a06d9a87a968ada73fbaba712da789b 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h
@@ -4,6 +4,7 @@
 #include <mpi.h>
 //#include <PointerDefinitions.h>
 #include <string>
+#include <vector>

 #include "MPIIOCoProcessor.h"
 #include "MPIIODataStructures.h"
@@ -35,8 +36,8 @@ public:
     void writeAverageTripleArray(int step);
     void writeShearStressValArray(int step);
     void writeRelaxationFactor(int step);
-    void writePhaseField(int step);
-    //! Writes the boundary conditions of the blocks into the file cpBC.bin
+    void writePhaseField(int step, int num);
+    //! Writes the boundary conditions of the blocks into the file cpBC.bin
     void writeBoundaryConds(int step);

     //! Reads the blocks of the grid from the file cpBlocks.bin
@@ -49,7 +50,7 @@ public:
     void readAverageTripleArray(int step);
     void readShearStressValArray(int step);
     void readRelaxationFactor(int step);
-    void readPhaseField(int step);
+    void readPhaseField(int step, int num);
     //! Reads the boundary conditions of the blocks from the file cpBC.bin
     void readBoundaryConds(int step);
     //! The function sets LBMKernel
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/WriteMacroscopicQuantitiesCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/WriteMacroscopicQuantitiesCoProcessor.cpp
index 3519c83a529314e1135f3d76e21c3b2c3c3f8cba..e98d6ac874ace46659bc2903b3c67a0f9f93fa24 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/WriteMacroscopicQuantitiesCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/WriteMacroscopicQuantitiesCoProcessor.cpp
@@ -211,33 +211,25 @@ void WriteMacroscopicQuantitiesCoProcessor::addDataMQ(SPtr<Block3D> block)
                     distributions->getDistribution(f, ix1, ix2, ix3);
                     calcMacros(f, rho, vx1, vx2, vx3);
-                    double press = D3Q27System::getPressure(f); // D3Q27System::calcPress(f,rho,vx1,vx2,vx3);
+                    //double press = D3Q27System::getPressure(f); // D3Q27System::calcPress(f,rho,vx1,vx2,vx3);

                     if (UbMath::isNaN(rho) || UbMath::isInfinity(rho))
-                        // UB_THROW( UbException(UB_EXARGS,"rho is not a number (nan or -1.#IND) or infinity number
-                        // -1.#INF in block="+block->toString()+",
-                        // node="+UbSystem::toString(ix1)+","+UbSystem::toString(ix2)+","+UbSystem::toString(ix3)));
-                        rho = 999.0;
-                    if (UbMath::isNaN(press) || UbMath::isInfinity(press))
+                        UB_THROW( UbException(UB_EXARGS,"rho is not a number (nan or -1.#IND) or infinity number -1.#INF in block="+block->toString()+",node="+UbSystem::toString(ix1)+","+UbSystem::toString(ix2)+","+UbSystem::toString(ix3)));
+                    //rho = 999.0;
+                    //if (UbMath::isNaN(press) || UbMath::isInfinity(press))
                         // UB_THROW( UbException(UB_EXARGS,"press is not a number (nan or -1.#IND) or infinity number
                         // -1.#INF in block="+block->toString()+",
                         // node="+UbSystem::toString(ix1)+","+UbSystem::toString(ix2)+","+UbSystem::toString(ix3)));
-                        press = 999.0;
+                    //press = 999.0;

                     if (UbMath::isNaN(vx1) || UbMath::isInfinity(vx1))
-                        // UB_THROW( UbException(UB_EXARGS,"vx1 is not a number (nan or -1.#IND) or infinity number
-                        // -1.#INF in block="+block->toString()+",
-                        // node="+UbSystem::toString(ix1)+","+UbSystem::toString(ix2)+","+UbSystem::toString(ix3)));
-                        vx1 = 999.0;
+                        UB_THROW( UbException(UB_EXARGS,"vx1 is not a number (nan or -1.#IND) or infinity number -1.#INF in block="+block->toString()+", node="+UbSystem::toString(ix1)+","+UbSystem::toString(ix2)+","+UbSystem::toString(ix3)));
+                    //vx1 = 999.0;
                     if (UbMath::isNaN(vx2) || UbMath::isInfinity(vx2))
-                        // UB_THROW( UbException(UB_EXARGS,"vx2 is not a number (nan or -1.#IND) or infinity number
-                        // -1.#INF in block="+block->toString()+",
-                        // node="+UbSystem::toString(ix1)+","+UbSystem::toString(ix2)+","+UbSystem::toString(ix3)));
-                        vx2 = 999.0;
+                        UB_THROW( UbException(UB_EXARGS,"vx2 is not a number (nan or -1.#IND) or infinity number -1.#INF in block="+block->toString()+", node="+UbSystem::toString(ix1)+","+UbSystem::toString(ix2)+","+UbSystem::toString(ix3)));
+                    //vx2 = 999.0;
                     if (UbMath::isNaN(vx3) || UbMath::isInfinity(vx3))
-                        // UB_THROW( UbException(UB_EXARGS,"vx3 is not a number (nan or -1.#IND) or infinity number
-                        // -1.#INF in block="+block->toString()+",
-                        // node="+UbSystem::toString(ix1)+","+UbSystem::toString(ix2)+","+UbSystem::toString(ix3)));
-                        vx3 = 999.0;
+                        UB_THROW( UbException(UB_EXARGS,"vx3 is not a number (nan or -1.#IND) or infinity number -1.#INF in block="+block->toString()+", node="+UbSystem::toString(ix1)+","+UbSystem::toString(ix2)+","+UbSystem::toString(ix3)));
+                    //vx3 = 999.0;

                     data[index++].push_back(rho);
                     data[index++].push_back(vx1);
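The output writer used to paper over invalid nodes by clamping them to 999.0; the hunk above makes any NaN or infinity in rho, vx1, vx2 or vx3 abort with an exception that names the block and node, so a corrupt state surfaces at the first write instead of as odd values in the visualization. A compact standard-library version of the same guard; checkFinite is a sketch, the real code uses UbMath::isNaN/isInfinity and UB_THROW(UbException(...)):

// Sketch: fail fast on non-finite macroscopic values instead of clamping them.
#include <cmath>
#include <sstream>
#include <stdexcept>

inline void checkFinite(double value, const char *name, int ix1, int ix2, int ix3)
{
    if (std::isnan(value) || std::isinf(value)) {
        std::ostringstream msg;
        msg << name << " is not a number or is infinite at node "
            << ix1 << "," << ix2 << "," << ix3;
        throw std::runtime_error(msg.str());
    }
}
// usage at each node, mirroring the hunk above:
//   checkFinite(rho, "rho", ix1, ix2, ix3);
//   checkFinite(vx1, "vx1", ix1, ix2, ix3); // likewise vx2, vx3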
diff --git a/src/cpu/VirtualFluidsCore/Connectors/FullDirectConnector.h b/src/cpu/VirtualFluidsCore/Connectors/FullDirectConnector.h
index 6d8877ee909183dcb4088ccb77f6726e83447ba8..fb04de68aaa4ab360e38ae83c9d47d077c05e59f 100644
--- a/src/cpu/VirtualFluidsCore/Connectors/FullDirectConnector.h
+++ b/src/cpu/VirtualFluidsCore/Connectors/FullDirectConnector.h
@@ -48,9 +48,9 @@ public:
     void sendVectors() override;

 protected:
-    virtual inline void updatePointers() = 0;
+    virtual void updatePointers() = 0;
     void exchangeData();
-    virtual inline void exchangeData(int x1From, int x2From, int x3From, int x1To, int x2To, int x3To) = 0;
+    virtual void exchangeData(int x1From, int x2From, int x3From, int x1To, int x2To, int x3To) = 0;

     int maxX1;
     int maxX2;
diff --git a/src/cpu/VirtualFluidsCore/Connectors/FullVectorConnector.h b/src/cpu/VirtualFluidsCore/Connectors/FullVectorConnector.h
index c2853d4a81bdb3497e235a9115f7cf5260425117..39fc3d1afa3fb958b09d128bd67a5aca42acbc03 100644
--- a/src/cpu/VirtualFluidsCore/Connectors/FullVectorConnector.h
+++ b/src/cpu/VirtualFluidsCore/Connectors/FullVectorConnector.h
@@ -55,11 +55,11 @@ public:
     void distributeReceiveVectors() override;

 protected:
-    virtual inline void updatePointers() = 0;
+    virtual void updatePointers() = 0;
     void fillData();
     void distributeData();
-    virtual inline void fillData(vector_type &sdata, int &index, int x1, int x2, int x3) = 0;
-    virtual inline void distributeData(vector_type &rdata, int &index, int x1, int x2, int x3) = 0;
+    virtual void fillData(vector_type &sdata, int &index, int x1, int x2, int x3) = 0;
+    virtual void distributeData(vector_type &rdata, int &index, int x1, int x2, int x3) = 0;

     int maxX1;
     int maxX2;
diff --git a/src/cpu/VirtualFluidsCore/Data/DataSet3D.h b/src/cpu/VirtualFluidsCore/Data/DataSet3D.h
index b8b541546f3c2cccb49ff09c859b8c97c2e22f63..e53e38a74daea2a2a40ca53eff1aa1f4febcc27a 100644
--- a/src/cpu/VirtualFluidsCore/Data/DataSet3D.h
+++ b/src/cpu/VirtualFluidsCore/Data/DataSet3D.h
@@ -91,23 +91,18 @@ public:
 protected:
 private:
     SPtr<DistributionArray3D> fdistributions;
-
     SPtr<DistributionArray3D> hdistributions;
-    //SPtr<DistributionArray3D> h1distributions;
     SPtr<DistributionArray3D> h2distributions;
-
+ 
     SPtr<AverageValuesArray3D> averageValues;
-
     SPtr<AverageValuesArray3D> averageDensity;
     SPtr<AverageValuesArray3D> averageVelocity;
     SPtr<AverageValuesArray3D> averageFluktuations;
     SPtr<AverageValuesArray3D> averageTriplecorrelations;
-
     SPtr<ShearStressValuesArray3D> shearStressValues;
     SPtr<RelaxationFactorArray3D> relaxationFactor;
-
     SPtr<PhaseFieldArray3D> phaseField;
     SPtr<PhaseFieldArray3D> phaseField2;
 };
diff --git a/src/cpu/VirtualFluidsCore/Interactors/D3Q27TriFaceMeshInteractor.cpp b/src/cpu/VirtualFluidsCore/Interactors/D3Q27TriFaceMeshInteractor.cpp
index e7144a7c6f67d1a9feb8a5883a046420750f1c1b..37c6c47f05215b1d210ef943e639b1cb957ca63d 100644
--- a/src/cpu/VirtualFluidsCore/Interactors/D3Q27TriFaceMeshInteractor.cpp
+++ b/src/cpu/VirtualFluidsCore/Interactors/D3Q27TriFaceMeshInteractor.cpp
@@ -58,7 +58,7 @@ void D3Q27TriFaceMeshInteractor::initInteractor(const double &timeStep)
     setQs(timeStep);
 }
 //////////////////////////////////////////////////////////////////////////
-bool D3Q27TriFaceMeshInteractor::setDifferencesToGbObject3D(const SPtr<Block3D> block/*,const double& orgX1,const double& orgX2,const double& orgX3,const double& blockLengthX1,const double& blockLengthX2,const double& blockLengthX3, const double& timestep*/)
+bool D3Q27TriFaceMeshInteractor::setDifferencesToGbObject3D(const SPtr<Block3D> block)
 {
     if (!block) return false;
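The connector headers above drop `inline` from pure-virtual declarations. `virtual inline void f() = 0;` is legal C++, but `inline` has no effect on a declaration with no body, and calls to these members are dispatched through the vtable anyway, so the keyword was only noise. A two-line illustration of the cleanup:

// Before: "inline" promised nothing - a pure virtual has no body to inline.
struct ConnectorBefore {
    virtual inline void updatePointers() = 0;
};

// After: the declaration says exactly what it means.
struct ConnectorAfter {
    virtual void updatePointers() = 0;
};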
diff --git a/src/cpu/VirtualFluidsCore/Interactors/D3Q27TriFaceMeshInteractor.h b/src/cpu/VirtualFluidsCore/Interactors/D3Q27TriFaceMeshInteractor.h
index ebd707a41ca454cb1e8b22f0020cfd98cadfab36..9ac8bfc48a4fda3612b0781d93496cce723d2cd8 100644
--- a/src/cpu/VirtualFluidsCore/Interactors/D3Q27TriFaceMeshInteractor.h
+++ b/src/cpu/VirtualFluidsCore/Interactors/D3Q27TriFaceMeshInteractor.h
@@ -42,7 +42,7 @@ public:
     void setQs(const double &timeStep);
     void refineBlockGridToLevel(int level, double startDistance, double stopDistance);

-    bool setDifferencesToGbObject3D(const SPtr<Block3D> block/*,const double& orgX1,const double& orgX2,const double& orgX3,const double& blockLengthX1,const double& blockLengthX2,const double& blockLengthX3, const double& timestep=0*/) override;
+    bool setDifferencesToGbObject3D(const SPtr<Block3D> block) override;

     void setRegardPointInObjectTest(bool opt) { this->regardPIOTest = opt; }
diff --git a/src/cpu/VirtualFluidsCore/Interactors/Interactor3D.h b/src/cpu/VirtualFluidsCore/Interactors/Interactor3D.h
index 74627b76addaf6badaea678d1c4a20b274234b3a..9727bf636085c7c0d24a9108acc71925af36e5d1 100644
--- a/src/cpu/VirtualFluidsCore/Interactors/Interactor3D.h
+++ b/src/cpu/VirtualFluidsCore/Interactors/Interactor3D.h
@@ -76,12 +76,7 @@ public:
     SPtr<Grid3D> getGrid3D() const { return grid.lock(); }
     void setGrid3D(SPtr<Grid3D> grid) { this->grid = grid; }
     virtual SPtr<GbObject3D> getGbObject3D() const { return geoObject3D; }
-    virtual bool setDifferencesToGbObject3D(const SPtr<Block3D>)
-    {
-        // UBLOG(logINFO, "Interactor3D::setDifferencesToGbObject3D()");
-        return false;
-    }
-
+    virtual bool setDifferencesToGbObject3D(const SPtr<Block3D>) = 0;
     virtual std::vector<SPtr<Block3D>> &getBcBlocks() { return this->bcBlocks; }
    virtual void removeBcBlocks() { this->bcBlocks.clear(); }
     virtual std::vector<SPtr<Block3D>> &getSolidBlockSet() { return this->solidBlocks; }
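Interactor3D::setDifferencesToGbObject3D loses its silent `return false;` default body above and becomes pure virtual: a concrete interactor that forgets the override is now a compile-time error instead of a quietly inert object at run time. A minimal sketch of the pattern with simplified, illustrative names (std::shared_ptr stands in for the project's SPtr alias):

// Sketch: turning a do-nothing default into a pure virtual forces every
// subclass to state explicitly what it does.
#include <memory>

struct Block3D; // opaque here; a shared_ptr parameter needs no definition

struct Interactor3D {
    virtual ~Interactor3D() = default;
    // was: virtual bool setDifferencesToGbObject3D(std::shared_ptr<Block3D>) { return false; }
    virtual bool setDifferencesToGbObject3D(std::shared_ptr<Block3D> block) = 0;
};

struct TriFaceMeshInteractor : Interactor3D {
    bool setDifferencesToGbObject3D(std::shared_ptr<Block3D> block) override
    {
        if (!block) return false;
        // ... compute wall distances against the triangle mesh ...
        return true;
    }
};
// A subclass without the override is abstract and cannot be instantiated.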
diff --git a/src/cpu/VirtualFluidsCore/LBM/BGKLBMKernel.cpp b/src/cpu/VirtualFluidsCore/LBM/BGKLBMKernel.cpp
index 280562ebfaff7ccaa61055fa7caed0a4cac4d666..20851b019a3a0abd2c8865c7c40530e73bcf6245 100644
--- a/src/cpu/VirtualFluidsCore/LBM/BGKLBMKernel.cpp
+++ b/src/cpu/VirtualFluidsCore/LBM/BGKLBMKernel.cpp
@@ -5,8 +5,9 @@
 #include "D3Q27EsoTwist3DSplittedVector.h"
 #include "D3Q27System.h"
 #include "DataSet3D.h"
+#include "Block3D.h"

-//#define PROOF_CORRECTNESS
+#define PROOF_CORRECTNESS

 //////////////////////////////////////////////////////////////////////////
 BGKLBMKernel::BGKLBMKernel() { this->compressible = false; }
@@ -22,6 +23,7 @@ void BGKLBMKernel::initDataSet()
 SPtr<LBMKernel> BGKLBMKernel::clone()
 {
     SPtr<LBMKernel> kernel(new BGKLBMKernel());
+    kernel->setNX(nx);
     std::dynamic_pointer_cast<BGKLBMKernel>(kernel)->initDataSet();
     kernel->setCollisionFactor(this->collFactor);
     kernel->setBCProcessor(bcProcessor->clone(kernel));
@@ -30,10 +32,12 @@ SPtr<LBMKernel> BGKLBMKernel::clone()
     kernel->setForcingX2(muForcingX2);
     kernel->setForcingX3(muForcingX3);
     kernel->setIndex(ix1, ix2, ix3);
+    kernel->setDeltaT(deltaT);
+    kernel->setBlock(block.lock());

     return kernel;
 }
 //////////////////////////////////////////////////////////////////////////
-void BGKLBMKernel::calculate(int /*step*/)
+void BGKLBMKernel::calculate(int step)
 {
     using namespace D3Q27System;
     using namespace UbMath;
@@ -250,7 +254,10 @@ void BGKLBMKernel::calculate(int /*step*/)
         if (dif > 10.0E-15 || dif < -10.0E-15)
 #endif
         {
-            UB_THROW(UbException(UB_EXARGS, "rho is not correct"));
+            UB_THROW(UbException(UB_EXARGS, "rho="+UbSystem::toString(drho)+", rho_post="+UbSystem::toString(rho_post)
+            +" dif="+UbSystem::toString(dif)
+            +" rho is not correct for node "+UbSystem::toString(x1)+","+UbSystem::toString(x2)+","+UbSystem::toString(x3)
+            +" in " + block.lock()->toString()+" step = "+UbSystem::toString(step)));
         }
 #endif
 //////////////////////////////////////////////////////////////////////////
diff --git a/src/cpu/VirtualFluidsCore/LBM/BGKLBMKernel.h b/src/cpu/VirtualFluidsCore/LBM/BGKLBMKernel.h
index e998267c083c604d6387acf71d2e069315e595c8..9d17a8cc7677db7a142f4340dcdeaf38e268d214 100644
--- a/src/cpu/VirtualFluidsCore/LBM/BGKLBMKernel.h
+++ b/src/cpu/VirtualFluidsCore/LBM/BGKLBMKernel.h
@@ -1,5 +1,5 @@
-#ifndef LBMKERNELETD3Q27BGK_H
-#define LBMKERNELETD3Q27BGK_H
+#ifndef BGKLBMKernel_H
+#define BGKLBMKernel_H

 #include "LBMKernel.h"
 #include "basics/container/CbArray3D.h"
diff --git a/src/cpu/VirtualFluidsCore/Parallel/MPIIODataStructures.h b/src/cpu/VirtualFluidsCore/Parallel/MPIIODataStructures.h
index 8b284fc2f768472a4115c61cd567ce0b37b7f4e9..c8bd2d0797af86858b40a1a29a154107f04e46c8 100644
--- a/src/cpu/VirtualFluidsCore/Parallel/MPIIODataStructures.h
+++ b/src/cpu/VirtualFluidsCore/Parallel/MPIIODataStructures.h
@@ -59,6 +59,9 @@ struct dataSetParam {
 struct DataSetRestart {
     double collFactor;
     double deltaT;
+    double collFactorL; // for Multiphase model
+    double collFactorG; // for Multiphase model
+    double densityRatio;// for Multiphase model
     int x1;
     int x2;
     int x3;
@@ -74,6 +77,9 @@ struct DataSetRestart {
 struct DataSetMigration {
     double collFactor;
     double deltaT;
+    double collFactorL; // for Multiphase model
+    double collFactorG; // for Multiphase model
+    double densityRatio;// for Multiphase model
     int globalID;
     int ghostLayerWidth;
     bool compressible;
@@ -164,7 +170,8 @@ struct DSArraysPresence {
     bool isAverageTripleArrayPresent;
     bool isShearStressValArrayPresent;
     bool isRelaxationFactorPresent;
-    bool isPhaseFieldPresent;
+    bool isPhaseField1Present;
+    bool isPhaseField2Present;
 };
 } // namespace MPIIODataStructures
 #endif
\ No newline at end of file
diff --git a/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.cpp b/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.cpp
index 914659afa685814842904e9622c31b875d6a2207..b66eff480e99102edf332cfd750e0d2b6965ba83 100644
--- a/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.cpp
+++ b/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.cpp
@@ -77,7 +77,7 @@ CheckpointConverter::CheckpointConverter(SPtr<Grid3D> grid, const std::string &p
     //---------------------------------------

     MPI_Datatype typesDataSetRead[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR };
-    int blocksDataSetRead[3] = { 2, 5, 2 };
+    int blocksDataSetRead[3] = { 3, 5, 2 };
     MPI_Aint offsetsDataSetRead[3], lbDataSetRead, extentDataSetRead;

     offsetsDataSetRead[0] = 0;
@@ -358,6 +358,7 @@ void CheckpointConverter::convertDataSet(int step, int procCount)
         dataSetWriteArray[nb].deltaT = dataSetReadArray[nb].deltaT;
         dataSetWriteArray[nb].compressible = dataSetReadArray[nb].compressible;
         dataSetWriteArray[nb].withForcing = dataSetReadArray[nb].withForcing;
+//        dataSetWriteArray[nb].densityRatio = dataSetReadArray[nb].densityRatio;

         write_offset = (MPI_Offset)(3 * sizeof(dataSetParam) + dataSetWriteArray[nb].globalID * sizeofOneDataSet);
         MPI_File_write_at(file_handlerW, write_offset, &dataSetWriteArray[nb], 1, dataSetTypeWrite,
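Enabling PROOF_CORRECTNESS above turns on a per-node mass-conservation check in the BGK kernel: a BGK collision must leave the node density unchanged, so a pre/post difference beyond roughly 1e-15 indicates a programming error, and the enriched exception now reports both densities, the difference, the node, the block, and the time step. A reduced, hedged sketch of the check (proofCorrectness is illustrative; the kernel's own version lives inside the #ifdef block and throws UbException):

// Sketch: a collision step must conserve mass at every node. drho is the
// pre-collision density, f[] holds the 27 post-collision D3Q27 distributions.
#include <cmath>
#include <numeric>
#include <sstream>
#include <stdexcept>

inline void proofCorrectness(const double (&f)[27], double drho, int x1, int x2, int x3, int step)
{
    double rho_post = std::accumulate(f, f + 27, 0.0); // density = sum of distributions
    double dif = drho - rho_post;
    if (std::fabs(dif) > 10.0e-15) {
        std::ostringstream msg;
        msg << "rho=" << drho << ", rho_post=" << rho_post << " dif=" << dif
            << " rho is not correct for node " << x1 << "," << x2 << "," << x3
            << " step = " << step;
        throw std::runtime_error(msg.str());
    }
}

The check costs one extra 27-term sum per node, which is why it sits behind a compile-time switch rather than a runtime flag.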
diff --git a/src/cpu/VirtualFluidsCore/Visitors/BoundaryConditionsBlockVisitor.cpp b/src/cpu/VirtualFluidsCore/Visitors/BoundaryConditionsBlockVisitor.cpp
index b4eafda902448564800bb1479f7ac60f74c4b77d..a6372fc31712899dab0b8edaf919a141663991ca 100644
--- a/src/cpu/VirtualFluidsCore/Visitors/BoundaryConditionsBlockVisitor.cpp
+++ b/src/cpu/VirtualFluidsCore/Visitors/BoundaryConditionsBlockVisitor.cpp
@@ -98,6 +98,7 @@ void BoundaryConditionsBlockVisitor::visit(SPtr<Grid3D> grid, SPtr<Block3D> bloc
     if (bca) {
         bca = bca->clone();
+        bca->setBlock(block);
         bca->setNodeIndex(x1, x2, x3);
         bca->setBcPointer(bcPtr);
         bca->addDistributions(distributions);