diff --git a/CMake/cmake_config_files/CSE01.config.cmake b/CMake/cmake_config_files/CSE01.config.cmake
index baa0f94981c2e9f9ac05b62468311f4bead32ff3..cad3f60ce31edac1069d1edce3fdd43b49a72b6e 100644
--- a/CMake/cmake_config_files/CSE01.config.cmake
+++ b/CMake/cmake_config_files/CSE01.config.cmake
@@ -2,22 +2,22 @@
 #################################################################################
 #  BOOST  
 #################################################################################
-SET(BOOST_VERSION "1.60.0")
-SET(BOOST_ROOT "d:/boost/boost_1_60_0")
-SET(BOOST_DIR ${BOOST_ROOT})
-SET(BOOST_LIBRARYDIR ${BOOST_ROOT}"/stageMSVC64/lib")  
+#SET(BOOST_VERSION "1.60.0")
+#SET(BOOST_ROOT "d:/boost/boost_1_60_0")
+#SET(BOOST_DIR ${BOOST_ROOT})
+#SET(BOOST_LIBRARYDIR ${BOOST_ROOT}"/stageMSVC64/lib")  
 #################################################################################
 
 #################################################################################
 #  METIS  
 #################################################################################
-IF(${USE_METIS})
-  SET(METIS_INCLUDEDIR "d:/metis-5.1.0/include")
-  SET(METIS_DEBUG_LIBRARY "d:/metis-5.1.0/build/libmetis/Debug/metis.lib") 
-  SET(METIS_RELEASE_LIBRARY "d:/metis-5.1.0/build/libmetis/Release/metis.lib") 
-ENDIF()
+#IF(${USE_METIS})
+#  SET(METIS_INCLUDEDIR "d:/metis-5.1.0/include")
+#  SET(METIS_DEBUG_LIBRARY "d:/metis-5.1.0/build/libmetis/Debug/metis.lib") 
+#  SET(METIS_RELEASE_LIBRARY "d:/metis-5.1.0/build/libmetis/Release/metis.lib") 
+#ENDIF()
 #################################################################################
 #  VTK  
 #################################################################################
-set(VTK_DIR "d:/tools/VTK/build/VTK-8.2.0")
+#set(VTK_DIR "d:/tools/VTK/build/VTK-8.2.0")
 #################################################################################
\ No newline at end of file
diff --git a/apps/cpu/Multiphase/Multiphase.cpp b/apps/cpu/Multiphase/Multiphase.cpp
index 03b5334f0fb4bf84eb588f484c4c93537a4df257..bb99f5efdb092fb69121b9025663918cd9807d07 100644
--- a/apps/cpu/Multiphase/Multiphase.cpp
+++ b/apps/cpu/Multiphase/Multiphase.cpp
@@ -70,7 +70,7 @@ void run(string configname)
             }
         }
 
-        // Sleep(30000);
+         //Sleep(30000);
 
         // LBMReal dLB = 0; // = length[1] / dx;
         LBMReal rhoLB = 0.0;
@@ -82,10 +82,10 @@ void run(string configname)
 
         SPtr<LBMKernel> kernel;
 
-        //kernel = SPtr<LBMKernel>(new MultiphaseScratchCumulantLBMKernel());
+        kernel = SPtr<LBMKernel>(new MultiphaseScratchCumulantLBMKernel());
         //kernel = SPtr<LBMKernel>(new MultiphaseCumulantLBMKernel());
-        kernel = SPtr<LBMKernel>(new MultiphaseTwoPhaseFieldsCumulantLBMKernel());
-
+        //kernel = SPtr<LBMKernel>(new MultiphaseTwoPhaseFieldsCumulantLBMKernel());
+       
         kernel->setWithForcing(true);
         kernel->setForcingX1(0.0);
         kernel->setForcingX2(gr);
@@ -108,10 +108,15 @@ void run(string configname)
         //////////////////////////////////////////////////////////////////////////
         // restart
         SPtr<UbScheduler> rSch(new UbScheduler(cpStep, cpStart));
-        // RestartCoProcessor rp(grid, rSch, comm, pathname, RestartCoProcessor::TXT);
-        MPIIORestartCoProcessor rcp(grid, rSch, pathname, comm);
-        rcp.setLBMKernel(kernel);
-        rcp.setBCProcessor(bcProc);
+        //SPtr<MPIIORestartCoProcessor> rcp(new MPIIORestartCoProcessor(grid, rSch, pathname, comm));
+        //SPtr<MPIIOMigrationCoProcessor> rcp(new MPIIOMigrationCoProcessor(grid, rSch, pathname, comm));
+        SPtr<MPIIOMigrationBECoProcessor> rcp(new MPIIOMigrationBECoProcessor(grid, rSch, pathname, comm));
+        rcp->setNu(nuLB);
+        rcp->setNuLG(nuL, nuG);
+        rcp->setDensityRatio(densityRatio);
+
+        rcp->setLBMKernel(kernel);
+        rcp->setBCProcessor(bcProc);
         //////////////////////////////////////////////////////////////////////////
 
         mu::Parser fctF1;
@@ -127,7 +132,7 @@ void run(string configname)
         fctF2.SetExpr("vy1");
         fctF2.DefineConst("vy1", -uLB);
 
-        double startTime = 5;
+        double startTime = 1;
         SPtr<BCAdapter> velBCAdapterF1(new MultiphaseVelocityBCAdapter(false, true, false, fctF1, phiH, 0.0, startTime));
         SPtr<BCAdapter> velBCAdapterF2(new MultiphaseVelocityBCAdapter(false, true, false, fctF2, phiH, startTime, endTime));
 
@@ -267,8 +272,8 @@ void run(string configname)
             // phiL, 0.0, endTime)); BCAdapterPtr velBCAdapterF2_2_init(new MultiphaseVelocityBCAdapter(false, false,
             // true, fctvel_F2_init, phiL, 0.0, endTime));
 
-            //velBCAdapterF1->setBcAlgorithm(SPtr<BCAlgorithm>(new MultiphaseVelocityBCAlgorithm()));
-            velBCAdapterF1->setBcAlgorithm(SPtr<BCAlgorithm>(new VelocityBCAlgorithm()));
+            velBCAdapterF1->setBcAlgorithm(SPtr<BCAlgorithm>(new MultiphaseVelocityBCAlgorithm()));
+            //velBCAdapterF1->setBcAlgorithm(SPtr<BCAlgorithm>(new VelocityBCAlgorithm()));
             // velBCAdapterF2_1_init->setBcAlgorithm(BCAlgorithmPtr(new MultiphaseVelocityBCAlgorithm()));
             // velBCAdapterF2_2_init->setBcAlgorithm(BCAlgorithmPtr(new MultiphaseVelocityBCAlgorithm()));
 
@@ -435,8 +440,8 @@ void run(string configname)
             //SetConnectorsBlockVisitor setConnsVisitor(comm, true, D3Q27System::ENDDIR, nuLB, iProcessor);
             // ConnectorFactoryPtr factory(new Block3DConnectorFactory());
             // ConnectorBlockVisitor setConnsVisitor(comm, nuLB, iProcessor, factory);
-            ThreeDistributionsSetConnectorsBlockVisitor setConnsVisitor(comm);
-            grid->accept(setConnsVisitor);
+            //ThreeDistributionsSetConnectorsBlockVisitor setConnsVisitor(comm);
+            //grid->accept(setConnsVisitor);
 
             // domain decomposition for threads
             // PQueuePartitioningGridVisitor pqPartVisitor(numOfThreads);
@@ -466,7 +471,7 @@ void run(string configname)
                 UBLOG(logINFO, "path = " << pathname);
             }
 
-            rcp.restart((int)restartStep);
+            rcp->restart((int)restartStep);
             grid->setTimeStep(restartStep);
 
             // BCAdapterPtr velBCAdapter(new VelocityBCAdapter());
@@ -487,10 +492,15 @@ void run(string configname)
         
         TwoDistributionsSetConnectorsBlockVisitor setConnsVisitor(comm);
         grid->accept(setConnsVisitor);
+        
+        //ThreeDistributionsSetConnectorsBlockVisitor setConnsVisitor(comm);
+        //grid->accept(setConnsVisitor);
 
         SPtr<UbScheduler> visSch(new UbScheduler(outTime));
         SPtr<WriteMultiphaseQuantitiesCoProcessor> pp(new WriteMultiphaseQuantitiesCoProcessor(
             grid, visSch, pathname, WbWriterVtkXmlBinary::getInstance(), conv, comm));
+        //SPtr<WriteMacroscopicQuantitiesCoProcessor> pp(new WriteMacroscopicQuantitiesCoProcessor(
+        //    grid, visSch, pathname, WbWriterVtkXmlBinary::getInstance(), conv, comm));
 
         SPtr<UbScheduler> nupsSch(new UbScheduler(10, 30, 100));
         SPtr<NUPSCounterCoProcessor> npr(new NUPSCounterCoProcessor(grid, nupsSch, numOfThreads, comm));
@@ -504,7 +514,7 @@ void run(string configname)
         calculator->addCoProcessor(npr);
         calculator->addCoProcessor(pp);
         calculator->addCoProcessor(timeDepBC);
-
+        calculator->addCoProcessor(rcp);
 
         
 
diff --git a/apps/cpu/ViskomatXL/viskomat.cpp b/apps/cpu/ViskomatXL/viskomat.cpp
index 7c73fe35fbd929b9684378618b42ccd0054baa9e..113d9c6da16bff21267bb723d48adb2a3c9d5619 100644
--- a/apps/cpu/ViskomatXL/viskomat.cpp
+++ b/apps/cpu/ViskomatXL/viskomat.cpp
@@ -19,7 +19,7 @@ void bflow(string configname)
       int             numOfThreads = config.getValue<int>("numOfThreads");
       vector<int>     blocknx = config.getVector<int>("blocknx");
       vector<double>  boundingBox = config.getVector<double>("boundingBox");
-      double          nuLB = config.getValue<double>("nuLB");
+      //double          nuLB = config.getValue<double>("nuLB");
       double          endTime = config.getValue<double>("endTime");
       double          outTime = config.getValue<double>("outTime");
       double          availMem = config.getValue<double>("availMem");
@@ -65,12 +65,12 @@ void bflow(string configname)
       LBMReal rhoLB = 0.0;
 
       //akoustic
-       OmegaLB /= scaleFactor;
-       nuLB *=scaleFactor;
-       endTime *= scaleFactor;
-       //outTime = endTime;
-       cpStart = endTime;
-       cpStep  = endTime;
+       //OmegaLB /= scaleFactor;
+       //nuLB *=scaleFactor;
+       //endTime *= scaleFactor;
+       ////outTime = endTime;
+       //cpStart = endTime;
+       //cpStep  = endTime;
 
 //diffusive
       //OmegaLB /= scaleFactor * scaleFactor;
@@ -80,6 +80,16 @@ void bflow(string configname)
       //cpStart = endTime;
       //cpStep = endTime;
 
+      //double Re = 1.38230076758;
+      double N  = 80;                           // rotational speed [rpm]
+      double Omega = 2 * UbMath::PI / 60.0 * N; // angular velocity [rad/s]
+      double mu    = 1;                         // dynamic viscosity [Pa*s]
+      double R     = 0.165 / 2.0;               // rotor radius [m]
+      double rho   = 970;                       // density [kg/m^3]
+      double Re    = Omega * R * R * rho / mu;  // rotational Reynolds number
+
+      double nuLB = OmegaLB * R * 1e3 * R * 1e3 / Re; // lattice viscosity from Re; R*1e3 assumes a lattice spacing of 1 mm
+
       SPtr<LBMUnitConverter> conv = SPtr<LBMUnitConverter>(new LBMUnitConverter());
       // double uWorld = (N * PI) / 30.0; //0.0037699111843
       // double rhoWorld = 2350.0; //kg/m^3
@@ -134,9 +144,9 @@ void bflow(string configname)
       //thix->setOmegaMin(omegaMin);
 
       SPtr<BCAdapter> noSlipBCAdapter(new NoSlipBCAdapter());
-      //noSlipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new NoSlipBCAlgorithm()));
+      noSlipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new NoSlipBCAlgorithm()));
       //noSlipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new RheologyHerschelBulkleyModelNoSlipBCAlgorithm()));
-      noSlipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new RheologyBinghamModelNoSlipBCAlgorithm()));
+      //noSlipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new RheologyBinghamModelNoSlipBCAlgorithm()));
 
       SPtr<BCAdapter> slipBCAdapter(new SlipBCAdapter());
       slipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new SimpleSlipBCAlgorithm()));
@@ -158,10 +168,10 @@ void bflow(string configname)
       fctVz.SetExpr("0.0");
 
       SPtr<BCAdapter> velocityBCAdapter(new VelocityBCAdapter(true, true, true, fctVx, fctVy, fctVz, 0, BCFunction::INFCONST));
-      //velocityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new VelocityBCAlgorithm()));
+      velocityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new VelocityBCAlgorithm()));
       //velocityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new SimpleVelocityBCAlgorithm()));
       //velocityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new VelocityWithDensityBCAlgorithm()));
-      velocityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new RheologyBinghamModelVelocityBCAlgorithm()));
+      //velocityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new RheologyBinghamModelVelocityBCAlgorithm()));
 
       //SPtr<BCAdapter> densityBCAdapter(new DensityBCAdapter());
       //densityBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new NonEqDensityBCAlgorithm()));
@@ -179,8 +189,8 @@ void bflow(string configname)
       bcProc = SPtr<BCProcessor>(new BCProcessor());
 
       //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new CumulantLBMKernel());
-      //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new CompressibleCumulant4thOrderViscosityLBMKernel());
-      SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new RheologyBinghamModelLBMKernel());
+      SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new CompressibleCumulant4thOrderViscosityLBMKernel());
+      //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new RheologyBinghamModelLBMKernel());
       //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new HerschelBulkleyModelLBMKernel());
       //SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new BinghamModelLBMKernel());
       kernel->setBCProcessor(bcProc);
@@ -228,6 +238,9 @@ void bflow(string configname)
       {
          UBLOG(logINFO, "Parameters:");
          //UBLOG(logINFO, "forcing = " << forcing);
+         UBLOG(logINFO, "N = " << N << " rpm");
+         UBLOG(logINFO, "Omega = " << Omega << " rad/s");
+         UBLOG(logINFO, "Re = " << Re);
          UBLOG(logINFO, "rho = " << rhoLB);
          UBLOG(logINFO, "uLB = " << OmegaLB);
          UBLOG(logINFO, "nuLB = " << nuLB);
@@ -384,18 +397,18 @@ void bflow(string configname)
       SPtr<UbScheduler> forceSch(new UbScheduler(100));
       SPtr<CalculateTorqueCoProcessor> fp = make_shared<CalculateTorqueCoProcessor>(grid, forceSch, outputPath + "/torque/TorqueRotor.txt", comm);
       fp->addInteractor(rotorInt);
-      SPtr<CalculateTorqueCoProcessor> fp2 = make_shared<CalculateTorqueCoProcessor>(grid, forceSch, outputPath + "/torque/TorqueStator.txt", comm);
-      fp2->addInteractor(statorInt);
+      //SPtr<CalculateTorqueCoProcessor> fp2 = make_shared<CalculateTorqueCoProcessor>(grid, forceSch, outputPath + "/torque/TorqueStator.txt", comm);
+      //fp2->addInteractor(statorInt);
 
-      SPtr<WriteThixotropyQuantitiesCoProcessor> writeThixotropicMQCoProcessor(new WriteThixotropyQuantitiesCoProcessor(grid, visSch, outputPath, WbWriterVtkXmlBinary::getInstance(), SPtr<LBMUnitConverter>(new LBMUnitConverter()), comm));
+      //SPtr<WriteThixotropyQuantitiesCoProcessor> writeThixotropicMQCoProcessor(new WriteThixotropyQuantitiesCoProcessor(grid, visSch, outputPath, WbWriterVtkXmlBinary::getInstance(), SPtr<LBMUnitConverter>(new LBMUnitConverter()), comm));
 
       SPtr<UbScheduler> stepGhostLayer(new UbScheduler(1));
       SPtr<Calculator> calculator(new BasicCalculator(grid, stepGhostLayer, endTime));
       calculator->addCoProcessor(npr);
       calculator->addCoProcessor(fp);
-      calculator->addCoProcessor(fp2);
+      //calculator->addCoProcessor(fp2);
       calculator->addCoProcessor(writeMQCoProcessor);
-      calculator->addCoProcessor(writeThixotropicMQCoProcessor);
+      //calculator->addCoProcessor(writeThixotropicMQCoProcessor);
       calculator->addCoProcessor(restartCoProcessor);
 
       if (myid == 0) UBLOG(logINFO, "Simulation-start");
diff --git a/src/cpu/VirtualFluidsCore/BoundaryConditions/MultiphaseVelocityBCAlgorithm.cpp b/src/cpu/VirtualFluidsCore/BoundaryConditions/MultiphaseVelocityBCAlgorithm.cpp
index 80fe197007a35120c624730d84da071021ea67b6..609f7130896a59a564a69e5099488fcb03508173 100644
--- a/src/cpu/VirtualFluidsCore/BoundaryConditions/MultiphaseVelocityBCAlgorithm.cpp
+++ b/src/cpu/VirtualFluidsCore/BoundaryConditions/MultiphaseVelocityBCAlgorithm.cpp
@@ -105,7 +105,7 @@ void MultiphaseVelocityBCAlgorithm::applyBC()
    phiBC = bcPtr->getBoundaryPhaseField();
    
    //D3Q27System::calcMultiphaseHeq(htemp, phiBC, vx1, vx2, vx3);
-   D3Q27System::calcMultiphaseHeq(htemp, phiBC, 0.0, 0.0, 0.0);//16.03.2021 dirty hack!
+   D3Q27System::calcMultiphaseHeq(htemp, phiBC, bcPtr->getBoundaryVelocityX1(), bcPtr->getBoundaryVelocityX2(), bcPtr->getBoundaryVelocityX3());//30.03.2021 EQ phase field BC!
    for (int fdir = D3Q27System::STARTF; fdir<=D3Q27System::ENDF; fdir++)
    {
 	   if (bcPtr->hasVelocityBoundaryFlag(fdir))
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp
index 028cb68ca562771b263b801c48c5f3b3791c723f..a3572c8c40ed63144080c1803d728393eaf30547 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp
@@ -70,7 +70,7 @@ MPIIOCoProcessor::MPIIOCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const
 
     //---------------------------------------
 
-    MPI_Type_contiguous(7, MPI_CHAR, &arrayPresenceType);
+    MPI_Type_contiguous(8, MPI_CHAR, &arrayPresenceType);
     MPI_Type_commit(&arrayPresenceType);
 }
 
@@ -378,10 +378,24 @@ void MPIIOCoProcessor::clearAllFiles(int step)
     MPI_File_set_size(file_handler, new_size);
     MPI_File_close(&file_handler);
 
-    std::string filename2 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
-    int rc2 = MPI_File_open(MPI_COMM_WORLD, filename2.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-    if (rc2 != MPI_SUCCESS)
-        throw UbException(UB_EXARGS, "couldn't open file " + filename2);
+    std::string filename21 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin";
+    int rc21 = MPI_File_open(MPI_COMM_WORLD, filename21.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+    if (rc21 != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename21);
+    MPI_File_set_size(file_handler, new_size);
+    MPI_File_close(&file_handler);
+
+    std::string filename22 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin";
+    int rc22 = MPI_File_open(MPI_COMM_WORLD, filename22.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+    if (rc22 != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename22);
+    MPI_File_set_size(file_handler, new_size);
+    MPI_File_close(&file_handler);
+
+    std::string filename23 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
+    int rc23 = MPI_File_open(MPI_COMM_WORLD, filename23.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+    if (rc23 != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename23);
     MPI_File_set_size(file_handler, new_size);
     MPI_File_close(&file_handler);
 
@@ -441,25 +455,20 @@ void MPIIOCoProcessor::clearAllFiles(int step)
     MPI_File_set_size(file_handler, new_size);
     MPI_File_close(&file_handler);
 
-    std::string filename10 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField.bin";
+    std::string filename10 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField1.bin";
     int rc10 = MPI_File_open(MPI_COMM_WORLD, filename10.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
     if (rc10 != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename10);
     MPI_File_set_size(file_handler, new_size);
     MPI_File_close(&file_handler);
 
-
-    /*std::string filename10 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC1.bin";
-    int rc10 = MPI_File_open(MPI_COMM_WORLD, filename10.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info,
-    &file_handler); if (rc10 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename10);
+    std::string filename11 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField2.bin";
+    int rc11 = MPI_File_open(MPI_COMM_WORLD, filename11.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+    if (rc11 != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename11);
     MPI_File_set_size(file_handler, new_size);
     MPI_File_close(&file_handler);
 
-    std::string filename11 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC2.bin";
-    int rc11 = MPI_File_open(MPI_COMM_WORLD, filename11.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info,
-    &file_handler); if (rc11 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename11);
-    MPI_File_set_size(file_handler, new_size);
-    MPI_File_close(&file_handler);*/
 }
 
 void MPIIOCoProcessor::writeCpTimeStep(int step)
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp
index 39b32b6aac7110eb873a52351d46560f8843820e..0e27f03a9ff1f7e1eb905cdb87e6e5cfcb97454c 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp
@@ -25,9 +25,8 @@ using namespace MPIIODataStructures;
 #define MESSAGE_TAG 80
 #define SEND_BLOCK_SIZE 100000
 
-MPIIOMigrationBECoProcessor::MPIIOMigrationBECoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s,
-                                                         const std::string &path, SPtr<Communicator> comm)
-    : MPIIOCoProcessor(grid, s, path, comm), nue(-999.999)
+MPIIOMigrationBECoProcessor::MPIIOMigrationBECoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, SPtr<Communicator> comm)
+    : MPIIOCoProcessor(grid, s, path, comm), nue(-999.999), nuL(-999.999), nuG(-999.999), densityRatio(-999.999)
 {
     memset(&boundCondParamStr, 0, sizeof(boundCondParamStr));
 
@@ -107,44 +106,60 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
 
     dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3;
     int firstGlobalID;
-    std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
+    std::vector<double> doubleValuesArrayF; // double-values (arrays of f's) in all blocks  Fdistribution
+    std::vector<double> doubleValuesArrayH1; // double-values (arrays of f's) in all blocks  H1distribution
+    // std::vector<double> doubleValuesArrayH2; // double-values (arrays of f's) in all blocks  H2distribution
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeDataSet start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
+    bool multiPhase = false;
     DSArraysPresence arrPresence;
     bool firstBlock        = true;
     int doubleCountInBlock = 0;
     int ic                 = 0;
-    SPtr<D3Q27EsoTwist3DSplittedVector> D3Q27EsoTwist3DSplittedVectorPtrF, D3Q27EsoTwist3DSplittedVectorPtrH;
-    CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributionsF, localDistributionsH;
-    CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributionsF, nonLocalDistributionsH;
-    CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributionsF, zeroDistributionsH;
+    SPtr<D3Q27EsoTwist3DSplittedVector> D3Q27EsoTwist3DSplittedVectorPtrF = 0, D3Q27EsoTwist3DSplittedVectorPtrH1 = 0, D3Q27EsoTwist3DSplittedVectorPtrH2 = 0;
+    CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributionsF = 0, localDistributionsH1 = 0, localDistributionsH2 = 0;
+    CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributionsF = 0, nonLocalDistributionsH1 = 0, nonLocalDistributionsH2 = 0;
+    CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributionsF = 0, zeroDistributionsH1 = 0, zeroDistributionsH2 = 0;
     
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
-            D3Q27EsoTwist3DSplittedVectorPtrF = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(
-                block->getKernel()->getDataSet()->getFdistributions());
+            D3Q27EsoTwist3DSplittedVectorPtrF = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getFdistributions());
             localDistributionsF    = D3Q27EsoTwist3DSplittedVectorPtrF->getLocalDistributions();
             nonLocalDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getNonLocalDistributions();
             zeroDistributionsF     = D3Q27EsoTwist3DSplittedVectorPtrF->getZeroDistributions();
  
-            D3Q27EsoTwist3DSplittedVectorPtrH = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(
-                block->getKernel()->getDataSet()->getHdistributions());
-            localDistributionsH    = D3Q27EsoTwist3DSplittedVectorPtrH->getLocalDistributions();
-            nonLocalDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getNonLocalDistributions();
-            zeroDistributionsH     = D3Q27EsoTwist3DSplittedVectorPtrH->getZeroDistributions();
+            D3Q27EsoTwist3DSplittedVectorPtrH1 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getHdistributions());
+            if (D3Q27EsoTwist3DSplittedVectorPtrH1 != 0)
+            {
+                multiPhase = true;
+                localDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getLocalDistributions();
+                nonLocalDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getNonLocalDistributions();
+                zeroDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getZeroDistributions();
+            }
+
+            /*D3Q27EsoTwist3DSplittedVectorPtrH2 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getH2distributions());
+            if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
+            {
+                localDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getLocalDistributions();
+                nonLocalDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getNonLocalDistributions();
+                zeroDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getZeroDistributions();
+            }*/
+
 
             if (firstBlock) // && block->getKernel()) // when first (any) valid block...
             {
@@ -171,94 +186,92 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
                 }
 
                 // ... than save some parameters that are equal in all blocks
-                dataSetParamStr1.nx1 = dataSetParamStr2.nx1 = dataSetParamStr3.nx1 =
-                    static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX1());
-                dataSetParamStr1.nx2 = dataSetParamStr2.nx2 = dataSetParamStr3.nx2 =
-                    static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX2());
-                dataSetParamStr1.nx3 = dataSetParamStr2.nx3 = dataSetParamStr3.nx3 =
-                    static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX3());
-
-             //  Fdistributions + Hdistributions
-                doubleCountInBlock =
-                    (dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
+                dataSetParamStr1.nx1 = dataSetParamStr2.nx1 = dataSetParamStr3.nx1 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX1());
+                dataSetParamStr1.nx2 = dataSetParamStr2.nx2 = dataSetParamStr3.nx2 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX2());
+                dataSetParamStr1.nx3 = dataSetParamStr2.nx3 = dataSetParamStr3.nx3 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX3());
+
+                doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
                     dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] +
-                    dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]) * 2;
+                    dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray =
-                    block->getKernel()->getDataSet()->getAverageDensity();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray = block->getKernel()->getDataSet()->getAverageDensity();
                 if (averageDensityArray)
                     arrPresence.isAverageDensityArrayPresent = true;
                 else
                     arrPresence.isAverageDensityArrayPresent = false;
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr =
-                    block->getKernel()->getDataSet()->getAverageVelocity();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr = block->getKernel()->getDataSet()->getAverageVelocity();
                 if (AverageVelocityArray3DPtr)
                     arrPresence.isAverageVelocityArrayPresent = true;
                 else
                     arrPresence.isAverageVelocityArrayPresent = false;
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr =
-                    block->getKernel()->getDataSet()->getAverageFluctuations();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr = block->getKernel()->getDataSet()->getAverageFluctuations();
                 if (AverageFluctArray3DPtr)
                     arrPresence.isAverageFluktuationsArrayPresent = true;
                 else
                     arrPresence.isAverageFluktuationsArrayPresent = false;
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr =
-                    block->getKernel()->getDataSet()->getAverageTriplecorrelations();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr = block->getKernel()->getDataSet()->getAverageTriplecorrelations();
                 if (AverageTripleArray3DPtr)
                     arrPresence.isAverageTripleArrayPresent = true;
                 else
                     arrPresence.isAverageTripleArrayPresent = false;
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ShearStressValArray3DPtr =
-                    block->getKernel()->getDataSet()->getShearStressValues();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ShearStressValArray3DPtr = block->getKernel()->getDataSet()->getShearStressValues();
                 if (ShearStressValArray3DPtr)
                     arrPresence.isShearStressValArrayPresent = true;
                 else
                     arrPresence.isShearStressValArrayPresent = false;
 
-                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> relaxationFactor3DPtr =
-                    block->getKernel()->getDataSet()->getRelaxationFactor();
+                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> relaxationFactor3DPtr = block->getKernel()->getDataSet()->getRelaxationFactor();
                 if (relaxationFactor3DPtr)
                     arrPresence.isRelaxationFactorPresent = true;
                 else
                     arrPresence.isRelaxationFactorPresent = false;
 
-                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr =
-                    block->getKernel()->getDataSet()->getPhaseField();
-                if (phaseField3DPtr)
-                    arrPresence.isPhaseFieldPresent = true;
+                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr1 = block->getKernel()->getDataSet()->getPhaseField();
+                if (phaseField3DPtr1)
+                    arrPresence.isPhaseField1Present = true;
                 else
-                    arrPresence.isPhaseFieldPresent = false;
+                    arrPresence.isPhaseField1Present = false;
+
+                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr2 = block->getKernel()->getDataSet()->getPhaseField2();
+                if (phaseField3DPtr2)
+                    arrPresence.isPhaseField2Present = true;
+                else
+                    arrPresence.isPhaseField2Present = false;
+
 
                 firstBlock = false;
             }
 
-            if (localDistributionsF && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) &&
-                (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), localDistributionsF->getDataVector().begin(),
-                                         localDistributionsF->getDataVector().end());
-            if (nonLocalDistributionsF && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) &&
-                (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), nonLocalDistributionsF->getDataVector().begin(),
-                                         nonLocalDistributionsF->getDataVector().end());
+            if (localDistributionsF && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
+                doubleValuesArrayF.insert(doubleValuesArrayF.end(), localDistributionsF->getDataVector().begin(), localDistributionsF->getDataVector().end());
+            if (nonLocalDistributionsF && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
+                doubleValuesArrayF.insert(doubleValuesArrayF.end(), nonLocalDistributionsF->getDataVector().begin(), nonLocalDistributionsF->getDataVector().end());
             if (zeroDistributionsF && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), zeroDistributionsF->getDataVector().begin(),
-                                         zeroDistributionsF->getDataVector().end());
-
-            if (localDistributionsH && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) &&
-                (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), localDistributionsH->getDataVector().begin(),
-                                         localDistributionsH->getDataVector().end());
-            if (nonLocalDistributionsH && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) &&
-                (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), nonLocalDistributionsH->getDataVector().begin(),
-                                         nonLocalDistributionsH->getDataVector().end());
-            if (zeroDistributionsH && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), zeroDistributionsH->getDataVector().begin(),
-                                         zeroDistributionsH->getDataVector().end());
+                doubleValuesArrayF.insert(doubleValuesArrayF.end(), zeroDistributionsF->getDataVector().begin(), zeroDistributionsF->getDataVector().end());
+
+            if (multiPhase)
+            {
+                if (localDistributionsH1 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
+                    doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), localDistributionsH1->getDataVector().begin(), localDistributionsH1->getDataVector().end());
+                if (nonLocalDistributionsH1 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
+                    doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), nonLocalDistributionsH1->getDataVector().begin(), nonLocalDistributionsH1->getDataVector().end());
+                if (zeroDistributionsH1 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
+                    doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), zeroDistributionsH1->getDataVector().begin(), zeroDistributionsH1->getDataVector().end());
+            }
+
+            /*if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
+            {
+                if (localDistributionsH2 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
+                doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), localDistributionsH2->getDataVector().begin(), localDistributionsH2->getDataVector().end());
+                if (nonLocalDistributionsH2 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
+                doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), nonLocalDistributionsH2->getDataVector().begin(), nonLocalDistributionsH2->getDataVector().end());
+                if (zeroDistributionsH2 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
+                doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), zeroDistributionsH2->getDataVector().begin(), zeroDistributionsH2->getDataVector().end());
+            }*/
 
             ic++;
         }
@@ -267,10 +280,10 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
     MPI_Type_contiguous(doubleCountInBlock , MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeDataSet start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     double start, finish;
@@ -287,28 +300,57 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
 
     // write to the file
     MPI_File file_handler;
-    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
+    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin";
     int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
-    MPI_Offset write_offset = (MPI_Offset)(3 * sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) *
-                                                                           (MPI_Offset)(doubleCountInBlock) *
-                                                                           (MPI_Offset)(sizeof(double));
+    MPI_Offset write_offset = (MPI_Offset)(3 * sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
 
     MPI_File_write_at(file_handler, (MPI_Offset)0, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE);
-    MPI_File_write_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType,
-                      MPI_STATUS_IGNORE);
-    MPI_File_write_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType,
-                      MPI_STATUS_IGNORE);
-    MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount, dataSetDoubleType,
-                      MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayF[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
+
+    //-------------------------------- H1 ------------------------------------------------
+    if (multiPhase)
+    {
+        filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin";
+        rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+        if (rc != MPI_SUCCESS)
+            throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+        write_offset = (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
+        MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH1[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+
+        MPI_File_sync(file_handler);
+        MPI_File_close(&file_handler);
+    }
+
+    //-------------------------------- H2 --------------------------------------------------
+    /*if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
+    {
+        filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
+        rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+        if (rc != MPI_SUCCESS)
+            throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+        write_offset = (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
+        MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH2[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+
+        MPI_File_sync(file_handler);
+        MPI_File_close(&file_handler);
+    }    */
+
+    //--------------------------------
+
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeDataSet time: " << finish - start << " s");
     }
@@ -346,9 +388,12 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
         write3DArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin"));
     // writeRelaxationFactor(step);
 
-    if (arrPresence.isPhaseFieldPresent)
-        write3DArray(step, PhaseField, std::string("/cpPhaseField.bin"));
-}
+    if (arrPresence.isPhaseField1Present)
+        write3DArray(step, PhaseField1, std::string("/cpPhaseField1.bin"));
+
+    if (arrPresence.isPhaseField2Present)
+        write3DArray(step, PhaseField2, std::string("/cpPhaseField2.bin"));
+}
 
 void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std::string fname)
 {
@@ -361,7 +406,8 @@ void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std::
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -374,16 +420,18 @@ void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std::
     int ic                 = 0;
     SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___Array;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write4DArray start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
-            switch (arrayType) {
+            switch (arrayType) 
+            {
                 case AverageDensity:
                     ___Array = block->getKernel()->getDataSet()->getAverageDensity();
                     break;
@@ -400,8 +448,7 @@ void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std::
                     ___Array = block->getKernel()->getDataSet()->getShearStressValues();
                     break;
                 default:
-                    UB_THROW(UbException(UB_EXARGS,
-                                         "MPIIOMigrationBECoProcessor::write4DArray : 4D array type does not exist!"));
+                    UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationBECoProcessor::write4DArray : 4D array type does not exist!"));
                     break;
             }
 
@@ -410,29 +457,26 @@ void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std::
                 firstGlobalID = block->getGlobalID();
 
                 dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-                dataSetParamStr.nx[0]                                           = static_cast<int>(___Array->getNX1());
-                dataSetParamStr.nx[1]                                           = static_cast<int>(___Array->getNX2());
-                dataSetParamStr.nx[2]                                           = static_cast<int>(___Array->getNX3());
-                dataSetParamStr.nx[3]                                           = static_cast<int>(___Array->getNX4());
-                doubleCountInBlock =
-                    dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+                dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1());
+                dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2());
+                dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3());
+                dataSetParamStr.nx[3] = static_cast<int>(___Array->getNX4());
+                doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
                 firstBlock = false;
             }
 
-            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) &&
-                (dataSetParamStr.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(),
-                                         ___Array->getDataVector().end());
+            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0))
+                doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end());
 
             ic++;
         }
     }
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write4DArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     // register new MPI-type depending on the block-specific information
@@ -451,20 +495,18 @@ void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std::
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
-    MPI_Offset write_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) *
-                                                                       (MPI_Offset)(doubleCountInBlock) *
-                                                                       (MPI_Offset)(sizeof(double));
+    MPI_Offset write_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
 
     // each process writes common parameters of a dataSet
     MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-    MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount, dataSetDoubleType,
-                      MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write4DArray time: " << finish - start << " s");
     }
@@ -481,7 +523,8 @@ void MPIIOMigrationBECoProcessor::write3DArray(int step, Arrays arrayType, std::
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -494,25 +537,30 @@ void MPIIOMigrationBECoProcessor::write3DArray(int step, Arrays arrayType, std::
     int ic                 = 0;
     SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___Array;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write3DArray start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
-            switch (arrayType) {
+            switch (arrayType) 
+            {
                 case RelaxationFactor:
                     ___Array = block->getKernel()->getDataSet()->getRelaxationFactor();
                     break;
-                case PhaseField:
+                case PhaseField1:
                     ___Array = block->getKernel()->getDataSet()->getPhaseField();
                     break;
+                case PhaseField2:
+                    ___Array = block->getKernel()->getDataSet()->getPhaseField2();
+                    break;
                 default:
                     UB_THROW(UbException(UB_EXARGS,
-                                         "MPIIOMigrationBECoProcessor::write3DArray : 3D array type does not exist!"));
+                    "MPIIOMigrationBECoProcessor::write3DArray : 3D array type does not exist!"));
                     break;
             }
 
@@ -531,17 +579,16 @@ void MPIIOMigrationBECoProcessor::write3DArray(int step, Arrays arrayType, std::
             }
 
             if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(),
-                                         ___Array->getDataVector().end());
+                doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end());
 
             ic++;
         }
     }
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write3DArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     // register new MPI-type depending on the block-specific information
@@ -560,37 +607,33 @@ void MPIIOMigrationBECoProcessor::write3DArray(int step, Arrays arrayType, std::
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
-    MPI_Offset write_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) *
-                                                                       (MPI_Offset)(doubleCountInBlock) *
-                                                                       (MPI_Offset)(sizeof(double));
+    MPI_Offset write_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
 
     // each process writes common parameters of a dataSet
     MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-    MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount, dataSetDoubleType,
-                      MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write3DArray time: " << finish - start << " s");
     }
 }
 
-//---------------------------------------------------------------------------------
-
 void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step)
 {
     int rank, size;
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot())
+    {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBoundaryConds start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     int blocksCount          = 0; // quantity of blocks, that belong to this process
@@ -601,7 +644,8 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -616,24 +660,26 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step)
     int ic                         = 0;
     SPtr<BCArray3D> bcArr;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) // all the blocks of the current level
         {
             bcArr = block->getKernel()->getBCProcessor()->getBCArray();
 
-            bcAddArray[ic].globalID =
-                block->getGlobalID();                // id of the block needed to find it while regenerating the grid
+            bcAddArray[ic].globalID = block->getGlobalID();                // id of the block needed to find it while regenerating the grid
             bcAddArray[ic].boundCond_count      = 0; // how many BoundaryConditions in this block
             bcAddArray[ic].indexContainer_count = 0; // how many indexContainer-values in this block
             bytesCount[ic]                      = sizeof(BCAddMigration);
             bcVector[ic].resize(0);
             indexContainerVector[ic].resize(0);
 
-            for (std::size_t bc = 0; bc < bcArr->getBCVectorSize(); bc++) {
+            for (std::size_t bc = 0; bc < bcArr->getBCVectorSize(); bc++) 
+            {
                 BoundaryCondition *bouCond = new BoundaryCondition();
                 if (bcArr->bcvector[bc] == NULL)
                     memset(bouCond, 0, sizeof(BoundaryCondition));
-                else {
+                else 
+                {
                     bouCond->noslipBoundaryFlags    = bcArr->bcvector[bc]->getNoSlipBoundary();
                     bouCond->slipBoundaryFlags      = bcArr->bcvector[bc]->getSlipBoundary();
                     bouCond->velocityBoundaryFlags  = bcArr->bcvector[bc]->getVelocityBoundary();
@@ -662,7 +708,8 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step)
                 bytesCount[ic] += sizeof(BoundaryCondition);
             }
 
-            if (bcindexmatrixCountNotInit) {
+            if (bcindexmatrixCountNotInit)
+            {
                 boundCondParamStr.nx1                = static_cast<int>(bcArr->bcindexmatrix.getNX1());
                 boundCondParamStr.nx2                = static_cast<int>(bcArr->bcindexmatrix.getNX2());
                 boundCondParamStr.nx3                = static_cast<int>(bcArr->bcindexmatrix.getNX3());
@@ -670,11 +717,9 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step)
                 bcindexmatrixCountNotInit            = false;
             }
 
-            bcindexmatrixVector.insert(bcindexmatrixVector.end(), bcArr->bcindexmatrix.getDataVector().begin(),
-                                       bcArr->bcindexmatrix.getDataVector().end());
+            bcindexmatrixVector.insert(bcindexmatrixVector.end(), bcArr->bcindexmatrix.getDataVector().begin(), bcArr->bcindexmatrix.getDataVector().end());
 
-            indexContainerVector[ic].insert(indexContainerVector[ic].begin(), bcArr->indexContainer.begin(),
-                                            bcArr->indexContainer.end());
+            indexContainerVector[ic].insert(indexContainerVector[ic].begin(), bcArr->indexContainer.begin(), bcArr->indexContainer.end());
             bcAddArray[ic].indexContainer_count = static_cast<int>(bcArr->indexContainer.size());
             count_indexContainer += bcAddArray[ic].indexContainer_count;
             bytesCount[ic] += bcAddArray[ic].indexContainer_count * sizeof(int);
@@ -688,10 +733,10 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step)
     MPI_Type_contiguous(boundCondParamStr.bcindexmatrixCount, MPI_INT, &bcindexmatrixType);
     MPI_Type_commit(&bcindexmatrixType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBoundaryConds start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     double start, finish;
@@ -712,13 +757,10 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step)
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
-    MPI_Offset write_offset = (MPI_Offset)(sizeof(int)) + (MPI_Offset)(bcAddArray[0].globalID) *
-                                                              (MPI_Offset)(boundCondParamStr.bcindexmatrixCount) *
-                                                              (MPI_Offset)(sizeof(int));
+    MPI_Offset write_offset = (MPI_Offset)(sizeof(int)) + (MPI_Offset)(bcAddArray[0].globalID) * (MPI_Offset)(boundCondParamStr.bcindexmatrixCount) * (MPI_Offset)(sizeof(int));
 
     MPI_File_write_at(file_handler, 0, &boundCondParamStr.bcindexmatrixCount, 1, MPI_INT, MPI_STATUS_IGNORE);
-    MPI_File_write_at(file_handler, write_offset, &bcindexmatrixVector[0], blocksCount, bcindexmatrixType,
-                      MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, write_offset, &bcindexmatrixVector[0], blocksCount, bcindexmatrixType, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
@@ -732,14 +774,17 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step)
 
     MPI_File_write_at(file_handler, 0, &boundCondParamStr, 4, MPI_INT, MPI_STATUS_IGNORE);
 
-    write_offset =
-        (MPI_Offset)(sizeof(boundCondParam)) + (MPI_Offset)(grid->getNumberOfBlocks()) * (MPI_Offset)(sizeof(size_t));
+    write_offset = (MPI_Offset)(sizeof(boundCondParam)) + (MPI_Offset)(grid->getNumberOfBlocks()) * (MPI_Offset)(sizeof(size_t));
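+    // Ranks append their variable-length records one after another: each rank receives the end offset of the previous rank and forwards its own end offset to the next one.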
     size_t next_file_offset = 0;
-    if (size > 1) {
-        if (rank == 0) {
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
             next_file_offset = write_offset + allBytesCount;
             MPI_Send(&next_file_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
             next_file_offset = write_offset + allBytesCount;
             if (rank < size - 1)
@@ -749,21 +794,17 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step)
 
     MPI_Offset write_offsetIndex;
 
-    for (int nb = 0; nb < blocksCount; nb++) {
-        write_offsetIndex =
-            (MPI_Offset)(sizeof(boundCondParam)) + (MPI_Offset)(bcAddArray[nb].globalID) * (MPI_Offset)(sizeof(size_t));
+    for (int nb = 0; nb < blocksCount; nb++) 
+    {
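+        // After the boundCondParam header the file holds a fixed offset table indexed by globalID; store this block's data offset there, then append its BCAdd header, boundary conditions and index-container values at that offset.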
+        write_offsetIndex = (MPI_Offset)(sizeof(boundCondParam)) + (MPI_Offset)(bcAddArray[nb].globalID) * (MPI_Offset)(sizeof(size_t));
         MPI_File_write_at(file_handler, write_offsetIndex, &write_offset, 1, MPI_LONG_LONG_INT, MPI_STATUS_IGNORE);
 
         MPI_File_write_at(file_handler, write_offset, &bcAddArray[nb], 3, MPI_INT, MPI_STATUS_IGNORE);
         if (bcVector[nb].size() > 0)
-            MPI_File_write_at(file_handler, write_offset + (MPI_Offset)(sizeof(BCAddMigration)), &bcVector[nb][0],
-                              bcAddArray[nb].boundCond_count, boundCondType, MPI_STATUS_IGNORE);
+            MPI_File_write_at(file_handler, write_offset + (MPI_Offset)(sizeof(BCAddMigration)), &bcVector[nb][0], bcAddArray[nb].boundCond_count, boundCondType, MPI_STATUS_IGNORE);
 
         if (indexContainerVector[nb].size() > 0)
-            MPI_File_write_at(
-                file_handler,
-                write_offset + (MPI_Offset)(sizeof(BCAddMigration)) +
-                    (MPI_Offset)(bcAddArray[nb].boundCond_count) * (MPI_Offset)(sizeof(BoundaryCondition)),
+            MPI_File_write_at(file_handler, write_offset + (MPI_Offset)(sizeof(BCAddMigration)) + (MPI_Offset)(bcAddArray[nb].boundCond_count) * (MPI_Offset)(sizeof(BoundaryCondition)),
                 &indexContainerVector[nb][0], bcAddArray[nb].indexContainer_count, MPI_INT, MPI_STATUS_IGNORE);
 
         write_offset += bytesCount[nb];
@@ -772,7 +813,8 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step)
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBoundaryConds time: " << finish - start << " s");
     }
@@ -792,8 +834,7 @@ void MPIIOMigrationBECoProcessor::restart(int step)
         UBLOG(logINFO, "Load check point - start");
 
     readBlocks(step);
-    SPtr<Grid3DVisitor> newMetisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased,
-                                                                         D3Q27System::BSW, MetisPartitioner::KWAY));
+    SPtr<Grid3DVisitor> newMetisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::BSW, MetisPartitioner::KWAY));
     grid->accept(newMetisVisitor);
 
     readDataSet(step);
@@ -806,8 +847,7 @@ void MPIIOMigrationBECoProcessor::restart(int step)
 
 void MPIIOMigrationBECoProcessor::readBlocks(int step) { MPIIOCoProcessor::readBlocks(step); }
 
-void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, int doubleCountInBlock,
-                                                 std::vector<double> &pV, std::vector<double> *rawDataReceive)
+void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, int doubleCountInBlock, std::vector<double> &pV, std::vector<double> *rawDataReceive)
 {
     int rank, size;
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
@@ -821,7 +861,8 @@ void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, i
     int *blocksCounterRec  = new int[size];
 
     std::vector<double> *rawDataSend = new std::vector<double>[size];
-    for (int r = 0; r < size; r++) {
+    for (int r = 0; r < size; r++) 
+    {
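+        // blocksCounterSend[r] / blocksCounterRec[r]: number of blocks this rank will send to / receive from rank r.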
         rawDataSend[r].resize(0);
         blocksCounterSend[r] = 0;
         blocksCounterRec[r]  = 0;
@@ -842,8 +883,7 @@ void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, i
         {
             blocksCounterRec[tempRank]++;
             rawDataReceive[tempRank].push_back(double(indexB + ind));
-            rawDataReceive[tempRank].insert(rawDataReceive[tempRank].end(),
-                                            pV.begin() + ind * size_t(doubleCountInBlock),
+            rawDataReceive[tempRank].insert(rawDataReceive[tempRank].end(), pV.begin() + ind * size_t(doubleCountInBlock),
                                             pV.begin() + ind * size_t(doubleCountInBlock) + size_t(doubleCountInBlock));
         } else // we must send data to other processes
         {
@@ -856,17 +896,20 @@ void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, i
 
     MPI_Request *requests = new MPI_Request[size * 2]; // send + receive
     int requestCount      = 0;
-    //   MPI_Status status;
 
-    for (int r = 0; r < size; r++) {
-        if (r != rank) {
+    for (int r = 0; r < size; r++) 
+    {
+        if (r != rank) 
+        {
             MPI_Irecv(&blocksCounterRec[r], 1, MPI_INT, r, tagN, MPI_COMM_WORLD, &requests[requestCount]);
             requestCount++;
         }
     }
 
-    for (int r = 0; r < size; r++) {
-        if (r != rank) {
+    for (int r = 0; r < size; r++) 
+    {
+        if (r != rank) 
+        {
             MPI_Isend(&blocksCounterSend[r], 1, MPI_INT, r, tagN, MPI_COMM_WORLD, &requests[requestCount]);
             requestCount++;
         }
@@ -877,7 +920,8 @@ void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, i
     MPI_Type_contiguous(doubleCountInBlock + 1, MPI_DOUBLE, &sendBlockDoubleType);
     MPI_Type_commit(&sendBlockDoubleType);
 
-    for (int r = 0; r < size; r++) {
+    for (int r = 0; r < size; r++) 
+    {
         if (r != rank)
             rawDataReceive[r].resize(size_t(blocksCounterRec[r]) * size_t(doubleCountInBlock + 1));
     }
@@ -888,35 +932,39 @@ void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, i
     const int maxQuant   = 400;
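+    // The payload exchange is split into chunks of at most maxQuant blocks per MPI message.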
     int restQuant;
 
-    for (int r = 0; r < size; r++) {
-        if (r != rank) {
+    for (int r = 0; r < size; r++) 
+    {
+        if (r != rank) 
+        {
             sendRecCount = int(blocksCounterRec[r] / maxQuant);
             if (sendRecCount * maxQuant < blocksCounterRec[r])
                 sendRecCount++;
             requests = (MPI_Request *)realloc(requests, (requestCount + sendRecCount) * sizeof(MPI_Request));
 
-            for (int sc = 0; sc < sendRecCount; sc++) {
+            for (int sc = 0; sc < sendRecCount; sc++)
+            {
                 restQuant     = (sc < sendRecCount - 1) ? maxQuant : blocksCounterRec[r] - sc * maxQuant;
                 sendRecOffset = size_t(sc) * size_t(maxQuant) * size_t((doubleCountInBlock + 1));
-                MPI_Irecv(&rawDataReceive[r][sendRecOffset], restQuant, sendBlockDoubleType, r, tagN, MPI_COMM_WORLD,
-                          &requests[requestCount]);
+                MPI_Irecv(&rawDataReceive[r][sendRecOffset], restQuant, sendBlockDoubleType, r, tagN, MPI_COMM_WORLD, &requests[requestCount]);
                 requestCount++;
             }
         }
     }
 
-    for (int r = 0; r < size; r++) {
-        if (r != rank) {
+    for (int r = 0; r < size; r++) 
+    {
+        if (r != rank) 
+        {
             sendRecCount = int(blocksCounterSend[r] / maxQuant);
             if (sendRecCount * maxQuant < blocksCounterSend[r])
                 sendRecCount++;
             requests = (MPI_Request *)realloc(requests, (requestCount + sendRecCount) * sizeof(MPI_Request));
 
-            for (int sc = 0; sc < sendRecCount; sc++) {
+            for (int sc = 0; sc < sendRecCount; sc++) 
+            {
                 restQuant     = (sc < sendRecCount - 1) ? maxQuant : blocksCounterSend[r] - sc * maxQuant;
                 sendRecOffset = size_t(sc) * size_t(maxQuant) * size_t((doubleCountInBlock + 1));
-                MPI_Isend(&rawDataSend[r][sendRecOffset], restQuant, sendBlockDoubleType, r, tagN, MPI_COMM_WORLD,
-                          &requests[requestCount]);
+                MPI_Isend(&rawDataSend[r][sendRecOffset], restQuant, sendBlockDoubleType, r, tagN, MPI_COMM_WORLD, &requests[requestCount]);
                 requestCount++;
             }
         }
@@ -942,13 +990,20 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
         UB_THROW(UbException(UB_EXARGS, "bcProcessor does not exist!"));
     if (nue == -999.999)
         UB_THROW(UbException(UB_EXARGS, "nue is not initialised!"));
+    if (nuL == -999.999)
+        UB_THROW(UbException(UB_EXARGS, "nuL is not initialised!"));
+    if (nuG == -999.999)
+        UB_THROW(UbException(UB_EXARGS, "nuG is not initialised!"));
+    if (densityRatio == -999.999)
+        UB_THROW(UbException(UB_EXARGS, "densityRatio is not initialised!"));
 
-    if (comm->isRoot()) {
+    if (comm->isRoot())
+    {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
+    bool multiPhase = false;
     dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3;
 
     int blocksCountAll   = grid->getNumberOfBlocks(); // quantity of all blocks in the grid
@@ -968,150 +1023,183 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
         start = MPI_Wtime();
 
     MPI_File file_handler;
-    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
+    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin";
     int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
     MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType,
-                     MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType,
-                     MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
-    size_t doubleCountInBlock =
-        (dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
+    size_t doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
         dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] +
-        dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]) * 2;
-    std::vector<double> doubleValuesArray(myBlocksCount * doubleCountInBlock); // double-values in all blocks
+        dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
+    std::vector<double> doubleValuesArrayF(size_t(myBlocksCount * doubleCountInBlock)); // double-values in all blocks  Fdistributions
+    std::vector<double> doubleValuesArrayH1; // double-values in all blocks  H1distributions
+    //std::vector<double> doubleValuesArrayH2; // double-values in all blocks  H2distributions
 
     MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    MPI_Offset read_offset =
-        (MPI_Offset)(3 * sizeof(dataSetParam)) + (MPI_Offset)(indexB * doubleCountInBlock * sizeof(double));
-    MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], int(myBlocksCount), dataSetDoubleType,
-                     MPI_STATUS_IGNORE);
+    //--------------------------------- F ---------------------------------------------------------
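+    // Skip the three dataSetParam headers, then seek to the first block this process read from the file (indexB).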
+    MPI_Offset read_offset = (MPI_Offset)(3 * sizeof(dataSetParam)) + (MPI_Offset)(indexB * doubleCountInBlock * sizeof(double));
+    MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayF[0], int(myBlocksCount), dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_close(&file_handler);
+
+    //--------------------------------- H1 ---------------------------------------------------------
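+    // H1 distributions are only written for multiphase checkpoints; a non-empty cpDataSetH1.bin switches the restart to multiphase mode.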
+    MPI_Offset fsize;
+    filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin";
+    rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
+    if (rc != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename);
+    MPI_File_get_size(file_handler, &fsize);
+    if (fsize > 0)
+    {
+        multiPhase = true;
+        doubleValuesArrayH1.resize(myBlocksCount * doubleCountInBlock);
+
+        read_offset = (MPI_Offset)(indexB * doubleCountInBlock * sizeof(double));
+        MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH1[0], int(myBlocksCount), dataSetDoubleType, MPI_STATUS_IGNORE);
+    }
+    MPI_File_close(&file_handler);
+
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet start of exchange of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
-    std::vector<double> *rawDataReceive = new std::vector<double>[size];
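+    // After repartitioning, redistribute the block data read from file to the processes that now own the blocks.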
+    std::vector<double>* rawDataReceiveF = new std::vector<double>[size];
     for (int r = 0; r < size; r++)
-        rawDataReceive[r].resize(0);
+        rawDataReceiveF[r].resize(0);
+    blocksExchange(MESSAGE_TAG, indexB, indexE, int(doubleCountInBlock), doubleValuesArrayF, rawDataReceiveF);
 
-    blocksExchange(MESSAGE_TAG, indexB, indexE, int(doubleCountInBlock), doubleValuesArray, rawDataReceive);
+    std::vector<double>* rawDataReceiveH1 = new std::vector<double>[size];
+    for (int r = 0; r < size; r++)
+        rawDataReceiveH1[r].resize(0);
+    if (multiPhase)
+        blocksExchange(MESSAGE_TAG, indexB, indexE, int(doubleCountInBlock), doubleValuesArrayH1, rawDataReceiveH1);
 
-    if (comm->isRoot()) {
+    /*    std::vector<double>* rawDataReceiveH2 = new std::vector<double>[size];
+        for (int r = 0; r < size; r++)
+            rawDataReceiveH2[r].resize(0);
+        blocksExchange(MESSAGE_TAG, indexB, indexE, int(doubleCountInBlock), doubleValuesArrayH2, rawDataReceiveH2);*/
+
+    if (comm->isRoot())
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
-
+    
     //-------------------------------------- restore blocks ---------------------------------
     int blockID;
     std::vector<double> vectorsOfValuesF1, vectorsOfValuesF2, vectorsOfValuesF3;
-    std::vector<double> vectorsOfValuesH1, vectorsOfValuesH2, vectorsOfValuesH3;
+    std::vector<double> vectorsOfValuesH11, vectorsOfValuesH12, vectorsOfValuesH13;
+    //std::vector<double> vectorsOfValuesH21, vectorsOfValuesH22, vectorsOfValuesH23;
 
-    size_t vectorSize1 =
-        dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3];
-    size_t vectorSize2 =
-        dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3];
-    size_t vectorSize3 =
-        dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
+    size_t vectorSize1 = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3];
+    size_t vectorSize2 = dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3];
+    size_t vectorSize3 = dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
 
     size_t index;
-    for (int r = 0; r < size; r++) {
+    for (int r = 0; r < size; r++) 
+    {
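+        // Each received record is laid out as [blockID | local | non-local | zero-distribution values];
+        // the same running index addresses the matching H1 record when multiphase data were read.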
         index = 0;
-        for (int ii = 0; ii < int(rawDataReceive[r].size() / doubleCountInBlock); ii++) {
-            blockID = (int)(rawDataReceive[r][index]);
+        for (int ii = 0; ii < int(rawDataReceiveF[r].size() / doubleCountInBlock); ii++) 
+        {
+            blockID = (int)(rawDataReceiveF[r][index]);
             index += 1;
 
-            vectorsOfValuesF1.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize1);
-            index += vectorSize1;
-
-            vectorsOfValuesF2.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize2);
-            index += vectorSize2;
-
-            vectorsOfValuesF3.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize3);
-            index += vectorSize3;
-
-            vectorsOfValuesH1.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize1);
+            vectorsOfValuesF1.assign(rawDataReceiveF[r].data() + index, rawDataReceiveF[r].data() + index + vectorSize1);
+            if(multiPhase)
+                vectorsOfValuesH11.assign(rawDataReceiveH1[r].data() + index, rawDataReceiveH1[r].data() + index + vectorSize1);
+            //vectorsOfValuesH21.assign(rawDataReceiveH2[r].data() + index, rawDataReceiveH2[r].data() + index + vectorSize1);
             index += vectorSize1;
 
-            vectorsOfValuesH2.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize2);
+            vectorsOfValuesF2.assign(rawDataReceiveF[r].data() + index, rawDataReceiveF[r].data() + index + vectorSize2);
+            if (multiPhase)
+                vectorsOfValuesH12.assign(rawDataReceiveH1[r].data() + index, rawDataReceiveH1[r].data() + index + vectorSize2);
+            //vectorsOfValuesH22.assign(rawDataReceiveH2[r].data() + index, rawDataReceiveH2[r].data() + index + vectorSize1);
             index += vectorSize2;
 
-            vectorsOfValuesH3.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize3);
+            vectorsOfValuesF3.assign(rawDataReceiveF[r].data() + index, rawDataReceiveF[r].data() + index + vectorSize3);
+            if (multiPhase)
+                vectorsOfValuesH13.assign(rawDataReceiveH1[r].data() + index, rawDataReceiveH1[r].data() + index + vectorSize3);
+                //vectorsOfValuesH23.assign(rawDataReceiveH2[r].data() + index, rawDataReceiveH2[r].data() + index + vectorSize1);
             index += vectorSize3;
 
             SPtr<DistributionArray3D> mFdistributions(new D3Q27EsoTwist3DSplittedVector());
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)
-                ->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0],
-                                                            dataSetParamStr1.nx[1], dataSetParamStr1.nx[2],
-                                                            dataSetParamStr1.nx[3])));
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)
-                ->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0],
-                                                            dataSetParamStr2.nx[1], dataSetParamStr2.nx[2],
-                                                            dataSetParamStr2.nx[3])));
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)
-                ->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
                         vectorsOfValuesF3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
 
             dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX1(dataSetParamStr1.nx1);
             dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX2(dataSetParamStr1.nx2);
             dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX3(dataSetParamStr1.nx3);
 
-            SPtr<DistributionArray3D> mHdistributions(new D3Q27EsoTwist3DSplittedVector());
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)
-                ->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH1, dataSetParamStr1.nx[0],
-                                                            dataSetParamStr1.nx[1], dataSetParamStr1.nx[2],
-                                                            dataSetParamStr1.nx[3])));
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)
-                ->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH2, dataSetParamStr2.nx[0],
-                                                            dataSetParamStr2.nx[1], dataSetParamStr2.nx[2],
-                                                            dataSetParamStr2.nx[3])));
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)
-                ->setZeroDistributions( CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
-                        vectorsOfValuesH3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
-
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX1(dataSetParamStr1.nx1);
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX2(dataSetParamStr1.nx2);
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX3(dataSetParamStr1.nx3);
+            SPtr<DistributionArray3D> mH1distributions(new D3Q27EsoTwist3DSplittedVector());
+            if (multiPhase)
+            {
+                dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH11, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+                dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH12, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+                dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                    vectorsOfValuesH13, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
+
+                dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX1(dataSetParamStr1.nx1);
+                dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX2(dataSetParamStr1.nx2);
+                dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX3(dataSetParamStr1.nx3);
+            }
+
+            /*SPtr<DistributionArray3D> mH2distributions(new D3Q27EsoTwist3DSplittedVector());
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH21, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH22, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                    vectorsOfValuesH23, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
+
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX1(dataSetParamStr1.nx1);
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX2(dataSetParamStr1.nx2);
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX3(dataSetParamStr1.nx3);*/
+
 
             // find the necessary block and fill it
             SPtr<Block3D> block = grid->getBlock(blockID);
             this->lbmKernel->setBlock(block);
             SPtr<LBMKernel> kernel = this->lbmKernel->clone();
-            LBMReal collFactor     = LBMSystem::calcCollisionFactor(this->nue, block->getLevel());
+            LBMReal collFactor = LBMSystem::calcCollisionFactor(this->nue, block->getLevel());
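+            // Liquid and gas collision factors for the multiphase kernel are rescaled per refinement level from nuL and nuG.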
+            LBMReal collFactorL = LBMSystem::calcCollisionFactor(this->nuL, block->getLevel());
+            LBMReal collFactorG = LBMSystem::calcCollisionFactor(this->nuG, block->getLevel());
             kernel->setCollisionFactor(collFactor);
             kernel->setIndex(block->getX1(), block->getX2(), block->getX3());
             kernel->setDeltaT(LBMSystem::getDeltaT(block->getLevel()));
+            kernel->setCollisionFactorMultiphase(collFactorL, collFactorG);
+            kernel->setDensityRatio(this->densityRatio);
             SPtr<DataSet3D> dataSetPtr = SPtr<DataSet3D>(new DataSet3D());
             dataSetPtr->setFdistributions(mFdistributions);
-            dataSetPtr->setHdistributions(mHdistributions);
+            if (multiPhase)
+                dataSetPtr->setHdistributions(mH1distributions);
+//            dataSetPtr->setHdistributions(mH2distributions);
             kernel->setDataSet(dataSetPtr);
             block->setKernel(kernel);
         }
     }
-    if (comm->isRoot()) {
+    //if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     //-------------------------------------------------------------
@@ -1149,11 +1237,16 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
     if (arrPresence.isRelaxationFactorPresent)
         readArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin"));
     //   readRelaxationFactor(step);
+ 
+    if (arrPresence.isPhaseField1Present)
+        readArray(step, PhaseField1, std::string("/cpPhaseField1.bin"));
 
-    if (arrPresence.isPhaseFieldPresent)
-        readArray(step, PhaseField, std::string("/cpPhaseField.bin"));
+    if (arrPresence.isPhaseField2Present)
+        readArray(step, PhaseField2, std::string("/cpPhaseField2.bin"));
 
-    delete[] rawDataReceive;
+    delete[] rawDataReceiveF;
+    delete[] rawDataReceiveH1;
+//    delete[] rawDataReceiveH2;
 }
 
 void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::string fname)
@@ -1162,11 +1255,12 @@ void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::strin
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
+
     double start, finish;
     if (comm->isRoot())
         start = MPI_Wtime();
@@ -1194,56 +1288,53 @@ void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::strin
 
     MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
-    size_t doubleCountInBlock =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> doubleValuesArray(myBlocksCount * doubleCountInBlock); // double-values in all blocks
 
     MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    MPI_Offset read_offset = (MPI_Offset)(sizeof(dataSetParam)) +
-                             (MPI_Offset)(indexB) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
-    MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], int(myBlocksCount), dataSetDoubleType,
-                     MPI_STATUS_IGNORE);
+    MPI_Offset read_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(indexB) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
+    MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], int(myBlocksCount), dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray start of exchange of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     std::vector<double> *rawDataReceive = new std::vector<double>[size];
     for (int r = 0; r < size; r++)
         rawDataReceive[r].resize(0);
 
-    blocksExchange(MESSAGE_TAG + int(arrType), indexB, indexE, int(doubleCountInBlock), doubleValuesArray,
-                   rawDataReceive);
+    blocksExchange(MESSAGE_TAG + int(arrType), indexB, indexE, int(doubleCountInBlock), doubleValuesArray, rawDataReceive);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray end of exchange of data, rank = " << rank);
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     //----------------------------- restore data ---------------------------------
     int blockID;
     std::vector<double> vectorsOfValues;
     size_t index;
-    size_t nextVectorSize =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
-    for (int r = 0; r < size; r++) {
+    for (int r = 0; r < size; r++) 
+    {
         index = 0;
-        for (int ii = 0; ii < int(rawDataReceive[r].size() / doubleCountInBlock); ii++) {
-            blockID             = (int)(rawDataReceive[r][index]);
+        for (int ii = 0; ii < int(rawDataReceive[r].size() / doubleCountInBlock); ii++) 
+        {
+            blockID = (int)(rawDataReceive[r][index]);
             SPtr<Block3D> block = grid->getBlock(blockID);
             index += 1;
 
@@ -1254,40 +1345,31 @@ void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::strin
             SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___4DArray;
             SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___3DArray;
 
-            switch (arrType) {
+            switch (arrType) 
+            {
                 case AverageDensity:
-                    ___4DArray =
-                        CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
-                            vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2],
-                            dataSetParamStr.nx[3]));
+                    ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
+                            vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
                     block->getKernel()->getDataSet()->setAverageDensity(___4DArray);
                     break;
                 case AverageVelocity:
-                    ___4DArray =
-                        CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
-                            vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2],
-                            dataSetParamStr.nx[3]));
+                    ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
+                            vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
                     block->getKernel()->getDataSet()->setAverageVelocity(___4DArray);
                     break;
                 case AverageFluktuations:
-                    ___4DArray =
-                        CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
-                            vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2],
-                            dataSetParamStr.nx[3]));
+                    ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
+                            vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
                     block->getKernel()->getDataSet()->setAverageFluctuations(___4DArray);
                     break;
                 case AverageTriple:
-                    ___4DArray =
-                        CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
-                            vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2],
-                            dataSetParamStr.nx[3]));
+                    ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
+                            vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
                     block->getKernel()->getDataSet()->setAverageTriplecorrelations(___4DArray);
                     break;
                 case ShearStressVal:
-                    ___4DArray =
-                        CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
-                            vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2],
-                            dataSetParamStr.nx[3]));
+                    ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
+                            vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
                     block->getKernel()->getDataSet()->setShearStressValues(___4DArray);
                     break;
                 case RelaxationFactor:
@@ -1295,14 +1377,18 @@ void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::strin
                         vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
                     block->getKernel()->getDataSet()->setRelaxationFactor(___3DArray);
                     break;
-                case PhaseField:
+                case PhaseField1:
                     ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
                         vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
                     block->getKernel()->getDataSet()->setPhaseField(___3DArray);
                     break;
+                case PhaseField2:
+                    ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                        vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
+                    block->getKernel()->getDataSet()->setPhaseField2(___3DArray);
+                    break;
                 default:
-                    UB_THROW(
-                        UbException(UB_EXARGS, "MPIIOMigrationBECoProcessor::readArray : array type does not exist!"));
+                    UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationBECoProcessor::readArray : array type does not exist!"));
                     break;
             } 
         }
@@ -1310,10 +1396,10 @@ void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::strin
 
     delete[] rawDataReceive;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 }
 
@@ -1323,10 +1409,10 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     double start, finish;
@@ -1360,25 +1446,24 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
     MPI_Type_contiguous(sizeOfBIM, MPI_INT, &bcindexmatrixType);
     MPI_Type_commit(&bcindexmatrixType);
 
-    MPI_Offset read_offset =
-        (MPI_Offset)(sizeof(int)) + (MPI_Offset)(indexB) * (MPI_Offset)(sizeOfBIM) * (MPI_Offset)(sizeof(int));
-    MPI_File_read_at(file_handler, read_offset, &bcindexmatrixVAll[0], int(myBlocksCount), bcindexmatrixType,
-                     MPI_STATUS_IGNORE);
+    MPI_Offset read_offset = (MPI_Offset)(sizeof(int)) + (MPI_Offset)(indexB) * (MPI_Offset)(sizeOfBIM) * (MPI_Offset)(sizeof(int));
+    MPI_File_read_at(file_handler, read_offset, &bcindexmatrixVAll[0], int(myBlocksCount), bcindexmatrixType, MPI_STATUS_IGNORE);
 
     MPI_File_close(&file_handler);
     MPI_Type_free(&bcindexmatrixType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds start of exchange of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     std::vector<int> *rawDataReceive = new std::vector<int>[size];
     std::vector<int> *rawDataSend    = new std::vector<int>[size];
-    for (int r = 0; r < size; r++) {
+    for (int r = 0; r < size; r++) 
+    {
         rawDataReceive[r].resize(0);
         rawDataSend[r].resize(0);
         rawDataReceive[r].push_back(0);
@@ -1396,14 +1481,12 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
         {
             rawDataReceive[tempRank][0]++;
             rawDataReceive[tempRank].push_back(indexB + ind);
-            rawDataReceive[tempRank].insert(rawDataReceive[tempRank].end(), bcindexmatrixVAll.begin() + ind * sizeOfBIM,
-                                            bcindexmatrixVAll.begin() + ind * sizeOfBIM + sizeOfBIM);
+            rawDataReceive[tempRank].insert(rawDataReceive[tempRank].end(), bcindexmatrixVAll.begin() + ind * sizeOfBIM, bcindexmatrixVAll.begin() + ind * sizeOfBIM + sizeOfBIM);
         } else // we must send data to other processes
         {
             rawDataSend[tempRank][0]++;
             rawDataSend[tempRank].push_back(indexB + ind);
-            rawDataSend[tempRank].insert(rawDataSend[tempRank].end(), bcindexmatrixVAll.begin() + ind * sizeOfBIM,
-                                         bcindexmatrixVAll.begin() + ind * sizeOfBIM + sizeOfBIM);
+            rawDataSend[tempRank].insert(rawDataSend[tempRank].end(), bcindexmatrixVAll.begin() + ind * sizeOfBIM, bcindexmatrixVAll.begin() + ind * sizeOfBIM + sizeOfBIM);
         }
     }
 
@@ -1414,9 +1497,11 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
     int intBlockCount;
     int rds;
 
-    for (int r = 0; r < size; r++) {
-        if (r != rank) {
-            rds           = int(rawDataSend[r].size());
+    for (int r = 0; r < size; r++) 
+    {
+        if (r != rank) 
+        {
+            rds = int(rawDataSend[r].size());
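+            // Pad the outgoing buffer with zeros to a whole number of SEND_BLOCK_SIZE chunks so it can be sent with the contiguous sendBlockIntType.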
             intBlockCount = (int)(rds / SEND_BLOCK_SIZE);
             if (intBlockCount * SEND_BLOCK_SIZE < rds)
                 intBlockCount += 1;
@@ -1424,21 +1509,21 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
             for (int i = rds; i < intBlockCount * SEND_BLOCK_SIZE; i++)
                 rawDataSend[r].push_back(0);
 
-            MPI_Isend(&rawDataSend[r][0], intBlockCount, sendBlockIntType, r, MESSAGE_TAG + 7, MPI_COMM_WORLD,
-                      &requests[requestCount]);
+            MPI_Isend(&rawDataSend[r][0], intBlockCount, sendBlockIntType, r, MESSAGE_TAG + 7, MPI_COMM_WORLD, &requests[requestCount]);
             // MPI_Isend(&rawDataSend[r][0], rawDataSend[r].size(), MPI_INT, r, MESSAGE_TAG + 7, MPI_COMM_WORLD,
             // &requests[requestCount]);
             requestCount++;
         }
     }
 
-    for (int r = 0; r < size; r++) {
-        if (r != rank) {
+    for (int r = 0; r < size; r++) 
+    {
+        if (r != rank) 
+        {
             MPI_Probe(r, MESSAGE_TAG + 7, MPI_COMM_WORLD, &status);
             MPI_Get_count(&status, sendBlockIntType, &quant);
             rawDataReceive[r].resize(quant * SEND_BLOCK_SIZE);
-            MPI_Irecv(&rawDataReceive[r][0], quant, sendBlockIntType, r, MESSAGE_TAG + 7, MPI_COMM_WORLD,
-                      &requests[requestCount]);
+            MPI_Irecv(&rawDataReceive[r][0], quant, sendBlockIntType, r, MESSAGE_TAG + 7, MPI_COMM_WORLD, &requests[requestCount]);
             requestCount++;
         }
     }
@@ -1447,17 +1532,17 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
 
     //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds end of exchange of data, rank = " << rank);
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC2.bin";
-    rc       = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
+    rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
@@ -1475,10 +1560,12 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
     std::vector<int> indexContainerV;
     std::vector<int> bcindexmatrixV;
 
-    for (int r = 0; r < size; r++) {
+    for (int r = 0; r < size; r++) 
+    {
         index = 1;
 
-        for (int ii = 0; ii < rawDataReceive[r][0]; ii++) {
+        for (int ii = 0; ii < rawDataReceive[r][0]; ii++) 
+        {
             blockID = (int)(rawDataReceive[r][index]);
             index += 1;
 
@@ -1498,18 +1585,19 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
                                  bcAddArray.boundCond_count, boundCondType, MPI_STATUS_IGNORE);
 
             if (bcAddArray.indexContainer_count > 0)
-                MPI_File_read_at(file_handler,
-                                 read_offset2 + (MPI_Offset)(sizeof(BCAddMigration)) +
+                MPI_File_read_at(file_handler, read_offset2 + (MPI_Offset)(sizeof(BCAddMigration)) +
                                      (MPI_Offset)(bcAddArray.boundCond_count) * (MPI_Offset)(sizeof(BoundaryCondition)),
                                  &indexContainerV[0], bcAddArray.indexContainer_count, MPI_INT, MPI_STATUS_IGNORE);
 
             bcVector.resize(0);
 
-            for (int ibc = 0; ibc < bcAddArray.boundCond_count; ibc++) {
+            for (int ibc = 0; ibc < bcAddArray.boundCond_count; ibc++) 
+            {
                 SPtr<BoundaryConditions> bc;
                 if (memcmp(&bcArray[ibc], nullBouCond, sizeof(BoundaryCondition)) == 0)
                     bc = SPtr<BoundaryConditions>();
-                else {
+                else 
+                {
                     bc                         = SPtr<BoundaryConditions>(new BoundaryConditions);
                     bc->noslipBoundaryFlags    = bcArray[ibc].noslipBoundaryFlags;
                     bc->slipBoundaryFlags      = bcArray[ibc].slipBoundaryFlags;
@@ -1537,8 +1625,7 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
                 bcVector.push_back(bc);
             }
 
-            CbArray3D<int, IndexerX3X2X1> bcim(bcindexmatrixV, boundCondParamStr.nx1, boundCondParamStr.nx2,
-                                               boundCondParamStr.nx3);
+            CbArray3D<int, IndexerX3X2X1> bcim(bcindexmatrixV, boundCondParamStr.nx1, boundCondParamStr.nx2, boundCondParamStr.nx3);
             SPtr<Block3D> block1 = grid->getBlock(blockID);
 
             SPtr<BCProcessor> bcProc = bcProcessor->clone(block1->getKernel());
@@ -1562,12 +1649,12 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
     delete[] rawDataSend;
     delete[] requests;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds end of restore of data, rank = " << rank);
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBoundaryConds time: " << finish - start << " s");
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 }
 
@@ -1577,3 +1664,8 @@ void MPIIOMigrationBECoProcessor::setLBMKernel(SPtr<LBMKernel> kernel) { this->l
 void MPIIOMigrationBECoProcessor::setBCProcessor(SPtr<BCProcessor> bcProcessor) { this->bcProcessor = bcProcessor; }
 //////////////////////////////////////////////////////////////////////////
 void MPIIOMigrationBECoProcessor::setNu(double nu) { this->nue = nu; }
+
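+// nuL, nuG and the density ratio must be set before restart() so that readDataSet() can rebuild the multiphase kernels with the correct collision factors.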
+void MPIIOMigrationBECoProcessor::setNuLG(double cfL, double cfG) { this->nuL = cfL; this->nuG = cfG; }
+
+void MPIIOMigrationBECoProcessor::setDensityRatio(double dr) { this->densityRatio = dr; }
+
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h
index 4d825fde3956dcbe711f49b18b57cd929ba986d9..9a89ada1ae039d10cd53b06b189e5709398911c8 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h
@@ -25,7 +25,8 @@ class MPIIOMigrationBECoProcessor : public MPIIOCoProcessor
         AverageTriple       = 4,
         ShearStressVal      = 5,
         RelaxationFactor    = 6,
-        PhaseField          = 7
+        PhaseField1         = 7,
+        PhaseField2         = 8
     };
 
 public:
@@ -71,6 +72,8 @@ public:
     //! The function truncates the data files
     void clearAllFiles(int step);
     void setNu(double nu);
+    void setNuLG(double cfL, double cfG);
+    void setDensityRatio(double dr);
 
     void blocksExchange(int tagN, int ind1, int ind2, int doubleCountInBlock, std::vector<double> &pV,
                         std::vector<double> *rawDataReceive);
@@ -91,6 +94,10 @@ private:
     SPtr<LBMKernel> lbmKernel;
     SPtr<BCProcessor> bcProcessor;
     double nue;
+    double nuL;
+    double nuG;
+    double densityRatio;
+
 };
 
 #endif
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
index b4314e0162a21f8cb5af48399f9efb04abb2ecfa..d0ce025fda5428a74ec27901202b06acef4781e2 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
@@ -22,8 +22,7 @@
 
 using namespace MPIIODataStructures;
 
-MPIIOMigrationCoProcessor::MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path,
-                                                     SPtr<Communicator> comm)
+MPIIOMigrationCoProcessor::MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, SPtr<Communicator> comm)
     : MPIIOCoProcessor(grid, s, path, comm)
 {
     memset(&boundCondParamStr, 0, sizeof(boundCondParamStr));
@@ -31,7 +30,7 @@ MPIIOMigrationCoProcessor::MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbS
     //-------------------------   define MPI types  ---------------------------------
 
     MPI_Datatype typesDataSet[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR };
-    int blocksDataSet[3]         = { 2, 2, 2 };
+    int blocksDataSet[3]         = { 5, 2, 2 };
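+    // five MPI_DOUBLE entries instead of two: besides collFactor and deltaT, writeDataSet now also stores collFactorL, collFactorG and densityRatio per block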
     MPI_Aint offsetsDatatSet[3], lbDataSet, extentDataSet;
 
     offsetsDatatSet[0] = 0;
@@ -71,7 +70,8 @@ MPIIOMigrationCoProcessor::~MPIIOMigrationCoProcessor()
 //////////////////////////////////////////////////////////////////////////
 void MPIIOMigrationCoProcessor::process(double step)
 {
-    if (scheduler->isDue(step)) {
+    if (scheduler->isDue(step)) 
+    {
         if (comm->isRoot())
             UBLOG(logINFO, "MPIIOMigrationCoProcessor save step: " << step);
         if (comm->isRoot())
@@ -100,8 +100,7 @@ void MPIIOMigrationCoProcessor::clearAllFiles(int step)
     UbSystem::makeDirectory(path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step));
 
     std::string filename10 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC.bin";
-    int rc10 =
-        MPI_File_open(MPI_COMM_WORLD, filename10.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+    int rc10 = MPI_File_open(MPI_COMM_WORLD, filename10.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
     if (rc10 != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename10);
     MPI_File_set_size(file_handler, new_size);
@@ -125,69 +124,93 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
 
     dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3;
     DataSetMigration *dataSetArray = new DataSetMigration[blocksCount];
-    std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
+    std::vector<double> doubleValuesArrayF;  // double values (arrays of f's) of the F-distributions in all blocks
+    std::vector<double> doubleValuesArrayH1; // double values (arrays of f's) of the H1-distributions in all blocks
+    // std::vector<double> doubleValuesArrayH2; // double values (arrays of f's) of the H2-distributions in all blocks
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeDataSet start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
+    bool multiPhase = false;
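+    // becomes true as soon as a block provides H (phase-field) distributions; only then is the extra cpDataSetH1.bin written below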
     DSArraysPresence arrPresence;
     bool firstBlock           = true;
     size_t doubleCountInBlock = 0;
     int ic                    = 0;
-    SPtr<D3Q27EsoTwist3DSplittedVector> D3Q27EsoTwist3DSplittedVectorPtrF, D3Q27EsoTwist3DSplittedVectorPtrH;
-    CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributionsF, localDistributionsH;
-    CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributionsF, nonLocalDistributionsH;
-    CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributionsF, zeroDistributionsH;
+    SPtr<D3Q27EsoTwist3DSplittedVector> D3Q27EsoTwist3DSplittedVectorPtrF = nullptr, D3Q27EsoTwist3DSplittedVectorPtrH1 = nullptr, D3Q27EsoTwist3DSplittedVectorPtrH2 = nullptr;
+    CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributionsF = nullptr, localDistributionsH1 = nullptr, localDistributionsH2 = nullptr;
+    CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributionsF = nullptr, nonLocalDistributionsH1 = nullptr, nonLocalDistributionsH2 = nullptr;
+    CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributionsF = nullptr, zeroDistributionsH1 = nullptr, zeroDistributionsH2 = nullptr;
+
+    SPtr<LBMKernel> kernel;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
-            dataSetArray[ic].globalID =
-                block->getGlobalID(); // id of the block needed to find it while regenerating the grid
-            dataSetArray[ic].ghostLayerWidth = block->getKernel()->getGhostLayerWidth();
-            dataSetArray[ic].collFactor      = block->getKernel()->getCollisionFactor();
-            dataSetArray[ic].deltaT          = block->getKernel()->getDeltaT();
-            dataSetArray[ic].compressible    = block->getKernel()->getCompressible();
-            dataSetArray[ic].withForcing     = block->getKernel()->getWithForcing();
-
-            D3Q27EsoTwist3DSplittedVectorPtrF = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(
-                block->getKernel()->getDataSet()->getFdistributions());
+            kernel = dynamicPointerCast<LBMKernel>(block->getKernel());
+
+            dataSetArray[ic].globalID = block->getGlobalID(); // id of the block needed to find it while regenerating the grid
+            dataSetArray[ic].ghostLayerWidth = kernel->getGhostLayerWidth();
+            dataSetArray[ic].collFactor = kernel->getCollisionFactor();
+            dataSetArray[ic].deltaT = kernel->getDeltaT();
+            dataSetArray[ic].compressible = kernel->getCompressible();
+            dataSetArray[ic].withForcing = kernel->getWithForcing();
+            dataSetArray[ic].collFactorL = kernel->getCollisionFactorL();
+            dataSetArray[ic].collFactorG = kernel->getCollisionFactorG();
+            dataSetArray[ic].densityRatio = kernel->getDensityRatio();
+
+            D3Q27EsoTwist3DSplittedVectorPtrF = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getFdistributions());
             localDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getLocalDistributions();
             nonLocalDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getNonLocalDistributions();
             zeroDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getZeroDistributions();
 
-            D3Q27EsoTwist3DSplittedVectorPtrH = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(
-                block->getKernel()->getDataSet()->getHdistributions());
-            localDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getLocalDistributions();
-            nonLocalDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getNonLocalDistributions();
-            zeroDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getZeroDistributions();
+            D3Q27EsoTwist3DSplittedVectorPtrH1 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getHdistributions());
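+            // kernels without a phase field return no H-distributions here, so this pointer may stay empty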
+            if (D3Q27EsoTwist3DSplittedVectorPtrH1 != 0)
+            {
+                multiPhase = true;
+                localDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getLocalDistributions();
+                nonLocalDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getNonLocalDistributions();
+                zeroDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getZeroDistributions();
+            }
+
+            /*D3Q27EsoTwist3DSplittedVectorPtrH2 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getH2distributions());
+            if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
+            {
+                localDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getLocalDistributions();
+                nonLocalDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getNonLocalDistributions();
+                zeroDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getZeroDistributions();
+            }*/
 
             if (firstBlock) // && block->getKernel()) // when first (any) valid block...
             {
-                if (localDistributionsF) {
+                if (localDistributionsF)
+                {
                     dataSetParamStr1.nx[0] = static_cast<int>(localDistributionsF->getNX1());
                     dataSetParamStr1.nx[1] = static_cast<int>(localDistributionsF->getNX2());
                     dataSetParamStr1.nx[2] = static_cast<int>(localDistributionsF->getNX3());
                     dataSetParamStr1.nx[3] = static_cast<int>(localDistributionsF->getNX4());
                 }
 
-                if (nonLocalDistributionsF) {
+                if (nonLocalDistributionsF)
+                {
                     dataSetParamStr2.nx[0] = static_cast<int>(nonLocalDistributionsF->getNX1());
                     dataSetParamStr2.nx[1] = static_cast<int>(nonLocalDistributionsF->getNX2());
                     dataSetParamStr2.nx[2] = static_cast<int>(nonLocalDistributionsF->getNX3());
                     dataSetParamStr2.nx[3] = static_cast<int>(nonLocalDistributionsF->getNX4());
                 }
-                if (zeroDistributionsF) {
+                if (zeroDistributionsF)
+                {
                     dataSetParamStr3.nx[0] = static_cast<int>(zeroDistributionsF->getNX1());
                     dataSetParamStr3.nx[1] = static_cast<int>(zeroDistributionsF->getNX2());
                     dataSetParamStr3.nx[2] = static_cast<int>(zeroDistributionsF->getNX3());
@@ -195,94 +218,91 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
                 }
 
                 // ... then save some parameters that are equal in all blocks
-                dataSetParamStr1.nx1 = dataSetParamStr2.nx1 = dataSetParamStr3.nx1 =
-                    static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX1());
-                dataSetParamStr1.nx2 = dataSetParamStr2.nx2 = dataSetParamStr3.nx2 =
-                    static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX2());
-                dataSetParamStr1.nx3 = dataSetParamStr2.nx3 = dataSetParamStr3.nx3 =
-                    static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX3());
-
-                //  Fdistributions + Hdistributions
-                doubleCountInBlock =
-                    (dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
-                        dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] +
-                        dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]) * 2;
-
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray =
-                    block->getKernel()->getDataSet()->getAverageDensity();
+                dataSetParamStr1.nx1 = dataSetParamStr2.nx1 = dataSetParamStr3.nx1 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX1());
+                dataSetParamStr1.nx2 = dataSetParamStr2.nx2 = dataSetParamStr3.nx2 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX2());
+                dataSetParamStr1.nx3 = dataSetParamStr2.nx3 = dataSetParamStr3.nx3 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX3());
+
+                doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
+                    dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] +
+                    dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
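+                // per-block double count for one distribution set only; the H1 distributions are written to their own file below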
+
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray = block->getKernel()->getDataSet()->getAverageDensity();
                 if (averageDensityArray)
                     arrPresence.isAverageDensityArrayPresent = true;
                 else
                     arrPresence.isAverageDensityArrayPresent = false;
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr =
-                    block->getKernel()->getDataSet()->getAverageVelocity();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr = block->getKernel()->getDataSet()->getAverageVelocity();
                 if (AverageVelocityArray3DPtr)
                     arrPresence.isAverageVelocityArrayPresent = true;
                 else
                     arrPresence.isAverageVelocityArrayPresent = false;
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr =
-                    block->getKernel()->getDataSet()->getAverageFluctuations();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr = block->getKernel()->getDataSet()->getAverageFluctuations();
                 if (AverageFluctArray3DPtr)
                     arrPresence.isAverageFluktuationsArrayPresent = true;
                 else
                     arrPresence.isAverageFluktuationsArrayPresent = false;
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr =
-                    block->getKernel()->getDataSet()->getAverageTriplecorrelations();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr = block->getKernel()->getDataSet()->getAverageTriplecorrelations();
                 if (AverageTripleArray3DPtr)
                     arrPresence.isAverageTripleArrayPresent = true;
                 else
                     arrPresence.isAverageTripleArrayPresent = false;
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ShearStressValArray3DPtr =
-                    block->getKernel()->getDataSet()->getShearStressValues();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ShearStressValArray3DPtr = block->getKernel()->getDataSet()->getShearStressValues();
                 if (ShearStressValArray3DPtr)
                     arrPresence.isShearStressValArrayPresent = true;
                 else
                     arrPresence.isShearStressValArrayPresent = false;
 
-                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> relaxationFactor3DPtr =
-                    block->getKernel()->getDataSet()->getRelaxationFactor();
+                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> relaxationFactor3DPtr = block->getKernel()->getDataSet()->getRelaxationFactor();
                 if (relaxationFactor3DPtr)
                     arrPresence.isRelaxationFactorPresent = true;
                 else
                     arrPresence.isRelaxationFactorPresent = false;
 
-                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr =
-                    block->getKernel()->getDataSet()->getPhaseField();
-                if (phaseField3DPtr)
-                    arrPresence.isPhaseFieldPresent = true;
+                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr1 = block->getKernel()->getDataSet()->getPhaseField();
+                if (phaseField3DPtr1)
+                    arrPresence.isPhaseField1Present = true;
+                else
+                    arrPresence.isPhaseField1Present = false;
+
+                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr2 = block->getKernel()->getDataSet()->getPhaseField2();
+                if (phaseField3DPtr2)
+                    arrPresence.isPhaseField2Present = true;
                 else
-                    arrPresence.isPhaseFieldPresent = false;
+                    arrPresence.isPhaseField2Present = false;
 
                 firstBlock = false;
             }
 
-            if (localDistributionsF && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) &&
-                (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), localDistributionsF->getDataVector().begin(),
-                    localDistributionsF->getDataVector().end());
-            if (nonLocalDistributionsF && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) &&
-                (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), nonLocalDistributionsF->getDataVector().begin(),
-                    nonLocalDistributionsF->getDataVector().end());
+            if (localDistributionsF && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
+                doubleValuesArrayF.insert(doubleValuesArrayF.end(), localDistributionsF->getDataVector().begin(), localDistributionsF->getDataVector().end());
+            if (nonLocalDistributionsF && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
+                doubleValuesArrayF.insert(doubleValuesArrayF.end(), nonLocalDistributionsF->getDataVector().begin(), nonLocalDistributionsF->getDataVector().end());
             if (zeroDistributionsF && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), zeroDistributionsF->getDataVector().begin(),
-                    zeroDistributionsF->getDataVector().end());
-
-            if (localDistributionsH && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) &&
-                (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), localDistributionsH->getDataVector().begin(),
-                    localDistributionsH->getDataVector().end());
-            if (nonLocalDistributionsH && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) &&
-                (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), nonLocalDistributionsH->getDataVector().begin(),
-                    nonLocalDistributionsH->getDataVector().end());
-            if (zeroDistributionsH && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), zeroDistributionsH->getDataVector().begin(),
-                    zeroDistributionsH->getDataVector().end());
+                doubleValuesArrayF.insert(doubleValuesArrayF.end(), zeroDistributionsF->getDataVector().begin(), zeroDistributionsF->getDataVector().end());
+
+            if (multiPhase)
+            {
+                if (localDistributionsH1 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
+                    doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), localDistributionsH1->getDataVector().begin(), localDistributionsH1->getDataVector().end());
+                if (nonLocalDistributionsH1 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
+                    doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), nonLocalDistributionsH1->getDataVector().begin(), nonLocalDistributionsH1->getDataVector().end());
+                if (zeroDistributionsH1 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
+                    doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), zeroDistributionsH1->getDataVector().begin(), zeroDistributionsH1->getDataVector().end());
+            }
+
+            /*if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
+            {
+                if (localDistributionsH2 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
+                    doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), localDistributionsH2->getDataVector().begin(), localDistributionsH2->getDataVector().end());
+                if (nonLocalDistributionsH2 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
+                    doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), nonLocalDistributionsH2->getDataVector().begin(), nonLocalDistributionsH2->getDataVector().end());
+                if (zeroDistributionsH2 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
+                    doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), zeroDistributionsH2->getDataVector().begin(), zeroDistributionsH2->getDataVector().end());
+            }*/
 
             ic++;
         }
@@ -292,10 +312,10 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
     MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeDataSet start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     double start, finish;
@@ -311,32 +331,73 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
 
     // write to the file
     MPI_File file_handler;
-    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
+    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin";
     int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
     MPI_File_write_at(file_handler, (MPI_Offset)0, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE);
-    MPI_File_write_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType,
-                      MPI_STATUS_IGNORE);
-    MPI_File_write_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType,
-                      MPI_STATUS_IGNORE);
-
+    MPI_File_write_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
+    
     MPI_Offset write_offset;
     size_t sizeofOneDataSet = sizeof(DataSetMigration) + doubleCountInBlock * sizeof(double);
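+    // each block's record is addressed by its globalID, so readDataSet can locate it again even after the grid has been repartitioned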
 
-    for (int nb = 0; nb < blocksCount; nb++) {
+    for (int nb = 0; nb < blocksCount; nb++) 
+    {
         write_offset = (MPI_Offset)(3 * sizeof(dataSetParam) + dataSetArray[nb].globalID * sizeofOneDataSet);
         MPI_File_write_at(file_handler, write_offset, &dataSetArray[nb], 1, dataSetType, MPI_STATUS_IGNORE);
-        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetMigration)),
-                          &doubleValuesArray[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetMigration)), &doubleValuesArrayF[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
     }
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
+
+    //-------------------------------- H1 ----------------------------------------------------
+    if (multiPhase)
+    {
+        filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin";
+        rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+        if (rc != MPI_SUCCESS)
+            throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+        sizeofOneDataSet = doubleCountInBlock * sizeof(double);
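+        // the H1 file holds only raw distribution values; the per-block metadata was already written to cpDataSetF.bin, so the offset is just globalID * doubleCountInBlock * sizeof(double)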
+
+        for (int nb = 0; nb < blocksCount; nb++) 
+        {
+            write_offset = (MPI_Offset)(dataSetArray[nb].globalID * sizeofOneDataSet);
+            MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH1[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
+        }
+
+        MPI_File_sync(file_handler);
+        MPI_File_close(&file_handler);
+    }
+
+    //-------------------------------- H2 ----------------------------------------------------
+    /*if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
+    {
+        filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
+        rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+        if (rc != MPI_SUCCESS)
+            throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+        sizeofOneDataSet = doubleCountInBlock * sizeof(double);
+
+        for (int nb = 0; nb < blocksCount; nb++) 
+        {
+            write_offset = (MPI_Offset)(dataSetArray[nb].globalID * sizeofOneDataSet);
+            MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH2[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
+        }
+
+        MPI_File_sync(file_handler);
+        MPI_File_close(&file_handler);
+    }*/
+    //--------------------------------
+
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeDataSet time: " << finish - start << " s");
     }
@@ -375,8 +436,11 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
     if (arrPresence.isRelaxationFactorPresent)
         write3DArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin"));
 
-    if (arrPresence.isPhaseFieldPresent)
-        write3DArray(step, PhaseField, std::string("/cpPhaseField.bin"));
+    if (arrPresence.isPhaseField1Present)
+        write3DArray(step, PhaseField1, std::string("/cpPhaseField1.bin"));
+
+    if (arrPresence.isPhaseField2Present)
+        write3DArray(step, PhaseField2, std::string("/cpPhaseField2.bin"));
 
 }
 
@@ -391,7 +455,8 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -400,10 +465,10 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st
     std::vector<double> doubleValuesArray; // double-values of the AverageDensityArray in all blocks
     dataSetParam dataSetParamStr;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageDensityArray start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     bool firstBlock           = true;
@@ -411,13 +476,14 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st
     int ic                    = 0;
     SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___Array;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
-            dataSetSmallArray[ic].globalID =
-                block->getGlobalID(); // id of the block needed to find it while regenerating the grid
+            dataSetSmallArray[ic].globalID = block->getGlobalID(); // id of the block needed to find it while regenerating the grid
 
-            switch (arrayType) {
+            switch (arrayType) 
+            {
                 case AverageDensity:
                     ___Array = block->getKernel()->getDataSet()->getAverageDensity();
                     break;
@@ -434,28 +500,24 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st
                     ___Array = block->getKernel()->getDataSet()->getShearStressValues();
                     break;
                 default:
-                    UB_THROW(UbException(UB_EXARGS,
-                                         "MPIIOMigrationCoProcessor::write4DArray : 4D array type does not exist!"));
+                    UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationCoProcessor::write4DArray : 4D array type does not exist!"));
                     break;
             }
 
             if (firstBlock) // when first (any) valid block...
             {
                 dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-                dataSetParamStr.nx[0]                                           = static_cast<int>(___Array->getNX1());
-                dataSetParamStr.nx[1]                                           = static_cast<int>(___Array->getNX2());
-                dataSetParamStr.nx[2]                                           = static_cast<int>(___Array->getNX3());
-                dataSetParamStr.nx[3]                                           = static_cast<int>(___Array->getNX4());
-                doubleCountInBlock =
-                    dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+                dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1());
+                dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2());
+                dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3());
+                dataSetParamStr.nx[3] = static_cast<int>(___Array->getNX4());
+                doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
                 firstBlock = false;
             }
 
-            if (___Array && (dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) &&
-                (dataSetParamStr.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(),
-                                         ___Array->getDataVector().end());
+            if (___Array && (dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0))
+                doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end());
 
             ic++;
         }
@@ -465,10 +527,10 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st
     MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::write4DArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     double start, finish;
@@ -489,7 +551,8 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st
     MPI_Offset write_offset;
     size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
 
-    for (int nb = 0; nb < blocksCount; nb++) {
+    for (int nb = 0; nb < blocksCount; nb++) 
+    {
         write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
         MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
         MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetSmallMigration)),
@@ -500,7 +563,8 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::write4DArray time: " << finish - start << " s");
     }
@@ -519,7 +583,8 @@ void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -528,10 +593,10 @@ void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st
     std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
     dataSetParam dataSetParamStr;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::write3DArray start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     bool firstBlock           = true;
@@ -539,41 +604,42 @@ void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st
     int ic                    = 0;
     SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___Array;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
-            dataSetSmallArray[ic].globalID =
-                block->getGlobalID(); // id of the block needed to find it while regenerating the grid
+            dataSetSmallArray[ic].globalID = block->getGlobalID(); // id of the block needed to find it while regenerating the grid
 
-            switch (arrayType) {
+            switch (arrayType) 
+            {
                 case RelaxationFactor:
                     ___Array = block->getKernel()->getDataSet()->getRelaxationFactor();
                     break;
-                case PhaseField:
+                case PhaseField1:
                     ___Array = block->getKernel()->getDataSet()->getPhaseField();
                     break;
+                case PhaseField2:
+                    ___Array = block->getKernel()->getDataSet()->getPhaseField2();
+                    break;
                 default:
-                    UB_THROW(UbException(UB_EXARGS,
-                                         "MPIIOMigrationCoProcessor::write3DArray : 3D array type does not exist!"));
+                    UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationCoProcessor::write3DArray : 3D array type does not exist!"));
                     break;
             }
 
             if (firstBlock) // when first (any) valid block...
             {
                 dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-                dataSetParamStr.nx[0]                                           = static_cast<int>(___Array->getNX1());
-                dataSetParamStr.nx[1]                                           = static_cast<int>(___Array->getNX2());
-                dataSetParamStr.nx[2]                                           = static_cast<int>(___Array->getNX3());
-                dataSetParamStr.nx[3]                                           = 1;
-                doubleCountInBlock =
-                    dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+                dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1());
+                dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2());
+                dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3());
+                dataSetParamStr.nx[3] = 1;
+                doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
                 firstBlock = false;
             }
 
             if (___Array && (dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(),
-                                         ___Array->getDataVector().end());
+                doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end());
 
             ic++;
         }
@@ -583,10 +649,10 @@ void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st
     MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::write3DArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     double start, finish;
@@ -613,7 +679,8 @@ void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st
     size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
 
     MPI_Offset write_offset;
-    for (int nb = 0; nb < blocksCount; nb++) {
+    for (int nb = 0; nb < blocksCount; nb++) 
+    {
         write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
         MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
         MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetSmallMigration)),
@@ -624,7 +691,8 @@ void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::write3DArray time: " << finish - start << " s");
     }
@@ -1352,10 +1420,10 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeBoundaryConds start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     int blocksCount          = 0; // quantity of blocks, that belong to this process
@@ -1366,7 +1434,8 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -1381,25 +1450,27 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
     int ic                         = 0;
     SPtr<BCArray3D> bcArr;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) // all the blocks of the current level
         {
             bcArr = block->getKernel()->getBCProcessor()->getBCArray();
 
-            bcAddArray[ic].globalID =
-                block->getGlobalID();                // id of the block needed to find it while regenerating the grid
+            bcAddArray[ic].globalID = block->getGlobalID();                // id of the block needed to find it while regenerating the grid
             bcAddArray[ic].boundCond_count      = 0; // how many BoundaryConditions in this block
             bcAddArray[ic].indexContainer_count = 0; // how many indexContainer-values in this block
-            bytesCount[ic]                      = sizeof(BCAddMigration);
+            bytesCount[ic] = sizeof(BCAddMigration);
             bcVector[ic].resize(0);
             bcindexmatrixVector[ic].resize(0);
             indexContainerVector[ic].resize(0);
 
-            for (std::size_t bc = 0; bc < bcArr->getBCVectorSize(); bc++) {
+            for (std::size_t bc = 0; bc < bcArr->getBCVectorSize(); bc++) 
+            {
                 BoundaryCondition *bouCond = new BoundaryCondition();
-                if (bcArr->bcvector[bc] == NULL) {
+                if (bcArr->bcvector[bc] == NULL) 
                     memset(bouCond, 0, sizeof(BoundaryCondition));
-                } else {
+                else 
+                {
                     bouCond->noslipBoundaryFlags    = bcArr->bcvector[bc]->getNoSlipBoundary();
                     bouCond->slipBoundaryFlags      = bcArr->bcvector[bc]->getSlipBoundary();
                     bouCond->velocityBoundaryFlags  = bcArr->bcvector[bc]->getVelocityBoundary();
@@ -1428,20 +1499,18 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
                 bytesCount[ic] += sizeof(BoundaryCondition);
             }
 
-            if (bcindexmatrixCountNotInit) {
+            if (bcindexmatrixCountNotInit) 
+            {
                 boundCondParamStr.nx1                = static_cast<int>(bcArr->bcindexmatrix.getNX1());
                 boundCondParamStr.nx2                = static_cast<int>(bcArr->bcindexmatrix.getNX2());
                 boundCondParamStr.nx3                = static_cast<int>(bcArr->bcindexmatrix.getNX3());
                 boundCondParamStr.bcindexmatrixCount = static_cast<int>(bcArr->bcindexmatrix.getDataVector().size());
                 bcindexmatrixCountNotInit            = false;
             }
-            bcindexmatrixVector[ic].insert(bcindexmatrixVector[ic].begin(),
-                                           bcArr->bcindexmatrix.getDataVector().begin(),
-                                           bcArr->bcindexmatrix.getDataVector().end());
+            bcindexmatrixVector[ic].insert(bcindexmatrixVector[ic].begin(), bcArr->bcindexmatrix.getDataVector().begin(), bcArr->bcindexmatrix.getDataVector().end());
             bytesCount[ic] += boundCondParamStr.bcindexmatrixCount * sizeof(int);
 
-            indexContainerVector[ic].insert(indexContainerVector[ic].begin(), bcArr->indexContainer.begin(),
-                                            bcArr->indexContainer.end());
+            indexContainerVector[ic].insert(indexContainerVector[ic].begin(), bcArr->indexContainer.begin(), bcArr->indexContainer.end());
             bcAddArray[ic].indexContainer_count = static_cast<int>(bcArr->indexContainer.size());
             count_indexContainer += bcAddArray[ic].indexContainer_count;
             bytesCount[ic] += bcAddArray[ic].indexContainer_count * sizeof(int);
@@ -1455,10 +1524,10 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
     MPI_Type_contiguous(boundCondParamStr.bcindexmatrixCount, MPI_INT, &bcindexmatrixType);
     MPI_Type_commit(&bcindexmatrixType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeBoundaryConds start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: "<< Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     double start, finish;
@@ -1479,11 +1548,15 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
 
     MPI_Offset write_offset = (MPI_Offset)(sizeof(boundCondParam) + grid->getNumberOfBlocks() * sizeof(size_t));
     size_t next_file_offset = 0;
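+    // the ranks write their boundary-condition data back to back: rank 0 starts right after the per-block offset table and each rank forwards the next free offset to its successor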
-    if (size > 1) {
-        if (rank == 0) {
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
             next_file_offset = write_offset + allBytesCount;
             MPI_Send(&next_file_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
             next_file_offset = write_offset + allBytesCount;
             if (rank < size - 1)
@@ -1495,7 +1568,8 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
 
     MPI_Offset write_offsetIndex;
 
-    for (int nb = 0; nb < blocksCount; nb++) {
+    for (int nb = 0; nb < blocksCount; nb++) 
+    {
         write_offsetIndex = (MPI_Offset)(sizeof(boundCondParam) + bcAddArray[nb].globalID * sizeof(size_t));
         MPI_File_write_at(file_handler, write_offsetIndex, &write_offset, 1, MPI_LONG_LONG_INT, MPI_STATUS_IGNORE);
 
@@ -1505,17 +1579,12 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
                               bcAddArray[nb].boundCond_count, boundCondType, MPI_STATUS_IGNORE);
 
         if (bcindexmatrixVector[nb].size() > 0)
-            MPI_File_write_at(file_handler,
-                              (MPI_Offset)(write_offset + sizeof(BCAddMigration) +
-                                           bcAddArray[nb].boundCond_count * sizeof(BoundaryCondition)),
+            MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(BCAddMigration) + bcAddArray[nb].boundCond_count * sizeof(BoundaryCondition)),
                               &bcindexmatrixVector[nb][0], 1, bcindexmatrixType, MPI_STATUS_IGNORE);
 
         if (indexContainerVector[nb].size() > 0)
-            MPI_File_write_at(file_handler,
-                              (MPI_Offset)(write_offset + sizeof(BCAddMigration) +
-                                           bcAddArray[nb].boundCond_count * sizeof(BoundaryCondition) +
-                                           boundCondParamStr.bcindexmatrixCount * sizeof(int)),
-                              &indexContainerVector[nb][0], bcAddArray[nb].indexContainer_count, MPI_INT,
+            MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(BCAddMigration) + bcAddArray[nb].boundCond_count * sizeof(BoundaryCondition) +
+                              boundCondParamStr.bcindexmatrixCount * sizeof(int)), &indexContainerVector[nb][0], bcAddArray[nb].indexContainer_count, MPI_INT,
                               MPI_STATUS_IGNORE);
 
         write_offset += bytesCount[nb];
@@ -1525,7 +1594,8 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
     MPI_File_close(&file_handler);
     MPI_Type_free(&bcindexmatrixType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeBoundaryConds time: " << finish - start << " s");
     }
@@ -1547,8 +1617,7 @@ void MPIIOMigrationCoProcessor::restart(int step)
 
     readBlocks(step);
 
-    SPtr<Grid3DVisitor> metisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased,
-                                                                      D3Q27System::BSW, MetisPartitioner::KWAY));
+    SPtr<Grid3DVisitor> metisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::BSW, MetisPartitioner::KWAY));
     grid->accept(metisVisitor);
 
     readDataSet(step);
@@ -1568,15 +1637,16 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::readDataSet start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
     double start, finish;
     if (comm->isRoot())
         start = MPI_Wtime();
 
+    bool multiPhase = false;
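+    // set below if the checkpoint actually contains H1 (phase-field) distributions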
     size_t blocksCount = 0; // quantity of the blocks, that belong to this process
     dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3;
 
@@ -1584,7 +1654,8 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -1592,22 +1663,21 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
     DataSetMigration *dataSetArray = new DataSetMigration[blocksCount];
 
     MPI_File file_handler;
-    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
+    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin";
     int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
     MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType,
-                     MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType,
-                     MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
-    size_t doubleCountInBlock =
-        (dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
+    size_t doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
         dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] +
-        dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3] * 2);
-    std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
+        dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
+    std::vector<double> doubleValuesArrayF(size_t(blocksCount * doubleCountInBlock)); // double values of the F-distributions in all blocks
+    std::vector<double> doubleValuesArrayH1; // double values of the H1-distributions in all blocks
+    //std::vector<double> doubleValuesArrayH2; // double values of the H2-distributions in all blocks
 
     // define MPI_types depending on the block-specific information
     MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
@@ -1617,95 +1687,145 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
     MPI_Offset read_offset;
     size_t sizeofOneDataSet = size_t(sizeof(DataSetMigration) + doubleCountInBlock * sizeof(double));
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
             read_offset = (MPI_Offset)(3 * sizeof(dataSetParam) + block->getGlobalID() * sizeofOneDataSet);
             MPI_File_read_at(file_handler, read_offset, &dataSetArray[ic], 1, dataSetType, MPI_STATUS_IGNORE);
             MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(DataSetMigration)),
-                             &doubleValuesArray[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
+                             &doubleValuesArrayF[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
             ic++;
         }
     }
 
     MPI_File_close(&file_handler);
-    MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
-        finish = MPI_Wtime();
-        UBLOG(logINFO, "MPIIOMigrationCoProcessor::readDataSet time: " << finish - start << " s");
-        UBLOG(logINFO, "MPIIOMigrationCoProcessor::readDataSet start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    //----------------------------------------- H1 ----------------------------------------------------
+    MPI_Offset fsize;
+    filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin";
+    rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
+    if (rc != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename);
+    MPI_File_get_size(file_handler, &fsize);
+    if (fsize > 0)
+    {
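+        // a non-empty cpDataSetH1.bin means the checkpoint was written by a multiphase run; otherwise only the F-distributions are restored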
+        multiPhase = true;
+        doubleValuesArrayH1.resize(blocksCount * doubleCountInBlock);
+
+        sizeofOneDataSet = size_t(doubleCountInBlock * sizeof(double));
+        ic = 0; // restart the block counter: H1 values are read into the array from the beginning
+
+        for (int level = minInitLevel; level <= maxInitLevel; level++)
+        {
+            for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
+            {
+                read_offset = (MPI_Offset)(block->getGlobalID() * sizeofOneDataSet);
+                MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH1[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
+                ic++;
+            }
+        }
+
     }
+    MPI_File_close(&file_handler);
 
-    size_t index = 0, vectorSize = 0;
-    std::vector<double> vectorsOfValuesF1, vectorsOfValuesF2, vectorsOfValuesF3;
-    std::vector<double> vectorsOfValuesH1, vectorsOfValuesH2, vectorsOfValuesH3;
+    //----------------------------------------- H2 ----------------------------------------------------
+    /*filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
+    rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
+    if (rc != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
-    size_t vectorSize1 =
-        dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3];
-    size_t vectorSize2 =
-        dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3];
-    size_t vectorSize3 =
-        dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
+    sizeofOneDataSet = size_t(doubleCountInBlock * sizeof(double));
+    doubleValuesArrayH2.resize(blocksCount * doubleCountInBlock);
 
-    for (std::size_t n = 0; n < blocksCount; n++) {
-        vectorsOfValuesF1.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize1);
-        index += vectorSize1;
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
+        for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
+        {
+            read_offset = (MPI_Offset)(block->getGlobalID() * sizeofOneDataSet);
+            MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH2[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
+            ic++;
+        }
+    }
 
-        vectorsOfValuesF2.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize2);
-        index += vectorSize2;
+    MPI_File_close(&file_handler);*/
 
-        vectorsOfValuesF3.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize3);
-        index += vectorSize3;
+    MPI_Type_free(&dataSetDoubleType);
+
+    if (comm->isRoot()) 
+    {
+        finish = MPI_Wtime();
+        UBLOG(logINFO, "MPIIOMigrationCoProcessor::readDataSet time: " << finish - start << " s");
+        UBLOG(logINFO, "MPIIOMigrationCoProcessor::readDataSet start of restore of data, rank = " << rank);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    }
 
-        vectorsOfValuesH1.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize1);
+    size_t index = 0;
+    std::vector<double> vectorsOfValuesF1, vectorsOfValuesF2, vectorsOfValuesF3;
+    std::vector<double> vectorsOfValuesH11, vectorsOfValuesH12, vectorsOfValuesH13;
+    //std::vector<double> vectorsOfValuesH21, vectorsOfValuesH22, vectorsOfValuesH23;
+
+    size_t vectorSize1 = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3];
+    size_t vectorSize2 = dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3];
+    size_t vectorSize3 = dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
+
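+    // each block stores its local, non-local and zero distributions back to back, so index advances by vectorSize1 + vectorSize2 + vectorSize3 per block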
+    for (std::size_t n = 0; n < blocksCount; n++) 
+    {
+        vectorsOfValuesF1.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize1);
+        if(multiPhase)
+            vectorsOfValuesH11.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize1);
+        //vectorsOfValuesH21.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize1);
         index += vectorSize1;
 
-        vectorsOfValuesH2.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize2);
+        vectorsOfValuesF2.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize2);
+        if (multiPhase)
+            vectorsOfValuesH12.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize2);
+        //vectorsOfValuesH22.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize2);
         index += vectorSize2;
 
-        vectorsOfValuesH3.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize3);
+        vectorsOfValuesF3.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize3);
+        if (multiPhase)
+            vectorsOfValuesH13.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize3);
+        //vectorsOfValuesH23.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize3);
         index += vectorSize3;
 
         SPtr<DistributionArray3D> mFdistributions(new D3Q27EsoTwist3DSplittedVector());
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)
-            ->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0],
-                                                        dataSetParamStr1.nx[1], dataSetParamStr1.nx[2],
-                                                        dataSetParamStr1.nx[3])));
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)
-            ->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0],
-                                                        dataSetParamStr2.nx[1], dataSetParamStr2.nx[2],
-                                                        dataSetParamStr2.nx[3])));
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)
-            ->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
                     vectorsOfValuesF3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
 
         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX1(dataSetParamStr1.nx1);
         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX2(dataSetParamStr1.nx2);
         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX3(dataSetParamStr1.nx3);
 
-        SPtr<DistributionArray3D> mHdistributions(new D3Q27EsoTwist3DSplittedVector());
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)
-            ->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH1, dataSetParamStr1.nx[0],
-                    dataSetParamStr1.nx[1], dataSetParamStr1.nx[2],
-                    dataSetParamStr1.nx[3])));
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)
-            ->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH2, dataSetParamStr2.nx[0],
-                    dataSetParamStr2.nx[1], dataSetParamStr2.nx[2],
-                    dataSetParamStr2.nx[3])));
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)
-            ->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
-                vectorsOfValuesH3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
-
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX1(dataSetParamStr1.nx1);
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX2(dataSetParamStr1.nx2);
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX3(dataSetParamStr1.nx3);
+        SPtr<DistributionArray3D> mH1distributions(new D3Q27EsoTwist3DSplittedVector());
+        if (multiPhase)
+        {
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH11, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH12, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                vectorsOfValuesH13, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
+
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX1(dataSetParamStr1.nx1);
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX2(dataSetParamStr1.nx2);
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX3(dataSetParamStr1.nx3);
+        }
+
+        /*SPtr<DistributionArray3D> mH2distributions(new D3Q27EsoTwist3DSplittedVector());
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH21, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH22, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                vectorsOfValuesH23, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
+
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX1(dataSetParamStr1.nx1);
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX2(dataSetParamStr1.nx2);
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX3(dataSetParamStr1.nx3);*/
 
         // find the necessary block and fill it
         SPtr<Block3D> block = grid->getBlock(dataSetArray[n].globalID);
@@ -1716,17 +1836,22 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
         kernel->setDeltaT(dataSetArray[n].deltaT);
         kernel->setCompressible(dataSetArray[n].compressible);
         kernel->setWithForcing(dataSetArray[n].withForcing);
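+        // restore the multiphase collision parameters stored per block in DataSetMigration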
+        kernel->setCollisionFactorMultiphase(dataSetArray[n].collFactorL, dataSetArray[n].collFactorG);
+        kernel->setDensityRatio(dataSetArray[n].densityRatio);
+
         SPtr<DataSet3D> dataSetPtr = SPtr<DataSet3D>(new DataSet3D());
         dataSetPtr->setFdistributions(mFdistributions);
-        dataSetPtr->setHdistributions(mHdistributions);
+        if (multiPhase)
+            dataSetPtr->setHdistributions(mH1distributions);
+        //dataSetPtr->setH2distributions(mH2distributions);
         kernel->setDataSet(dataSetPtr);
         block->setKernel(kernel);
     }
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::readDataSet end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     delete[] dataSetArray;
@@ -1766,8 +1891,12 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
         readArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin"));
     //   readRelaxationFactor(step);
  
-    if (arrPresence.isPhaseFieldPresent)
-        readArray(step, PhaseField, std::string("/cpPhaseField.bin"));
+    if (arrPresence.isPhaseField1Present)
+        readArray(step, PhaseField1, std::string("/cpPhaseField1.bin"));
+
+    if (arrPresence.isPhaseField2Present)
+        readArray(step, PhaseField2, std::string("/cpPhaseField2.bin"));
+
 }
 
 void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string fname)
@@ -1776,10 +1905,10 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
     double start, finish;
     if (comm->isRoot())
@@ -1799,7 +1928,8 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -1807,8 +1937,7 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string
     MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
     DataSetSmallMigration *dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-    size_t doubleCountInBlock =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
 
     // define MPI_types depending on the block-specific information
@@ -1819,7 +1948,8 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string
     MPI_Offset read_offset;
     size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
             read_offset = (MPI_Offset)(sizeof(dataSetParam) + block->getGlobalID() * sizeofOneDataSet);
@@ -1833,20 +1963,20 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray readArray: " << finish - start << " s");
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     //----------------------------- restore data ---------------------------------
     size_t index = 0;
-    size_t nextVectorSize =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> vectorsOfValues;
-    for (std::size_t n = 0; n < blocksCount; n++) {
+    for (std::size_t n = 0; n < blocksCount; n++) 
+    {
         SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
 
         vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
@@ -1856,35 +1986,31 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string
         SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___4DArray;
         SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___3DArray;
 
-        switch (arrType) {
+        switch (arrType) 
+        {
             case AverageDensity:
                 ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
-                    vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2],
-                    dataSetParamStr.nx[3]));
+                    vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
                 block->getKernel()->getDataSet()->setAverageDensity(___4DArray);
                 break;
             case AverageVelocity:
                 ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
-                    vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2],
-                    dataSetParamStr.nx[3]));
+                    vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
                 block->getKernel()->getDataSet()->setAverageVelocity(___4DArray);
                 break;
             case AverageFluktuations:
                 ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
-                    vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2],
-                    dataSetParamStr.nx[3]));
+                    vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
                 block->getKernel()->getDataSet()->setAverageFluctuations(___4DArray);
                 break;
             case AverageTriple:
                 ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
-                    vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2],
-                    dataSetParamStr.nx[3]));
+                    vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
                 block->getKernel()->getDataSet()->setAverageTriplecorrelations(___4DArray);
                 break;
             case ShearStressVal:
                 ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
-                    vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2],
-                    dataSetParamStr.nx[3]));
+                    vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
                 block->getKernel()->getDataSet()->setShearStressValues(___4DArray);
                 break;
             case RelaxationFactor:
@@ -1892,21 +2018,26 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string
                     vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
                 block->getKernel()->getDataSet()->setRelaxationFactor(___3DArray);
                 break;
-            case PhaseField:
+            case PhaseField1:
                 ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
                     vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
                 block->getKernel()->getDataSet()->setPhaseField(___3DArray);
                 break;
+            case PhaseField2:
+                ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                    vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
+                block->getKernel()->getDataSet()->setPhaseField2(___3DArray);
+                break;
             default:
                 UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationCoProcessor::readArray : array type does not exist!"));
                 break;
         }
     }
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     delete[] dataSetSmallArray;
@@ -2548,10 +2679,10 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::readBoundaryConds start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     double start, finish;
@@ -2570,7 +2701,8 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -2585,12 +2717,12 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step)
     std::vector<int> bcindexmatrixV;
     std::vector<int> indexContainerV;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::readBoundaryConds time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::readBoundaryConds start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     MPI_File_read_at(file_handler, (MPI_Offset)0, &boundCondParamStr, 1, boundCondParamType, MPI_STATUS_IGNORE);
@@ -2599,7 +2731,8 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step)
 
     int ic = 0;
     MPI_Offset read_offset1, read_offset2;
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
             read_offset1 = (MPI_Offset)(sizeof(boundCondParam) + block->getGlobalID() * sizeof(size_t));
@@ -2611,31 +2744,30 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step)
             intArray1 = new int[boundCondParamStr.bcindexmatrixCount];
             intArray2 = new int[bcAddArray[ic].indexContainer_count];
 
-            if (bcAddArray[ic].boundCond_count > 0) {
+            if (bcAddArray[ic].boundCond_count > 0) 
+            {
                 MPI_File_read_at(file_handler, (MPI_Offset)(read_offset2 + sizeof(BCAddMigration)), &bcArray[0],
                                  bcAddArray[ic].boundCond_count, boundCondType, MPI_STATUS_IGNORE);
             }
-            MPI_File_read_at(file_handler,
-                             (MPI_Offset)(read_offset2 + sizeof(BCAddMigration) +
-                                          bcAddArray[ic].boundCond_count * sizeof(BoundaryCondition)),
+            MPI_File_read_at(file_handler, (MPI_Offset)(read_offset2 + sizeof(BCAddMigration) + bcAddArray[ic].boundCond_count * sizeof(BoundaryCondition)),
                              &intArray1[0], 1, bcindexmatrixType, MPI_STATUS_IGNORE);
-            if (bcAddArray[ic].indexContainer_count > 0) {
-                MPI_File_read_at(file_handler,
-                                 (MPI_Offset)(read_offset2 + sizeof(BCAddMigration) +
-                                              bcAddArray[ic].boundCond_count * sizeof(BoundaryCondition) +
-                                              boundCondParamStr.bcindexmatrixCount * sizeof(int)),
-                                 &intArray2[0], bcAddArray[ic].indexContainer_count, MPI_INT, MPI_STATUS_IGNORE);
+            if (bcAddArray[ic].indexContainer_count > 0) 
+            {
+                MPI_File_read_at(file_handler, (MPI_Offset)(read_offset2 + sizeof(BCAddMigration) + bcAddArray[ic].boundCond_count * sizeof(BoundaryCondition) +
+                                 boundCondParamStr.bcindexmatrixCount * sizeof(int)), &intArray2[0], bcAddArray[ic].indexContainer_count, MPI_INT, MPI_STATUS_IGNORE);
             }
 
             bcindexmatrixV.resize(0);
             indexContainerV.resize(0);
             bcVector.resize(0);
 
-            for (int ibc = 0; ibc < bcAddArray[ic].boundCond_count; ibc++) {
+            for (int ibc = 0; ibc < bcAddArray[ic].boundCond_count; ibc++) 
+            {
                 SPtr<BoundaryConditions> bc;
                 if (memcmp(&bcArray[ibc], nullBouCond, sizeof(BoundaryCondition)) == 0)
                     bc = SPtr<BoundaryConditions>();
-                else {
+                else 
+                {
                     bc                         = SPtr<BoundaryConditions>(new BoundaryConditions);
                     bc->noslipBoundaryFlags    = bcArray[ibc].noslipBoundaryFlags;
                     bc->slipBoundaryFlags      = bcArray[ibc].slipBoundaryFlags;
@@ -2669,8 +2801,7 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step)
             for (int b2 = 0; b2 < bcAddArray[ic].indexContainer_count; b2++)
                 indexContainerV.push_back(intArray2[b2]);
 
-            CbArray3D<int, IndexerX3X2X1> bcim(bcindexmatrixV, boundCondParamStr.nx1, boundCondParamStr.nx2,
-                                               boundCondParamStr.nx3);
+            CbArray3D<int, IndexerX3X2X1> bcim(bcindexmatrixV, boundCondParamStr.nx1, boundCondParamStr.nx2, boundCondParamStr.nx3);
             SPtr<Block3D> block1 = grid->getBlock(bcAddArray[ic].globalID);
 
             SPtr<BCProcessor> bcProc = bcProcessor->clone(block1->getKernel());
@@ -2694,10 +2825,10 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step)
 
     delete nullBouCond;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIOMigrationCoProcessor::readBoundaryConds end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 }
 
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h
index ad7a93086afa379822fc7909a68fd39748dd607f..ca0de8f3e7ba315bc8a870f89063ea9f38d7b59f 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h
@@ -25,7 +25,8 @@ public:
         AverageTriple       = 4,
         ShearStressVal      = 5,
         RelaxationFactor = 6,
-        PhaseField = 7
+        PhaseField1 = 7,
+        PhaseField2 = 8
     };
 
     MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, SPtr<Communicator> comm);
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
index ed5d3b275c006700d29c43f16928d2ddc08827f0..49277be64ffca7f28a52eb3dfd3a252d1cb4c2fd 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
@@ -25,8 +25,7 @@
 
 using namespace MPIIODataStructures;
 
-MPIIORestartCoProcessor::MPIIORestartCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path,
-                                                 SPtr<Communicator> comm)
+MPIIORestartCoProcessor::MPIIORestartCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, SPtr<Communicator> comm)
     : MPIIOCoProcessor(grid, s, path, comm)
 {
     memset(&boundCondParamStr, 0, sizeof(boundCondParamStr));
@@ -34,7 +33,7 @@ MPIIORestartCoProcessor::MPIIORestartCoProcessor(SPtr<Grid3D> grid, SPtr<UbSched
     //-------------------------   define MPI types  ---------------------------------
 
     MPI_Datatype typesDataSet[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR };
-    int blocksDataSet[3]         = { 2, 5, 2 };
+    int blocksDataSet[3]         = { 5, 5, 2 };
     MPI_Aint offsetsDatatSet[3], lbDataSet, extentDataSet;
 
     offsetsDatatSet[0] = 0;
@@ -80,7 +79,8 @@ MPIIORestartCoProcessor::~MPIIORestartCoProcessor()
 //////////////////////////////////////////////////////////////////////////
 void MPIIORestartCoProcessor::process(double step)
 {
-    if (scheduler->isDue(step)) {
+    if (scheduler->isDue(step)) 
+    {
         if (comm->isRoot())
             UBLOG(logINFO, "MPIIORestartCoProcessor save step: " << step);
         if (comm->isRoot())
@@ -130,56 +130,77 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
 
     dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3;
     DataSetRestart *dataSetArray = new DataSetRestart[blocksCount];
-    std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
+    std::vector<double> doubleValuesArrayF; // double-values (arrays of f's) in all blocks  Fdistribution
+    std::vector<double> doubleValuesArrayH1; // double-values (arrays of f's) in all blocks  H1distribution
+    // std::vector<double> doubleValuesArrayH2; // double-values (arrays of f's) in all blocks  H2distribution
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeDataSet start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
+    bool multiPhase = false;
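+    // becomes true as soon as a block provides H (phase-field) distributions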
     DSArraysPresence arrPresence;
     bool firstBlock        = true;
     int doubleCountInBlock = 0;
     int ic                 = 0;
 
-    SPtr<D3Q27EsoTwist3DSplittedVector> D3Q27EsoTwist3DSplittedVectorPtrF, D3Q27EsoTwist3DSplittedVectorPtrH;
-    CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributionsF, localDistributionsH;
-    CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributionsF, nonLocalDistributionsH;
-    CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributionsF, zeroDistributionsH;
+    SPtr<D3Q27EsoTwist3DSplittedVector> D3Q27EsoTwist3DSplittedVectorPtrF, D3Q27EsoTwist3DSplittedVectorPtrH1, D3Q27EsoTwist3DSplittedVectorPtrH2;
+    CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributionsF, localDistributionsH1, localDistributionsH2;
+    CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributionsF, nonLocalDistributionsH1, nonLocalDistributionsH2;
+    CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributionsF, zeroDistributionsH1, zeroDistributionsH2;
  
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    SPtr<LBMKernel> kernel;
+
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
-            dataSetArray[ic].x1 =
-                block->getX1(); // coordinates of the block needed to find it while regenerating the grid
-            dataSetArray[ic].x2              = block->getX2();
-            dataSetArray[ic].x3              = block->getX3();
-            dataSetArray[ic].level           = block->getLevel();
-            dataSetArray[ic].ghostLayerWidth = block->getKernel()->getGhostLayerWidth();
-            dataSetArray[ic].collFactor      = block->getKernel()->getCollisionFactor();
-            dataSetArray[ic].deltaT          = block->getKernel()->getDeltaT();
-            dataSetArray[ic].compressible    = block->getKernel()->getCompressible();
-            dataSetArray[ic].withForcing     = block->getKernel()->getWithForcing();
-
-            D3Q27EsoTwist3DSplittedVectorPtrF = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(
-                block->getKernel()->getDataSet()->getFdistributions());
+            kernel = dynamicPointerCast<LBMKernel>(block->getKernel());
+
+            dataSetArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid
+            dataSetArray[ic].x2 = block->getX2();
+            dataSetArray[ic].x3 = block->getX3();
+            dataSetArray[ic].level = block->getLevel();
+            dataSetArray[ic].ghostLayerWidth = kernel->getGhostLayerWidth();
+            dataSetArray[ic].collFactor = kernel->getCollisionFactor();
+            dataSetArray[ic].deltaT = kernel->getDeltaT();
+            dataSetArray[ic].compressible = kernel->getCompressible();
+            dataSetArray[ic].withForcing = kernel->getWithForcing();
+            dataSetArray[ic].collFactorL = kernel->getCollisionFactorL();
+            dataSetArray[ic].collFactorG = kernel->getCollisionFactorG();
+            dataSetArray[ic].densityRatio = kernel->getDensityRatio();
+
+            D3Q27EsoTwist3DSplittedVectorPtrF = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getFdistributions());
             localDistributionsF    = D3Q27EsoTwist3DSplittedVectorPtrF->getLocalDistributions();
             nonLocalDistributionsF = D3Q27EsoTwist3DSplittedVectorPtrF->getNonLocalDistributions();
             zeroDistributionsF     = D3Q27EsoTwist3DSplittedVectorPtrF->getZeroDistributions();
 
-            D3Q27EsoTwist3DSplittedVectorPtrH = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(
-                block->getKernel()->getDataSet()->getHdistributions());
-            localDistributionsH    = D3Q27EsoTwist3DSplittedVectorPtrH->getLocalDistributions();
-            nonLocalDistributionsH = D3Q27EsoTwist3DSplittedVectorPtrH->getNonLocalDistributions();
-            zeroDistributionsH     = D3Q27EsoTwist3DSplittedVectorPtrH->getZeroDistributions();
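+            // H distributions exist only for multiphase kernels; their presence switches on the extra checkpoint file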
+            D3Q27EsoTwist3DSplittedVectorPtrH1 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getHdistributions());
+            if (D3Q27EsoTwist3DSplittedVectorPtrH1 != 0)
+            {
+                multiPhase = true;
+                localDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getLocalDistributions();
+                nonLocalDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getNonLocalDistributions();
+                zeroDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getZeroDistributions();
+            }
+
+            /*D3Q27EsoTwist3DSplittedVectorPtrH2 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getH2distributions());
+            if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
+            {
+                localDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getLocalDistributions();
+                nonLocalDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getNonLocalDistributions();
+                zeroDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getZeroDistributions();
+            }*/
 
             if (firstBlock) // when first (any) valid block...
             {
@@ -204,96 +225,88 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
                 }
 
                 // ... then save some parameters that are equal in all dataSets
-                dataSetParamStr1.nx1 = dataSetParamStr2.nx1 = dataSetParamStr3.nx1 =
-                    static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX1());
-                dataSetParamStr1.nx2 = dataSetParamStr2.nx2 = dataSetParamStr3.nx2 =
-                    static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX2());
-                dataSetParamStr1.nx3 = dataSetParamStr2.nx3 = dataSetParamStr3.nx3 =
-                    static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX3());
-
-             //  Fdistributions + Hdistributions
-                doubleCountInBlock =
-                    (dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
+                dataSetParamStr1.nx1 = dataSetParamStr2.nx1 = dataSetParamStr3.nx1 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX1());
+                dataSetParamStr1.nx2 = dataSetParamStr2.nx2 = dataSetParamStr3.nx2 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX2());
+                dataSetParamStr1.nx3 = dataSetParamStr2.nx3 = dataSetParamStr3.nx3 = static_cast<int>(block->getKernel()->getDataSet()->getFdistributions()->getNX3());
+
+                doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
                      dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] +
-                     dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]) * 2;
+                     dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
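+                // only the F distributions are counted here; the H distributions go to separate files with the same per-block size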
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray =
-                    block->getKernel()->getDataSet()->getAverageDensity();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray = kernel->getDataSet()->getAverageDensity();
                 if (averageDensityArray)
                     arrPresence.isAverageDensityArrayPresent = true;
                 else
                     arrPresence.isAverageDensityArrayPresent = false;
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr =
-                    block->getKernel()->getDataSet()->getAverageVelocity();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr = kernel->getDataSet()->getAverageVelocity();
                 if (AverageVelocityArray3DPtr)
                     arrPresence.isAverageVelocityArrayPresent = true;
                 else
                     arrPresence.isAverageVelocityArrayPresent = false;
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr =
-                    block->getKernel()->getDataSet()->getAverageFluctuations();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr = kernel->getDataSet()->getAverageFluctuations();
                 if (AverageFluctArray3DPtr)
                     arrPresence.isAverageFluktuationsArrayPresent = true;
                 else
                     arrPresence.isAverageFluktuationsArrayPresent = false;
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr =
-                    block->getKernel()->getDataSet()->getAverageTriplecorrelations();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr = kernel->getDataSet()->getAverageTriplecorrelations();
                 if (AverageTripleArray3DPtr)
                     arrPresence.isAverageTripleArrayPresent = true;
                 else
                     arrPresence.isAverageTripleArrayPresent = false;
 
-                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ShearStressValArray3DPtr =
-                    block->getKernel()->getDataSet()->getShearStressValues();
+                SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ShearStressValArray3DPtr = kernel->getDataSet()->getShearStressValues();
                 if (ShearStressValArray3DPtr)
                     arrPresence.isShearStressValArrayPresent = true;
                 else
                     arrPresence.isShearStressValArrayPresent = false;
 
-                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> relaxationFactor3DPtr =
-                    block->getKernel()->getDataSet()->getRelaxationFactor();
+                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> relaxationFactor3DPtr = kernel->getDataSet()->getRelaxationFactor();
                 if (relaxationFactor3DPtr)
                     arrPresence.isRelaxationFactorPresent = true;
                 else
                     arrPresence.isRelaxationFactorPresent = false;
 
-                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr =
-                    block->getKernel()->getDataSet()->getPhaseField();
-                if (phaseField3DPtr)
-                    arrPresence.isPhaseFieldPresent = true;
+                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr1 = kernel->getDataSet()->getPhaseField();
+                if (phaseField3DPtr1)
+                    arrPresence.isPhaseField1Present = true;
+                else
+                    arrPresence.isPhaseField1Present = false;
+
+                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> phaseField3DPtr2 = kernel->getDataSet()->getPhaseField2();
+                if (phaseField3DPtr2)
+                    arrPresence.isPhaseField2Present = true;
                 else
-                    arrPresence.isPhaseFieldPresent = false;
+                    arrPresence.isPhaseField2Present = false;
 
                 firstBlock = false;
             }
 
-            if (localDistributionsF && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) &&
-                (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), localDistributionsF->getDataVector().begin(),
-                                         localDistributionsF->getDataVector().end());
-            if (nonLocalDistributionsF && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) &&
-                (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), nonLocalDistributionsF->getDataVector().begin(),
-                                         nonLocalDistributionsF->getDataVector().end());
-            if (zeroDistributionsF && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) &&
-                (dataSetParamStr3.nx[2] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), zeroDistributionsF->getDataVector().begin(),
-                                         zeroDistributionsF->getDataVector().end());
-
-            if (localDistributionsH && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) &&
-                (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), localDistributionsH->getDataVector().begin(),
-                                         localDistributionsH->getDataVector().end());
-            if (nonLocalDistributionsH && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) &&
-                (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), nonLocalDistributionsH->getDataVector().begin(),
-                                         nonLocalDistributionsH->getDataVector().end());
-            if (zeroDistributionsH && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) &&
-                (dataSetParamStr3.nx[2] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), zeroDistributionsH->getDataVector().begin(),
-                                         zeroDistributionsH->getDataVector().end());
+            if (localDistributionsF && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
+                doubleValuesArrayF.insert(doubleValuesArrayF.end(), localDistributionsF->getDataVector().begin(), localDistributionsF->getDataVector().end());
+            if (nonLocalDistributionsF && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
+                doubleValuesArrayF.insert(doubleValuesArrayF.end(), nonLocalDistributionsF->getDataVector().begin(), nonLocalDistributionsF->getDataVector().end());
+            if (zeroDistributionsF && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
+                doubleValuesArrayF.insert(doubleValuesArrayF.end(), zeroDistributionsF->getDataVector().begin(), zeroDistributionsF->getDataVector().end());
+
+            if (multiPhase)
+            {
+                if (localDistributionsH1 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
+                    doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), localDistributionsH1->getDataVector().begin(), localDistributionsH1->getDataVector().end());
+                if (nonLocalDistributionsH1 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
+                    doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), nonLocalDistributionsH1->getDataVector().begin(), nonLocalDistributionsH1->getDataVector().end());
+                if (zeroDistributionsH1 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
+                    doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), zeroDistributionsH1->getDataVector().begin(), zeroDistributionsH1->getDataVector().end());
+            }
+
+            /*if (localDistributionsH2 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
+                doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), localDistributionsH2->getDataVector().begin(), localDistributionsH2->getDataVector().end());
+            if (nonLocalDistributionsH2 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
+                doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), nonLocalDistributionsH2->getDataVector().begin(), nonLocalDistributionsH2->getDataVector().end());
+            if (zeroDistributionsH2 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
+                doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), zeroDistributionsH2->getDataVector().begin(), zeroDistributionsH2->getDataVector().end());*/
 
             ic++;
         }
@@ -303,10 +316,10 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
     MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeDataSet start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     // write to the file
@@ -315,15 +328,17 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
     MPI_Offset write_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_write_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_write_offset = write_offset + 3 * sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
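+        // each rank forwards its end offset to the next rank so the per-rank writes are laid out back to back without overlap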
+        if (rank == 0) 
+        {
+            next_write_offset = write_offset + 3 * sizeof(dataSetParam) + blocksCount * (sizeof(DataSetRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_write_offset = write_offset + 3 * sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetRestart) + doubleCountInBlock * sizeof(double));
+            next_write_offset = write_offset + 3 * sizeof(dataSetParam) + blocksCount * (sizeof(DataSetRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
@@ -342,7 +357,7 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
 #endif
 
     MPI_File file_handler;
-    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
+    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin";
     int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
@@ -351,27 +366,54 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
     MPI_File_write_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
     // each process writes common parameters of a dataSet
     MPI_File_write_at(file_handler, write_offset, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE);
-    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), &dataSetParamStr2, 1,
-                      dataSetParamType, MPI_STATUS_IGNORE);
-    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + 2 * sizeof(dataSetParam)), &dataSetParamStr3, 1,
-                      dataSetParamType, MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + 2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
     // each process writes data identifying blocks
-    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + 3 * sizeof(dataSetParam)), dataSetArray, blocksCount,
-                      dataSetType, MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + 3 * sizeof(dataSetParam)), dataSetArray, blocksCount, dataSetType, MPI_STATUS_IGNORE);
     // each process writes the dataSet arrays
-    if (doubleValuesArray.size() > 0)
-        MPI_File_write_at(file_handler,
-                          (MPI_Offset)(write_offset + 3 * sizeof(dataSetParam) + blocksCount * sizeof(DataSetRestart)),
-                          &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+    if (doubleValuesArrayF.size() > 0)
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + 3 * sizeof(dataSetParam) + blocksCount * sizeof(DataSetRestart)),
+                          &doubleValuesArrayF[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
 
+    //------------------------------------------------------------------------------------------------------------------
+    if (multiPhase)
+    {
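+        // H1 values go to a separate file at the same per-rank offset as the F data, without the per-block headers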
+        filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin";
+        rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+        if (rc != MPI_SUCCESS)
+            throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+        // each process writes the dataSet arrays
+        if (doubleValuesArrayH1.size() > 0)
+            MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH1[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+
+        MPI_File_sync(file_handler);
+        MPI_File_close(&file_handler);
+    }
+
+    //--------------------------------------------------------------------------------------------------------------------
+    /*filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
+    rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+    if (rc != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+    // each process writes the dataSet arrays
+    if (doubleValuesArrayH2.size() > 0)
+        MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH2[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+
+    MPI_File_sync(file_handler);
+    MPI_File_close(&file_handler);*/
+
+    //--------------------------------
     MPI_Type_free(&dataSetDoubleType);
 
     delete[] dataSetArray;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeDataSet time: " << finish - start << " s");
     }
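Note on the I/O pattern used in this hunk and repeated in every write*/read* routine below: each rank first writes the number of blocks it owns into a per-rank int slot at the head of the file, and the byte offset for the bulk data is handed from rank to rank with MPI_Send/MPI_Recv so that the writes never overlap. A minimal, self-contained sketch of that pattern, assuming MPI is already initialized; the file name, message tag and payload are illustrative and not part of the patch:

#include <mpi.h>
#include <vector>

// Illustrative helper: every rank appends its own double-vector to a shared file.
void writeChained(const std::vector<double> &payload)
{
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // the first size*sizeof(int) bytes hold one element count per rank
    long long write_offset      = (long long)(size * sizeof(int));
    long long next_write_offset = 0;

    if (size > 1) {
        if (rank == 0) {
            next_write_offset = write_offset + (long long)(payload.size() * sizeof(double));
            MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
        } else {
            // wait for the predecessor's end offset, then pass our own end offset on
            MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            next_write_offset = write_offset + (long long)(payload.size() * sizeof(double));
            if (rank < size - 1)
                MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
        }
    }

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, "chained_example.bin", MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
    int count = (int)payload.size();
    MPI_File_write_at(fh, (MPI_Offset)(rank * sizeof(int)), &count, 1, MPI_INT, MPI_STATUS_IGNORE);
    if (!payload.empty())
        MPI_File_write_at(fh, (MPI_Offset)write_offset, payload.data(), count, MPI_DOUBLE, MPI_STATUS_IGNORE);
    MPI_File_sync(fh);
    MPI_File_close(&fh);
}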
@@ -403,8 +445,11 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
     if (arrPresence.isRelaxationFactorPresent)
         writeRelaxationFactor(step);
 
-    if (arrPresence.isPhaseFieldPresent)
-        writePhaseField(step);
+    if (arrPresence.isPhaseField1Present)
+        writePhaseField(step, 1);
+
+    if (arrPresence.isPhaseField2Present)
+        writePhaseField(step, 2);
 }
 
 void MPIIORestartCoProcessor::writeAverageDensityArray(int step)
@@ -418,7 +463,8 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -427,10 +473,10 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step)
     std::vector<double> doubleValuesArray; // double-values of the AverageDensityArray in all blocks
     dataSetParam dataSetParamStr;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageDensityArray start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     bool firstBlock        = true;
@@ -438,13 +484,13 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step)
     int ic                 = 0;
     SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> averageDensityArray;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
-            dataSetSmallArray[ic].x1 =
-                block->getX1(); // coordinates of the block needed to find it while regenerating the grid
-            dataSetSmallArray[ic].x2    = block->getX2();
-            dataSetSmallArray[ic].x3    = block->getX3();
+            dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid
+            dataSetSmallArray[ic].x2 = block->getX2();
+            dataSetSmallArray[ic].x3 = block->getX3();
             dataSetSmallArray[ic].level = block->getLevel();
 
             averageDensityArray = block->getKernel()->getDataSet()->getAverageDensity();
@@ -462,10 +508,8 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step)
                 firstBlock = false;
             }
 
-            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) &&
-                (dataSetParamStr.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), averageDensityArray->getDataVector().begin(),
-                                         averageDensityArray->getDataVector().end());
+            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0))
+                doubleValuesArray.insert(doubleValuesArray.end(), averageDensityArray->getDataVector().begin(), averageDensityArray->getDataVector().end());
 
             ic++;
         }
@@ -475,10 +519,10 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step)
     MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageDensityArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     // write to the file
@@ -487,15 +531,17 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step)
     MPI_Offset write_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_write_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
@@ -524,19 +570,18 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step)
     // each process writes common parameters of a dataSet
     MPI_File_write_at(file_handler, write_offset, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
     // each process writes data identifying blocks
-    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount,
-                      dataSetSmallType, MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE);
     // each process writes the dataSet arrays
     if (doubleValuesArray.size() > 0)
-        MPI_File_write_at(file_handler,
-                          (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                           &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageDensityArray time: " << finish - start << " s");
     }
@@ -555,7 +600,8 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -564,10 +610,10 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step)
     std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
     dataSetParam dataSetParamStr;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageVelocityArray start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     bool firstBlock        = true;
@@ -575,13 +621,13 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step)
     int ic                 = 0;
     SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageVelocityArray3DPtr;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
-            dataSetSmallArray[ic].x1 =
-                block->getX1(); // coordinates of the block needed to find it while regenerating the grid
-            dataSetSmallArray[ic].x2    = block->getX2();
-            dataSetSmallArray[ic].x3    = block->getX3();
+            dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid
+            dataSetSmallArray[ic].x2 = block->getX2();
+            dataSetSmallArray[ic].x3 = block->getX3();
             dataSetSmallArray[ic].level = block->getLevel();
 
             AverageVelocityArray3DPtr = block->getKernel()->getDataSet()->getAverageVelocity();
@@ -593,16 +639,13 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step)
                 dataSetParamStr.nx[1] = static_cast<int>(AverageVelocityArray3DPtr->getNX2());
                 dataSetParamStr.nx[2] = static_cast<int>(AverageVelocityArray3DPtr->getNX3());
                 dataSetParamStr.nx[3] = static_cast<int>(AverageVelocityArray3DPtr->getNX4());
-                doubleCountInBlock =
-                    dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+                doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
                 firstBlock = false;
             }
 
-            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) &&
-                (dataSetParamStr.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), AverageVelocityArray3DPtr->getDataVector().begin(),
-                                         AverageVelocityArray3DPtr->getDataVector().end());
+            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0))
+                doubleValuesArray.insert(doubleValuesArray.end(), AverageVelocityArray3DPtr->getDataVector().begin(), AverageVelocityArray3DPtr->getDataVector().end());
 
             ic++;
         }
@@ -612,10 +655,10 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step)
     MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageVelocityArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     // write to the file
@@ -624,15 +667,16 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step)
     MPI_Offset write_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_write_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } else 
+        {
             MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
@@ -661,19 +705,18 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step)
     // each process writes common parameters of a dataSet
     MPI_File_write_at(file_handler, write_offset, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
     // each process writes data identifying blocks
-    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount,
-                      dataSetSmallType, MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE);
     // each process writes the dataSet arrays
     if (doubleValuesArray.size() > 0)
-        MPI_File_write_at(file_handler,
-                          (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                           &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
 
     MPI_Type_free(&dataSetDoubleType);
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageVelocityArray time: " << finish - start << " s");
     }
@@ -692,7 +735,8 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -701,10 +745,10 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step)
     std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
     dataSetParam dataSetParamStr;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageFluktuationsArray start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     bool firstBlock        = true;
@@ -712,11 +756,11 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step)
     int ic                 = 0;
     SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageFluctArray3DPtr;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
-            dataSetSmallArray[ic].x1 =
-                block->getX1(); // coordinates of the block needed to find it while regenerating the grid
+            dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid
             dataSetSmallArray[ic].x2    = block->getX2();
             dataSetSmallArray[ic].x3    = block->getX3();
             dataSetSmallArray[ic].level = block->getLevel();
@@ -730,14 +774,12 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step)
                 dataSetParamStr.nx[1] = static_cast<int>(AverageFluctArray3DPtr->getNX2());
                 dataSetParamStr.nx[2] = static_cast<int>(AverageFluctArray3DPtr->getNX3());
                 dataSetParamStr.nx[3] = static_cast<int>(AverageFluctArray3DPtr->getNX4());
-                doubleCountInBlock =
-                    dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+                doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
                 firstBlock = false;
             }
 
-            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) &&
-                (dataSetParamStr.nx[3] > 0))
+            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0))
                 doubleValuesArray.insert(doubleValuesArray.end(), AverageFluctArray3DPtr->getDataVector().begin(),
                                          AverageFluctArray3DPtr->getDataVector().end());
 
@@ -749,10 +791,10 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step)
     MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageFluktuationsArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     // write to the file
@@ -761,15 +803,16 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step)
     MPI_Offset write_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_write_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } else 
+        {
             MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
@@ -788,8 +831,7 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step)
 #endif
 
     MPI_File file_handler;
-    std::string filename =
-        path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageFluktuationsArray.bin";
+    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageFluktuationsArray.bin";
     int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
@@ -803,15 +845,15 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step)
                       dataSetSmallType, MPI_STATUS_IGNORE);
     // each process writes the dataSet arrays
     if (doubleValuesArray.size() > 0)
-        MPI_File_write_at(file_handler,
-                          (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                           &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageFluktuationsArray time: " << finish - start << " s");
     }
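For context on the derived-type idiom each of these routines repeats: MPI_Type_contiguous turns one block's worth of doubles (doubleCountInBlock of them) into a single MPI element, so MPI_File_write_at can be issued once with count = blocksCount over the concatenated value vector, and the type is freed right after the write. A minimal sketch under that assumption; the helper name and signature are illustrative, not part of the patch:

#include <mpi.h>
#include <vector>

// Illustrative helper: write blocksCount blocks of doubleCountInBlock doubles each,
// stored back to back in 'values', starting at byte 'offset' of an already open file.
void writeBlockArrays(MPI_File fh, MPI_Offset offset, const std::vector<double> &values,
                      int blocksCount, int doubleCountInBlock)
{
    MPI_Datatype blockType;
    MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &blockType); // one element == one block
    MPI_Type_commit(&blockType);

    if (!values.empty())
        MPI_File_write_at(fh, offset, values.data(), blocksCount, blockType, MPI_STATUS_IGNORE);

    MPI_Type_free(&blockType);
}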
@@ -830,7 +872,8 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -839,10 +882,10 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step)
     std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
     dataSetParam dataSetParamStr;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageTripleArray start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     bool firstBlock        = true;
@@ -850,11 +893,11 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step)
     int ic                 = 0;
     SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> AverageTripleArray3DPtr;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
-            dataSetSmallArray[ic].x1 =
-                block->getX1(); // coordinates of the block needed to find it while regenerating the grid
+            dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid
             dataSetSmallArray[ic].x2    = block->getX2();
             dataSetSmallArray[ic].x3    = block->getX3();
             dataSetSmallArray[ic].level = block->getLevel();
@@ -868,16 +911,13 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step)
                 dataSetParamStr.nx[1] = static_cast<int>(AverageTripleArray3DPtr->getNX2());
                 dataSetParamStr.nx[2] = static_cast<int>(AverageTripleArray3DPtr->getNX3());
                 dataSetParamStr.nx[3] = static_cast<int>(AverageTripleArray3DPtr->getNX4());
-                doubleCountInBlock =
-                    dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+                doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
                 firstBlock = false;
             }
 
-            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) &&
-                (dataSetParamStr.nx[3] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), AverageTripleArray3DPtr->getDataVector().begin(),
-                                         AverageTripleArray3DPtr->getDataVector().end());
+            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0))
+                doubleValuesArray.insert(doubleValuesArray.end(), AverageTripleArray3DPtr->getDataVector().begin(), AverageTripleArray3DPtr->getDataVector().end());
 
             ic++;
         }
@@ -887,10 +927,10 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step)
     MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageTripleArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     // write to the file
@@ -899,15 +939,17 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step)
     MPI_Offset write_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_write_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
@@ -940,8 +982,7 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step)
                       dataSetSmallType, MPI_STATUS_IGNORE);
     // each process writes the dataSet arrays
     if (doubleValuesArray.size() > 0)
-        MPI_File_write_at(file_handler,
-                          (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                           &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
@@ -967,7 +1008,8 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -976,10 +1018,10 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step)
     std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
     dataSetParam dataSetParamStr;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeShearStressValArray start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     bool firstBlock        = true;
@@ -987,11 +1029,11 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step)
     int ic                 = 0;
     SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ShearStressValArray3DPtr;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
-            dataSetSmallArray[ic].x1 =
-                block->getX1(); // coordinates of the block needed to find it while regenerating the grid
+            dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid
             dataSetSmallArray[ic].x2    = block->getX2();
             dataSetSmallArray[ic].x3    = block->getX3();
             dataSetSmallArray[ic].level = block->getLevel();
@@ -1005,14 +1047,12 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step)
                 dataSetParamStr.nx[1] = static_cast<int>(ShearStressValArray3DPtr->getNX2());
                 dataSetParamStr.nx[2] = static_cast<int>(ShearStressValArray3DPtr->getNX3());
                 dataSetParamStr.nx[3] = static_cast<int>(ShearStressValArray3DPtr->getNX4());
-                doubleCountInBlock =
-                    dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+                doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
                 firstBlock = false;
             }
 
-            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) &&
-                (dataSetParamStr.nx[3] > 0))
+            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0))
                 doubleValuesArray.insert(doubleValuesArray.end(), ShearStressValArray3DPtr->getDataVector().begin(),
                                          ShearStressValArray3DPtr->getDataVector().end());
 
@@ -1024,10 +1064,10 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step)
     MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeShearStressValArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     // write to the file
@@ -1036,15 +1076,17 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step)
     MPI_Offset write_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_write_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
@@ -1077,15 +1119,15 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step)
                       dataSetSmallType, MPI_STATUS_IGNORE);
     // each process writes the dataSet arrays
     if (doubleValuesArray.size() > 0)
-        MPI_File_write_at(file_handler,
-                          (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                           &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeShearStressValArray time: " << finish - start << " s");
     }
@@ -1104,7 +1146,8 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -1113,10 +1156,10 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
     std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
     dataSetParam dataSetParamStr;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeRelaxationFactor start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     bool firstBlock        = true;
@@ -1124,11 +1167,11 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
     int ic                 = 0;
     SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> RelaxationFactor3DPtr;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
-            dataSetSmallArray[ic].x1 =
-                block->getX1(); // coordinates of the block needed to find it while regenerating the grid
+            dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid
             dataSetSmallArray[ic].x2    = block->getX2();
             dataSetSmallArray[ic].x3    = block->getX3();
             dataSetSmallArray[ic].level = block->getLevel();
@@ -1142,8 +1185,7 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
                 dataSetParamStr.nx[1] = static_cast<int>(RelaxationFactor3DPtr->getNX2());
                 dataSetParamStr.nx[2] = static_cast<int>(RelaxationFactor3DPtr->getNX3());
                 dataSetParamStr.nx[3] = 1;
-                doubleCountInBlock =
-                    dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+                doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
                 firstBlock = false;
             }
@@ -1160,10 +1202,10 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
     MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeRelaxationFactor start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     // write to the file
@@ -1172,15 +1214,17 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
     MPI_Offset write_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_write_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
@@ -1213,15 +1257,15 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
                       dataSetSmallType, MPI_STATUS_IGNORE);
     // each process writes the dataSet arrays
     if (doubleValuesArray.size() > 0)
-        MPI_File_write_at(file_handler,
-                          (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                           &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeRelaxationFactor time: " << finish - start << " s");
     }
@@ -1229,7 +1273,7 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
     delete[] dataSetSmallArray;
 }
 
-void MPIIORestartCoProcessor::writePhaseField(int step)
+void MPIIORestartCoProcessor::writePhaseField(int step, int fieldN)
 {
     int rank, size;
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
@@ -1240,7 +1284,8 @@ void MPIIORestartCoProcessor::writePhaseField(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -1249,10 +1294,10 @@ void MPIIORestartCoProcessor::writePhaseField(int step)
     std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
     dataSetParam dataSetParamStr;
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writePhaseField start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     bool firstBlock        = true;
@@ -1260,7 +1305,8 @@ void MPIIORestartCoProcessor::writePhaseField(int step)
     int ic                 = 0;
     SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> PhaseField3DPtr;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
             dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid
@@ -1268,7 +1314,10 @@ void MPIIORestartCoProcessor::writePhaseField(int step)
             dataSetSmallArray[ic].x3 = block->getX3();
             dataSetSmallArray[ic].level = block->getLevel();
 
-            PhaseField3DPtr = block->getKernel()->getDataSet()->getPhaseField();
+            if (fieldN == 1)
+                PhaseField3DPtr = block->getKernel()->getDataSet()->getPhaseField();
+            else
+                PhaseField3DPtr = block->getKernel()->getDataSet()->getPhaseField2();
 
             if (firstBlock) // when first (any) valid block...
             {
@@ -1277,28 +1326,25 @@ void MPIIORestartCoProcessor::writePhaseField(int step)
                 dataSetParamStr.nx[1] = static_cast<int>(PhaseField3DPtr->getNX2());
                 dataSetParamStr.nx[2] = static_cast<int>(PhaseField3DPtr->getNX3());
                 dataSetParamStr.nx[3] = 1;
-                doubleCountInBlock =
-                    dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+                doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
                 firstBlock = false;
             }
-
             if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), PhaseField3DPtr->getDataVector().begin(),
-                                         PhaseField3DPtr->getDataVector().end());
+                doubleValuesArray.insert(doubleValuesArray.end(), PhaseField3DPtr->getDataVector().begin(), PhaseField3DPtr->getDataVector().end());
 
             ic++;
         }
     }
-
+
     // register new MPI-types depending on the block-specific information
     MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writePhaseField start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     // write to the file
@@ -1307,15 +1353,17 @@ void MPIIORestartCoProcessor::writePhaseField(int step)
     MPI_Offset write_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_write_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_write_offset = write_offset + sizeof(dataSetParam) +
-                                blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
@@ -1334,7 +1382,9 @@ void MPIIORestartCoProcessor::writePhaseField(int step)
 #endif
 
     MPI_File file_handler;
-    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField.bin";
+    std::string filename;
+    if (fieldN == 1) filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField1.bin";
+    else filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField2.bin";
     int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
@@ -1348,15 +1398,15 @@ void MPIIORestartCoProcessor::writePhaseField(int step)
                       dataSetSmallType, MPI_STATUS_IGNORE);
     // each process writes the dataSet arrays
     if (doubleValuesArray.size() > 0)
-        MPI_File_write_at(file_handler,
-                          (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                           &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::writePhaseField time: " << finish - start << " s");
     }
@@ -1370,10 +1420,10 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeBoundaryConds start collect data rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     int blocksCount          = 0; // quantity of blocks in the grid, max 2147483648 blocks!
@@ -1384,7 +1434,8 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
@@ -1394,27 +1445,31 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
     std::vector<int> bcindexmatrixV;
     std::vector<int> indexContainerV;
     bool bcindexmatrixCountNotInit = true;
-    int ic                         = 0;
+    int ic = 0;
     SPtr<BCArray3D> bcArr;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) {
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
         for (SPtr<Block3D> block : blocksVector[level]) // all the blocks of the current level
         {
             bcArr = block->getKernel()->getBCProcessor()->getBCArray();
 
-            bcAddArray[ic].x1 =
-                block->getX1(); // coordinates of the block needed to find it while regenerating the grid
+            bcAddArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid
             bcAddArray[ic].x2                   = block->getX2();
             bcAddArray[ic].x3                   = block->getX3();
             bcAddArray[ic].level                = block->getLevel();
             bcAddArray[ic].boundCond_count      = 0; // how many BoundaryConditions in this block
             bcAddArray[ic].indexContainer_count = 0; // how many indexContainer-values in this block
 
-            for (std::size_t bc = 0; bc < bcArr->getBCVectorSize(); bc++) {
+            for (std::size_t bc = 0; bc < bcArr->getBCVectorSize(); bc++) 
+            {
                 BoundaryCondition *bouCond = new BoundaryCondition();
-                if (bcArr->bcvector[bc] == NULL) {
+                if (bcArr->bcvector[bc] == NULL) 
+                {
                     memset(bouCond, 0, sizeof(BoundaryCondition));
-                } else {
+                } 
+                else 
+                {
                     bouCond->noslipBoundaryFlags    = bcArr->bcvector[bc]->getNoSlipBoundary();
                     bouCond->slipBoundaryFlags      = bcArr->bcvector[bc]->getSlipBoundary();
                     bouCond->velocityBoundaryFlags  = bcArr->bcvector[bc]->getVelocityBoundary();
@@ -1445,15 +1500,15 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
             // the quantity of elements in the bcindexmatrix array (CbArray3D<int, IndexerX3X2X1>) in bcArray(BCArray3D)
             // is always equal, this will be the size of the "write-read-block" in MPI_write_.../MPI_read-functions when
             // writing/reading BoundConds
-            if (bcindexmatrixCountNotInit) {
+            if (bcindexmatrixCountNotInit) 
+            {
                 boundCondParamStr.nx1                = static_cast<int>(bcArr->bcindexmatrix.getNX1());
                 boundCondParamStr.nx2                = static_cast<int>(bcArr->bcindexmatrix.getNX2());
                 boundCondParamStr.nx3                = static_cast<int>(bcArr->bcindexmatrix.getNX3());
                 boundCondParamStr.bcindexmatrixCount = static_cast<int>(bcArr->bcindexmatrix.getDataVector().size());
                 bcindexmatrixCountNotInit            = false;
             }
-            bcindexmatrixV.insert(bcindexmatrixV.end(), bcArr->bcindexmatrix.getDataVector().begin(),
-                                  bcArr->bcindexmatrix.getDataVector().end());
+            bcindexmatrixV.insert(bcindexmatrixV.end(), bcArr->bcindexmatrix.getDataVector().begin(), bcArr->bcindexmatrix.getDataVector().end());
 
             indexContainerV.insert(indexContainerV.end(), bcArr->indexContainer.begin(), bcArr->indexContainer.end());
             bcAddArray[ic].indexContainer_count = static_cast<int>(bcArr->indexContainer.size());
@@ -1470,7 +1525,8 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
     int bcBlockCount = (int)(count_boundCond / BLOCK_SIZE);
     if (bcBlockCount * BLOCK_SIZE < (int)count_boundCond)
         bcBlockCount += 1;
-    for (int i = (int)count_boundCond; i < bcBlockCount * BLOCK_SIZE; i++) {
+    for (int i = (int)count_boundCond; i < bcBlockCount * BLOCK_SIZE; i++) 
+    {
         BoundaryCondition *bouCond = new BoundaryCondition();
         memset(bouCond, 0, sizeof(BoundaryCondition));
         bcVector.push_back(*bouCond);
@@ -1485,11 +1541,15 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
     MPI_Offset write_offset  = (MPI_Offset)(size * (3 * sizeof(int) + sizeof(boundCondParam)));
     size_t next_write_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
             next_write_offset = write_offset + byteCount;
             MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
             next_write_offset = write_offset + byteCount;
             if (rank < size - 1)
@@ -1497,10 +1557,10 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
         }
     }
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeBoundaryConds start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     double start, finish;
@@ -1526,14 +1586,11 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
     // each process writes the quantity of it's blocks
     MPI_File_write_at(file_handler, write_offset1, &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
     // each process writes the quantity of "big blocks" of BLOCK_SIZE of boundary conditions
-    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset1 + sizeof(int)), &bcBlockCount, 1, MPI_INT,
-                      MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset1 + sizeof(int)), &bcBlockCount, 1, MPI_INT, MPI_STATUS_IGNORE);
     // each process writes the quantity of indexContainer elements in all blocks
-    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset1 + 2 * sizeof(int)), &count_indexContainer, 1, MPI_INT,
-                      MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset1 + 2 * sizeof(int)), &count_indexContainer, 1, MPI_INT, MPI_STATUS_IGNORE);
     // each process writes the quantity of bcindexmatrix elements in every block
-    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset1 + 3 * sizeof(int)), &boundCondParamStr, 1,
-                      boundCondParamType, MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset1 + 3 * sizeof(int)), &boundCondParamStr, 1, boundCondParamType, MPI_STATUS_IGNORE);
 
     // each process writes data identifying the blocks
     MPI_File_write_at(file_handler, write_offset, bcAddArray, blocksCount, boundCondTypeAdd, MPI_STATUS_IGNORE);
@@ -1543,23 +1600,19 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
                           bcBlockCount, boundCondType1000, MPI_STATUS_IGNORE);
     // each process writes bcindexmatrix values
     if (bcindexmatrixV.size() > 0)
-        MPI_File_write_at(file_handler,
-                          (MPI_Offset)(write_offset + blocksCount * sizeof(BCAddRestart) +
-                                       bcBlockCount * BLOCK_SIZE * sizeof(BoundaryCondition)),
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + blocksCount * sizeof(BCAddRestart) + bcBlockCount * BLOCK_SIZE * sizeof(BoundaryCondition)),
                           &bcindexmatrixV[0], blocksCount, bcindexmatrixType, MPI_STATUS_IGNORE);
     // each process writes indexContainer values
     if (indexContainerV.size() > 0)
-        MPI_File_write_at(file_handler,
-                          (MPI_Offset)(write_offset + blocksCount * sizeof(BCAddRestart) +
-                                       bcBlockCount * BLOCK_SIZE * sizeof(BoundaryCondition) +
-                                       blocksCount * boundCondParamStr.bcindexmatrixCount * sizeof(int)),
-                          &indexContainerV[0], count_indexContainer, MPI_INT, MPI_STATUS_IGNORE);
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + blocksCount * sizeof(BCAddRestart) + bcBlockCount * BLOCK_SIZE * sizeof(BoundaryCondition) +
+                      blocksCount * boundCondParamStr.bcindexmatrixCount * sizeof(int)), &indexContainerV[0], count_indexContainer, MPI_INT, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
     MPI_Type_free(&bcindexmatrixType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::writeBoundaryConds time: " << finish - start << " s");
     }
@@ -1593,17 +1646,18 @@ void MPIIORestartCoProcessor::readDataSet(int step)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readDataSet start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
+
     double start, finish;
     if (comm->isRoot())
         start = MPI_Wtime();
 
     MPI_File file_handler;
-    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
+    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin";
     int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
@@ -1611,6 +1665,7 @@ void MPIIORestartCoProcessor::readDataSet(int step)
     // calculate the read offset
     MPI_Offset read_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_read_offset = 0;
+    bool multiPhase = false;
 
     // read count of blocks
     int blocksCount = 0;
@@ -1618,122 +1673,146 @@ void MPIIORestartCoProcessor::readDataSet(int step)
 
     MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
     MPI_File_read_at(file_handler, read_offset, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), &dataSetParamStr2, 1,
-                     dataSetParamType, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + 2 * sizeof(dataSetParam)), &dataSetParamStr3, 1,
-                     dataSetParamType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + 2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
     DataSetRestart *dataSetArray = new DataSetRestart[blocksCount];
-    double doubleCountInBlock =
-        (dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
+    double doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
         dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] +
-        dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3]) * 2;
-    std::vector<double> doubleValuesArray(size_t(blocksCount * doubleCountInBlock)); // double-values in all blocks
+        dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
+    std::vector<double> doubleValuesArrayF(size_t(blocksCount * doubleCountInBlock)); // double-values in all blocks  Fdistributions
+    std::vector<double> doubleValuesArrayH1; // double-values in all blocks  H1distributions
+    //std::vector<double> doubleValuesArrayH2; // double-values in all blocks  H2distributions
 
     //   define MPI_types depending on the block-specific information
     MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_read_offset = read_offset + 3 * sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetRestart) + size_t(doubleCountInBlock) * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_read_offset = read_offset + 3 * sizeof(dataSetParam) + blocksCount * (sizeof(DataSetRestart) + size_t(doubleCountInBlock) * sizeof(double));
             MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_read_offset = read_offset + 3 * sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetRestart) + size_t(doubleCountInBlock) * sizeof(double));
+            next_read_offset = read_offset + 3 * sizeof(dataSetParam) + blocksCount * (sizeof(DataSetRestart) + size_t(doubleCountInBlock) * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
     }
 
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam)), dataSetArray, blocksCount,
-                     dataSetType, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler,
-                     (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam) + blocksCount * sizeof(DataSetRestart)),
-                     &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam)), dataSetArray, blocksCount, dataSetType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam) + blocksCount * sizeof(DataSetRestart)),
+                     &doubleValuesArrayF[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+    MPI_File_close(&file_handler);
+
+    //-------------------------------------- H1 -----------------------------
+    MPI_Offset fsize;
+    filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin";
+    rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
+    if (rc != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename);
+    MPI_File_get_size(file_handler, &fsize);
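+    // only treat this as a multiphase checkpoint if the H1 file actually contains data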
+    if (fsize > 0)
+    {
+        multiPhase = true;
+        doubleValuesArrayH1.resize(size_t(blocksCount * doubleCountInBlock));
+        MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH1[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+    }
     MPI_File_close(&file_handler);
+
+    //-------------------------------------- H2 -----------------------------
+       /*filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
+    rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
+    if (rc != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+    doubleValuesArrayH2.resize(blocksCount * doubleCountInBlock);
+    MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH2[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+    MPI_File_close(&file_handler);*/
+
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::readDataSet time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIORestartCoProcessor::readDataSet start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
-
+    
     size_t index = 0;
     std::vector<double> vectorsOfValuesF1, vectorsOfValuesF2, vectorsOfValuesF3;
-    std::vector<double> vectorsOfValuesH1, vectorsOfValuesH2, vectorsOfValuesH3;
-    size_t vectorSize1 =
-        dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3];
-    size_t vectorSize2 =
-        dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3];
-    size_t vectorSize3 =
-        dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
-
-    for (int n = 0; n < blocksCount; n++) {
-        vectorsOfValuesF1.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize1);
-        index += vectorSize1;
-
-        vectorsOfValuesF2.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize2);
-        index += vectorSize2;
-
-        vectorsOfValuesF3.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize3);
-        index += vectorSize3;
-
-        vectorsOfValuesH1.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize1);
+    std::vector<double> vectorsOfValuesH11, vectorsOfValuesH12, vectorsOfValuesH13;
+    //std::vector<double> vectorsOfValuesH21, vectorsOfValuesH22, vectorsOfValuesH23;
+    size_t vectorSize1 = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3];
+    size_t vectorSize2 = dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3];
+    size_t vectorSize3 = dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
+
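+    // per block: slice the F values (and, for a multiphase checkpoint, the H1 values) out of the flat arrays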
+    for (int n = 0; n < blocksCount; n++) 
+    {
+        vectorsOfValuesF1.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize1);
+        if (multiPhase)
+            vectorsOfValuesH11.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize1);
+        //vectorsOfValuesH21.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize1);
         index += vectorSize1;
 
-        vectorsOfValuesH2.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize2);
+        vectorsOfValuesF2.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize2);
+        if (multiPhase)
+            vectorsOfValuesH12.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize2);
+        //vectorsOfValuesH22.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize2);
         index += vectorSize2;
 
-        vectorsOfValuesH3.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + vectorSize3);
+        vectorsOfValuesF3.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize3);
+        if (multiPhase)
+            vectorsOfValuesH13.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize3);
+        //vectorsOfValuesH23.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize3);
         index += vectorSize3;
 
         SPtr<DistributionArray3D> mFdistributions(new D3Q27EsoTwist3DSplittedVector());
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)
-            ->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0],
-                                                        dataSetParamStr1.nx[1], dataSetParamStr1.nx[2],
-                                                        dataSetParamStr1.nx[3])));
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)
-            ->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0],
-                                                        dataSetParamStr2.nx[1], dataSetParamStr2.nx[2],
-                                                        dataSetParamStr2.nx[3])));
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)
-            ->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
                     vectorsOfValuesF3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
 
         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX1(dataSetParamStr1.nx1);
         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX2(dataSetParamStr1.nx2);
         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX3(dataSetParamStr1.nx3);
 
-        SPtr<DistributionArray3D> mHdistributions(new D3Q27EsoTwist3DSplittedVector());
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)
-            ->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH1, dataSetParamStr1.nx[0],
-                                                        dataSetParamStr1.nx[1], dataSetParamStr1.nx[2],
-                                                        dataSetParamStr1.nx[3])));
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)
-            ->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH2, dataSetParamStr2.nx[0],
-                                                        dataSetParamStr2.nx[1], dataSetParamStr2.nx[2],
-                                                        dataSetParamStr2.nx[3])));
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)
-            ->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
-                    vectorsOfValuesH3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
-
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX1(dataSetParamStr1.nx1);
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX2(dataSetParamStr1.nx2);
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mHdistributions)->setNX3(dataSetParamStr1.nx3);
-
+        SPtr<DistributionArray3D> mH1distributions(new D3Q27EsoTwist3DSplittedVector());
+        if (multiPhase)
+        {
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH11, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH12, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                vectorsOfValuesH13, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
+
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX1(dataSetParamStr1.nx1);
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX2(dataSetParamStr1.nx2);
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX3(dataSetParamStr1.nx3);
+        }
+        /*SPtr<DistributionArray3D> mH2distributions(new D3Q27EsoTwist3DSplittedVector());
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH21, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH22, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                vectorsOfValuesH23, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
+
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX1(dataSetParamStr1.nx1);
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX2(dataSetParamStr1.nx2);
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX3(dataSetParamStr1.nx3);*/
+         
         // find the necessary block and fill it
-        SPtr<Block3D> block =
-            grid->getBlock(dataSetArray[n].x1, dataSetArray[n].x2, dataSetArray[n].x3, dataSetArray[n].level);
+        SPtr<Block3D> block = grid->getBlock(dataSetArray[n].x1, dataSetArray[n].x2, dataSetArray[n].x3, dataSetArray[n].level);
+   
         this->lbmKernel->setBlock(block);
         SPtr<LBMKernel> kernel = this->lbmKernel->clone();
         kernel->setGhostLayerWidth(dataSetArray[n].ghostLayerWidth);
@@ -1741,17 +1820,22 @@ void MPIIORestartCoProcessor::readDataSet(int step)
         kernel->setDeltaT(dataSetArray[n].deltaT);
         kernel->setCompressible(dataSetArray[n].compressible);
         kernel->setWithForcing(dataSetArray[n].withForcing);
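+        // restore the multiphase collision factors and density ratio stored for this block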
+        kernel->setCollisionFactorMultiphase(dataSetArray[n].collFactorL, dataSetArray[n].collFactorG);
+        kernel->setDensityRatio(dataSetArray[n].densityRatio);
+
         SPtr<DataSet3D> dataSetPtr = SPtr<DataSet3D>(new DataSet3D());
         dataSetPtr->setFdistributions(mFdistributions);
-        dataSetPtr->setHdistributions(mHdistributions);
+        if (multiPhase)
+            dataSetPtr->setHdistributions(mH1distributions);
+        //dataSetPtr->setH2distributions(mH2distributions);
         kernel->setDataSet(dataSetPtr);
         block->setKernel(kernel);
     }
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readDataSet end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     delete[] dataSetArray;
@@ -1785,8 +1869,11 @@ void MPIIORestartCoProcessor::readDataSet(int step)
     if (arrPresence.isRelaxationFactorPresent)
         readRelaxationFactor(step);
 
-    if (arrPresence.isPhaseFieldPresent)
-        readPhaseField(step);
+    if (arrPresence.isPhaseField1Present)
+        readPhaseField(step, 1);
+
+    if (arrPresence.isPhaseField2Present)
+        readPhaseField(step, 2);
 }
 
 void MPIIORestartCoProcessor::readAverageDensityArray(int step)
@@ -1795,10 +1882,10 @@ void MPIIORestartCoProcessor::readAverageDensityArray(int step)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageDensityArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
     double start, finish;
     if (comm->isRoot())
@@ -1816,12 +1903,10 @@ void MPIIORestartCoProcessor::readAverageDensityArray(int step)
     memset(&dataSetParamStr, 0, sizeof(dataSetParam));
 
     MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType,
-                     MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
     DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount];
-    int doubleCountInBlock =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
 
     // define MPI_types depending on the block-specific information
@@ -1832,61 +1917,59 @@ void MPIIORestartCoProcessor::readAverageDensityArray(int step)
     MPI_Offset read_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_read_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
     }
 
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount,
-                     dataSetSmallType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE);
     if (doubleCountInBlock > 0)
-        MPI_File_read_at(file_handler,
-                         (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                          &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageDensityArray time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageDensityArray start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     size_t index = 0;
-    size_t nextVectorSize =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> vectorsOfValues;
-    for (int n = 0; n < blocksCount; n++) {
+    for (int n = 0; n < blocksCount; n++) 
+    {
         vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
         index += nextVectorSize;
 
         // fill mAverageDensity arrays
         SPtr<AverageValuesArray3D> mAverageDensity;
-        mAverageDensity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-            new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1],
-                                                    dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+        mAverageDensity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, 
+            dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
 
         // find the necessary block and fill it
-        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3,
-                                             dataSetSmallArray[n].level);
+        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
         block->getKernel()->getDataSet()->setAverageDensity(mAverageDensity);
     }
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageDensityArray end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     delete[] dataSetSmallArray;
@@ -1898,10 +1981,10 @@ void MPIIORestartCoProcessor::readAverageVelocityArray(int step)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageVelocityArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
     double start, finish;
     if (comm->isRoot())
@@ -1917,12 +2000,10 @@ void MPIIORestartCoProcessor::readAverageVelocityArray(int step)
     int blocksCount = 0;
     dataSetParam dataSetParamStr;
     MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType,
-                     MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
     DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount];
-    int doubleCountInBlock =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
 
     // define MPI_types depending on the block-specific information
@@ -1933,61 +2014,59 @@ void MPIIORestartCoProcessor::readAverageVelocityArray(int step)
     MPI_Offset read_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_read_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
     }
 
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount,
-                     dataSetSmallType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE);
     if (doubleCountInBlock > 0)
-        MPI_File_read_at(file_handler,
-                         (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                          &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageVelocityArray time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageVelocityArray start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     size_t index = 0;
-    size_t nextVectorSize =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> vectorsOfValues;
-    for (int n = 0; n < blocksCount; n++) {
+    for (int n = 0; n < blocksCount; n++) 
+    {
         vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
         index += nextVectorSize;
 
         // fill mAverageVelocity array
         SPtr<AverageValuesArray3D> mAverageVelocity;
-        mAverageVelocity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-            new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1],
-                                                    dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+        mAverageVelocity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], 
+            dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
 
         // find the necessary block and fill it
-        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3,
-                                             dataSetSmallArray[n].level);
+        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
         block->getKernel()->getDataSet()->setAverageVelocity(mAverageVelocity);
     }
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageVelocityArray end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     delete[] dataSetSmallArray;
@@ -1999,10 +2078,10 @@ void MPIIORestartCoProcessor::readAverageFluktuationsArray(int step)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageFluktuationsArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
     double start, finish;
     if (comm->isRoot())
@@ -2019,12 +2098,10 @@ void MPIIORestartCoProcessor::readAverageFluktuationsArray(int step)
     int blocksCount = 0;
     dataSetParam dataSetParamStr;
     MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType,
-                     MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
     DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount];
-    int doubleCountInBlock =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
 
     // define MPI_types depending on the block-specific information
@@ -2035,62 +2112,59 @@ void MPIIORestartCoProcessor::readAverageFluktuationsArray(int step)
     MPI_Offset read_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_read_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
     }
 
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount,
-                     dataSetSmallType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE);
     if (doubleCountInBlock > 0)
-        MPI_File_read_at(file_handler,
-                         (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                          &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageFluktuationsArray time: " << finish - start << " s");
-        UBLOG(logINFO,
-              "MPIIORestartCoProcessor::readAverageFluktuationsArray start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageFluktuationsArray start of restore of data, rank = " << rank);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     size_t index = 0;
-    size_t nextVectorSize =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> vectorsOfValues;
-    for (int n = 0; n < blocksCount; n++) {
+    for (int n = 0; n < blocksCount; n++) 
+    {
         vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
         index += nextVectorSize;
 
         // fill AverageFluktuations array
         SPtr<AverageValuesArray3D> mAverageFluktuations;
-        mAverageFluktuations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-            new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1],
-                                                    dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+        mAverageFluktuations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, 
+                dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
 
         // find the necessary block and fill it
-        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3,
-                                             dataSetSmallArray[n].level);
+        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
         block->getKernel()->getDataSet()->setAverageFluctuations(mAverageFluktuations);
     }
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageFluktuationsArray end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     delete[] dataSetSmallArray;
@@ -2102,10 +2176,10 @@ void MPIIORestartCoProcessor::readAverageTripleArray(int step)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageTripleArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
     double start, finish;
     if (comm->isRoot())
@@ -2121,12 +2195,10 @@ void MPIIORestartCoProcessor::readAverageTripleArray(int step)
     int blocksCount = 0;
     dataSetParam dataSetParamStr;
     MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType,
-                     MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
     DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount];
-    int doubleCountInBlock =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
 
     // define MPI_types depending on the block-specific information
@@ -2137,61 +2209,59 @@ void MPIIORestartCoProcessor::readAverageTripleArray(int step)
     MPI_Offset read_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_read_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
     }
 
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount,
-                     dataSetSmallType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE);
     if (doubleCountInBlock > 0)
-        MPI_File_read_at(file_handler,
-                         (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                          &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageTripleArray time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageTripleArray start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     size_t index = 0;
-    size_t nextVectorSize =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> vectorsOfValues;
-    for (int n = 0; n < blocksCount; n++) {
+    for (int n = 0; n < blocksCount; n++) 
+    {
         vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
         index += nextVectorSize;
 
         // fill AverageTriplecorrelations array
         SPtr<AverageValuesArray3D> mAverageTriplecorrelations;
-        mAverageTriplecorrelations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-            new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1],
-                                                    dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+        mAverageTriplecorrelations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, 
+                dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
 
         // find the necessary block and fill it
-        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3,
-                                             dataSetSmallArray[n].level);
+        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
         block->getKernel()->getDataSet()->setAverageTriplecorrelations(mAverageTriplecorrelations);
     }
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageTripleArray end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     delete[] dataSetSmallArray;
@@ -2203,10 +2273,10 @@ void MPIIORestartCoProcessor::readShearStressValArray(int step)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readShearStressValArray start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
     double start, finish;
     if (comm->isRoot())
@@ -2222,12 +2292,10 @@ void MPIIORestartCoProcessor::readShearStressValArray(int step)
     int blocksCount = 0;
     dataSetParam dataSetParamStr;
     MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType,
-                     MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
     DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount];
-    int doubleCountInBlock =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
 
     // define MPI_types depending on the block-specific information
@@ -2238,61 +2306,59 @@ void MPIIORestartCoProcessor::readShearStressValArray(int step)
     MPI_Offset read_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_read_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
     }
 
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount,
-                     dataSetSmallType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE);
     if (doubleCountInBlock > 0)
-        MPI_File_read_at(file_handler,
-                         (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                          &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::readShearStressValArray time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIORestartCoProcessor::readShearStressValArray start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     size_t index = 0;
-    size_t nextVectorSize =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> vectorsOfValues;
-    for (int n = 0; n < blocksCount; n++) {
+    for (int n = 0; n < blocksCount; n++) 
+    {
         vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
         index += nextVectorSize;
 
         // fill ShearStressValuesArray array
         SPtr<ShearStressValuesArray3D> mShearStressValues;
-        mShearStressValues = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-            new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1],
-                                                    dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+        mShearStressValues = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, 
+                dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
 
         // find the necessary block and fill it
-        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3,
-                                             dataSetSmallArray[n].level);
+        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
         block->getKernel()->getDataSet()->setShearStressValues(mShearStressValues);
     }
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readShearStressValArray end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     delete[] dataSetSmallArray;
@@ -2304,10 +2370,10 @@ void MPIIORestartCoProcessor::readRelaxationFactor(int step)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readRelaxationFactor start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
     double start, finish;
     if (comm->isRoot())
@@ -2323,12 +2389,10 @@ void MPIIORestartCoProcessor::readRelaxationFactor(int step)
     int blocksCount = 0;
     dataSetParam dataSetParamStr;
     MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType,
-                     MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
     DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount];
-    int doubleCountInBlock =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
 
     // define MPI_types depending on the block-specific information
@@ -2339,42 +2403,42 @@ void MPIIORestartCoProcessor::readRelaxationFactor(int step)
     MPI_Offset read_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_read_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
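+        // chain the per-rank read offsets: rank 0 computes the next offset and sends it on; every other rank receives its offset from rank-1 and forwards the following one to rank+1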
+        if (rank == 0) 
+        {
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
     }
 
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount,
-                     dataSetSmallType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE);
     if (doubleCountInBlock > 0)
-        MPI_File_read_at(file_handler,
-                         (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                          &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::readRelaxationFactor time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIORestartCoProcessor::readRelaxationFactor start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     size_t index = 0;
-    size_t nextVectorSize =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> vectorsOfValues;
-    for (int n = 0; n < blocksCount; n++) {
+    for (int n = 0; n < blocksCount; n++) 
+    {
         vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
         index += nextVectorSize;
 
@@ -2384,37 +2448,38 @@ void MPIIORestartCoProcessor::readRelaxationFactor(int step)
             vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
 
         // find the necessary block and fill it
-        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3,
-                                             dataSetSmallArray[n].level);
+        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
         block->getKernel()->getDataSet()->setRelaxationFactor(mRelaxationFactor);
     }
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readRelaxationFactor end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     delete[] dataSetSmallArray;
 }
 
-void MPIIORestartCoProcessor::readPhaseField(int step)
+void MPIIORestartCoProcessor::readPhaseField(int step, int fieldN)
 {
     int rank, size;
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readPhaseField start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
     double start, finish;
     if (comm->isRoot())
         start = MPI_Wtime();
 
     MPI_File file_handler;
-    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField.bin";
+    std::string filename;
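+    // fieldN selects which checkpoint file to restore: 1 -> cpPhaseField1.bin, otherwise cpPhaseField2.bin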
+    if(fieldN == 1) filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField1.bin";
+    else filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField2.bin";
     int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
@@ -2427,8 +2492,7 @@ void MPIIORestartCoProcessor::readPhaseField(int step)
                      MPI_STATUS_IGNORE);
 
     DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount];
-    int doubleCountInBlock =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
 
     // define MPI_types depending on the block-specific information
@@ -2439,42 +2503,43 @@ void MPIIORestartCoProcessor::readPhaseField(int step)
     MPI_Offset read_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_read_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_read_offset = read_offset + sizeof(dataSetParam) +
-                               blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             if (rank < size - 1)
                 MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
         }
     }
 
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount,
-                     dataSetSmallType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE);
     if (doubleCountInBlock > 0)
-        MPI_File_read_at(file_handler,
-                         (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+        MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                          &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::readPhaseField time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIORestartCoProcessor::readPhaseField start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     size_t index = 0;
-    size_t nextVectorSize =
-        dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> vectorsOfValues;
-    for (int n = 0; n < blocksCount; n++) {
+
+    for (int n = 0; n < blocksCount; n++)
+    {
         vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
         index += nextVectorSize;
 
@@ -2484,15 +2549,18 @@ void MPIIORestartCoProcessor::readPhaseField(int step)
             vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
 
         // find the necessary block and fill it
-        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3,
-                                             dataSetSmallArray[n].level);
-        block->getKernel()->getDataSet()->setPhaseField(mPhaseField);
+        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
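+        // route the restored values to the first or the second phase field, depending on fieldN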
+        if(fieldN == 1)
+            block->getKernel()->getDataSet()->setPhaseField(mPhaseField);
+        else
+            block->getKernel()->getDataSet()->setPhaseField2(mPhaseField);
+
     }
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    { 
         UBLOG(logINFO, "MPIIORestartCoProcessor::readPhaseField end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     delete[] dataSetSmallArray;
@@ -2504,10 +2572,10 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readBoundaryConds start MPI IO rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
     double start, finish;
     if (comm->isRoot())
@@ -2527,14 +2595,11 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
     // read count of blocks
     MPI_File_read_at(file_handler, read_offset1, &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
     // read count of big BoundaryCondition blocks
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset1 + sizeof(int)), &dataCount1000, 1, MPI_INT,
-                     MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset1 + sizeof(int)), &dataCount1000, 1, MPI_INT, MPI_STATUS_IGNORE);
     // read count of indexContainer values in all blocks
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset1 + 2 * sizeof(int)), &dataCount2, 1, MPI_INT,
-                     MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset1 + 2 * sizeof(int)), &dataCount2, 1, MPI_INT, MPI_STATUS_IGNORE);
     // read count of bcindexmatrix values in every block
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset1 + 3 * sizeof(int)), &boundCondParamStr, 1,
-                     boundCondParamType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset1 + 3 * sizeof(int)), &boundCondParamStr, 1, boundCondParamType, MPI_STATUS_IGNORE);
 
     MPI_Type_contiguous(boundCondParamStr.bcindexmatrixCount, MPI_INT, &bcindexmatrixType);
     MPI_Type_commit(&bcindexmatrixType);
@@ -2550,16 +2615,18 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
     MPI_Offset read_offset  = (MPI_Offset)(size * (3 * sizeof(int) + sizeof(boundCondParam)));
     size_t next_read_offset = 0;
 
-    if (size > 1) {
-        if (rank == 0) {
-            next_read_offset = read_offset + blocksCount * sizeof(BCAddRestart) +
-                               dataCount * sizeof(BoundaryCondition) +
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_read_offset = read_offset + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition) +
                                (blocksCount * boundCondParamStr.bcindexmatrixCount + dataCount2) * sizeof(int);
             MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } else {
+        } 
+        else 
+        {
             MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            next_read_offset = read_offset + blocksCount * sizeof(BCAddRestart) +
-                               dataCount * sizeof(BoundaryCondition) +
+            next_read_offset = read_offset + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition) +
                                (blocksCount * boundCondParamStr.bcindexmatrixCount + dataCount2) * sizeof(int);
             if (rank < size - 1)
                 MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
@@ -2567,27 +2634,21 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
     }
 
     MPI_File_read_at(file_handler, read_offset, bcAddArray, blocksCount, boundCondTypeAdd, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + blocksCount * sizeof(BCAddRestart)), &bcArray[0],
-                     dataCount1000, boundCondType1000, MPI_STATUS_IGNORE);
-    MPI_File_read_at(
-        file_handler,
-        (MPI_Offset)(read_offset + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition)),
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + blocksCount * sizeof(BCAddRestart)), &bcArray[0], dataCount1000, boundCondType1000, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition)),
         &intArray1[0], blocksCount, bcindexmatrixType, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handler,
-                     (MPI_Offset)(read_offset + blocksCount * sizeof(BCAddRestart) +
-                                  dataCount * sizeof(BoundaryCondition) +
-                                  blocksCount * boundCondParamStr.bcindexmatrixCount * sizeof(int)),
-                     &intArray2[0], dataCount2, MPI_INT, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition) +
+                                  blocksCount * boundCondParamStr.bcindexmatrixCount * sizeof(int)), &intArray2[0], dataCount2, MPI_INT, MPI_STATUS_IGNORE);
 
     MPI_File_close(&file_handler);
     MPI_Type_free(&bcindexmatrixType);
 
-    if (comm->isRoot()) {
+    if (comm->isRoot()) 
+    {
         finish = MPI_Wtime();
         UBLOG(logINFO, "MPIIORestartCoProcessor::readBoundaryConds time: " << finish - start << " s");
         UBLOG(logINFO, "MPIIORestartCoProcessor::readBoundaryConds start of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     int index = 0, index1 = 0, index2 = 0;
@@ -2595,16 +2656,19 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
     std::vector<int> bcindexmatrixV;
     std::vector<int> indexContainerV;
 
-    for (int n = 0; n < blocksCount; n++) {
+    for (int n = 0; n < blocksCount; n++) 
+    {
         bcVector.resize(0);
         bcindexmatrixV.resize(0);
         indexContainerV.resize(0);
 
-        for (int ibc = 0; ibc < bcAddArray[n].boundCond_count; ibc++) {
+        for (int ibc = 0; ibc < bcAddArray[n].boundCond_count; ibc++) 
+        {
             SPtr<BoundaryConditions> bc;
             if (memcmp(&bcArray[index], nullBouCond, sizeof(BoundaryCondition)) == 0)
                 bc = SPtr<BoundaryConditions>();
-            else {
+            else 
+            {
                 bc                         = SPtr<BoundaryConditions>(new BoundaryConditions);
                 bc->noslipBoundaryFlags    = bcArray[index].noslipBoundaryFlags;
                 bc->slipBoundaryFlags      = bcArray[index].slipBoundaryFlags;
@@ -2639,8 +2703,7 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
         for (int b2 = 0; b2 < bcAddArray[n].indexContainer_count; b2++)
             indexContainerV.push_back(intArray2[index2++]);
 
-        CbArray3D<int, IndexerX3X2X1> bcim(bcindexmatrixV, boundCondParamStr.nx1, boundCondParamStr.nx2,
-                                           boundCondParamStr.nx3);
+        CbArray3D<int, IndexerX3X2X1> bcim(bcindexmatrixV, boundCondParamStr.nx1, boundCondParamStr.nx2, boundCondParamStr.nx3);
 
         SPtr<Block3D> block = grid->getBlock(bcAddArray[n].x1, bcAddArray[n].x2, bcAddArray[n].x3, bcAddArray[n].level);
         SPtr<BCProcessor> bcProc = bcProcessor->clone(block->getKernel());
@@ -2661,8 +2724,7 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
 
     if (comm->isRoot()) {
         UBLOG(logINFO, "MPIIORestartCoProcessor::readBoundaryConds end of restore of data, rank = " << rank);
-        UBLOG(logINFO, "Physical Memory currently used by current process: "
-                           << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 }
 //////////////////////////////////////////////////////////////////////////
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h
index cbcf8c553943aa325f415cd123ae1fbe0bf4dcf3..57f559769a06d9a87a968ada73fbaba712da789b 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h
@@ -4,6 +4,7 @@
 #include <mpi.h>
 //#include <PointerDefinitions.h>
 #include <string>
+#include <vector>
 
 #include "MPIIOCoProcessor.h"
 #include "MPIIODataStructures.h"
@@ -35,8 +36,8 @@ public:
     void writeAverageTripleArray(int step);
     void writeShearStressValArray(int step);
     void writeRelaxationFactor(int step);
-    void writePhaseField(int step);
-   //! Writes the boundary conditions of the blocks into the file cpBC.bin
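+    //! Writes the phase field with the given index (num = 1 or 2) into its checkpoint file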
+    void writePhaseField(int step, int num);
+    //! Writes the boundary conditions of the blocks into the file cpBC.bin
     void writeBoundaryConds(int step);
 
     //! Reads the blocks of the grid from the file cpBlocks.bin
@@ -49,7 +50,7 @@ public:
     void readAverageTripleArray(int step);
     void readShearStressValArray(int step);
     void readRelaxationFactor(int step);
-    void readPhaseField(int step);
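+    //! Reads the phase field with the given index (num = 1 or 2) from cpPhaseField1.bin or cpPhaseField2.bin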
+    void readPhaseField(int step, int num);
     //! Reads the boundary conditions of the blocks from the file cpBC.bin
     void readBoundaryConds(int step);
     //! The function sets LBMKernel
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/WriteMultiphaseQuantitiesCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/WriteMultiphaseQuantitiesCoProcessor.cpp
index 312ed01adf39ff6eb4aaf0965d8df6763ad3e8d1..73034d88996a3c22d8a3aa9e86517c5cbe92ffc3 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/WriteMultiphaseQuantitiesCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/WriteMultiphaseQuantitiesCoProcessor.cpp
@@ -200,7 +200,7 @@ void WriteMultiphaseQuantitiesCoProcessor::addDataMQ(SPtr<Block3D> block)
 
     // assign node numbers, build the node vector, and collect the data
     CbArray3D<int> nodeNumbers((int)maxX1, (int)maxX2, (int)maxX3, -1);
-    CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr phaseField1(
+    CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr phaseField(
         new CbArray3D<LBMReal, IndexerX3X2X1>(maxX1, maxX2, maxX3, -999.0));
 
     for (int ix3 = minX3; ix3 < maxX3; ix3++) {
@@ -208,7 +208,7 @@ void WriteMultiphaseQuantitiesCoProcessor::addDataMQ(SPtr<Block3D> block)
             for (int ix1 = minX1; ix1 < maxX1; ix1++) {
                 if (!bcArray->isUndefined(ix1, ix2, ix3) && !bcArray->isSolid(ix1, ix2, ix3)) {
                     distributionsH->getDistribution(f, ix1, ix2, ix3);
-                    (*phaseField1)(ix1, ix2, ix3) =
+                    (*phaseField)(ix1, ix2, ix3) =
                         ((f[TNE] + f[BSW]) + (f[TSE] + f[BNW])) + ((f[BSE] + f[TNW]) + (f[TSW] + f[BNE])) +
                         (((f[NE] + f[SW]) + (f[SE] + f[NW])) + ((f[TE] + f[BW]) + (f[BE] + f[TW])) +
                          ((f[BN] + f[TS]) + (f[TN] + f[BS]))) +
@@ -243,7 +243,7 @@ void WriteMultiphaseQuantitiesCoProcessor::addDataMQ(SPtr<Block3D> block)
                     nodes.push_back(UbTupleFloat3(float(worldCoordinates[0]), float(worldCoordinates[1]),
                                                   float(worldCoordinates[2])));
 
-                    phi[REST] = (*phaseField1)(ix1, ix2, ix3);
+                    phi[REST] = (*phaseField)(ix1, ix2, ix3);
 
                     if ((ix1 == 0) || (ix2 == 0) || (ix3 == 0)) {
                         dX1_phi = 0.0;
@@ -254,32 +254,32 @@ void WriteMultiphaseQuantitiesCoProcessor::addDataMQ(SPtr<Block3D> block)
                         // vx2 = 0.0;
                         // vx3 = 0.0;
                     } else {
-                        phi[E]   = (*phaseField1)(ix1 + DX1[E], ix2 + DX2[E], ix3 + DX3[E]);
-                        phi[N]   = (*phaseField1)(ix1 + DX1[N], ix2 + DX2[N], ix3 + DX3[N]);
-                        phi[T]   = (*phaseField1)(ix1 + DX1[T], ix2 + DX2[T], ix3 + DX3[T]);
-                        phi[W]   = (*phaseField1)(ix1 + DX1[W], ix2 + DX2[W], ix3 + DX3[W]);
-                        phi[S]   = (*phaseField1)(ix1 + DX1[S], ix2 + DX2[S], ix3 + DX3[S]);
-                        phi[B]   = (*phaseField1)(ix1 + DX1[B], ix2 + DX2[B], ix3 + DX3[B]);
-                        phi[NE]  = (*phaseField1)(ix1 + DX1[NE], ix2 + DX2[NE], ix3 + DX3[NE]);
-                        phi[NW]  = (*phaseField1)(ix1 + DX1[NW], ix2 + DX2[NW], ix3 + DX3[NW]);
-                        phi[TE]  = (*phaseField1)(ix1 + DX1[TE], ix2 + DX2[TE], ix3 + DX3[TE]);
-                        phi[TW]  = (*phaseField1)(ix1 + DX1[TW], ix2 + DX2[TW], ix3 + DX3[TW]);
-                        phi[TN]  = (*phaseField1)(ix1 + DX1[TN], ix2 + DX2[TN], ix3 + DX3[TN]);
-                        phi[TS]  = (*phaseField1)(ix1 + DX1[TS], ix2 + DX2[TS], ix3 + DX3[TS]);
-                        phi[SW]  = (*phaseField1)(ix1 + DX1[SW], ix2 + DX2[SW], ix3 + DX3[SW]);
-                        phi[SE]  = (*phaseField1)(ix1 + DX1[SE], ix2 + DX2[SE], ix3 + DX3[SE]);
-                        phi[BW]  = (*phaseField1)(ix1 + DX1[BW], ix2 + DX2[BW], ix3 + DX3[BW]);
-                        phi[BE]  = (*phaseField1)(ix1 + DX1[BE], ix2 + DX2[BE], ix3 + DX3[BE]);
-                        phi[BS]  = (*phaseField1)(ix1 + DX1[BS], ix2 + DX2[BS], ix3 + DX3[BS]);
-                        phi[BN]  = (*phaseField1)(ix1 + DX1[BN], ix2 + DX2[BN], ix3 + DX3[BN]);
-                        phi[BSW] = (*phaseField1)(ix1 + DX1[BSW], ix2 + DX2[BSW], ix3 + DX3[BSW]);
-                        phi[BSE] = (*phaseField1)(ix1 + DX1[BSE], ix2 + DX2[BSE], ix3 + DX3[BSE]);
-                        phi[BNW] = (*phaseField1)(ix1 + DX1[BNW], ix2 + DX2[BNW], ix3 + DX3[BNW]);
-                        phi[BNE] = (*phaseField1)(ix1 + DX1[BNE], ix2 + DX2[BNE], ix3 + DX3[BNE]);
-                        phi[TNE] = (*phaseField1)(ix1 + DX1[TNE], ix2 + DX2[TNE], ix3 + DX3[TNE]);
-                        phi[TNW] = (*phaseField1)(ix1 + DX1[TNW], ix2 + DX2[TNW], ix3 + DX3[TNW]);
-                        phi[TSE] = (*phaseField1)(ix1 + DX1[TSE], ix2 + DX2[TSE], ix3 + DX3[TSE]);
-                        phi[TSW] = (*phaseField1)(ix1 + DX1[TSW], ix2 + DX2[TSW], ix3 + DX3[TSW]);
+                        phi[E]   = (*phaseField)(ix1 + DX1[E], ix2 + DX2[E], ix3 + DX3[E]);
+                        phi[N]   = (*phaseField)(ix1 + DX1[N], ix2 + DX2[N], ix3 + DX3[N]);
+                        phi[T]   = (*phaseField)(ix1 + DX1[T], ix2 + DX2[T], ix3 + DX3[T]);
+                        phi[W]   = (*phaseField)(ix1 + DX1[W], ix2 + DX2[W], ix3 + DX3[W]);
+                        phi[S]   = (*phaseField)(ix1 + DX1[S], ix2 + DX2[S], ix3 + DX3[S]);
+                        phi[B]   = (*phaseField)(ix1 + DX1[B], ix2 + DX2[B], ix3 + DX3[B]);
+                        phi[NE]  = (*phaseField)(ix1 + DX1[NE], ix2 + DX2[NE], ix3 + DX3[NE]);
+                        phi[NW]  = (*phaseField)(ix1 + DX1[NW], ix2 + DX2[NW], ix3 + DX3[NW]);
+                        phi[TE]  = (*phaseField)(ix1 + DX1[TE], ix2 + DX2[TE], ix3 + DX3[TE]);
+                        phi[TW]  = (*phaseField)(ix1 + DX1[TW], ix2 + DX2[TW], ix3 + DX3[TW]);
+                        phi[TN]  = (*phaseField)(ix1 + DX1[TN], ix2 + DX2[TN], ix3 + DX3[TN]);
+                        phi[TS]  = (*phaseField)(ix1 + DX1[TS], ix2 + DX2[TS], ix3 + DX3[TS]);
+                        phi[SW]  = (*phaseField)(ix1 + DX1[SW], ix2 + DX2[SW], ix3 + DX3[SW]);
+                        phi[SE]  = (*phaseField)(ix1 + DX1[SE], ix2 + DX2[SE], ix3 + DX3[SE]);
+                        phi[BW]  = (*phaseField)(ix1 + DX1[BW], ix2 + DX2[BW], ix3 + DX3[BW]);
+                        phi[BE]  = (*phaseField)(ix1 + DX1[BE], ix2 + DX2[BE], ix3 + DX3[BE]);
+                        phi[BS]  = (*phaseField)(ix1 + DX1[BS], ix2 + DX2[BS], ix3 + DX3[BS]);
+                        phi[BN]  = (*phaseField)(ix1 + DX1[BN], ix2 + DX2[BN], ix3 + DX3[BN]);
+                        phi[BSW] = (*phaseField)(ix1 + DX1[BSW], ix2 + DX2[BSW], ix3 + DX3[BSW]);
+                        phi[BSE] = (*phaseField)(ix1 + DX1[BSE], ix2 + DX2[BSE], ix3 + DX3[BSE]);
+                        phi[BNW] = (*phaseField)(ix1 + DX1[BNW], ix2 + DX2[BNW], ix3 + DX3[BNW]);
+                        phi[BNE] = (*phaseField)(ix1 + DX1[BNE], ix2 + DX2[BNE], ix3 + DX3[BNE]);
+                        phi[TNE] = (*phaseField)(ix1 + DX1[TNE], ix2 + DX2[TNE], ix3 + DX3[TNE]);
+                        phi[TNW] = (*phaseField)(ix1 + DX1[TNW], ix2 + DX2[TNW], ix3 + DX3[TNW]);
+                        phi[TSE] = (*phaseField)(ix1 + DX1[TSE], ix2 + DX2[TSE], ix3 + DX3[TSE]);
+                        phi[TSW] = (*phaseField)(ix1 + DX1[TSW], ix2 + DX2[TSW], ix3 + DX3[TSW]);
                         dX1_phi  = 0.0 * gradX1_phi(phi);
                         dX2_phi  = 0.0 * gradX2_phi(phi);
                         dX3_phi  = 0.0 * gradX3_phi(phi);
diff --git a/src/cpu/VirtualFluidsCore/Connectors/FullDirectConnector.h b/src/cpu/VirtualFluidsCore/Connectors/FullDirectConnector.h
index e7bfaa2c325301177826dac671c37f9cf59ad63e..6d8877ee909183dcb4088ccb77f6726e83447ba8 100644
--- a/src/cpu/VirtualFluidsCore/Connectors/FullDirectConnector.h
+++ b/src/cpu/VirtualFluidsCore/Connectors/FullDirectConnector.h
@@ -49,7 +49,7 @@ public:
 
 protected:
     virtual inline void updatePointers() = 0;
-    virtual void exchangeData();
+    void exchangeData();
     virtual inline void exchangeData(int x1From, int x2From, int x3From, int x1To, int x2To, int x3To) = 0;
 
     int maxX1;
diff --git a/src/cpu/VirtualFluidsCore/Connectors/OneDistributionFullVectorConnector.cpp b/src/cpu/VirtualFluidsCore/Connectors/OneDistributionFullVectorConnector.cpp
index adda86a09e81075af093211561d7f0ee023e94f5..739efcddb9ceea5c0951df83833d64ad90bb02c5 100644
--- a/src/cpu/VirtualFluidsCore/Connectors/OneDistributionFullVectorConnector.cpp
+++ b/src/cpu/VirtualFluidsCore/Connectors/OneDistributionFullVectorConnector.cpp
@@ -12,6 +12,8 @@ OneDistributionFullVectorConnector::OneDistributionFullVectorConnector(SPtr<Bloc
 //////////////////////////////////////////////////////////////////////////
 void OneDistributionFullVectorConnector::init()
 {
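+    // call the base-class init first; presumably it sets up the shared connector state before the distribution pointer is fetched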
+    FullVectorConnector::init();
+    
     fDis = dynamicPointerCast<EsoTwist3D>(block.lock()->getKernel()->getDataSet()->getFdistributions());
 
     int anz = 27;
diff --git a/src/cpu/VirtualFluidsCore/Connectors/ThreeDistributionsFullVectorConnector.cpp b/src/cpu/VirtualFluidsCore/Connectors/ThreeDistributionsFullVectorConnector.cpp
index af6c20d3229a77b00b4d79df9197f8ad9f11f84d..2e726fc7b88c9ef229e503924eadcf53a9b06dfd 100644
--- a/src/cpu/VirtualFluidsCore/Connectors/ThreeDistributionsFullVectorConnector.cpp
+++ b/src/cpu/VirtualFluidsCore/Connectors/ThreeDistributionsFullVectorConnector.cpp
@@ -50,6 +50,8 @@ ThreeDistributionsFullVectorConnector::ThreeDistributionsFullVectorConnector(SPt
 //////////////////////////////////////////////////////////////////////////
 void ThreeDistributionsFullVectorConnector::init()
 {
+   FullVectorConnector::init();
+
    fDis = dynamicPointerCast<EsoTwist3D>(block.lock()->getKernel()->getDataSet()->getFdistributions());
    hDis = dynamicPointerCast<EsoTwist3D>(block.lock()->getKernel()->getDataSet()->getHdistributions());
    h2Dis = dynamicPointerCast<EsoTwist3D>(block.lock()->getKernel()->getDataSet()->getH2distributions());
diff --git a/src/cpu/VirtualFluidsCore/Connectors/TwoDistributionsFullVectorConnector.cpp b/src/cpu/VirtualFluidsCore/Connectors/TwoDistributionsFullVectorConnector.cpp
index a37bff3fc3b34667555ce02200fd2296b7514032..7fe8bc3643c337323ef25ee35c260597744e6191 100644
--- a/src/cpu/VirtualFluidsCore/Connectors/TwoDistributionsFullVectorConnector.cpp
+++ b/src/cpu/VirtualFluidsCore/Connectors/TwoDistributionsFullVectorConnector.cpp
@@ -50,6 +50,8 @@ TwoDistributionsFullVectorConnector::TwoDistributionsFullVectorConnector(SPtr<Bl
 //////////////////////////////////////////////////////////////////////////
 void TwoDistributionsFullVectorConnector::init()
 {
+   FullVectorConnector::init();
+
    fDis = dynamicPointerCast<EsoTwist3D>(block.lock()->getKernel()->getDataSet()->getFdistributions());
    hDis = dynamicPointerCast<EsoTwist3D>(block.lock()->getKernel()->getDataSet()->getHdistributions());
 
diff --git a/src/cpu/VirtualFluidsCore/Data/DataSet3D.h b/src/cpu/VirtualFluidsCore/Data/DataSet3D.h
index 1563f415a5c4043fdd7249362bbc2b053acb6137..e53e38a74daea2a2a40ca53eff1aa1f4febcc27a 100644
--- a/src/cpu/VirtualFluidsCore/Data/DataSet3D.h
+++ b/src/cpu/VirtualFluidsCore/Data/DataSet3D.h
@@ -55,8 +55,8 @@ public:
     SPtr<DistributionArray3D> getHdistributions() const;
     void setHdistributions(SPtr<DistributionArray3D> distributions);
 
-    SPtr<DistributionArray3D> getH1distributions() const;
-    void setH1distributions(SPtr<DistributionArray3D> distributions);
+    //SPtr<DistributionArray3D> getH1distributions() const;
+    //void setH1distributions(SPtr<DistributionArray3D> distributions);
 
     SPtr<DistributionArray3D> getH2distributions() const;
     void setH2distributions(SPtr<DistributionArray3D> distributions);
@@ -91,23 +91,18 @@ public:
 protected:
 private:
     SPtr<DistributionArray3D> fdistributions;
-
     SPtr<DistributionArray3D> hdistributions;
-
-    SPtr<DistributionArray3D> h1distributions;
+    //SPtr<DistributionArray3D> h1distributions;
     SPtr<DistributionArray3D> h2distributions;
-
+ 
     SPtr<AverageValuesArray3D> averageValues;
-
     SPtr<AverageValuesArray3D> averageDensity;
     SPtr<AverageValuesArray3D> averageVelocity;
     SPtr<AverageValuesArray3D> averageFluktuations;
     SPtr<AverageValuesArray3D> averageTriplecorrelations;
-
     SPtr<ShearStressValuesArray3D> shearStressValues;
 
     SPtr<RelaxationFactorArray3D> relaxationFactor;
-
     SPtr<PhaseFieldArray3D> phaseField;
     SPtr<PhaseFieldArray3D> phaseField2;
 };
@@ -120,9 +115,9 @@ inline SPtr<DistributionArray3D> DataSet3D::getHdistributions() const { return h
 
 inline void DataSet3D::setHdistributions(SPtr<DistributionArray3D> distributions) { hdistributions = distributions; }
 
-inline SPtr<DistributionArray3D> DataSet3D::getH1distributions() const { return h1distributions; }
-
-inline void DataSet3D::setH1distributions(SPtr<DistributionArray3D> distributions) { h1distributions = distributions; }
+//inline SPtr<DistributionArray3D> DataSet3D::getH1distributions() const { return h1distributions; }
+//
+//inline void DataSet3D::setH1distributions(SPtr<DistributionArray3D> distributions) { h1distributions = distributions; }
 
 inline SPtr<DistributionArray3D> DataSet3D::getH2distributions() const { return h2distributions; }
 
diff --git a/src/cpu/VirtualFluidsCore/LBM/MultiphaseScratchCumulantLBMKernel.cpp b/src/cpu/VirtualFluidsCore/LBM/MultiphaseScratchCumulantLBMKernel.cpp
index a62fef298fc0c18eb053c1138949ab1fb8e58ead..f0cfacbf8a54476e2789080aa6072d4b8487cf7d 100644
--- a/src/cpu/VirtualFluidsCore/LBM/MultiphaseScratchCumulantLBMKernel.cpp
+++ b/src/cpu/VirtualFluidsCore/LBM/MultiphaseScratchCumulantLBMKernel.cpp
@@ -284,10 +284,25 @@ void MultiphaseScratchCumulantLBMKernel::calculate(int step)
                         LBMReal dX2_phi = gradX2_phi();
                         LBMReal dX3_phi = gradX3_phi();
 
+
                         LBMReal denom = sqrt(dX1_phi * dX1_phi + dX2_phi * dX2_phi + dX3_phi * dX3_phi) + 1e-9;
                         LBMReal normX1 = dX1_phi/denom;
 						LBMReal normX2 = dX2_phi/denom;
-						LBMReal normX3 = dX3_phi/denom;
+						LBMReal normX3 = dX3_phi/denom; 
+
+
+						/// test: compare the gradient magnitude against the value implied by the phase indicator
+						//if (fabs((1.0 - phi[REST]) * (phi[REST]) */* c4*/ - (denom- 1e-9)) / denom > 1e-3 &&phi[REST]>0.4 &&phi[REST]<0.6) {
+						//	std::cout << (1.0 - phi[REST]) * (phi[REST])  // *c4 
+						//		<< " " << denom <<" "<< ((1.0 - phi[REST]) * (phi[REST]) * c4 ) / denom << std::endl;
+						//}
+						//dX1_phi = (1.0 - phi[REST]) * (phi[REST]) /* c4 */* normX1;
+						//dX2_phi = (1.0 - phi[REST]) * (phi[REST]) /* c4 */* normX2;
+						//dX3_phi = (1.0 - phi[REST]) * (phi[REST]) /* c4 */* normX3;
+
+						//denom = 1.0;
+
+						///!test
 
 						collFactorM = collFactorL + (collFactorL - collFactorG) * (phi[REST] - phiH) / (phiH - phiL);
 
@@ -663,12 +678,23 @@ void MultiphaseScratchCumulantLBMKernel::calculate(int step)
 			   // Cumulants
 			   ////////////////////////////////////////////////////////////////////////////////////
 			   LBMReal OxxPyyPzz = 1.; //omega2 or bulk viscosity
-			   LBMReal OxyyPxzz = 1.;//-s9;//2+s9;//
-			   LBMReal OxyyMxzz  = 1.;//2+s9;//
-			   LBMReal O4 = 1.;
+			   //LBMReal OxyyPxzz = 2.0 - collFactorM;// 1.;//-s9;//2+s9;//
+			   //LBMReal OxyyMxzz  = 2.0 - collFactorM;// 1.;//2+s9;//
+			   LBMReal O4 = 1.0;//collFactorM;// 1.;
 			   LBMReal O5 = 1.;
 			   LBMReal O6 = 1.;
 
+
+			   ///// fourth-order parameters; here only for testing. Move out of the loop!
+
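+			   // assumed: fourth-order-accurate relaxation rates of the parameterized cumulant kernel; OxyyPxzz, OxyyMxzz, Oxyz and the factors A, B depend only on collFactorM and OxxPyyPzz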
+			   LBMReal OxyyPxzz = 8.0 * (collFactorM - 2.0) * (OxxPyyPzz * (3.0 * collFactorM - 1.0) - 5.0 * collFactorM) / (8.0 * (5.0 - 2.0 * collFactorM) * collFactorM + OxxPyyPzz * (8.0 + collFactorM * (9.0 * collFactorM - 26.0)));
+			   LBMReal OxyyMxzz = 8.0 * (collFactorM - 2.0) * (collFactorM + OxxPyyPzz * (3.0 * collFactorM - 7.0)) / (OxxPyyPzz * (56.0 - 42.0 * collFactorM + 9.0 * collFactorM * collFactorM) - 8.0 * collFactorM);
+			   LBMReal Oxyz = 24.0 * (collFactorM - 2.0) * (4.0 * collFactorM * collFactorM + collFactorM * OxxPyyPzz * (18.0 - 13.0 * collFactorM) + OxxPyyPzz * OxxPyyPzz * (2.0 + collFactorM * (6.0 * collFactorM - 11.0))) / (16.0 * collFactorM * collFactorM * (collFactorM - 6.0) - 2.0 * collFactorM * OxxPyyPzz * (216.0 + 5.0 * collFactorM * (9.0 * collFactorM - 46.0)) + OxxPyyPzz * OxxPyyPzz * (collFactorM * (3.0 * collFactorM - 10.0) * (15.0 * collFactorM - 28.0) - 48.0));
+			   LBMReal A = (4.0 * collFactorM * collFactorM + 2.0 * collFactorM * OxxPyyPzz * (collFactorM - 6.0) + OxxPyyPzz * OxxPyyPzz * (collFactorM * (10.0 - 3.0 * collFactorM) - 4.0)) / ((collFactorM - OxxPyyPzz) * (OxxPyyPzz * (2.0 + 3.0 * collFactorM) - 8.0 * collFactorM));
+			   //FIXME:  warning C4459: declaration of 'B' hides global declaration (message : see declaration of 'D3Q27System::B' )
+			   LBMReal B = (4.0 * collFactorM * OxxPyyPzz * (9.0 * collFactorM - 16.0) - 4.0 * collFactorM * collFactorM - 2.0 * OxxPyyPzz * OxxPyyPzz * (2.0 + 9.0 * collFactorM * (collFactorM - 2.0))) / (3.0 * (collFactorM - OxxPyyPzz) * (OxxPyyPzz * (2.0 + 3.0 * collFactorM) - 8.0 * collFactorM));
+
+
 			   //Cum 4.
 			   //LBMReal CUMcbb = mfcbb - ((mfcaa + c1o3 * oMdrho) * mfabb + 2. * mfbba * mfbab); // till 18.05.2015
 			   //LBMReal CUMbcb = mfbcb - ((mfaca + c1o3 * oMdrho) * mfbab + 2. * mfbba * mfabb); // till 18.05.2015
@@ -719,11 +745,15 @@ void MultiphaseScratchCumulantLBMKernel::calculate(int step)
 			   mfbab += c1o6 * (dX1_phi * vvz + dX3_phi * vvx) * correctionScaling;
 			   mfbba += c1o6 * (dX1_phi * vvy + dX2_phi * vvx) * correctionScaling;
 
-			   LBMReal dxux = 0.0;// -c1o2 * collFactorM * (mxxMyy + mxxMzz) + c1o2 * OxxPyyPzz * (/*mfaaa*/ -mxxPyyPzz);
-			   LBMReal dyuy = 0.0;// dxux + collFactorM * c3o2 * mxxMyy;
-			   LBMReal dzuz = 0.0;// dxux + collFactorM * c3o2 * mxxMzz;
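+			   // diagonal velocity-gradient components estimated from the second-order moments (previously hard-coded to zero)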
+			   LBMReal dxux = -c1o2 * collFactorM * (mxxMyy + mxxMzz) + c1o2 * OxxPyyPzz * (/*mfaaa*/ -mxxPyyPzz);
+			   LBMReal dyuy =  dxux + collFactorM * c3o2 * mxxMyy;
+			   LBMReal dzuz =  dxux + collFactorM * c3o2 * mxxMzz;
 
-			   //relax
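+			   // off-diagonal (shear) velocity-gradient estimates from the cross second-order moments; used below in the fourth-order cumulant targets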
+			   LBMReal Dxy = -three * collFactorM * mfbba;
+			   LBMReal Dxz = -three * collFactorM * mfbab;
+			   LBMReal Dyz = -three * collFactorM * mfabb;
+
+			   ////relax unfiltered
 			   mxxPyyPzz += OxxPyyPzz * (/*mfaaa*/ - mxxPyyPzz) - 3. * (1. - c1o2 * OxxPyyPzz) * (vx2 * dxux + vy2 * dyuy + vz2 * dzuz);
 			   mxxMyy += collFactorM * (-mxxMyy) - 3. * (1. - c1o2 * collFactorM) * (vx2 * dxux - vy2 * dyuy);
 			   mxxMzz += collFactorM * (-mxxMzz) - 3. * (1. - c1o2 * collFactorM) * (vx2 * dxux - vz2 * dzuz);
@@ -732,6 +762,25 @@ void MultiphaseScratchCumulantLBMKernel::calculate(int step)
 			   mfbab += collFactorM * (-mfbab);
 			   mfbba += collFactorM * (-mfbba);
 
+
+			   //relax filtered
+			   //LBMReal interfaceFilter=0.001;
+			   //LBMReal interfaceFactor = c1;// (dX1_phi * dX1_phi + dX2_phi * dX2_phi + dX3_phi * dX3_phi);
+
+			   //mxxPyyPzz += OxxPyyPzz * (/*mfaaa*/ -mxxPyyPzz) - 3. * (1. - c1o2 * OxxPyyPzz) * (vx2 * dxux + vy2 * dyuy + vz2 * dzuz);
+			   //
+			   //wadjust = collFactorM + (1. - collFactorM) * fabs(mxxMyy) / (fabs(mxxMyy) * interfaceFactor + interfaceFilter)* interfaceFactor;
+			   //mxxMyy += wadjust * (-mxxMyy);// -3. * (1. - c1o2 * collFactorM) * (vx2 * dxux - vy2 * dyuy);
+			   //wadjust = collFactorM + (1. - collFactorM) * fabs(mxxMzz) / (fabs(mxxMzz) * interfaceFactor + interfaceFilter) * interfaceFactor;
+			   //mxxMzz += wadjust * (-mxxMzz);// -3. * (1. - c1o2 * collFactorM) * (vx2 * dxux - vz2 * dzuz);
+
+			   //wadjust = collFactorM + (1. - collFactorM) * fabs(mfabb) / (fabs(mfabb) * interfaceFactor + interfaceFilter) * interfaceFactor;
+			   //mfabb += wadjust * (-mfabb);
+			   //wadjust = collFactorM + (1. - collFactorM) * fabs(mfbab) / (fabs(mfbab) * interfaceFactor + interfaceFilter) * interfaceFactor;
+			   //mfbab += wadjust * (-mfbab);
+			   //wadjust = collFactorM + (1. - collFactorM) * fabs(mfbba) / (fabs(mfbba) * interfaceFactor + interfaceFilter) * interfaceFactor;
+			   //mfbba += wadjust * (-mfbba);
+
 			   //applying phase field gradients second part:
 			   //mxxPyyPzz += c2o3 * rhoToPhi * (dX1_phi * vvx + dX2_phi * vvy + dX3_phi * vvz);
 			   mxxPyyPzz += (1.0 / 6.0) * (dX1_phi * vvx + dX2_phi * vvy + dX3_phi * vvz) * correctionScaling; // As in Hesam's code
@@ -763,7 +812,7 @@ void MultiphaseScratchCumulantLBMKernel::calculate(int step)
 			   LBMReal mxyyMxzz = mfbca - mfbac;
 
 			   //relax
-			   wadjust = OxyyMxzz + (1. - OxyyMxzz) * fabs(mfbbb) / (fabs(mfbbb) + qudricLimit);
+			   wadjust = Oxyz + (1. - Oxyz) * fabs(mfbbb) / (fabs(mfbbb) + qudricLimit);
 			   mfbbb += wadjust * (-mfbbb);
 			   wadjust = OxyyPxzz + (1. - OxyyPxzz) * fabs(mxxyPyzz) / (fabs(mxxyPyzz) + qudricLimit);
 			   mxxyPyzz += wadjust * (-mxxyPyzz);
@@ -787,19 +836,71 @@ void MultiphaseScratchCumulantLBMKernel::calculate(int step)
 			   mfbac = (-mxyyMxzz + mxyyPxzz) * c1o2;
 
 			   //4.
-			   CUMacc += O4 * (-CUMacc);
-			   CUMcac += O4 * (-CUMcac);
-			   CUMcca += O4 * (-CUMcca);
+			   //CUMacc += O4 * (-CUMacc);
+			   //CUMcac += O4 * (-CUMcac);
+			   //CUMcca += O4 * (-CUMcca);
+
+			   //CUMbbc += O4 * (-CUMbbc);
+			   //CUMbcb += O4 * (-CUMbcb);
+			   //CUMcbb += O4 * (-CUMcbb);
 
-			   CUMbbc += O4 * (-CUMbbc);
-			   CUMbcb += O4 * (-CUMbcb);
-			   CUMcbb += O4 * (-CUMcbb);
+
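+			   // relax the fourth-order cumulants toward gradient-dependent targets (scaled by A and B) instead of toward zero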
+			   CUMacc = -O4 * (one / collFactorM - c1o2) * (dyuy + dzuz) * c2o3 * A + (one - O4) * (CUMacc);
+			   CUMcac = -O4 * (one / collFactorM - c1o2) * (dxux + dzuz) * c2o3 * A + (one - O4) * (CUMcac);
+			   CUMcca = -O4 * (one / collFactorM - c1o2) * (dyuy + dxux) * c2o3 * A + (one - O4) * (CUMcca);
+			   CUMbbc = -O4 * (one / collFactorM - c1o2) * Dxy * c1o3 * B + (one - O4) * (CUMbbc);
+			   CUMbcb = -O4 * (one / collFactorM - c1o2) * Dxz * c1o3 * B + (one - O4) * (CUMbcb);
+			   CUMcbb = -O4 * (one / collFactorM - c1o2) * Dyz * c1o3 * B + (one - O4) * (CUMcbb);
+
+
+
+
+			   //CUMacc -= (one / collFactorM - c1o2) * (dyuy + dzuz) * c2o3 * A ;
+			   //CUMcac -= (one / collFactorM - c1o2) * (dxux + dzuz) * c2o3 * A ;
+			   //CUMcca -= (one / collFactorM - c1o2) * (dyuy + dxux) * c2o3 * A ;
+			   //CUMbbc -= (one / collFactorM - c1o2) * Dxy * c1o3 * B ;
+			   //CUMbcb -= (one / collFactorM - c1o2) * Dxz * c1o3 * B ;
+			   //CUMcbb -= (one / collFactorM - c1o2) * Dyz * c1o3 * B ;
+
+			   //wadjust = O4 + (1. - O4) * fabs(CUMacc) / (fabs(CUMacc) + qudricLimit);
+			   //CUMacc += wadjust * (-CUMacc);
+			   //wadjust = O4 + (1. - O4) * fabs(CUMcac) / (fabs(CUMcac) + qudricLimit);
+			   //CUMcac += wadjust * (-CUMcac);
+			   //wadjust = O4 + (1. - O4) * fabs(CUMcca) / (fabs(CUMcca) + qudricLimit);
+			   //CUMcca += wadjust * (-CUMcca);
+			   //wadjust = O4 + (1. - O4) * fabs(CUMbbc) / (fabs(CUMbbc) + qudricLimit);
+			   //CUMbbc += wadjust * (-CUMbbc);
+			   //wadjust = O4 + (1. - O4) * fabs(CUMbcb) / (fabs(CUMbcb) + qudricLimit);
+			   //CUMbcb += wadjust * (-CUMbcb);
+			   //wadjust = O4 + (1. - O4) * fabs(CUMcbb) / (fabs(CUMcbb) + qudricLimit);
+			   //CUMcbb += wadjust * (-CUMcbb);
+
+
+
+
+
+
+			   //CUMacc += (one / collFactorM - c1o2) * (dyuy + dzuz) * c2o3 * A;
+			   //CUMcac += (one / collFactorM - c1o2) * (dxux + dzuz) * c2o3 * A;
+			   //CUMcca += (one / collFactorM - c1o2) * (dyuy + dxux) * c2o3 * A;
+			   //CUMbbc += (one / collFactorM - c1o2) * Dxy * c1o3 * B;
+			   //CUMbcb += (one / collFactorM - c1o2) * Dxz * c1o3 * B;
+			   //CUMcbb += (one / collFactorM - c1o2) * Dyz * c1o3 * B;
 
 			   //5.
 			   CUMbcc += O5 * (-CUMbcc);
 			   CUMcbc += O5 * (-CUMcbc);
 			   CUMccb += O5 * (-CUMccb);
 
+
+			   //wadjust = O5 + (1. - O5) * fabs(CUMbcc) / (fabs(CUMbcc) + qudricLimit);
+			   //CUMbcc += wadjust * (-CUMbcc);
+			   //wadjust = O5 + (1. - O5) * fabs(CUMcbc) / (fabs(CUMcbc) + qudricLimit);
+			   //CUMbcc += wadjust * (-CUMcbc);
+			   //wadjust = O5 + (1. - O5) * fabs(CUMccb) / (fabs(CUMccb) + qudricLimit);
+			   //CUMbcc += wadjust * (-CUMccb);
+
+
 			   //6.
 			   CUMccc += O6 * (-CUMccc);
 
diff --git a/src/cpu/VirtualFluidsCore/Parallel/MPIIODataStructures.h b/src/cpu/VirtualFluidsCore/Parallel/MPIIODataStructures.h
index 8b284fc2f768472a4115c61cd567ce0b37b7f4e9..c8bd2d0797af86858b40a1a29a154107f04e46c8 100644
--- a/src/cpu/VirtualFluidsCore/Parallel/MPIIODataStructures.h
+++ b/src/cpu/VirtualFluidsCore/Parallel/MPIIODataStructures.h
@@ -59,6 +59,9 @@ struct dataSetParam {
 struct DataSetRestart {
     double collFactor;
     double deltaT;
+    double collFactorL; // for Multiphase model
+    double collFactorG; // for Multiphase model
+    double densityRatio; // for Multiphase model
     int x1;
     int x2;
     int x3;
@@ -74,6 +77,9 @@ struct DataSetRestart {
 struct DataSetMigration {
     double collFactor;
     double deltaT;
+    double collFactorL; // for Multiphase model
+    double collFactorG; // for Multiphase model
+    double densityRatio; // for Multiphase model
     int globalID;
     int ghostLayerWidth;
     bool compressible;
@@ -164,7 +170,8 @@ struct DSArraysPresence {
     bool isAverageTripleArrayPresent;
     bool isShearStressValArrayPresent;
     bool isRelaxationFactorPresent;
-    bool isPhaseFieldPresent;
+    bool isPhaseField1Present;
+    bool isPhaseField2Present;
 };
 } // namespace MPIIODataStructures
 #endif
\ No newline at end of file
diff --git a/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.cpp b/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.cpp
index 914659afa685814842904e9622c31b875d6a2207..b66eff480e99102edf332cfd750e0d2b6965ba83 100644
--- a/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.cpp
+++ b/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.cpp
@@ -77,7 +77,7 @@ CheckpointConverter::CheckpointConverter(SPtr<Grid3D> grid, const std::string &p
     //---------------------------------------
 
     MPI_Datatype typesDataSetRead[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR };
-    int blocksDataSetRead[3]         = { 2, 5, 2 };
+    int blocksDataSetRead[3]         = { 3, 5, 2 };
     MPI_Aint offsetsDataSetRead[3], lbDataSetRead, extentDataSetRead;
 
     offsetsDataSetRead[0] = 0;
@@ -358,6 +358,7 @@ void CheckpointConverter::convertDataSet(int step, int procCount)
             dataSetWriteArray[nb].deltaT          = dataSetReadArray[nb].deltaT;
             dataSetWriteArray[nb].compressible    = dataSetReadArray[nb].compressible;
             dataSetWriteArray[nb].withForcing     = dataSetReadArray[nb].withForcing;
+//            dataSetWriteArray[nb].densityRatio    = dataSetReadArray[nb].densityRatio;
 
             write_offset = (MPI_Offset)(3 * sizeof(dataSetParam) + dataSetWriteArray[nb].globalID * sizeofOneDataSet);
             MPI_File_write_at(file_handlerW, write_offset, &dataSetWriteArray[nb], 1, dataSetTypeWrite,