diff --git a/apps/cpu/LaminarTubeFlow/ltf.cfg b/apps/cpu/LaminarTubeFlow/ltf.cfg
index 8b8e33e4998835da80d2121925acc7d95c3ccd20..e28d6adc3893dd9055d05835028a5eaa508eb958 100644
--- a/apps/cpu/LaminarTubeFlow/ltf.cfg
+++ b/apps/cpu/LaminarTubeFlow/ltf.cfg
@@ -16,11 +16,11 @@ Re = 10
 
 logToFile = false
 
-newStart = true
-restartStep = 100000
+newStart = false
+restartStep = 10
 
-cpStart = 100000
-cpStep = 100000
+cpStart = 10
+cpStep = 10
 
 outTime = 1000
 endTime = 1000
\ No newline at end of file
diff --git a/apps/cpu/LaminarTubeFlow/ltf.cpp b/apps/cpu/LaminarTubeFlow/ltf.cpp
index e523dd2de7416ea5189dbceab200725d89f15424..b108e49be44762a8c085754f91a36d22babea098 100644
--- a/apps/cpu/LaminarTubeFlow/ltf.cpp
+++ b/apps/cpu/LaminarTubeFlow/ltf.cpp
@@ -105,11 +105,17 @@ void run(string configname)
       kernel->setBCProcessor(bcProc);
 
       //////////////////////////////////////////////////////////////////////////
+      SPtr<Grid3DVisitor> metisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::B));
       //restart
       SPtr<UbScheduler> mSch(new UbScheduler(cpStep, cpStart));
-      SPtr<MPIIOMigrationCoProcessor> migCoProcessor(new MPIIOMigrationCoProcessor(grid, mSch, pathname + "/mig", comm));
+      //SPtr<MPIIOMigrationCoProcessor> migCoProcessor(new MPIIOMigrationCoProcessor(grid, mSch, metisVisitor, pathname + "/mig", comm));
+      SPtr<MPIIOMigrationBECoProcessor> migCoProcessor(new MPIIOMigrationBECoProcessor(grid, mSch, metisVisitor, pathname + "/mig", comm));
       migCoProcessor->setLBMKernel(kernel);
       migCoProcessor->setBCProcessor(bcProc);
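+      // assumed: restart needs the kernel's fluid parameters (viscosity; liquid/gas viscosities and density ratio for multiphase) to rebuild kernels on load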
+      migCoProcessor->setNu(nuLB);
+      migCoProcessor->setNuLG(0.01, 0.01);
+      migCoProcessor->setDensityRatio(1);
       //////////////////////////////////////////////////////////////////////////
 
       SPtr<D3Q27Interactor> inflowInt;
@@ -212,7 +217,7 @@ void run(string configname)
          //outflow
          SPtr<D3Q27Interactor> outflowInt = SPtr<D3Q27Interactor>(new D3Q27Interactor(geoOutflow, grid, denBCAdapter, Interactor3D::SOLID));
 
-         SPtr<Grid3DVisitor> metisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::B));
+         //SPtr<Grid3DVisitor> metisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::B));
          InteractorsHelper intHelper(grid, metisVisitor);
          intHelper.addInteractor(cylinderInt);
          intHelper.addInteractor(inflowInt);
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp
index a3572c8c40ed63144080c1803d728393eaf30547..bc25bf37fad165e658967313bf6f5d9893a5559e 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp
@@ -55,7 +55,7 @@ MPIIOCoProcessor::MPIIOCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const
     //-----------------------------------------------------------------------
 
     MPI_Datatype typesBC[3] = { MPI_LONG_LONG_INT, MPI_FLOAT, MPI_CHAR };
-    int blocksBC[3]         = { 5, 38, 1 };
+    int blocksBC[3]         = { 5, 33, 1 };
     MPI_Aint offsetsBC[3], lbBC, extentBC;
 
     offsetsBC[0] = 0;
@@ -422,8 +422,7 @@ void MPIIOCoProcessor::clearAllFiles(int step)
     MPI_File_set_size(file_handler, new_size);
     MPI_File_close(&file_handler);
 
-    std::string filename6 =
-        path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageFluktuationsArray.bin";
+    std::string filename6 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageFluktuationsArray.bin";
     // MPI_File_delete(filename6.c_str(), info);
     int rc6 = MPI_File_open(MPI_COMM_WORLD, filename6.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
     if (rc6 != MPI_SUCCESS)
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp
index d82c594e6b127f2ade7979f8945b057e7ef6db6c..366166a86d125f936f747a57d365a96a6daf6edc 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp
@@ -25,10 +25,12 @@ using namespace MPIIODataStructures;
 #define MESSAGE_TAG 80
 #define SEND_BLOCK_SIZE 100000
 
-MPIIOMigrationBECoProcessor::MPIIOMigrationBECoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, SPtr<Communicator> comm)
+MPIIOMigrationBECoProcessor::MPIIOMigrationBECoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, SPtr<Grid3DVisitor> mV, const std::string &path, SPtr<Communicator> comm)
     : MPIIOCoProcessor(grid, s, path, comm), nue(-999.999), nuL(-999.999), nuG(-999.999), densityRatio(-999.999)
 {
     memset(&boundCondParamStr, 0, sizeof(boundCondParamStr));
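+    // keep the caller's partitioning visitor; restart() reuses it instead of building a new one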
+    metisVisitor = mV;
 
     //-------------------------   define MPI types  ---------------------------------
 
@@ -116,7 +117,7 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
     int firstGlobalID;
     std::vector<double> doubleValuesArrayF; // double-values (arrays of f's) in all blocks  Fdistribution
     std::vector<double> doubleValuesArrayH1; // double-values (arrays of f's) in all blocks  H1distribution
-    // std::vector<double> doubleValuesArrayH2; // double-values (arrays of f's) in all blocks  H2distribution
+    std::vector<double> doubleValuesArrayH2; // double-values (arrays of f's) in all blocks  H2distribution
 
     if (comm->isRoot()) 
     {
@@ -124,7 +125,8 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
         UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
-    bool multiPhase = false;
+    bool multiPhase1 = false;
+    bool multiPhase2 = false;
     DSArraysPresence arrPresence;
     bool firstBlock        = true;
     int doubleCountInBlock = 0;
@@ -146,19 +148,21 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
             D3Q27EsoTwist3DSplittedVectorPtrH1 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getHdistributions());
             if (D3Q27EsoTwist3DSplittedVectorPtrH1 != 0)
             {
-                multiPhase = true;
+                multiPhase1 = true;
                 localDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getLocalDistributions();
                 nonLocalDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getNonLocalDistributions();
                 zeroDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getZeroDistributions();
             }
 
-            /*D3Q27EsoTwist3DSplittedVectorPtrH2 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getH2distributions());
+            D3Q27EsoTwist3DSplittedVectorPtrH2 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getH2distributions());
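+            // a successful cast means this kernel carries a second (H2) distribution set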
             if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
             {
+                multiPhase2 = true;
                 localDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getLocalDistributions();
                 nonLocalDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getNonLocalDistributions();
                 zeroDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getZeroDistributions();
-            }*/
+            }
 
 
             if (firstBlock) // && block->getKernel()) // when first (any) valid block...
@@ -253,7 +256,7 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
             if (zeroDistributionsF && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
                 doubleValuesArrayF.insert(doubleValuesArrayF.end(), zeroDistributionsF->getDataVector().begin(), zeroDistributionsF->getDataVector().end());
 
-            if (multiPhase)
+            if (multiPhase1)
             {
                 if (localDistributionsH1 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
                     doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), localDistributionsH1->getDataVector().begin(), localDistributionsH1->getDataVector().end());
@@ -263,7 +266,7 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
                     doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), zeroDistributionsH1->getDataVector().begin(), zeroDistributionsH1->getDataVector().end());
             }
 
-            /*if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
+            if (multiPhase2)
             {
                 if (localDistributionsH2 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
                 doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), localDistributionsH2->getDataVector().begin(), localDistributionsH2->getDataVector().end());
@@ -271,7 +274,7 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
                 doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), nonLocalDistributionsH2->getDataVector().begin(), nonLocalDistributionsH2->getDataVector().end());
                 if (zeroDistributionsH2 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
                 doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), zeroDistributionsH2->getDataVector().begin(), zeroDistributionsH2->getDataVector().end());
-            }*/
+            }
 
             ic++;
         }
@@ -316,7 +319,7 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
     MPI_File_close(&file_handler);
 
     //-------------------------------- H1 ------------------------------------------------
-    if (multiPhase)
+    if (multiPhase1)
     {
         filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin";
         rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
@@ -331,7 +334,7 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
     }
 
     //-------------------------------- H2 --------------------------------------------------
-    /*if (D3Q27EsoTwist3DSplittedVectorPtr2 != 0)
+    if (multiPhase2)
     {
         filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
         rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
@@ -343,7 +346,7 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
 
         MPI_File_sync(file_handler);
         MPI_File_close(&file_handler);
-    }    */
+    }
 
     //--------------------------------
 
@@ -689,11 +692,6 @@ void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step)
                     bouCond->bcVelocityX2           = (float)bcArr->bcvector[bc]->getBoundaryVelocityX2();
                     bouCond->bcVelocityX3           = (float)bcArr->bcvector[bc]->getBoundaryVelocityX3();
                     bouCond->bcDensity              = (float)bcArr->bcvector[bc]->getBoundaryDensity();
-                    bouCond->bcLodiDensity          = (float)bcArr->bcvector[bc]->getDensityLodiDensity();
-                    bouCond->bcLodiVelocityX1       = (float)bcArr->bcvector[bc]->getDensityLodiVelocityX1();
-                    bouCond->bcLodiVelocityX2       = (float)bcArr->bcvector[bc]->getDensityLodiVelocityX2();
-                    bouCond->bcLodiVelocityX3       = (float)bcArr->bcvector[bc]->getDensityLodiVelocityX3();
-                    bouCond->bcLodiLentgh           = (float)bcArr->bcvector[bc]->getDensityLodiLength();
                     bouCond->nx1                    = (float)bcArr->bcvector[bc]->nx1;
                     bouCond->nx2                    = (float)bcArr->bcvector[bc]->nx2;
                     bouCond->nx3                    = (float)bcArr->bcvector[bc]->nx3;
@@ -834,8 +832,7 @@ void MPIIOMigrationBECoProcessor::restart(int step)
         UBLOG(logINFO, "Load check point - start");
 
     readBlocks(step);
-    SPtr<Grid3DVisitor> newMetisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::BSW, MetisPartitioner::KWAY));
-    grid->accept(newMetisVisitor);
+    grid->accept(metisVisitor);
 
     readDataSet(step);
     readBoundaryConds(step);
@@ -972,6 +969,9 @@ void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, i
 
     MPI_Waitall(requestCount, &requests[0], MPI_STATUSES_IGNORE);
 
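+    // free the MPI datatype used for this exchange; without this the type handle leaks on every call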
+    MPI_Type_free(&sendBlockDoubleType);
+
     delete[] blocksCounterSend;
     delete[] blocksCounterRec;
     delete[] rawDataSend;
@@ -1003,7 +1002,8 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
         UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
-    bool multiPhase = false;
+    bool multiPhase1 = false;
+    bool multiPhase2 = false;
     dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3;
 
     int blocksCountAll   = grid->getNumberOfBlocks(); // quantity of all blocks in the grid
@@ -1037,7 +1037,7 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
         dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
     std::vector<double> doubleValuesArrayF(size_t(myBlocksCount * doubleCountInBlock)); // double-values in all blocks  Fdistributions
     std::vector<double> doubleValuesArrayH1; // double-values in all blocks  H1distributions
-    //std::vector<double> doubleValuesArrayH2; // double-values in all blocks  H2distributions
+    std::vector<double> doubleValuesArrayH2; // double-values in all blocks  H2distributions
 
     MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
@@ -1057,7 +1057,7 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
     MPI_File_get_size(file_handler, &fsize);
     if (fsize > 0)
     {
-        multiPhase = true;
+        multiPhase1 = true;
         doubleValuesArrayH1.resize(myBlocksCount * doubleCountInBlock);
 
         read_offset = (MPI_Offset)(indexB * doubleCountInBlock * sizeof(double)) ;
@@ -1065,6 +1065,23 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
     }
     MPI_File_close(&file_handler);
 
+    //--------------------------------- H2 ---------------------------------------------------------
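+    // a non-empty cpDataSetH2.bin marks a checkpoint written by a run with a second (H2) distribution set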
+    filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
+    rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
+    if (rc != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename);
+    fs = MPI_File_get_size(file_handler, &fsize);
+    if (fsize > 0)
+    {
+        multiPhase2 = true;
+        doubleValuesArrayH2.resize(myBlocksCount * doubleCountInBlock);
+
+        read_offset = (MPI_Offset)(indexB * doubleCountInBlock * sizeof(double));
+        MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH2[0], int(myBlocksCount), dataSetDoubleType, MPI_STATUS_IGNORE);
+    }
+    MPI_File_close(&file_handler);
+
     MPI_Type_free(&dataSetDoubleType);
 
     if (comm->isRoot()) 
@@ -1079,16 +1095,23 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
     for (int r = 0; r < size; r++)
         rawDataReceiveF[r].resize(0);
     blocksExchange(MESSAGE_TAG, indexB, indexE, int(doubleCountInBlock), doubleValuesArrayF, rawDataReceiveF);
+
 
     std::vector<double>* rawDataReceiveH1 = new std::vector<double>[size];
-    for (int r = 0; r < size; r++)
-        rawDataReceiveH1[r].resize(0);
-    blocksExchange(MESSAGE_TAG, indexB, indexE, int(doubleCountInBlock), doubleValuesArrayH1, rawDataReceiveH1);
+    if (multiPhase1)
+    {
+        for (int r = 0; r < size; r++)
+            rawDataReceiveH1[r].resize(0);
+        blocksExchange(MESSAGE_TAG, indexB, indexE, int(doubleCountInBlock), doubleValuesArrayH1, rawDataReceiveH1);
+    }
 
-    /*    std::vector<double>* rawDataReceiveH2 = new std::vector<double>[size];
+    std::vector<double>* rawDataReceiveH2 = new std::vector<double>[size];
+    if (multiPhase2)
+    {
         for (int r = 0; r < size; r++)
             rawDataReceiveH2[r].resize(0);
-        blocksExchange(MESSAGE_TAG, indexB, indexE, int(doubleCountInBlock), doubleValuesArrayH2, rawDataReceiveH2);*/
+        blocksExchange(MESSAGE_TAG, indexB, indexE, int(doubleCountInBlock), doubleValuesArrayH2, rawDataReceiveH2);
+    }
 
     if (comm->isRoot())
     {
@@ -1102,7 +1125,7 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
     int blockID;
     std::vector<double> vectorsOfValuesF1, vectorsOfValuesF2, vectorsOfValuesF3;
     std::vector<double> vectorsOfValuesH11, vectorsOfValuesH12, vectorsOfValuesH13;
-    //std::vector<double> vectorsOfValuesH21, vectorsOfValuesH22, vectorsOfValuesH23;
+    std::vector<double> vectorsOfValuesH21, vectorsOfValuesH22, vectorsOfValuesH23;
 
     size_t vectorSize1 = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3];
     size_t vectorSize2 = dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3];
@@ -1118,21 +1141,24 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
             index += 1;
 
             vectorsOfValuesF1.assign(rawDataReceiveF[r].data() + index, rawDataReceiveF[r].data() + index + vectorSize1);
-            if(multiPhase)
+            if(multiPhase1)
                 vectorsOfValuesH11.assign(rawDataReceiveH1[r].data() + index, rawDataReceiveH1[r].data() + index + vectorSize1);
-            //vectorsOfValuesH21.assign(rawDataReceiveH2[r].data() + index, rawDataReceiveH2[r].data() + index + vectorSize1);
+            if (multiPhase2)
+                vectorsOfValuesH21.assign(rawDataReceiveH2[r].data() + index, rawDataReceiveH2[r].data() + index + vectorSize1);
             index += vectorSize1;
 
             vectorsOfValuesF2.assign(rawDataReceiveF[r].data() + index, rawDataReceiveF[r].data() + index + vectorSize2);
-            if (multiPhase)
-                vectorsOfValuesH12.assign(rawDataReceiveH1[r].data() + index, rawDataReceiveH1[r].data() + index + vectorSize1);
-            //vectorsOfValuesH22.assign(rawDataReceiveH2[r].data() + index, rawDataReceiveH2[r].data() + index + vectorSize1);
+            if (multiPhase1)
+                vectorsOfValuesH12.assign(rawDataReceiveH1[r].data() + index, rawDataReceiveH1[r].data() + index + vectorSize2);
+            if (multiPhase2)
+                vectorsOfValuesH22.assign(rawDataReceiveH2[r].data() + index, rawDataReceiveH2[r].data() + index + vectorSize2);
             index += vectorSize2;
 
             vectorsOfValuesF3.assign(rawDataReceiveF[r].data() + index, rawDataReceiveF[r].data() + index + vectorSize3);
-            if (multiPhase)
-                vectorsOfValuesH13.assign(rawDataReceiveH1[r].data() + index, rawDataReceiveH1[r].data() + index + vectorSize1);
-                //vectorsOfValuesH23.assign(rawDataReceiveH2[r].data() + index, rawDataReceiveH2[r].data() + index + vectorSize1);
+            if (multiPhase1)
+                vectorsOfValuesH13.assign(rawDataReceiveH1[r].data() + index, rawDataReceiveH1[r].data() + index + vectorSize3);
+            if (multiPhase2)
+                vectorsOfValuesH23.assign(rawDataReceiveH2[r].data() + index, rawDataReceiveH2[r].data() + index + vectorSize3);
             index += vectorSize3;
 
             SPtr<DistributionArray3D> mFdistributions(new D3Q27EsoTwist3DSplittedVector());
@@ -1148,7 +1174,7 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
             dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX3(dataSetParamStr1.nx3);
 
             SPtr<DistributionArray3D> mH1distributions(new D3Q27EsoTwist3DSplittedVector());
-            if (multiPhase)
+            if (multiPhase1)
             {
                 dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
                     new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH11, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
@@ -1162,18 +1188,20 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
                 dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX3(dataSetParamStr1.nx3);
             }
 
-            /*SPtr<DistributionArray3D> mH2distributions(new D3Q27EsoTwist3DSplittedVector());
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+            SPtr<DistributionArray3D> mH2distributions(new D3Q27EsoTwist3DSplittedVector());
+            if (multiPhase2)
+            {
+                dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
                     new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH21, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH22, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
-                    vectorsOfValuesH23, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
-
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX1(dataSetParamStr1.nx1);
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX2(dataSetParamStr1.nx2);
-            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX3(dataSetParamStr1.nx3);*/
-
+                dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                        new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH22, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+                dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                        vectorsOfValuesH23, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
+
+                dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX1(dataSetParamStr1.nx1);
+                dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX2(dataSetParamStr1.nx2);
+                dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX3(dataSetParamStr1.nx3);
+            }
 
             // find the necessary block and fill it
             SPtr<Block3D> block = grid->getBlock(blockID);
@@ -1189,14 +1217,16 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
             kernel->setDensityRatio(this->densityRatio);
             SPtr<DataSet3D> dataSetPtr = SPtr<DataSet3D>(new DataSet3D());
             dataSetPtr->setFdistributions(mFdistributions);
-            if (multiPhase)
+            if (multiPhase1)
                 dataSetPtr->setHdistributions(mH1distributions);
-//            dataSetPtr->setHdistributions(mH2distributions);
+            if (multiPhase2)
+                dataSetPtr->setH2distributions(mH2distributions);
             kernel->setDataSet(dataSetPtr);
             block->setKernel(kernel);
         }
     }
-    //if (comm->isRoot()) 
+
+    if (comm->isRoot()) 
     {
         UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet end of restore of data, rank = " << rank);
         UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
@@ -1245,7 +1275,7 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
         readArray(step, PhaseField2, std::string("/cpPhaseField2.bin"));
 
     delete[] rawDataReceiveF;
-//    delete[] rawDataReceiveH1;
+    delete[] rawDataReceiveH1;
-//    delete[] rawDataReceiveH2;
+    delete[] rawDataReceiveH2;
 }
 
@@ -1608,11 +1638,6 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
                     bc->bcVelocityX2           = bcArray[ibc].bcVelocityX2;
                     bc->bcVelocityX3           = bcArray[ibc].bcVelocityX3;
                     bc->bcDensity              = bcArray[ibc].bcDensity;
-                    bc->bcLodiDensity          = bcArray[ibc].bcLodiDensity;
-                    bc->bcLodiVelocityX1       = bcArray[ibc].bcLodiVelocityX1;
-                    bc->bcLodiVelocityX2       = bcArray[ibc].bcLodiVelocityX2;
-                    bc->bcLodiVelocityX3       = bcArray[ibc].bcLodiVelocityX3;
-                    bc->bcLodiLentgh           = bcArray[ibc].bcLodiLentgh;
 
                     bc->nx1 = bcArray[ibc].nx1;
                     bc->nx2 = bcArray[ibc].nx2;
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h
index 9a89ada1ae039d10cd53b06b189e5709398911c8..ee8766eebaa5e3baa41ddb35270cdbfe4670db1f 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h
@@ -13,6 +13,7 @@ class UbScheduler;
 class Communicator;
 class BCProcessor;
 class LBMKernel;
+class Grid3DVisitor;
 
 //! \class MPIIOMigrationBECoProcessor
 //! \brief Writes the grid each timestep into the files and reads the grid from the files before regenerating
@@ -30,8 +31,7 @@ class MPIIOMigrationBECoProcessor : public MPIIOCoProcessor
     };
 
 public:
-    MPIIOMigrationBECoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path,
-                                SPtr<Communicator> comm);
+    MPIIOMigrationBECoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, SPtr<Grid3DVisitor> mV, const std::string &path, SPtr<Communicator> comm);
     ~MPIIOMigrationBECoProcessor() override;
     //! Each timestep writes the grid into the files
     void process(double step) override;
@@ -93,11 +93,11 @@ private:
     MPIIODataStructures::boundCondParam boundCondParamStr;
     SPtr<LBMKernel> lbmKernel;
     SPtr<BCProcessor> bcProcessor;
+    SPtr<Grid3DVisitor> metisVisitor;
     double nue;
     double nuL;
     double nuG;
     double densityRatio;
-
 };
 
 #endif
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
index 2c5a547c4cca531fe50e7255b0aba6a6a4b5c6e9..a392bd33e7cd118de2bae9e82095aa53f810ce2f 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
@@ -22,10 +22,11 @@
 
 using namespace MPIIODataStructures;
 
-MPIIOMigrationCoProcessor::MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, SPtr<Communicator> comm)
+MPIIOMigrationCoProcessor::MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, SPtr<Grid3DVisitor> mV, const std::string &path, SPtr<Communicator> comm)
     : MPIIOCoProcessor(grid, s, path, comm)
 {
     memset(&boundCondParamStr, 0, sizeof(boundCondParamStr));
+    metisVisitor = mV;
 
     //-------------------------   define MPI types  ---------------------------------
 
@@ -134,7 +135,7 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
     DataSetMigration *dataSetArray = new DataSetMigration[blocksCount];
     std::vector<double> doubleValuesArrayF; // double-values (arrays of f's) in all blocks  Fdistribution
     std::vector<double> doubleValuesArrayH1; // double-values (arrays of f's) in all blocks  H1distribution
-    // std::vector<double> doubleValuesArrayH2; // double-values (arrays of f's) in all blocks  H2distribution
+    std::vector<double> doubleValuesArrayH2; // double-values (arrays of f's) in all blocks  H2distribution
 
     if (comm->isRoot()) 
     {
@@ -142,8 +143,11 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
         UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
-    bool multiPhase = false;
+    bool multiPhase1 = false;
+    bool multiPhase2 = false;
     DSArraysPresence arrPresence;
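+    // assumed intent: zero the presence flags so members not set below are well-defined when written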
+    memset(&arrPresence, 0, sizeof(arrPresence));
     bool firstBlock           = true;
     size_t doubleCountInBlock = 0;
     int ic                    = 0;
@@ -178,19 +181,20 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
             D3Q27EsoTwist3DSplittedVectorPtrH1 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getHdistributions());
             if (D3Q27EsoTwist3DSplittedVectorPtrH1 != 0)
             {
-                multiPhase = true;
+                multiPhase1 = true;
                 localDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getLocalDistributions();
                 nonLocalDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getNonLocalDistributions();
                 zeroDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getZeroDistributions();
             }
 
-            /*D3Q27EsoTwist3DSplittedVectorPtrH2 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getH2distributions());
+            D3Q27EsoTwist3DSplittedVectorPtrH2 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getH2distributions());
             if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
             {
+                multiPhase2 = true;
                 localDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getLocalDistributions();
                 nonLocalDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getNonLocalDistributions();
                 zeroDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getZeroDistributions();
-            }*/
+            }
 
             if (firstBlock) // && block->getKernel()) // when first (any) valid block...
             {
@@ -284,7 +288,7 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
             if (zeroDistributionsF && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
                 doubleValuesArrayF.insert(doubleValuesArrayF.end(), zeroDistributionsF->getDataVector().begin(), zeroDistributionsF->getDataVector().end());
 
-            if (multiPhase)
+            if (multiPhase1)
             {
                 if (localDistributionsH1 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
                     doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), localDistributionsH1->getDataVector().begin(), localDistributionsH1->getDataVector().end());
@@ -294,7 +298,7 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
                     doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), zeroDistributionsH1->getDataVector().begin(), zeroDistributionsH1->getDataVector().end());
             }
 
-            /*if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
+            if (multiPhase2)
             {
                 if (localDistributionsH2 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
                     doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), localDistributionsH2->getDataVector().begin(), localDistributionsH2->getDataVector().end());
@@ -302,7 +306,7 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
                     doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), nonLocalDistributionsH2->getDataVector().begin(), nonLocalDistributionsH2->getDataVector().end());
                 if (zeroDistributionsH2 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
                     doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), zeroDistributionsH2->getDataVector().begin(), zeroDistributionsH2->getDataVector().end());
-            }*/
+            }
 
             ic++;
         }
@@ -354,7 +358,7 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
     MPI_File_close(&file_handler);
 
     //-------------------------------- H1 ----------------------------------------------------
-    if (multiPhase)
+    if (multiPhase1)
     {
         filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin";
         rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
@@ -374,7 +378,7 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
     }
 
     //-------------------------------- H2 ----------------------------------------------------
-    /*if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
+    if (multiPhase2)
     {
         filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
         rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
@@ -391,7 +395,7 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
 
         MPI_File_sync(file_handler);
         MPI_File_close(&file_handler);
-    }*/
+    }
     //--------------------------------
 
     MPI_Type_free(&dataSetDoubleType);
@@ -1480,11 +1484,6 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
                     bouCond->bcVelocityX2           = (float)bcArr->bcvector[bc]->getBoundaryVelocityX2();
                     bouCond->bcVelocityX3           = (float)bcArr->bcvector[bc]->getBoundaryVelocityX3();
                     bouCond->bcDensity              = (float)bcArr->bcvector[bc]->getBoundaryDensity();
-                    bouCond->bcLodiDensity          = (float)bcArr->bcvector[bc]->getDensityLodiDensity();
-                    bouCond->bcLodiVelocityX1       = (float)bcArr->bcvector[bc]->getDensityLodiVelocityX1();
-                    bouCond->bcLodiVelocityX2       = (float)bcArr->bcvector[bc]->getDensityLodiVelocityX2();
-                    bouCond->bcLodiVelocityX3       = (float)bcArr->bcvector[bc]->getDensityLodiVelocityX3();
-                    bouCond->bcLodiLentgh           = (float)bcArr->bcvector[bc]->getDensityLodiLength();
                     bouCond->nx1                    = (float)bcArr->bcvector[bc]->nx1;
                     bouCond->nx2                    = (float)bcArr->bcvector[bc]->nx2;
                     bouCond->nx3                    = (float)bcArr->bcvector[bc]->nx3;
@@ -1617,7 +1616,6 @@ void MPIIOMigrationCoProcessor::restart(int step)
 
     readBlocks(step);
 
-    SPtr<Grid3DVisitor> metisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::BSW, MetisPartitioner::KWAY));
     grid->accept(metisVisitor);
 
     readDataSet(step);
@@ -1646,7 +1644,8 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
     if (comm->isRoot())
         start = MPI_Wtime();
 
-    bool multiPhase = false;
+    bool multiPhase1 = false;
+    bool multiPhase2 = false;
     size_t blocksCount = 0; // quantity of the blocks that belong to this process
     dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3;
 
@@ -1677,7 +1676,7 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
         dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
     std::vector<double> doubleValuesArrayF(size_t(blocksCount * doubleCountInBlock)); // double-values in all blocks  Fdistributions
     std::vector<double> doubleValuesArrayH1; // double-values in all blocks  H1distributions
-    //std::vector<double> doubleValuesArrayH2; // double-values in all blocks  H2distributions
+    std::vector<double> doubleValuesArrayH2; // double-values in all blocks  H2distributions
 
     // define MPI_types depending on the block-specific information
     MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
@@ -1710,7 +1709,7 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
     MPI_File_get_size(file_handler, &fsize);
     if (fsize > 0)
     {
-        multiPhase = true;
+        multiPhase1 = true;
         doubleValuesArrayH1.resize(blocksCount * doubleCountInBlock);
 
         sizeofOneDataSet = size_t(doubleCountInBlock * sizeof(double));
@@ -1729,25 +1728,31 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
     MPI_File_close(&file_handler);
 
     //----------------------------------------- H2 ----------------------------------------------------
-  /*filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
+    filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
     rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
-    sizeofOneDataSet = size_t(doubleCountInBlock * sizeof(double));
-    doubleValuesArrayH2.resize(blocksCount * doubleCountInBlock);
-
-    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    fs = MPI_File_get_size(file_handler, &fsize);
+    if (fsize > 0)
     {
-        for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
+        multiPhase2 = true;
+        doubleValuesArrayH2.resize(blocksCount * doubleCountInBlock);
+
+        sizeofOneDataSet = size_t(doubleCountInBlock * sizeof(double));
+
+        for (int level = minInitLevel; level <= maxInitLevel; level++)
         {
-            read_offset = (MPI_Offset)(block->getGlobalID() * sizeofOneDataSet);
-            MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH2[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
-            ic++;
+            for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
+            {
+                read_offset = (MPI_Offset)(block->getGlobalID() * sizeofOneDataSet);
+                MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH2[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
+                ic++;
+            }
         }
-    }
 
-    MPI_File_close(&file_handler);*/
+    }
+    MPI_File_close(&file_handler);
 
     MPI_Type_free(&dataSetDoubleType);
 
@@ -1762,7 +1767,7 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
     size_t index = 0;
     std::vector<double> vectorsOfValuesF1, vectorsOfValuesF2, vectorsOfValuesF3;
     std::vector<double> vectorsOfValuesH11, vectorsOfValuesH12, vectorsOfValuesH13;
-    //std::vector<double> vectorsOfValuesH21, vectorsOfValuesH22, vectorsOfValuesH23;
+    std::vector<double> vectorsOfValuesH21, vectorsOfValuesH22, vectorsOfValuesH23;
 
     size_t vectorSize1 = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3];
     size_t vectorSize2 = dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3];
@@ -1771,21 +1776,24 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
     for (std::size_t n = 0; n < blocksCount; n++) 
     {
         vectorsOfValuesF1.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize1);
-        if(multiPhase)
+        if(multiPhase1)
             vectorsOfValuesH11.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize1);
-        //vectorsOfValuesH21.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize1);
+        if (multiPhase2)
+            vectorsOfValuesH21.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize1);
         index += vectorSize1;
 
         vectorsOfValuesF2.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize2);
-        if (multiPhase)
+        if (multiPhase1)
             vectorsOfValuesH12.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize2);
-        //vectorsOfValuesH22.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize2);
+        if (multiPhase2)
+            vectorsOfValuesH22.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize2);
         index += vectorSize2;
 
         vectorsOfValuesF3.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize3);
-        if (multiPhase)
+        if (multiPhase1)
             vectorsOfValuesH13.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize3);
-        //vectorsOfValuesH23.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize3);
+        if (multiPhase2)
+            vectorsOfValuesH23.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize3);
         index += vectorSize3;
 
         SPtr<DistributionArray3D> mFdistributions(new D3Q27EsoTwist3DSplittedVector());
@@ -1801,7 +1809,7 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX3(dataSetParamStr1.nx3);
 
        SPtr<DistributionArray3D> mH1distributions(new D3Q27EsoTwist3DSplittedVector());
-       if (multiPhase)
+       if (multiPhase1)
         {
             dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
                 new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH11, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
@@ -1815,17 +1823,20 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
             dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX3(dataSetParamStr1.nx3);
          }
 
-        /*SPtr<DistributionArray3D> mH2distributions(new D3Q27EsoTwist3DSplittedVector());
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+        SPtr<DistributionArray3D> mH2distributions(new D3Q27EsoTwist3DSplittedVector());
+        if (multiPhase2)
+        {
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
                 new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH21, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH22, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
-                vectorsOfValuesH23, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
-
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX1(dataSetParamStr1.nx1);
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX2(dataSetParamStr1.nx2);
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX3(dataSetParamStr1.nx3);*/
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH22, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                    vectorsOfValuesH23, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
+
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX1(dataSetParamStr1.nx1);
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX2(dataSetParamStr1.nx2);
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX3(dataSetParamStr1.nx3);
+        }
 
         // find the necessary block and fill it
         SPtr<Block3D> block = grid->getBlock(dataSetArray[n].globalID);
@@ -1841,9 +1852,10 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
 
         SPtr<DataSet3D> dataSetPtr = SPtr<DataSet3D>(new DataSet3D());
         dataSetPtr->setFdistributions(mFdistributions);
-        if (multiPhase)
+        if (multiPhase1)
             dataSetPtr->setHdistributions(mH1distributions);
-        //dataSetPtr->setH2distributions(mH2distributions);
+        if (multiPhase2)
+            dataSetPtr->setH2distributions(mH2distributions);
         kernel->setDataSet(dataSetPtr);
         block->setKernel(kernel);
     }
@@ -2778,11 +2790,6 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step)
                     bc->bcVelocityX2           = bcArray[ibc].bcVelocityX2;
                     bc->bcVelocityX3           = bcArray[ibc].bcVelocityX3;
                     bc->bcDensity              = bcArray[ibc].bcDensity;
-                    bc->bcLodiDensity          = bcArray[ibc].bcLodiDensity;
-                    bc->bcLodiVelocityX1       = bcArray[ibc].bcLodiVelocityX1;
-                    bc->bcLodiVelocityX2       = bcArray[ibc].bcLodiVelocityX2;
-                    bc->bcLodiVelocityX3       = bcArray[ibc].bcLodiVelocityX3;
-                    bc->bcLodiLentgh           = bcArray[ibc].bcLodiLentgh;
 
                     bc->nx1 = bcArray[ibc].nx1;
                     bc->nx2 = bcArray[ibc].nx2;
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h
index ca0de8f3e7ba315bc8a870f89063ea9f38d7b59f..f2fa6cc942d21a5cc460c76b207d63daab70a9fb 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h
@@ -12,6 +12,7 @@ class UbScheduler;
 class Communicator;
 class BCProcessor;
 class LBMKernel;
+class Grid3DVisitor;
 
 //! \class MPIIOMigrationCoProcessor
 //! \brief Writes the grid each timestep into the files and reads the grid from the files before regenerating
@@ -29,7 +30,7 @@ public:
         PhaseField2 = 8
     };
 
-    MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, SPtr<Communicator> comm);
+    MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, SPtr<Grid3DVisitor> mV, const std::string &path, SPtr<Communicator> comm);
     ~MPIIOMigrationCoProcessor() override;
     //! Each timestep writes the grid into the files
     void process(double step) override;
@@ -84,6 +85,7 @@ private:
     SPtr<LBMKernel> lbmKernel;
     SPtr<BCProcessor> bcProcessor;
     // double nue;
+    SPtr<Grid3DVisitor> metisVisitor;
 };
 
 #endif
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
index 036fea0c780bf2a74b22789ee6e0cb605ddbd065..9f9590e3493c743e5c5013d9bf43e5df35ecbe23 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
@@ -140,7 +140,7 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
     DataSetRestart *dataSetArray = new DataSetRestart[blocksCount];
     std::vector<double> doubleValuesArrayF; // double-values (arrays of f's) in all blocks  Fdistribution
     std::vector<double> doubleValuesArrayH1; // double-values (arrays of f's) in all blocks  H1distribution
-    // std::vector<double> doubleValuesArrayH2; // double-values (arrays of f's) in all blocks  H2distribution
+    std::vector<double> doubleValuesArrayH2; // double-values (arrays of f's) in all blocks  H2distribution
 
     if (comm->isRoot()) 
     {
@@ -148,7 +148,8 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
         UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
-    bool multiPhase = false;
+    bool multiPhase1 = false;
+    bool multiPhase2 = false;
     DSArraysPresence arrPresence;
     bool firstBlock        = true;
     int doubleCountInBlock = 0;
@@ -188,36 +189,40 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
             D3Q27EsoTwist3DSplittedVectorPtrH1 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getHdistributions());
             if (D3Q27EsoTwist3DSplittedVectorPtrH1 != 0)
             {
-                multiPhase = true;
+                multiPhase1 = true;
                 localDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getLocalDistributions();
                 nonLocalDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getNonLocalDistributions();
                 zeroDistributionsH1 = D3Q27EsoTwist3DSplittedVectorPtrH1->getZeroDistributions();
             }
 
-            /*D3Q27EsoTwist3DSplittedVectorPtrH2 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getH2distributions());
+            D3Q27EsoTwist3DSplittedVectorPtrH2 = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getH2distributions());
             if (D3Q27EsoTwist3DSplittedVectorPtrH2 != 0)
             {
+                multiPhase2 = true;
                 localDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getLocalDistributions();
                 nonLocalDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getNonLocalDistributions();
                 zeroDistributionsH2 = D3Q27EsoTwist3DSplittedVectorPtrH2->getZeroDistributions();
-            }*/
+            }
 
             if (firstBlock) // when first (any) valid block...
             {
-                if (localDistributionsF) {
+                if (localDistributionsF) 
+                {
                     dataSetParamStr1.nx[0] = static_cast<int>(localDistributionsF->getNX1());
                     dataSetParamStr1.nx[1] = static_cast<int>(localDistributionsF->getNX2());
                     dataSetParamStr1.nx[2] = static_cast<int>(localDistributionsF->getNX3());
                     dataSetParamStr1.nx[3] = static_cast<int>(localDistributionsF->getNX4());
                 }
 
-                if (nonLocalDistributionsF) {
+                if (nonLocalDistributionsF) 
+                {
                     dataSetParamStr2.nx[0] = static_cast<int>(nonLocalDistributionsF->getNX1());
                     dataSetParamStr2.nx[1] = static_cast<int>(nonLocalDistributionsF->getNX2());
                     dataSetParamStr2.nx[2] = static_cast<int>(nonLocalDistributionsF->getNX3());
                     dataSetParamStr2.nx[3] = static_cast<int>(nonLocalDistributionsF->getNX4());
                 }
-                if (zeroDistributionsF) {
+                if (zeroDistributionsF) 
+                {
                     dataSetParamStr3.nx[0] = static_cast<int>(zeroDistributionsF->getNX1());
                     dataSetParamStr3.nx[1] = static_cast<int>(zeroDistributionsF->getNX2());
                     dataSetParamStr3.nx[2] = static_cast<int>(zeroDistributionsF->getNX3());
@@ -291,7 +296,7 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
             if (zeroDistributionsF && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
                 doubleValuesArrayF.insert(doubleValuesArrayF.end(), zeroDistributionsF->getDataVector().begin(), zeroDistributionsF->getDataVector().end());
 
-            if (multiPhase)
+            if (multiPhase1)
             {
                 if (localDistributionsH1 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
                     doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), localDistributionsH1->getDataVector().begin(), localDistributionsH1->getDataVector().end());
@@ -300,15 +305,17 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
                 if (zeroDistributionsH1 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
                     doubleValuesArrayH1.insert(doubleValuesArrayH1.end(), zeroDistributionsH1->getDataVector().begin(), zeroDistributionsH1->getDataVector().end());
             }
-
-            /*if (localDistributionsH2 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
-                doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), localDistributionsH2->getDataVector().begin(), localDistributionsH2->getDataVector().end());
-            if (nonLocalDistributionsH2 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
-                doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), nonLocalDistributionsH2->getDataVector().begin(), nonLocalDistributionsH2->getDataVector().end());
-            if (zeroDistributionsH2 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
-                doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), zeroDistributionsH2->getDataVector().begin(), zeroDistributionsH2->getDataVector().end());*/
-
-            ic++;
+            if (multiPhase2)
+            {
+                if (localDistributionsH2 && (dataSetParamStr1.nx[0] > 0) && (dataSetParamStr1.nx[1] > 0) && (dataSetParamStr1.nx[2] > 0) && (dataSetParamStr1.nx[3] > 0))
+                    doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), localDistributionsH2->getDataVector().begin(), localDistributionsH2->getDataVector().end());
+                if (nonLocalDistributionsH2 && (dataSetParamStr2.nx[0] > 0) && (dataSetParamStr2.nx[1] > 0) && (dataSetParamStr2.nx[2] > 0) && (dataSetParamStr2.nx[3] > 0))
+                    doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), nonLocalDistributionsH2->getDataVector().begin(), nonLocalDistributionsH2->getDataVector().end());
+                if (zeroDistributionsH2 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
+                    doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), zeroDistributionsH2->getDataVector().begin(), zeroDistributionsH2->getDataVector().end());
+            }
+
+            ic++;
         }
     }
 
@@ -379,7 +386,7 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
     MPI_File_close(&file_handler);
 
     //------------------------------------------------------------------------------------------------------------------
-    if (multiPhase)
+    if (multiPhase1)
     {
         filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin";
         rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
@@ -394,20 +401,23 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
         MPI_File_close(&file_handler);
     }
 
-    //--------------------------------------------------------------------------------------------------------------------
-    /*filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
-    rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-    if (rc != MPI_SUCCESS)
-        throw UbException(UB_EXARGS, "couldn't open file " + filename);
+    //--------------------------------------------------------------------------------------------------------------------
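+    // cpDataSetH2.bin is written only when a second phase field is present in the simulation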
+    if (multiPhase2)
+    {
+        filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
+        rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+        if (rc != MPI_SUCCESS)
+            throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
-    // each process writes the dataSet arrays
-    if (doubleValuesArrayH1.size() > 0)
-        MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH2[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+        // each process writes the dataSet arrays
+        if (doubleValuesArrayH2.size() > 0)
+            MPI_File_write_at(file_handler, write_offset, &doubleValuesArrayH2[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
-    MPI_File_sync(file_handler);
-    MPI_File_close(&file_handler);*/
+        MPI_File_sync(file_handler);
+        MPI_File_close(&file_handler);
+    }
+    //------------------------------------------------------------------------------------------------------------------------
 
-    //--------------------------------
     MPI_Type_free(&dataSetDoubleType);
 
     delete[] dataSetArray;
@@ -1479,11 +1489,6 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
                     bouCond->bcVelocityX2           = (float)bcArr->bcvector[bc]->getBoundaryVelocityX2();
                     bouCond->bcVelocityX3           = (float)bcArr->bcvector[bc]->getBoundaryVelocityX3();
                     bouCond->bcDensity              = (float)bcArr->bcvector[bc]->getBoundaryDensity();
-                    bouCond->bcLodiDensity          = (float)bcArr->bcvector[bc]->getDensityLodiDensity();
-                    bouCond->bcLodiVelocityX1       = (float)bcArr->bcvector[bc]->getDensityLodiVelocityX1();
-                    bouCond->bcLodiVelocityX2       = (float)bcArr->bcvector[bc]->getDensityLodiVelocityX2();
-                    bouCond->bcLodiVelocityX3       = (float)bcArr->bcvector[bc]->getDensityLodiVelocityX3();
-                    bouCond->bcLodiLentgh           = (float)bcArr->bcvector[bc]->getDensityLodiLength();
                     bouCond->nx1                    = (float)bcArr->bcvector[bc]->nx1;
                     bouCond->nx2                    = (float)bcArr->bcvector[bc]->nx2;
                     bouCond->nx3                    = (float)bcArr->bcvector[bc]->nx3;
@@ -1665,7 +1670,8 @@ void MPIIORestartCoProcessor::readDataSet(int step)
     // calculate the read offset
     MPI_Offset read_offset  = (MPI_Offset)(size * sizeof(int));
     size_t next_read_offset = 0;
-    bool multiPhase = false;
+    bool multiPhase1 = false;
+    bool multiPhase2 = false;
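+    // set to true below when the corresponding H1/H2 checkpoint file turns out to contain data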
 
     // read count of blocks
     int blocksCount = 0;
@@ -1682,7 +1688,7 @@ void MPIIORestartCoProcessor::readDataSet(int step)
         dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
     std::vector<double> doubleValuesArrayF(size_t(blocksCount * doubleCountInBlock)); // double-values in all blocks  Fdistributions
     std::vector<double> doubleValuesArrayH1; // double-values in all blocks  H1distributions
-    //std::vector<double> doubleValuesArrayH2; // double-values in all blocks  H2distributions
+    std::vector<double> doubleValuesArrayH2; // double-values in all blocks  H2distributions
 
     //   define MPI_types depending on the block-specific information
     MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
@@ -1718,21 +1724,27 @@ void MPIIORestartCoProcessor::readDataSet(int step)
     MPI_File_get_size(file_handler, &fsize);
     if (fsize > 0)
     {
-        multiPhase = true;
+        multiPhase1 = true;
         doubleValuesArrayH1.resize(blocksCount * doubleCountInBlock);
         MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH1[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
     }
     MPI_File_close(&file_handler);
 
     //-------------------------------------- H2 -----------------------------
-       /*filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
+    filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
     rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
-    doubleValuesArrayH2.resize(blocksCount * doubleCountInBlock);
-    MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH2[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
-    MPI_File_close(&file_handler);*/
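+    // a zero-size cpDataSetH2.bin means the checkpoint was written without a second phase field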
+    fs = MPI_File_get_size(file_handler, &fsize);
+    if (fsize > 0)
+    {
+        multiPhase2 = true;
+        doubleValuesArrayH2.resize(blocksCount * doubleCountInBlock);
+        MPI_File_read_at(file_handler, read_offset, &doubleValuesArrayH2[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+    }
+    MPI_File_close(&file_handler);
+    //-------------------------------------------------------------------
 
     MPI_Type_free(&dataSetDoubleType);
 
@@ -1747,7 +1759,7 @@ void MPIIORestartCoProcessor::readDataSet(int step)
     size_t index = 0;
     std::vector<double> vectorsOfValuesF1, vectorsOfValuesF2, vectorsOfValuesF3;
     std::vector<double> vectorsOfValuesH11, vectorsOfValuesH12, vectorsOfValuesH13;
-    //std::vector<double> vectorsOfValuesH21, vectorsOfValuesH22, vectorsOfValuesH23;
+    std::vector<double> vectorsOfValuesH21, vectorsOfValuesH22, vectorsOfValuesH23;
     size_t vectorSize1 = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3];
     size_t vectorSize2 = dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3];
     size_t vectorSize3 = dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
@@ -1755,21 +1767,24 @@ void MPIIORestartCoProcessor::readDataSet(int step)
     for (int n = 0; n < blocksCount; n++) 
     {
         vectorsOfValuesF1.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize1);
-        if (multiPhase)
+        if (multiPhase1)
             vectorsOfValuesH11.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize1);
-        //vectorsOfValuesH21.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize1);
+        if (multiPhase2)
+            vectorsOfValuesH21.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize1);
         index += vectorSize1;
 
         vectorsOfValuesF2.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize2);
-        if (multiPhase)
+        if (multiPhase1)
             vectorsOfValuesH12.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize2);
-        //vectorsOfValuesH22.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize2);
+        if (multiPhase2)
+            vectorsOfValuesH22.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize2);
         index += vectorSize2;
 
         vectorsOfValuesF3.assign(doubleValuesArrayF.data() + index, doubleValuesArrayF.data() + index + vectorSize3);
-        if (multiPhase)
+        if (multiPhase1)
             vectorsOfValuesH13.assign(doubleValuesArrayH1.data() + index, doubleValuesArrayH1.data() + index + vectorSize3);
-        //vectorsOfValuesH23.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize3);
+        if (multiPhase2)
+            vectorsOfValuesH23.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize3);
         index += vectorSize3;
 
         SPtr<DistributionArray3D> mFdistributions(new D3Q27EsoTwist3DSplittedVector());
@@ -1785,7 +1800,7 @@ void MPIIORestartCoProcessor::readDataSet(int step)
         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX3(dataSetParamStr1.nx3);
 
         SPtr<DistributionArray3D> mH1distributions(new D3Q27EsoTwist3DSplittedVector());
-        if (multiPhase)
+        if (multiPhase1)
         {
             dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
                 new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH11, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
@@ -1798,18 +1813,22 @@ void MPIIORestartCoProcessor::readDataSet(int step)
             dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX2(dataSetParamStr1.nx2);
             dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH1distributions)->setNX3(dataSetParamStr1.nx3);
         }
-        /*SPtr<DistributionArray3D> mH2distributions(new D3Q27EsoTwist3DSplittedVector());
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH21, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH22, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
-                vectorsOfValuesH23, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
-
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX1(dataSetParamStr1.nx1);
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX2(dataSetParamStr1.nx2);
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX3(dataSetParamStr1.nx3);*/
-         
+
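+        // rebuild the H2 distributions only when the checkpoint provided them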
+        SPtr<DistributionArray3D> mH2distributions(new D3Q27EsoTwist3DSplittedVector());
+        if (multiPhase2)
+        {
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH21, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
+                    new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesH22, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                    vectorsOfValuesH23, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
+
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX1(dataSetParamStr1.nx1);
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX2(dataSetParamStr1.nx2);
+            dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX3(dataSetParamStr1.nx3);
+        }
+
         // find the necessary block and fill it
         SPtr<Block3D> block = grid->getBlock(dataSetArray[n].x1, dataSetArray[n].x2, dataSetArray[n].x3, dataSetArray[n].level);
    
@@ -1825,9 +1844,10 @@ void MPIIORestartCoProcessor::readDataSet(int step)
 
         SPtr<DataSet3D> dataSetPtr = SPtr<DataSet3D>(new DataSet3D());
         dataSetPtr->setFdistributions(mFdistributions);
-        if (multiPhase)
+        if (multiPhase1)
             dataSetPtr->setHdistributions(mH1distributions);
-        //dataSetPtr->setH2distributions(mH2distributions);
+        if (multiPhase2)
+            dataSetPtr->setH2distributions(mH2distributions);
         kernel->setDataSet(dataSetPtr);
         block->setKernel(kernel);
     }
@@ -2679,11 +2699,6 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
                 bc->bcVelocityX2           = bcArray[index].bcVelocityX2;
                 bc->bcVelocityX3           = bcArray[index].bcVelocityX3;
                 bc->bcDensity              = bcArray[index].bcDensity;
-                bc->bcLodiDensity          = bcArray[index].bcLodiDensity;
-                bc->bcLodiVelocityX1       = bcArray[index].bcLodiVelocityX1;
-                bc->bcLodiVelocityX2       = bcArray[index].bcLodiVelocityX2;
-                bc->bcLodiVelocityX3       = bcArray[index].bcLodiVelocityX3;
-                bc->bcLodiLentgh           = bcArray[index].bcLodiLentgh;
 
                 bc->nx1 = bcArray[index].nx1;
                 bc->nx2 = bcArray[index].nx2;
diff --git a/src/cpu/VirtualFluidsCore/Parallel/MPIIODataStructures.h b/src/cpu/VirtualFluidsCore/Parallel/MPIIODataStructures.h
index c8bd2d0797af86858b40a1a29a154107f04e46c8..a7272a307e4039b466e758ad2bc95edf9b087e78 100644
--- a/src/cpu/VirtualFluidsCore/Parallel/MPIIODataStructures.h
+++ b/src/cpu/VirtualFluidsCore/Parallel/MPIIODataStructures.h
@@ -118,12 +118,6 @@ struct BoundaryCondition {
     float bcVelocityX3;
     float bcDensity;
 
-    float bcLodiDensity;
-    float bcLodiVelocityX1;
-    float bcLodiVelocityX2;
-    float bcLodiVelocityX3;
-    float bcLodiLentgh;
-
     float nx1, nx2, nx3;
     float q[26];
 
diff --git a/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.cpp b/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.cpp
index b66eff480e99102edf332cfd750e0d2b6965ba83..78001c1d5c4687a296df912e8d81c659d031a6e7 100644
--- a/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.cpp
+++ b/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.cpp
@@ -77,7 +77,7 @@ CheckpointConverter::CheckpointConverter(SPtr<Grid3D> grid, const std::string &p
     //---------------------------------------
 
     MPI_Datatype typesDataSetRead[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR };
-    int blocksDataSetRead[3]         = { 3, 5, 2 };
+    int blocksDataSetRead[3]         = { 5, 5, 2 };
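+    // two extra MPI_DOUBLE entries: DataSetRestart now also carries collFactorL and collFactorG for the multiphase model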
     MPI_Aint offsetsDataSetRead[3], lbDataSetRead, extentDataSetRead;
 
     offsetsDataSetRead[0] = 0;
@@ -93,7 +93,7 @@ CheckpointConverter::CheckpointConverter(SPtr<Grid3D> grid, const std::string &p
     //-----------------------------------------------------------------------
 
     MPI_Datatype typesDataSetWrite[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR };
-    int blocksDataSetWrite[3]         = { 2, 2, 2 };
+    int blocksDataSetWrite[3]         = { 5, 2, 2 };
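+    // collFactorL, collFactorG and densityRatio are now written as well, so the double count grows from 2 to 5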
     MPI_Aint offsetsDataSetWrite[3], lbDataSetWrite, extentDataSetWrite;
 
     offsetsDataSetWrite[0] = 0;
@@ -147,8 +147,7 @@ void CheckpointConverter::convertBlocks(int step, int procCount)
     MPI_File file_handlerW;
     UbSystem::makeDirectory(path + "/mig/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step));
     std::string filenameW = path + "/mig/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBlocks.bin";
-    int rcW = MPI_File_open(MPI_COMM_WORLD, filenameW.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL,
-                            &file_handlerW);
+    int rcW = MPI_File_open(MPI_COMM_WORLD, filenameW.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handlerW);
     if (rcW != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filenameW);
 
@@ -159,21 +158,21 @@ void CheckpointConverter::convertBlocks(int step, int procCount)
     GridParam *gridParameters = new GridParam;
 
     // calculate the read offset
-    procCount =
-        1; // readBlocks and writeBlocks in both MPIIORestartCoProcessor and MPIIOMigrationCoProcessor have size == 1!
+    procCount = 1; // readBlocks and writeBlocks in both MPIIORestartCoProcessor and MPIIOMigrationCoProcessor have size == 1!
     MPI_Offset read_offset = (MPI_Offset)(procCount * sizeof(int));
 
     // read parameters of the grid and blocks
     MPI_File_read_at(file_handlerR, read_offset, gridParameters, 1, gridParamType, MPI_STATUS_IGNORE);
-    MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + sizeof(GridParam)), &block3dArray[0], blocksCount,
-                     block3dType, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + sizeof(GridParam)), &block3dArray[0], blocksCount, block3dType, MPI_STATUS_IGNORE);
 
     // clear the grid
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
-    if (minInitLevel > -1) {
+    if (minInitLevel > -1) 
+    {
         int maxInitLevel = this->grid->getFinestInitializedLevel();
-        for (int level = minInitLevel; level <= maxInitLevel; level++) {
+        for (int level = minInitLevel; level <= maxInitLevel; level++) 
+        {
             grid->getBlocks(level, blocksVector[level]);
             for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
                 grid->deleteBlock(block);
@@ -233,9 +232,9 @@ void CheckpointConverter::convertBlocks(int step, int procCount)
     grid->setPeriodicX3(gridParameters->periodicX3);
 
     // regenerate blocks
-    for (int n = 0; n < blocksCount; n++) {
-        SPtr<Block3D> block(
-            new Block3D(block3dArray[n].x1, block3dArray[n].x2, block3dArray[n].x3, block3dArray[n].level));
+    for (int n = 0; n < blocksCount; n++) 
+    {
+        SPtr<Block3D> block(new Block3D(block3dArray[n].x1, block3dArray[n].x2, block3dArray[n].x3, block3dArray[n].level));
         block->setActive(block3dArray[n].active);
         block->setBundle(block3dArray[n].bundle);
         block->setRank(block3dArray[n].rank);
@@ -255,7 +254,8 @@ void CheckpointConverter::convertBlocks(int step, int procCount)
 
     // refresh globalID in all the blocks
     SPtr<Block3D> block;
-    for (int n = 0; n < blocksCount; n++) {
+    for (int n = 0; n < blocksCount; n++) 
+    {
         block = grid->getBlock(block3dArray[n].x1, block3dArray[n].x2, block3dArray[n].x3, block3dArray[n].level);
         block3dArray[n].globalID = block->getGlobalID();
     }
@@ -265,8 +265,7 @@ void CheckpointConverter::convertBlocks(int step, int procCount)
 
     MPI_File_write_at(file_handlerW, 0, &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
     MPI_File_write_at(file_handlerW, write_offset, gridParameters, 1, gridParamType, MPI_STATUS_IGNORE);
-    MPI_File_write_at(file_handlerW, (MPI_Offset)(write_offset + sizeof(GridParam)), &block3dArray[0], blocksCount,
-                      block3dType, MPI_STATUS_IGNORE);
+    MPI_File_write_at(file_handlerW, (MPI_Offset)(write_offset + sizeof(GridParam)), &block3dArray[0], blocksCount, block3dType, MPI_STATUS_IGNORE);
 
     MPI_File_close(&file_handlerR);
     MPI_File_close(&file_handlerW);
@@ -282,19 +281,31 @@ void CheckpointConverter::convertDataSet(int step, int procCount)
 {
     // file to read from
     MPI_File file_handlerR;
-    std::string filenameR = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
+    std::string filenameR = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin";
     int rcR = MPI_File_open(MPI_COMM_WORLD, filenameR.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handlerR);
     if (rcR != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filenameR);
 
     // file to write to
     MPI_File file_handlerW;
-    std::string filenameW = path + "/mig/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
-    int rcW = MPI_File_open(MPI_COMM_WORLD, filenameW.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL,
-                            &file_handlerW);
+    std::string filenameW = path + "/mig/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetF.bin";
+    int rcW = MPI_File_open(MPI_COMM_WORLD, filenameW.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handlerW);
     if (rcW != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filenameW);
 
+    MPI_File file_handlerR1;
+    std::string filenameR1 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin";
+    rcR = MPI_File_open(MPI_COMM_WORLD, filenameR1.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handlerR1);
+    if (rcR != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filenameR1);
+
+    MPI_File file_handlerW1;
+    std::string filenameW1 = path + "/mig/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin";
+    rcW = MPI_File_open(MPI_COMM_WORLD, filenameW1.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handlerW1);
+    if (rcW != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filenameW1);
+
+    MPI_Offset fsize;
     double start, finish;
     start = MPI_Wtime();
 
@@ -304,22 +315,21 @@ void CheckpointConverter::convertDataSet(int step, int procCount)
     DataSetMigration *dataSetWriteArray;
     size_t doubleCountInBlock;
     std::vector<double> doubleValuesArray;
+    std::vector<double> doubleValuesArrayH1; // double-values in all blocks  H1distributions
     size_t sizeofOneDataSet;
 
     // calculate the read offset
     MPI_Offset read_offset = (MPI_Offset)(procCount * sizeof(int));
     MPI_Offset write_offset;
 
-    for (int pc = 0; pc < procCount; pc++) {
+    for (int pc = 0; pc < procCount; pc++) 
+    {
         // read count of blocks and parameters of data arrays
         MPI_File_read_at(file_handlerR, (MPI_Offset)(pc * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
         MPI_File_read_at(file_handlerR, read_offset, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE);
-        MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + sizeof(dataSetParam)), &dataSetParamStr2, 1,
-                         dataSetParamType, MPI_STATUS_IGNORE);
-        MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + 2 * sizeof(dataSetParam)), &dataSetParamStr3, 1,
-                         dataSetParamType, MPI_STATUS_IGNORE);
-        doubleCountInBlock =
-            dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
+        MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
+        MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + 2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
+        doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
             dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] +
             dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
 
@@ -327,56 +337,77 @@ void CheckpointConverter::convertDataSet(int step, int procCount)
         dataSetWriteArray = new DataSetMigration[blocksCount];
         doubleValuesArray.resize(blocksCount * doubleCountInBlock);
 
+        MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
+        MPI_Type_commit(&dataSetDoubleType);
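+        // doubleCountInBlock can differ between process chunks, so the contiguous type is rebuilt here and freed at the end of each iteration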
+
         // read data
         MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam)), dataSetReadArray,
                          blocksCount, dataSetTypeRead, MPI_STATUS_IGNORE);
-        MPI_File_read_at(file_handlerR,
-                         (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam) + blocksCount * sizeof(DataSetRestart)),
-                         &doubleValuesArray[0], int(blocksCount * doubleCountInBlock), MPI_DOUBLE, MPI_STATUS_IGNORE);
+        MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam) + blocksCount * sizeof(DataSetRestart)),
+                         &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
         // offset to read the data of the next process
-        read_offset =
-            read_offset + (MPI_Offset)(3 * sizeof(dataSetParam) +
-                                       blocksCount * (sizeof(DataSetRestart) + doubleCountInBlock * sizeof(double)));
+        read_offset = read_offset + (MPI_Offset)(3 * sizeof(dataSetParam) + blocksCount * (sizeof(DataSetRestart) + doubleCountInBlock * sizeof(double)));
 
         // write parameters of data arrays
         MPI_File_write_at(file_handlerW, (MPI_Offset)0, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE);
-        MPI_File_write_at(file_handlerW, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType,
-                          MPI_STATUS_IGNORE);
-        MPI_File_write_at(file_handlerW, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType,
-                          MPI_STATUS_IGNORE);
+        MPI_File_write_at(file_handlerW, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
+        MPI_File_write_at(file_handlerW, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
         sizeofOneDataSet = sizeof(DataSetMigration) + doubleCountInBlock * sizeof(double);
 
         // write blocks and their data arrays
-        for (int nb = 0; nb < blocksCount; nb++) {
-            SPtr<Block3D> block                   = grid->getBlock(dataSetReadArray[nb].x1, dataSetReadArray[nb].x2,
-                                                 dataSetReadArray[nb].x3, dataSetReadArray[nb].level);
+        for (int nb = 0; nb < blocksCount; nb++) 
+        {
+            SPtr<Block3D> block = grid->getBlock(dataSetReadArray[nb].x1, dataSetReadArray[nb].x2, dataSetReadArray[nb].x3, dataSetReadArray[nb].level);
             dataSetWriteArray[nb].globalID        = block->getGlobalID();
             dataSetWriteArray[nb].ghostLayerWidth = dataSetReadArray[nb].ghostLayerWidth;
             dataSetWriteArray[nb].collFactor      = dataSetReadArray[nb].collFactor;
             dataSetWriteArray[nb].deltaT          = dataSetReadArray[nb].deltaT;
             dataSetWriteArray[nb].compressible    = dataSetReadArray[nb].compressible;
             dataSetWriteArray[nb].withForcing     = dataSetReadArray[nb].withForcing;
-//            dataSetWriteArray[nb].densityRatio    = dataSetReadArray[nb].densityRatio;
+            dataSetWriteArray[nb].collFactorL     = dataSetReadArray[nb].collFactorL;   // for Multiphase model
+            dataSetWriteArray[nb].collFactorG     = dataSetReadArray[nb].collFactorG;   // for Multiphase model
+            dataSetWriteArray[nb].densityRatio    = dataSetReadArray[nb].densityRatio;  // for Multiphase model
 
             write_offset = (MPI_Offset)(3 * sizeof(dataSetParam) + dataSetWriteArray[nb].globalID * sizeofOneDataSet);
-            MPI_File_write_at(file_handlerW, write_offset, &dataSetWriteArray[nb], 1, dataSetTypeWrite,
-                              MPI_STATUS_IGNORE);
+            MPI_File_write_at(file_handlerW, write_offset, &dataSetWriteArray[nb], 1, dataSetTypeWrite, MPI_STATUS_IGNORE);
             MPI_File_write_at(file_handlerW, (MPI_Offset)(write_offset + sizeof(DataSetMigration)),
-                              &doubleValuesArray[nb * doubleCountInBlock], int(doubleCountInBlock), MPI_DOUBLE,
-                              MPI_STATUS_IGNORE);
+                              &doubleValuesArray[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
         }
 
+        //-------------------------------------- H1 -----------------------------
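+        // convert H1 distributions only if the source checkpoint actually contains them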
+        int fs = MPI_File_get_size(file_handlerR1, &fsize);
+        if (fsize > 0)
+        {
+            doubleValuesArrayH1.resize(blocksCount * doubleCountInBlock);
+            MPI_File_read_at(file_handlerR1, read_offset, &doubleValuesArrayH1[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+
+            sizeofOneDataSet = doubleCountInBlock * sizeof(double);
+
+            for (int nb = 0; nb < blocksCount; nb++)
+            {
+                write_offset = (MPI_Offset)(dataSetWriteArray[nb].globalID * sizeofOneDataSet);
+                MPI_File_write_at(file_handlerW1, write_offset, &doubleValuesArrayH1[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
+            }
+        }
+
+        MPI_Type_free(&dataSetDoubleType);
+
         delete[] dataSetReadArray;
         delete[] dataSetWriteArray;
     }
 
-    MPI_File_close(&file_handlerR);
+    MPI_File_close(&file_handlerR1);
+    MPI_File_sync(file_handlerW1);
+    MPI_File_close(&file_handlerW1);
 
+    MPI_File_close(&file_handlerR);
     MPI_File_sync(file_handlerW);
     MPI_File_close(&file_handlerW);
 
+    //--------------------------------------------------------------------------------
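+    // presence flags from cpArrays.bin decide which optional statistics arrays have to be converted below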
     DSArraysPresence arrPresence;
     MPI_File file_handler1;
     std::string filename1 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpArrays.bin";
@@ -400,28 +431,22 @@ void CheckpointConverter::convertDataSet(int step, int procCount)
     std::string filenameWW = path + "/mig/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step);
 
     if (arrPresence.isAverageDensityArrayPresent)
-        convert___Array(step, procCount, filenameRR + "/cpAverageDensityArray.bin",
-                        filenameWW + "/cpAverageDensityArray.bin");
+        convert___Array(step, procCount, filenameRR + "/cpAverageDensityArray.bin", filenameWW + "/cpAverageDensityArray.bin");
 
     if (arrPresence.isAverageVelocityArrayPresent)
-        convert___Array(step, procCount, filenameRR + "/cpAverageVelocityArray.bin",
-                        filenameWW + "/cpAverageVelocityArray.bin");
+        convert___Array(step, procCount, filenameRR + "/cpAverageVelocityArray.bin", filenameWW + "/cpAverageVelocityArray.bin");
 
     if (arrPresence.isAverageFluktuationsArrayPresent)
-        convert___Array(step, procCount, filenameRR + "/cpAverageFluktuationsArray.bin",
-                        filenameWW + "/cpAverageFluktuationsArray.bin");
+        convert___Array(step, procCount, filenameRR + "/cpAverageFluktuationsArray.bin", filenameWW + "/cpAverageFluktuationsArray.bin");
 
     if (arrPresence.isAverageTripleArrayPresent)
-        convert___Array(step, procCount, filenameRR + "/cpAverageTripleArray.bin",
-                        filenameWW + "/cpAverageTripleArray.bin");
+        convert___Array(step, procCount, filenameRR + "/cpAverageTripleArray.bin", filenameWW + "/cpAverageTripleArray.bin");
 
     if (arrPresence.isShearStressValArrayPresent)
-        convert___Array(step, procCount, filenameRR + "/cpShearStressValArray.bin",
-                        filenameWW + "/cpShearStressValArray.bin");
+        convert___Array(step, procCount, filenameRR + "/cpShearStressValArray.bin", filenameWW + "/cpShearStressValArray.bin");
 
     if (arrPresence.isRelaxationFactorPresent)
-        convert___Array(step, procCount, filenameRR + "/cpRelaxationFactor.bin",
-                        filenameWW + "/cpRelaxationFactor.bin");
+        convert___Array(step, procCount, filenameRR + "/cpRelaxationFactor.bin", filenameWW + "/cpRelaxationFactor.bin");
 
     finish = MPI_Wtime();
     UBLOG(logINFO, "UtilConvertor::convertDataSet time: " << finish - start << " s");
@@ -439,8 +464,7 @@ void CheckpointConverter::convert___Array(int /*step*/, int procCount, std::stri
         throw UbException(UB_EXARGS, "couldn't open file " + filenameR);
 
     MPI_File file_handlerW;
-    int rcW = MPI_File_open(MPI_COMM_WORLD, filenameW.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL,
-                            &file_handlerW);
+    int rcW = MPI_File_open(MPI_COMM_WORLD, filenameW.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handlerW);
     if (rcW != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filenameW);
 
@@ -457,32 +481,29 @@ void CheckpointConverter::convert___Array(int /*step*/, int procCount, std::stri
     MPI_Offset write_offset;
     size_t sizeofOneDataSet;
 
-    for (int pc = 0; pc < procCount; pc++) {
+    for (int pc = 0; pc < procCount; pc++) 
+    {
         MPI_File_read_at(file_handlerR, (MPI_Offset)(pc * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
         MPI_File_read_at(file_handlerR, read_offset, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
         dataSetSmallReadArray  = new DataSetSmallRestart[blocksCount];
         dataSetSmallWriteArray = new DataSetSmallMigration[blocksCount];
-        doubleCountInBlock =
-            dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+        doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
         doubleValuesArray.resize(blocksCount * doubleCountInBlock);
 
-        MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallReadArray,
-                         blocksCount * 4, MPI_INT, MPI_STATUS_IGNORE);
+        MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallReadArray, blocksCount * 4, MPI_INT, MPI_STATUS_IGNORE);
         if (doubleCountInBlock > 0)
-            MPI_File_read_at(
-                file_handlerR,
-                (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+            MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
                 &doubleValuesArray[0], blocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
 
-        read_offset = read_offset + sizeof(dataSetParam) +
-                      blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+        read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
 
         sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
 
         MPI_File_write_at(file_handlerW, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
-        for (int nb = 0; nb < blocksCount; nb++) {
+        for (int nb = 0; nb < blocksCount; nb++) 
+        {
             SPtr<Block3D> block = grid->getBlock(dataSetSmallReadArray[nb].x1, dataSetSmallReadArray[nb].x2,
                                                  dataSetSmallReadArray[nb].x3, dataSetSmallReadArray[nb].level);
             dataSetSmallWriteArray[nb].globalID = block->getGlobalID();
@@ -490,8 +511,7 @@ void CheckpointConverter::convert___Array(int /*step*/, int procCount, std::stri
             write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallWriteArray[nb].globalID * sizeofOneDataSet);
             MPI_File_write_at(file_handlerW, write_offset, &dataSetSmallWriteArray[nb], 1, MPI_INT, MPI_STATUS_IGNORE);
             MPI_File_write_at(file_handlerW, (MPI_Offset)(write_offset + sizeof(DataSetSmallMigration)),
-                              &doubleValuesArray[nb * doubleCountInBlock], doubleCountInBlock, MPI_DOUBLE,
-                              MPI_STATUS_IGNORE);
+                              &doubleValuesArray[nb * doubleCountInBlock], doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
         }
 
         delete[] dataSetSmallReadArray;
@@ -518,8 +538,7 @@ void CheckpointConverter::convertBC(int step, int procCount)
     // file to write to
     MPI_File file_handlerW;
     std::string filenameW = path + "/mig/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC.bin";
-    int rcW = MPI_File_open(MPI_COMM_WORLD, filenameW.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL,
-                            &file_handlerW);
+    int rcW = MPI_File_open(MPI_COMM_WORLD, filenameW.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handlerW);
     if (rcW != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filenameW);
 
@@ -546,7 +565,8 @@ void CheckpointConverter::convertBC(int step, int procCount)
     MPI_Offset write_offset = (MPI_Offset)(sizeof(boundCondParam) + grid->getNumberOfBlocks() * sizeof(size_t));
     MPI_Offset write_offsetIndex;
 
-    for (int pc = 0; pc < procCount; pc++) {
+    for (int pc = 0; pc < procCount; pc++) 
+    {
         read_offset = (MPI_Offset)(pc * (3 * sizeof(int) + sizeof(boundCondParam)));
 
         // read count of blocks
@@ -574,15 +594,10 @@ void CheckpointConverter::convertBC(int step, int procCount)
         MPI_File_read_at(file_handlerR, read_offset1, bcAddReadArray, blocksCount * 6, MPI_INT, MPI_STATUS_IGNORE);
         MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset1 + blocksCount * sizeof(BCAddRestart)), &bcArray[0],
                          dataCount1000, boundCondType1000, MPI_STATUS_IGNORE);
-        MPI_File_read_at(
-            file_handlerR,
-            (MPI_Offset)(read_offset1 + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition)),
-            &intArray1[0], blocksCount * boundCondParamStr.bcindexmatrixCount, MPI_INT, MPI_STATUS_IGNORE);
-        MPI_File_read_at(file_handlerR,
-                         (MPI_Offset)(read_offset1 + blocksCount * sizeof(BCAddRestart) +
-                                      dataCount * sizeof(BoundaryCondition) +
-                                      blocksCount * boundCondParamStr.bcindexmatrixCount * sizeof(int)),
-                         &intArray2[0], dataCount2, MPI_INT, MPI_STATUS_IGNORE);
+        MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset1 + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition)),
+                         &intArray1[0], blocksCount * boundCondParamStr.bcindexmatrixCount, MPI_INT, MPI_STATUS_IGNORE);
+        MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset1 + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition) +
+                         blocksCount * boundCondParamStr.bcindexmatrixCount * sizeof(int)), &intArray2[0], dataCount2, MPI_INT, MPI_STATUS_IGNORE);
 
         // offset to read the data of the next process
         read_offset1 = read_offset1 + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition) +
@@ -593,14 +608,12 @@ void CheckpointConverter::convertBC(int step, int procCount)
         indexBC = 0;
         indexC  = 0;
         // write blocks and their BC data arrays
-        for (int nb = 0; nb < blocksCount; nb++) {
-            SPtr<Block3D> block = grid->getBlock(bcAddReadArray[nb].x1, bcAddReadArray[nb].x2, bcAddReadArray[nb].x3,
-                                                 bcAddReadArray[nb].level);
+        for (int nb = 0; nb < blocksCount; nb++) 
+        {
+            SPtr<Block3D> block = grid->getBlock(bcAddReadArray[nb].x1, bcAddReadArray[nb].x2, bcAddReadArray[nb].x3, bcAddReadArray[nb].level);
             bcAddWriteArray[nb].globalID = block->getGlobalID();
-            bcAddWriteArray[nb].boundCond_count =
-                bcAddReadArray[nb].boundCond_count; // how many BoundaryConditions in this block
-            bcAddWriteArray[nb].indexContainer_count =
-                bcAddReadArray[nb].indexContainer_count; // how many indexContainer-values in this block
+            bcAddWriteArray[nb].boundCond_count = bcAddReadArray[nb].boundCond_count; // how many BoundaryConditions in this block
+            bcAddWriteArray[nb].indexContainer_count = bcAddReadArray[nb].indexContainer_count; // how many indexContainer-values in this block
 
             write_offsetIndex = (MPI_Offset)(sizeof(boundCondParam) + bcAddWriteArray[nb].globalID * sizeof(size_t));
             MPI_File_write_at(file_handlerW, write_offsetIndex, &write_offset, 1, MPI_LONG_LONG_INT, MPI_STATUS_IGNORE);
@@ -612,24 +625,16 @@ void CheckpointConverter::convertBC(int step, int procCount)
             indexBC += bcAddWriteArray[nb].boundCond_count;
 
             if (boundCondParamStr.bcindexmatrixCount > 0)
-                MPI_File_write_at(file_handlerW,
-                                  (MPI_Offset)(write_offset + sizeof(BCAddMigration) +
-                                               bcAddWriteArray[nb].boundCond_count * sizeof(BoundaryCondition)),
-                                  &intArray1[nb * boundCondParamStr.bcindexmatrixCount],
-                                  boundCondParamStr.bcindexmatrixCount, MPI_INT, MPI_STATUS_IGNORE);
+                MPI_File_write_at(file_handlerW, (MPI_Offset)(write_offset + sizeof(BCAddMigration) + bcAddWriteArray[nb].boundCond_count * sizeof(BoundaryCondition)),
+                                  &intArray1[nb * boundCondParamStr.bcindexmatrixCount], boundCondParamStr.bcindexmatrixCount, MPI_INT, MPI_STATUS_IGNORE);
 
             if (bcAddWriteArray[nb].indexContainer_count > 0)
-                MPI_File_write_at(file_handlerW,
-                                  (MPI_Offset)(write_offset + sizeof(BCAddMigration) +
-                                               bcAddWriteArray[nb].boundCond_count * sizeof(BoundaryCondition) +
-                                               boundCondParamStr.bcindexmatrixCount * sizeof(int)),
-                                  &intArray2[indexC], bcAddWriteArray[nb].indexContainer_count, MPI_INT,
-                                  MPI_STATUS_IGNORE);
+                MPI_File_write_at(file_handlerW, (MPI_Offset)(write_offset + sizeof(BCAddMigration) + bcAddWriteArray[nb].boundCond_count * sizeof(BoundaryCondition) +
+                                  boundCondParamStr.bcindexmatrixCount * sizeof(int)), &intArray2[indexC], bcAddWriteArray[nb].indexContainer_count, MPI_INT, MPI_STATUS_IGNORE);
             indexC += bcAddWriteArray[nb].indexContainer_count;
 
             write_offset += sizeof(BCAddMigration) + bcAddWriteArray[nb].boundCond_count * sizeof(BoundaryCondition) +
-                            boundCondParamStr.bcindexmatrixCount * sizeof(int) +
-                            bcAddWriteArray[nb].indexContainer_count * sizeof(int);
+                            boundCondParamStr.bcindexmatrixCount * sizeof(int) + bcAddWriteArray[nb].indexContainer_count * sizeof(int);
         }
 
         delete[] bcAddReadArray;
diff --git a/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.h b/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.h
index bad6116ccf8e1ebc32b8f7d6e12c3d36bc1b4e46..294081a4fe6265a1aa8ef74c5ec79cb8a13efedf 100644
--- a/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.h
+++ b/src/cpu/VirtualFluidsCore/Utilities/CheckpointConverter.h
@@ -32,7 +32,7 @@ protected:
 private:
     MPI_Datatype gridParamType, block3dType;
     MPI_Datatype dataSetParamType, dataSetTypeRead, dataSetTypeWrite;
-    MPI_Datatype boundCondType, boundCondType1000;
+    MPI_Datatype boundCondType, boundCondType1000, dataSetDoubleType;
 
     MPIIODataStructures::boundCondParam boundCondParamStr;
 };