From 91788f64aabbbddb6a2b9bbb7f77ce947b1156df Mon Sep 17 00:00:00 2001
From: alena <akaranchuk@list.ru>
Date: Mon, 9 Aug 2021 17:34:46 +0200
Subject: [PATCH] Fix issues when adding PressureField

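The checkpoint infrastructure was not fully updated when the PressureField
array was added. This patch extends the arrayPresenceType MPI type from 8
to 9 chars so the presence flag of the pressure field is written and read
together with the other array flags, clears the stale cpPressureField.bin
file in MPIIOCoProcessor::clearAllFiles(), resets the block counter before
reading the H1 and H2 data sets in readDataSet(), and removes the
commented-out per-array read/write helpers that were superseded by
write4DArray, write3DArray and readArray.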
---
 .../CoProcessors/MPIIOCoProcessor.cpp         |    9 +-
 .../MPIIOMigrationCoProcessor.cpp             | 1357 +----------------
 2 files changed, 10 insertions(+), 1356 deletions(-)

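Note for reviewers (placed below the diffstat, so it is ignored when the
patch is applied): the removed helpers all share one on-disk layout, which
the consolidated write4DArray/write3DArray/readArray helpers presumably
keep: a dataSetParam header followed by fixed-size per-block records
addressed by the global block ID. A minimal sketch of that offset
arithmetic, using simplified stand-in types rather than the real structs:

    #include <cstddef>
    #include <cstdint>

    // Simplified stand-ins for the real dataSetParam / DataSetSmallMigration structs.
    struct dataSetParam { int nx1, nx2, nx3; int nx[4]; };
    struct DataSetSmallMigration { int globalID; };

    // Every block record has the same size, so a block can be addressed
    // directly by its global ID, with no index table in the file:
    inline std::size_t sizeofOneDataSet(std::size_t doubleCountInBlock)
    {
        return sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
    }

    inline std::int64_t blockOffset(int globalID, std::size_t doubleCountInBlock)
    {
        return static_cast<std::int64_t>(sizeof(dataSetParam)) +
               static_cast<std::int64_t>(globalID) *
               static_cast<std::int64_t>(sizeofOneDataSet(doubleCountInBlock));
    }
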
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp
index bc25bf37f..1ef3f3bc0 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp
@@ -70,7 +70,7 @@ MPIIOCoProcessor::MPIIOCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const
 
     //---------------------------------------
 
-    MPI_Type_contiguous(8, MPI_CHAR, &arrayPresenceType);
+    MPI_Type_contiguous(9, MPI_CHAR, &arrayPresenceType);
     MPI_Type_commit(&arrayPresenceType);
 }
 
@@ -468,6 +468,13 @@ void MPIIOCoProcessor::clearAllFiles(int step)
     MPI_File_set_size(file_handler, new_size);
     MPI_File_close(&file_handler);
 
+    std::string filename12 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPressureField.bin";
+    int rc12 = MPI_File_open(MPI_COMM_WORLD, filename12.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+    if (rc12 != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename12);
+    MPI_File_set_size(file_handler, new_size);
+    MPI_File_close(&file_handler);
+
 }
 
 void MPIIOCoProcessor::writeCpTimeStep(int step)
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
index 4c2c10a0e..fe84de2d1 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
@@ -425,23 +425,18 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
 
     if (arrPresence.isAverageDensityArrayPresent)
         write4DArray(step, AverageDensity, std::string("/cpAverageDensityArray.bin"));
-    // writeAverageDensityArray(step);
 
     if (arrPresence.isAverageVelocityArrayPresent)
         write4DArray(step, AverageVelocity, std::string("/cpAverageVelocityArray.bin"));
-    // writeAverageVelocityArray(step);
 
     if (arrPresence.isAverageFluktuationsArrayPresent)
         write4DArray(step, AverageFluktuations, std::string("/cpAverageFluktuationsArray.bin"));
-    // writeAverageFluktuationsArray(step);
 
     if (arrPresence.isAverageTripleArrayPresent)
         write4DArray(step, AverageTriple, std::string("/cpAverageTripleArray.bin"));
-    // writeAverageTripleArray(step);
 
     if (arrPresence.isShearStressValArrayPresent)
         write4DArray(step, ShearStressVal, std::string("/cpShearStressValArray.bin"));
-    // writeShearStressValArray(step);
 
     if (arrPresence.isRelaxationFactorPresent)
         write3DArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin"));
@@ -716,720 +711,6 @@ void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st
     delete[] dataSetSmallArray;
 }
 
-/*
-void MPIIOMigrationCoProcessor::writeAverageDensityArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
-
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   std::vector<double> doubleValuesArray; // double-values of the AverageDensityArray in all blocks
-   dataSetParam dataSetParamStr;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageDensityArray start collect data rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   bool firstBlock = true;
-   size_t doubleCountInBlock = 0;
-   int ic = 0;
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         dataSetSmallArray[ic].globalID = block->getGlobalID();     // id of the block needed to find it while regenerating the grid
-
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > averageDensityArray = block->getKernel()->getDataSet()->getAverageDensity();
-
-         if (firstBlock) // when first (any) valid block...
-         {
-            //if (averageDensityArray)
-            //{
-            dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-            dataSetParamStr.nx[0] = static_cast<int>(averageDensityArray->getNX1());
-            dataSetParamStr.nx[1] = static_cast<int>(averageDensityArray->getNX2());
-            dataSetParamStr.nx[2] = static_cast<int>(averageDensityArray->getNX3());
-            dataSetParamStr.nx[3] = static_cast<int>(averageDensityArray->getNX4());
-            doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-            //}
-            //else
-            //   break;
-
-            firstBlock = false;
-         }
-
-         if (averageDensityArray && (dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0)) doubleValuesArray.insert(doubleValuesArray.end(), averageDensityArray->getDataVector().begin(), averageDensityArray->getDataVector().end());
-
-         ic++;
-      }
-   }
-
-   // register new MPI-types depending on the block-specific information
-   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
-   MPI_Type_commit(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageDensityArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_Info info = MPI_INFO_NULL;
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageDensityArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // each process writes common parameters of a dataSet
-   MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   MPI_Offset write_offset;
-   size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
-
-   for (size_t nb = 0; nb < blocksCount; nb++)
-   {
-      write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
-      MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
-      MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
-   }
-
-   MPI_File_sync(file_handler);
-   MPI_File_close(&file_handler);
-   MPI_Type_free(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageDensityArray time: " << finish - start << " s");
-   }
-
-   delete[] dataSetSmallArray;
-}
-
-void MPIIOMigrationCoProcessor::writeAverageVelocityArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
-
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
-   dataSetParam dataSetParamStr;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageVelocityArray start collect data rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   bool firstBlock = true;
-   size_t doubleCountInBlock = 0;
-   int ic = 0;
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         dataSetSmallArray[ic].globalID = block->getGlobalID();     // id of the block needed to find it while regenerating the grid
-
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > AverageVelocityArray3DPtr = block->getKernel()->getDataSet()->getAverageVelocity();
-
-         if (firstBlock) // when first (any) valid block...
-         {
-            //if (AverageVelocityArray3DPtr)
-            //{
-            dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-            dataSetParamStr.nx[0] = static_cast<int>(AverageVelocityArray3DPtr->getNX1());
-            dataSetParamStr.nx[1] = static_cast<int>(AverageVelocityArray3DPtr->getNX2());
-            dataSetParamStr.nx[2] = static_cast<int>(AverageVelocityArray3DPtr->getNX3());
-            dataSetParamStr.nx[3] = static_cast<int>(AverageVelocityArray3DPtr->getNX4());
-            doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-            //}
-            //else
-            //   break;
-
-            firstBlock = false;
-         }
-
-         if (AverageVelocityArray3DPtr && (dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0)) doubleValuesArray.insert(doubleValuesArray.end(), AverageVelocityArray3DPtr->getDataVector().begin(), AverageVelocityArray3DPtr->getDataVector().end());
-
-         ic++;
-      }
-   }
-
-   // register new MPI-types depending on the block-specific information
-   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
-   MPI_Type_commit(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageVelocityArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_Info info = MPI_INFO_NULL;
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageVelocityArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // each process writes common parameters of a dataSet
-   MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   MPI_Offset write_offset;
-   size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
-
-   for (size_t nb = 0; nb < blocksCount; nb++)
-   {
-      write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
-      MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
-      MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
-   }
-
-   MPI_File_sync(file_handler);
-   MPI_File_close(&file_handler);
-   MPI_Type_free(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageVelocityArray time: " << finish - start << " s");
-   }
-
-   delete[] dataSetSmallArray;
-}
-
-void MPIIOMigrationCoProcessor::writeAverageFluktuationsArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
-
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
-   dataSetParam dataSetParamStr;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageFluktuationsArray start collect data rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   bool firstBlock = true;
-   size_t doubleCountInBlock = 0;
-   int ic = 0;
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         dataSetSmallArray[ic].globalID = block->getGlobalID();     // id of the block needed to find it while regenerating the grid
-
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > AverageFluctArray3DPtr = block->getKernel()->getDataSet()->getAverageFluctuations();
-
-         if (firstBlock) // when first (any) valid block...
-         {
-            //if (AverageFluctArray3DPtr)
-            //{
-            dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-            dataSetParamStr.nx[0] = static_cast<int>(AverageFluctArray3DPtr->getNX1());
-            dataSetParamStr.nx[1] = static_cast<int>(AverageFluctArray3DPtr->getNX2());
-            dataSetParamStr.nx[2] = static_cast<int>(AverageFluctArray3DPtr->getNX3());
-            dataSetParamStr.nx[3] = static_cast<int>(AverageFluctArray3DPtr->getNX4());
-            doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-            //}
-            //else
-            //   break;
-
-            firstBlock = false;
-         }
-
-         if (AverageFluctArray3DPtr && (dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0)) doubleValuesArray.insert(doubleValuesArray.end(), AverageFluctArray3DPtr->getDataVector().begin(), AverageFluctArray3DPtr->getDataVector().end());
-
-         ic++;
-      }
-   }
-
-   // register new MPI-types depending on the block-specific information
-   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
-   MPI_Type_commit(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageFluktuationsArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_Info info = MPI_INFO_NULL;
-#ifdef HLRN_LUSTRE
-   MPI_Info_create(&info);
-   MPI_Info_set(info, "striping_factor", "40");
-   MPI_Info_set(info, "striping_unit", "4M");
-#endif
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageFluktuationsArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // each process writes common parameters of a dataSet
-   MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   MPI_Offset write_offset;
-   size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
-
-   for (size_t nb = 0; nb < blocksCount; nb++)
-   {
-      write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
-      MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
-      MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
-   }
-
-   MPI_File_sync(file_handler);
-   MPI_File_close(&file_handler);
-   MPI_Type_free(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageFluktuationsArray time: " << finish - start << " s");
-   }
-
-   delete[] dataSetSmallArray;
-}
-
-void MPIIOMigrationCoProcessor::writeAverageTripleArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
-
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
-   dataSetParam dataSetParamStr;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageTripleArray start collect data rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   bool firstBlock = true;
-   size_t doubleCountInBlock = 0;
-   int ic = 0;
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         dataSetSmallArray[ic].globalID = block->getGlobalID();     // id of the block needed to find it while regenerating the grid
-
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > AverageTripleArray3DPtr = block->getKernel()->getDataSet()->getAverageTriplecorrelations();
-
-         if (firstBlock) // when first (any) valid block...
-         {
-            //if (AverageTripleArray3DPtr)
-            //{
-            dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-            dataSetParamStr.nx[0] = static_cast<int>(AverageTripleArray3DPtr->getNX1());
-            dataSetParamStr.nx[1] = static_cast<int>(AverageTripleArray3DPtr->getNX2());
-            dataSetParamStr.nx[2] = static_cast<int>(AverageTripleArray3DPtr->getNX3());
-            dataSetParamStr.nx[3] = static_cast<int>(AverageTripleArray3DPtr->getNX4());
-            doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-            //}
-            //else
-            //   break;
-
-            firstBlock = false;
-         }
-
-         if (AverageTripleArray3DPtr && (dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0)) doubleValuesArray.insert(doubleValuesArray.end(), AverageTripleArray3DPtr->getDataVector().begin(), AverageTripleArray3DPtr->getDataVector().end());
-
-         ic++;
-      }
-   }
-
-   // register new MPI-types depending on the block-specific information
-   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
-   MPI_Type_commit(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageTripleArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_Info info = MPI_INFO_NULL;
-
-#ifdef HLRN_LUSTRE
-   MPI_Info_create(&info);
-   MPI_Info_set(info, "striping_factor", "40");
-   MPI_Info_set(info, "striping_unit", "4M");
-#endif
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageTripleArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // each process writes common parameters of a dataSet
-   MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   MPI_Offset write_offset;
-   size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
-
-   for (size_t nb = 0; nb < blocksCount; nb++)
-   {
-      write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
-      MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
-      MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
-   }
-
-   MPI_File_sync(file_handler);
-   MPI_File_close(&file_handler);
-   MPI_Type_free(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageTripleArray time: " << finish - start << " s");
-   }
-
-   delete[] dataSetSmallArray;
-}
-
-void MPIIOMigrationCoProcessor::writeShearStressValArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
-
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
-   dataSetParam dataSetParamStr;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeShearStressValArray start collect data rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   bool firstBlock = true;
-   size_t doubleCountInBlock = 0;
-   int ic = 0;
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         dataSetSmallArray[ic].globalID = block->getGlobalID();     // id of the block needed to find it while regenerating the grid
-
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > ShearStressValArray3DPtr = block->getKernel()->getDataSet()->getShearStressValues();
-
-         if (firstBlock) // when first (any) valid block...
-         {
-            //if (ShearStressValArray3DPtr)
-            //{
-            dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-            dataSetParamStr.nx[0] = static_cast<int>(ShearStressValArray3DPtr->getNX1());
-            dataSetParamStr.nx[1] = static_cast<int>(ShearStressValArray3DPtr->getNX2());
-            dataSetParamStr.nx[2] = static_cast<int>(ShearStressValArray3DPtr->getNX3());
-            dataSetParamStr.nx[3] = static_cast<int>(ShearStressValArray3DPtr->getNX4());
-            doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-            //}
-            //else
-            //   break;
-
-            firstBlock = false;
-         }
-
-         if (ShearStressValArray3DPtr && (dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0)) doubleValuesArray.insert(doubleValuesArray.end(), ShearStressValArray3DPtr->getDataVector().begin(), ShearStressValArray3DPtr->getDataVector().end());
-
-         ic++;
-      }
-   }
-
-   // register new MPI-types depending on the block-specific information
-   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
-   MPI_Type_commit(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeShearStressValArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_Info info = MPI_INFO_NULL;
-
-#ifdef HLRN_LUSTRE
-   MPI_Info_create(&info);
-   MPI_Info_set(info, "striping_factor", "40");
-   MPI_Info_set(info, "striping_unit", "4M");
-#endif
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpShearStressValArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // each process writes common parameters of a dataSet
-   MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   MPI_Offset write_offset;
-   size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
-
-   for (size_t nb = 0; nb < blocksCount; nb++)
-   {
-      write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
-      MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
-      MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
-   }
-
-   MPI_File_sync(file_handler);
-   MPI_File_close(&file_handler);
-   MPI_Type_free(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeShearStressValArray time: " << finish - start << " s");
-   }
-
-   delete[] dataSetSmallArray;
-}
-
-void MPIIOMigrationCoProcessor::writeRelaxationFactor(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
-
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
-   dataSetParam dataSetParamStr;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeRelaxationFactor start collect data rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   bool firstBlock = true;
-   size_t doubleCountInBlock = 0;
-   int ic = 0;
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         dataSetSmallArray[ic].globalID = block->getGlobalID();     // id of the block needed to find it while regenerating the grid
-
-         SPtr< CbArray3D<LBMReal, IndexerX3X2X1> > relaxationFactor3DPtr = block->getKernel()->getDataSet()->getRelaxationFactor();
-
-         if (firstBlock) // when first (any) valid block...
-         {
-            //if (relaxationFactor3DPtr)
-            //{
-            dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-            dataSetParamStr.nx[0] = static_cast<int>(relaxationFactor3DPtr->getNX1());
-            dataSetParamStr.nx[1] = static_cast<int>(relaxationFactor3DPtr->getNX2());
-            dataSetParamStr.nx[2] = static_cast<int>(relaxationFactor3DPtr->getNX3());
-            dataSetParamStr.nx[3] = 1;
-            doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-            //}
-            //else
-            //   break;
-
-            firstBlock = false;
-         }
-
-         if (relaxationFactor3DPtr && (dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0)) doubleValuesArray.insert(doubleValuesArray.end(), relaxationFactor3DPtr->getDataVector().begin(), relaxationFactor3DPtr->getDataVector().end());
-
-         ic++;
-      }
-   }
-
-   // register new MPI-types depending on the block-specific information
-   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
-   MPI_Type_commit(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeRelaxationFactor start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_Info info = MPI_INFO_NULL;
-
-#ifdef HLRN_LUSTRE
-   MPI_Info_create(&info);
-   MPI_Info_set(info, "striping_factor", "40");
-   MPI_Info_set(info, "striping_unit", "4M");
-#endif
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpRelaxationFactor.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // each process writes common parameters of a dataSet
-   MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   MPI_Offset write_offset;
-   size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
-
-   for (size_t nb = 0; nb < blocksCount; nb++)
-   {
-      write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
-      MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
-      MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
-   }
-
-   MPI_File_sync(file_handler);
-   MPI_File_close(&file_handler);
-   MPI_Type_free(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeRelaxationFactor time: " << finish - start << " s");
-   }
-
-   delete[] dataSetSmallArray;
-}
-*/
 void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
 {
     int rank, size;
@@ -1713,6 +994,7 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
     MPI_File_close(&file_handler);
 
     //----------------------------------------- H1 ----------------------------------------------------
+    ic = 0;
     MPI_Offset fsize;
     filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH1.bin";
     rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
@@ -1740,6 +1022,7 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
     MPI_File_close(&file_handler);
 
     //----------------------------------------- H2 ----------------------------------------------------
+    ic = 0;
     filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
     rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
     if (rc != MPI_SUCCESS)
@@ -1893,27 +1176,21 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
 
     if (arrPresence.isAverageDensityArrayPresent)
         readArray(step, AverageDensity, std::string("/cpAverageDensityArray.bin"));
-    // readAverageDensityArray(step);
 
     if (arrPresence.isAverageVelocityArrayPresent)
         readArray(step, AverageVelocity, std::string("/cpAverageVelocityArray.bin"));
-    //   readAverageVelocityArray(step);
 
     if (arrPresence.isAverageFluktuationsArrayPresent)
         readArray(step, AverageFluktuations, std::string("/cpAverageFluktuationsArray.bin"));
-    //   readAverageFluktuationsArray(step);
 
     if (arrPresence.isAverageTripleArrayPresent)
         readArray(step, AverageTriple, std::string("/cpAverageTripleArray.bin"));
-    //  readAverageTripleArray(step);
 
     if (arrPresence.isShearStressValArrayPresent)
         readArray(step, ShearStressVal, std::string("/cpShearStressValArray.bin"));
-    //   readShearStressValArray(step);
 
     if (arrPresence.isRelaxationFactorPresent)
         readArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin"));
-    //   readRelaxationFactor(step);
  
     if (arrPresence.isPhaseField1Present)
         readArray(step, PhaseField1, std::string("/cpPhaseField1.bin"));
@@ -2075,636 +1352,6 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string
     delete[] dataSetSmallArray;
 }
 
-/*void MPIIOMigrationCoProcessor::readAverageDensityArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageDensityArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageDensityArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // read count of blocks
-   size_t blocksCount = 0;
-   dataSetParam dataSetParamStr;
-   memset(&dataSetParamStr, 0, sizeof(dataSetParam));
-
-   // read from the grid the blocks, that belong to this process
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
-
-   // define MPI_types depending on the block-specific information
-   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
-   MPI_Type_commit(&dataSetDoubleType);
-
-   size_t ic = 0;
-   MPI_Offset read_offset;
-   size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
-
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         read_offset = (MPI_Offset)(sizeof(dataSetParam) + block->getGlobalID() * sizeofOneDataSet);
-         MPI_File_read_at(file_handler, read_offset, &dataSetSmallArray[ic], 1, dataSetSmallType, MPI_STATUS_IGNORE);
-         MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
-         ic++;
-      }
-   }
-
-   MPI_File_close(&file_handler);
-   MPI_Type_free(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageDensityArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageDensityArray start of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   size_t index = 0;
-   size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] *
-dataSetParamStr.nx[3]; std::vector<double> vectorsOfValues; for (int n = 0; n < blocksCount; n++)
-   {
-      vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
-      index += nextVectorSize;
-
-      // fill mAverageDensity arrays
-      SPtr<AverageValuesArray3D> mAverageDensity;
-      //if ((dataSetParamStr.nx[0]==0)&&(dataSetParamStr.nx[1]==0)&&(dataSetParamStr.nx[2]==0)&&(dataSetParamStr.nx[3]==0))
-      //   mAverageDensity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr();
-      //else
-      mAverageDensity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
-
-      //std::cout << "rank=" << rank << ", dataSetArray[n].globalID=" << dataSetSmallArray[n].globalID << std::endl;
-      // find the nesessary block and fill it
-      SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
-      block->getKernel()->getDataSet()->setAverageDensity(mAverageDensity);
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageDensityArray end of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   delete[] dataSetSmallArray;
-}
-
-void MPIIOMigrationCoProcessor::readAverageVelocityArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageVelocityArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageVelocityArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // read count of blocks
-   size_t blocksCount = 0;
-   dataSetParam dataSetParamStr;
-   memset(&dataSetParamStr, 0, sizeof(dataSetParam));
-
-   // read from the grid the blocks, that belong to this process
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
-
-   // define MPI_types depending on the block-specific information
-   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
-   MPI_Type_commit(&dataSetDoubleType);
-
-   size_t ic = 0;
-   MPI_Offset read_offset;
-   size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
-
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         read_offset = (MPI_Offset)(sizeof(dataSetParam) + block->getGlobalID() * sizeofOneDataSet);
-         MPI_File_read_at(file_handler, read_offset, &dataSetSmallArray[ic], 1, dataSetSmallType, MPI_STATUS_IGNORE);
-         MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
-         ic++;
-      }
-   }
-
-   MPI_File_close(&file_handler);
-   MPI_Type_free(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageVelocityArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageVelocityArray start of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   size_t index = 0;
-   size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> vectorsOfValues;
-   for (int n = 0; n < blocksCount; n++)
-   {
-      vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
-      index += nextVectorSize;
-
-      // fill mAverageVelocity array
-      SPtr<AverageValuesArray3D> mAverageVelocity;
-      //if ((dataSetParamStr.nx[0] == 0) && (dataSetParamStr.nx[1] == 0) && (dataSetParamStr.nx[2] == 0) && (dataSetParamStr.nx[3] == 0))
-      //   mAverageVelocity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr();
-      //else
-      mAverageVelocity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
-
-      // find the nesessary block and fill it
-      SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
-      block->getKernel()->getDataSet()->setAverageVelocity(mAverageVelocity);
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageVelocityArray end of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   delete[] dataSetSmallArray;
-}
-
-void MPIIOMigrationCoProcessor::readAverageFluktuationsArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageFluktuationsArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageFluktuationsArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // read count of blocks
-   size_t blocksCount = 0;
-   dataSetParam dataSetParamStr;
-   memset(&dataSetParamStr, 0, sizeof(dataSetParam));
-
-   // read from the grid the blocks, that belong to this process
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
-
-   // define MPI_types depending on the block-specific information
-   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
-   MPI_Type_commit(&dataSetDoubleType);
-
-   size_t ic = 0;
-   MPI_Offset read_offset;
-   size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
-
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         read_offset = (MPI_Offset)(sizeof(dataSetParam) + block->getGlobalID() * sizeofOneDataSet);
-         MPI_File_read_at(file_handler, read_offset, &dataSetSmallArray[ic], 1, dataSetSmallType, MPI_STATUS_IGNORE);
-         MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
-         ic++;
-      }
-   }
-
-   MPI_File_close(&file_handler);
-   MPI_Type_free(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageFluktuationsArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageFluktuationsArray start of restore of data, rank = " <<
-rank); UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   size_t index = 0;
-   size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] *
-dataSetParamStr.nx[3]; std::vector<double> vectorsOfValues; for (int n = 0; n < blocksCount; n++)
-   {
-      vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
-      index += nextVectorSize;
-
-      // fill AverageFluktuations array
-      SPtr<AverageValuesArray3D> mAverageFluktuations;
-      //if ((dataSetParamStr.nx[0] == 0) && (dataSetParamStr.nx[1] == 0) && (dataSetParamStr.nx[2] == 0) && (dataSetParamStr.nx[3] == 0))
-      //   mAverageFluktuations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr();
-      //else
-      mAverageFluktuations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
-
-      // find the nesessary block and fill it
-      SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
-      block->getKernel()->getDataSet()->setAverageFluctuations(mAverageFluktuations);
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageFluktuationsArray end of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   delete[] dataSetSmallArray;
-}
-
-void MPIIOMigrationCoProcessor::readAverageTripleArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageTripleArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageTripleArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // read count of blocks
-   size_t blocksCount = 0;
-   dataSetParam dataSetParamStr;
-   memset(&dataSetParamStr, 0, sizeof(dataSetParam));
-
-   // read from the grid the blocks, that belong to this process
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
-
-   // define MPI_types depending on the block-specific information
-   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
-   MPI_Type_commit(&dataSetDoubleType);
-
-   size_t ic = 0;
-   MPI_Offset read_offset;
-   size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
-
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         read_offset = (MPI_Offset)(sizeof(dataSetParam) + block->getGlobalID() * sizeofOneDataSet);
-         MPI_File_read_at(file_handler, read_offset, &dataSetSmallArray[ic], 1, dataSetSmallType, MPI_STATUS_IGNORE);
-         MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
-         ic++;
-      }
-   }
-
-   MPI_File_close(&file_handler);
-   MPI_Type_free(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageTripleArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageTripleArray start of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   size_t index = 0;
-   size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> vectorsOfValues;
-   for (int n = 0; n < blocksCount; n++)
-   {
-      vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
-      index += nextVectorSize;
-
-      // fill AverageTriplecorrelations array
-      SPtr<AverageValuesArray3D> mAverageTriplecorrelations;
-      //if ((dataSetParamStr.nx[0] == 0) && (dataSetParamStr.nx[1] == 0) && (dataSetParamStr.nx[2] == 0) && (dataSetParamStr.nx[3] == 0))
-      //   mAverageTriplecorrelations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr();
-      //else
-      mAverageTriplecorrelations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
-
-      // find the nesessary block and fill it
-      SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
-      block->getKernel()->getDataSet()->setAverageTriplecorrelations(mAverageTriplecorrelations);
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageTripleArray end of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   delete[] dataSetSmallArray;
-}
-
-void MPIIOMigrationCoProcessor::readShearStressValArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readShearStressValArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpShearStressValArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // read count of blocks
-   size_t blocksCount = 0;
-   dataSetParam dataSetParamStr;
-   memset(&dataSetParamStr, 0, sizeof(dataSetParam));
-
-   // read from the grid the blocks, that belong to this process
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
-
-   // define MPI_types depending on the block-specific information
-   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
-   MPI_Type_commit(&dataSetDoubleType);
-
-   size_t ic = 0;
-   MPI_Offset read_offset;
-   size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
-
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         read_offset = (MPI_Offset)(sizeof(dataSetParam) + block->getGlobalID() * sizeofOneDataSet);
-         MPI_File_read_at(file_handler, read_offset, &dataSetSmallArray[ic], 1, dataSetSmallType, MPI_STATUS_IGNORE);
-         MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
-         ic++;
-      }
-   }
-
-   MPI_File_close(&file_handler);
-   MPI_Type_free(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readShearStressValArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readShearStressValArray start of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   size_t index = 0;
-   size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> vectorsOfValues;
-   for (int n = 0; n < blocksCount; n++)
-   {
-      vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
-      index += nextVectorSize;
-
-      // fill ShearStressValuesArray array
-      SPtr<ShearStressValuesArray3D> mShearStressValues;
-      //if ((dataSetParamStr.nx[0] == 0) && (dataSetParamStr.nx[1] == 0) && (dataSetParamStr.nx[2] == 0) && (dataSetParamStr.nx[3] == 0))
-      //   mShearStressValues = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr();
-      //else
-      mShearStressValues = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
-
-      // find the necessary block and fill it
-      SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
-      block->getKernel()->getDataSet()->setShearStressValues(mShearStressValues);
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readShearStressValArray end of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   delete[] dataSetSmallArray;
-}
-
-void MPIIOMigrationCoProcessor::readRelaxationFactor(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readRelaxationFactor start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpRelaxationFactor.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // read count of blocks
-   size_t blocksCount = 0;
-   dataSetParam dataSetParamStr;
-   memset(&dataSetParamStr, 0, sizeof(dataSetParam));
-
-   // collect the blocks of this process from the grid
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
-
-   // define MPI_types depending on the block-specific information
-   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
-   MPI_Type_commit(&dataSetDoubleType);
-
-   size_t ic = 0;
-   MPI_Offset read_offset;
-   size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
-
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         read_offset = (MPI_Offset)(sizeof(dataSetParam) + block->getGlobalID() * sizeofOneDataSet);
-         MPI_File_read_at(file_handler, read_offset, &dataSetSmallArray[ic], 1, dataSetSmallType, MPI_STATUS_IGNORE);
-         MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
-         ic++;
-      }
-   }
-
-   MPI_File_close(&file_handler);
-   MPI_Type_free(&dataSetDoubleType);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readRelaxationFactor time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readRelaxationFactor start of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   size_t index = 0;
-   size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> vectorsOfValues;
-   for (int n = 0; n < blocksCount; n++)
-   {
-      vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
-      index += nextVectorSize;
-
-      // fill RelaxationFactor array
-      SPtr<RelaxationFactorArray3D> mRelaxationFactor;
-      //if ((dataSetParamStr.nx[0] == 0) && (dataSetParamStr.nx[1] == 0) && (dataSetParamStr.nx[2] == 0))
-      //   mRelaxationFactor = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr();
-      //else
-      mRelaxationFactor = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
-
-      // find the necessary block and fill it
-      SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
-      block->getKernel()->getDataSet()->setRelaxationFactor(mRelaxationFactor);
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readRelaxationFactor end of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() /
-1073741824.0 << " GB");
-   }
-
-   delete[] dataSetSmallArray;
-}
-*/
-
 void MPIIOMigrationCoProcessor::readBoundaryConds(int step)
 {
     int rank, size;
-- 
GitLab
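
The commented-out readers removed above (readAverageTripleArray, readShearStressValArray, readRelaxationFactor, ...) all follow one offset-addressed MPI-IO scheme: read the dataSetParam header, commit a contiguous MPI datatype of nx[0]*nx[1]*nx[2]*nx[3] doubles per block, then let every rank call MPI_File_read_at on the records of its own blocks at offsets computed from each block's global ID, so no rank depends on what the others read. A minimal self-contained sketch of that scheme follows; the file name cpExample.bin, header size, record size, and global-ID list are hypothetical placeholders (the real readers use dataSetParam and DataSetSmallMigration records instead).

// sketch.cpp -- compile with an MPI C++ compiler, e.g. mpicxx sketch.cpp
#include <mpi.h>
#include <cstddef>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    const int headerInts      = 4; // stand-in for the dataSetParam header
    const int doublesPerBlock = 8; // stand-in for nx[0]*nx[1]*nx[2]*nx[3]

    MPI_File fh;
    int rc = MPI_File_open(MPI_COMM_WORLD, "cpExample.bin", MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
    if (rc != MPI_SUCCESS)
        MPI_Abort(MPI_COMM_WORLD, rc);

    // one committed contiguous type per block record, as in the removed readers
    MPI_Datatype blockType;
    MPI_Type_contiguous(doublesPerBlock, MPI_DOUBLE, &blockType);
    MPI_Type_commit(&blockType);

    // the blocks owned by this rank (hypothetical: one block whose global ID equals the rank)
    std::vector<int> myGlobalIDs = { rank };
    std::vector<double> values(myGlobalIDs.size() * doublesPerBlock);

    const MPI_Offset headerSize = headerInts * sizeof(int);
    const MPI_Offset recordSize = doublesPerBlock * sizeof(double);

    // every rank seeks directly to its own records: the offset is a pure
    // function of the block's global ID, so reads need no coordination
    for (std::size_t i = 0; i < myGlobalIDs.size(); i++)
    {
        MPI_Offset offset = headerSize + myGlobalIDs[i] * recordSize;
        MPI_File_read_at(fh, offset, &values[i * doublesPerBlock], 1, blockType, MPI_STATUS_IGNORE);
    }

    MPI_Type_free(&blockType);
    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}

Because each record's position is computable from the global block ID alone, the same scheme works unchanged after blocks migrate between ranks, which is what lets one generic reader replace the near-identical per-array functions deleted in this patch.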