diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp
index d5a7b0265f2a74a33f394f540828adc3af3c45b0..0ef0b25662246d4adb6ec6d934ff25cf1611d771 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.cpp
@@ -7,6 +7,8 @@
 #include "MPIIODataStructures.h"
 #include "UbLogger.h"
 #include "MemoryUtil.h"
+#include "UbFileOutputASCII.h"
+#include "UbFileInputASCII.h"
 
 using namespace MPIIODataStructures;
 
@@ -45,12 +47,42 @@ MPIIOCoProcessor::MPIIOCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const
 
    MPI_Type_create_struct(2, blocksBlock, offsetsBlock, typesBlock, &block3dType);
    MPI_Type_commit(&block3dType);
+
+   //---------------------------------------
+
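+   // dataSetParamType: seven contiguous ints, matching the dataSetParam header (nx1, nx2, nx3 and nx[4])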
+   MPI_Type_contiguous(7, MPI_INT, &dataSetParamType);
+   MPI_Type_commit(&dataSetParamType);
+
+   //-----------------------------------------------------------------------
+
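+   // boundCondType mirrors the BoundaryCondition structure written in writeBoundaryConds:
+   // 5 long long flag fields, 38 floats (velocities, densities, LODI values, nx1..nx3, q[0..25]) and 1 char for the algorithm type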
+   MPI_Datatype typesBC[3] = { MPI_LONG_LONG_INT, MPI_FLOAT, MPI_CHAR };
+   int blocksBC[3] = { 5, 38, 1 };
+   MPI_Aint offsetsBC[3], lbBC, extentBC;
+
+   offsetsBC[0] = 0;
+   MPI_Type_get_extent(MPI_LONG_LONG_INT, &lbBC, &extentBC);
+   offsetsBC[1] = blocksBC[0] * extentBC;
+
+   MPI_Type_get_extent(MPI_FLOAT, &lbBC, &extentBC);
+   offsetsBC[2] = offsetsBC[1] + blocksBC[1] * extentBC;
+
+   MPI_Type_create_struct(3, blocksBC, offsetsBC, typesBC, &boundCondType);
+   MPI_Type_commit(&boundCondType);
+
+   //---------------------------------------
+
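+   // arrayPresenceType: six chars, one presence flag per optional array (average density, velocity, fluctuations, triple correlations, shear stress, relaxation factor)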
+   MPI_Type_contiguous(6, MPI_CHAR, &arrayPresenceType);
+   MPI_Type_commit(&arrayPresenceType);
+
 }
 
 MPIIOCoProcessor::~MPIIOCoProcessor()
 {
    MPI_Type_free(&gridParamType);
    MPI_Type_free(&block3dType);
+   MPI_Type_free(&dataSetParamType);
+   MPI_Type_free(&boundCondType);
+   MPI_Type_free(&arrayPresenceType);
 }
 
 void MPIIOCoProcessor::writeBlocks(int step)
@@ -397,7 +429,7 @@ void MPIIOCoProcessor::clearAllFiles(int step)
    MPI_File_set_size(file_handler, new_size);
    MPI_File_close(&file_handler);
 
-   std::string filename10 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC1.bin";
+   /*std::string filename10 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC1.bin";
    int rc10 = MPI_File_open(MPI_COMM_WORLD, filename10.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
    if (rc10 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename10);
    MPI_File_set_size(file_handler, new_size);
@@ -407,5 +439,22 @@ void MPIIOCoProcessor::clearAllFiles(int step)
    int rc11 = MPI_File_open(MPI_COMM_WORLD, filename11.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
    if (rc11 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename11);
    MPI_File_set_size(file_handler, new_size);
-   MPI_File_close(&file_handler);
+   MPI_File_close(&file_handler);*/
+}
+
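+// only the root process records the time step of the last completed checkpoint in "<path>/mpi_io_cp/cp.txt"; readCpTimeStep reads it back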
+void MPIIOCoProcessor::writeCpTimeStep(int step)
+{
+   if (comm->isRoot())
+   {
+      UbFileOutputASCII f(path + "/mpi_io_cp/cp.txt");
+      f.writeInteger(step);
+   }
+}
+//////////////////////////////////////////////////////////////////////////
+int MPIIOCoProcessor::readCpTimeStep()
+{
+   UbFileInputASCII f(path + "/mpi_io_cp/cp.txt");
+   int step = f.readInteger();
+   return step;
 }
+//////////////////////////////////////////////////////////////////////////
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.h
index 93de60bff9ed52575d6c53553ed547eb35ac7bd9..abf06426362ce41443eabfae10ea3550690ac38b 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.h
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOCoProcessor.h
@@ -29,10 +29,16 @@ public:
 
    //!The function truncates the data files
    void clearAllFiles(int step);
+
+   //!The function writes the time step of the last checkpoint
+   void writeCpTimeStep(int step);
+   //!The function reads the time step of the last checkpoint
+   int readCpTimeStep();
+
 protected:
    std::string path;
    SPtr<Communicator> comm;
-   MPI_Datatype gridParamType, block3dType;
+   MPI_Datatype gridParamType, block3dType, dataSetParamType, boundCondType, arrayPresenceType;
 };
 #endif // ! _MPIIOCoProcessor_H_
 #define _MPIIOCoProcessor_H_
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp
index 3c49bd45bc03f696c410f752a5c18c4981a2421c..37d43afbe4586e79c173126cceed0ad1da69aaef 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp
@@ -26,78 +26,12 @@ using namespace MPIIODataStructures;
 #define MESSAGE_TAG 80
 #define SEND_BLOCK_SIZE 100000
 
-MPIIOMigrationBECoProcessor::MPIIOMigrationBECoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s,
-   const std::string& path,
-   SPtr<Communicator> comm) :
-   CoProcessor(grid, s),
-   path(path),
-   comm(comm), 
-   nue(-999.999)
+MPIIOMigrationBECoProcessor::MPIIOMigrationBECoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string& path, SPtr<Communicator> comm) : MPIIOCoProcessor(grid, s, path, comm), nue(-999.999)
 {
-   UbSystem::makeDirectory(path + "/mpi_io_cp");
-
    memset(&boundCondParamStr, 0, sizeof(boundCondParamStr));
 
    //-------------------------   define MPI types  ---------------------------------
 
-   MPI_Datatype typesGP[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR };
-   int blocksGP[3] = { 34, 6, 5 };
-   MPI_Aint offsetsGP[3], lbGP, extentGP;
-
-   offsetsGP[0] = 0;
-   MPI_Type_get_extent(MPI_DOUBLE, &lbGP, &extentGP);
-   offsetsGP[1] = blocksGP[0] * extentGP;
-
-   MPI_Type_get_extent(MPI_INT, &lbGP, &extentGP);
-   offsetsGP[2] = offsetsGP[1] + blocksGP[1] * extentGP;
-
-   MPI_Type_create_struct(3, blocksGP, offsetsGP, typesGP, &gridParamType);
-   MPI_Type_commit(&gridParamType);
-
-   //-----------------------------------------------------------------------
-
-   MPI_Datatype typesBlock[2] = { MPI_INT, MPI_CHAR };
-   int blocksBlock[2] = { 13, 1 };
-   MPI_Aint offsetsBlock[2], lbBlock, extentBlock;
-
-   offsetsBlock[0] = 0;
-   MPI_Type_get_extent(MPI_INT, &lbBlock, &extentBlock);
-   offsetsBlock[1] = blocksBlock[0] * extentBlock;
-
-   MPI_Type_create_struct(2, blocksBlock, offsetsBlock, typesBlock, &block3dType);
-   MPI_Type_commit(&block3dType);
-
-   //-----------------------------------------------------------------------
-
-   MPI_Datatype typesBC[3] = { MPI_LONG_LONG_INT, MPI_FLOAT, MPI_CHAR };
-   int blocksBC[3] = { 5, 38, 1 };
-   MPI_Aint offsetsBC[3], lbBC, extentBC;
-
-   offsetsBC[0] = 0;
-   MPI_Type_get_extent(MPI_LONG_LONG_INT, &lbBC, &extentBC);
-   offsetsBC[1] = blocksBC[0] * extentBC;
-
-   MPI_Type_get_extent(MPI_FLOAT, &lbBC, &extentBC);
-   offsetsBC[2] = offsetsBC[1] + blocksBC[1] * extentBC;
-
-   MPI_Type_create_struct(3, blocksBC, offsetsBC, typesBC, &boundCondType);
-   MPI_Type_commit(&boundCondType);
-
-   //-----------------------------------------------------------------------
-
-   MPI_Type_contiguous(7, MPI_INT, &dataSetParamType);
-   MPI_Type_commit(&dataSetParamType);
-
-   //---------------------------------------
-
-   MPI_Type_contiguous(6, MPI_CHAR, &arrayPresenceType);
-   MPI_Type_commit(&arrayPresenceType);
-
-   //-----------------------------------------------------------------------
-
-   MPI_Type_contiguous(SEND_BLOCK_SIZE, MPI_DOUBLE, &sendBlockDoubleType);
-   MPI_Type_commit(&sendBlockDoubleType);
-
    MPI_Type_contiguous(SEND_BLOCK_SIZE, MPI_INT, &sendBlockIntType);
    MPI_Type_commit(&sendBlockIntType);
 
@@ -106,13 +40,7 @@ MPIIOMigrationBECoProcessor::MPIIOMigrationBECoProcessor(SPtr<Grid3D> grid, SPtr
 //////////////////////////////////////////////////////////////////////////
 MPIIOMigrationBECoProcessor::~MPIIOMigrationBECoProcessor()
 {
-   MPI_Type_free(&gridParamType);
-   MPI_Type_free(&block3dType);
-   MPI_Type_free(&boundCondType);
-   MPI_Type_free(&dataSetParamType);
-   MPI_Type_free(&sendBlockDoubleType);
    MPI_Type_free(&sendBlockIntType);
-   MPI_Type_free(&arrayPresenceType);
 }
 
 void MPIIOMigrationBECoProcessor::process(double step)
@@ -139,67 +67,9 @@ void MPIIOMigrationBECoProcessor::clearAllFiles(int step)
    MPI_Info info = MPI_INFO_NULL;
    MPI_Offset new_size = 0;
 
-   UbSystem::makeDirectory(path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step));
-
-   std::string filename1 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBlocks.bin";
-   int rc1 = MPI_File_open(MPI_COMM_WORLD, filename1.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handler);
-   if (rc1 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename1);
-   MPI_File_set_size(file_handler, new_size);
-   MPI_File_close(&file_handler);
-
-   std::string filename2 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
-   int rc2 = MPI_File_open(MPI_COMM_WORLD, filename2.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc2 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename2);
-   MPI_File_set_size(file_handler, new_size);
-   MPI_File_close(&file_handler);
-
-   std::string filename3 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpArrays.bin";
-   int rc3 = MPI_File_open(MPI_COMM_WORLD, filename3.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc3 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename3);
-   MPI_File_set_size(file_handler, new_size);
-   MPI_File_close(&file_handler);
-
-   std::string filename4 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageDensityArray.bin";
-   //MPI_File_delete(filename4.c_str(), info);
-   int rc4 = MPI_File_open(MPI_COMM_WORLD, filename4.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc4 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename4);
-   MPI_File_set_size(file_handler, new_size);
-   MPI_File_close(&file_handler);
-
-   std::string filename5 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageVelocityArray.bin";
-   //MPI_File_delete(filename5.c_str(), info);
-   int rc5 = MPI_File_open(MPI_COMM_WORLD, filename5.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc5 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename5);
-   MPI_File_set_size(file_handler, new_size);
-   MPI_File_close(&file_handler);
-
-   std::string filename6 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageFluktuationsArray.bin";
-   //MPI_File_delete(filename6.c_str(), info);
-   int rc6 = MPI_File_open(MPI_COMM_WORLD, filename6.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc6 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename6);
-   MPI_File_set_size(file_handler, new_size);
-   MPI_File_close(&file_handler);
-
-   std::string filename7 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageTripleArray.bin";
-   //MPI_File_delete(filename7.c_str(), info);
-   int rc7 = MPI_File_open(MPI_COMM_WORLD, filename7.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc7 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename7);
-   MPI_File_set_size(file_handler, new_size);
-   MPI_File_close(&file_handler);
-
-   std::string filename8 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpShearStressValArray.bin";
-   //MPI_File_delete(filename8.c_str(), info);
-   int rc8 = MPI_File_open(MPI_COMM_WORLD, filename8.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc8 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename8);
-   MPI_File_set_size(file_handler, new_size);
-   MPI_File_close(&file_handler);
+   MPIIOCoProcessor::clearAllFiles(step);
 
-   std::string filename9 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpRelaxationFactor.bin";
-   //MPI_File_delete(filename9.c_str(), info);
-   int rc9 = MPI_File_open(MPI_COMM_WORLD, filename9.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc9 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename9);
-   MPI_File_set_size(file_handler, new_size);
-   MPI_File_close(&file_handler);
+   UbSystem::makeDirectory(path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step));
 
    std::string filename10 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC1.bin";
    int rc10 = MPI_File_open(MPI_COMM_WORLD, filename10.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
@@ -214,179 +84,13 @@ void MPIIOMigrationBECoProcessor::clearAllFiles(int step)
    MPI_File_close(&file_handler);
 }
 
-void MPIIOMigrationBECoProcessor::writeCpTimeStep(int step)
-{
-   if (comm->isRoot())
-   {
-      UbFileOutputASCII f(path + "/mpi_io_cp/cp.txt");
-      f.writeInteger(step);
-   }
-}
-//////////////////////////////////////////////////////////////////////////
-int MPIIOMigrationBECoProcessor::readCpTimeStep()
-{
-   UbFileInputASCII f(path + "/mpi_io_cp/cp.txt");
-   int step = f.readInteger();
-   return step;
-}
-
 void MPIIOMigrationBECoProcessor::writeBlocks(int step)
 {
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   //MPI_Comm_size(MPI_COMM_WORLD, &size);
-   size = 1;
-
 	grid->deleteBlockIDs();
 	RenumberGridVisitor renumber(comm);
 	grid->accept(renumber);
 
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBlocks start collect data rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   int blocksCount = 0; // quantity of all the blocks in the grid, max 2147483648 blocks!
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-
-   std::vector<SPtr<Block3D>> blocksVector[25]; // max 25 levels
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   GridParam* gridParameters = new GridParam;
-   gridParameters->trafoParams[0] = grid->getCoordinateTransformator()->Tx1;
-   gridParameters->trafoParams[1] = grid->getCoordinateTransformator()->Tx2;
-   gridParameters->trafoParams[2] = grid->getCoordinateTransformator()->Tx3;
-   gridParameters->trafoParams[3] = grid->getCoordinateTransformator()->Sx1;
-   gridParameters->trafoParams[4] = grid->getCoordinateTransformator()->Sx2;
-   gridParameters->trafoParams[5] = grid->getCoordinateTransformator()->Sx3;
-   gridParameters->trafoParams[6] = grid->getCoordinateTransformator()->alpha;
-   gridParameters->trafoParams[7] = grid->getCoordinateTransformator()->beta;
-   gridParameters->trafoParams[8] = grid->getCoordinateTransformator()->gamma;
-
-   gridParameters->trafoParams[9] = grid->getCoordinateTransformator()->toX1factorX1;
-   gridParameters->trafoParams[10] = grid->getCoordinateTransformator()->toX1factorX2;
-   gridParameters->trafoParams[11] = grid->getCoordinateTransformator()->toX1factorX3;
-   gridParameters->trafoParams[12] = grid->getCoordinateTransformator()->toX1delta;
-   gridParameters->trafoParams[13] = grid->getCoordinateTransformator()->toX2factorX1;
-   gridParameters->trafoParams[14] = grid->getCoordinateTransformator()->toX2factorX2;
-   gridParameters->trafoParams[15] = grid->getCoordinateTransformator()->toX2factorX3;
-   gridParameters->trafoParams[16] = grid->getCoordinateTransformator()->toX2delta;
-   gridParameters->trafoParams[17] = grid->getCoordinateTransformator()->toX3factorX1;
-   gridParameters->trafoParams[18] = grid->getCoordinateTransformator()->toX3factorX2;
-   gridParameters->trafoParams[19] = grid->getCoordinateTransformator()->toX3factorX3;
-   gridParameters->trafoParams[20] = grid->getCoordinateTransformator()->toX3delta;
-
-   gridParameters->trafoParams[21] = grid->getCoordinateTransformator()->fromX1factorX1;
-   gridParameters->trafoParams[22] = grid->getCoordinateTransformator()->fromX1factorX2;
-   gridParameters->trafoParams[23] = grid->getCoordinateTransformator()->fromX1factorX3;
-   gridParameters->trafoParams[24] = grid->getCoordinateTransformator()->fromX1delta;
-   gridParameters->trafoParams[25] = grid->getCoordinateTransformator()->fromX2factorX1;
-   gridParameters->trafoParams[26] = grid->getCoordinateTransformator()->fromX2factorX2;
-   gridParameters->trafoParams[27] = grid->getCoordinateTransformator()->fromX2factorX3;
-   gridParameters->trafoParams[28] = grid->getCoordinateTransformator()->fromX2delta;
-   gridParameters->trafoParams[29] = grid->getCoordinateTransformator()->fromX3factorX1;
-   gridParameters->trafoParams[30] = grid->getCoordinateTransformator()->fromX3factorX2;
-   gridParameters->trafoParams[31] = grid->getCoordinateTransformator()->fromX3factorX3;
-   gridParameters->trafoParams[32] = grid->getCoordinateTransformator()->fromX3delta;
-
-   gridParameters->active = grid->getCoordinateTransformator()->active;
-   gridParameters->transformation = grid->getCoordinateTransformator()->transformation;
-   
-   gridParameters->deltaX = grid->getDeltaX(minInitLevel);
-   UbTupleInt3 blocknx = grid->getBlockNX();
-   gridParameters->blockNx1 = val<1>(blocknx);
-   gridParameters->blockNx2 = val<2>(blocknx);
-   gridParameters->blockNx3 = val<3>(blocknx);
-   gridParameters->nx1 = grid->getNX1();
-   gridParameters->nx2 = grid->getNX2();
-   gridParameters->nx3 = grid->getNX3();
-   gridParameters->periodicX1 = grid->isPeriodicX1();
-   gridParameters->periodicX2 = grid->isPeriodicX2();
-   gridParameters->periodicX3 = grid->isPeriodicX3();
-
-   //----------------------------------------------------------------------
-
-   Block3d* block3dArray = new Block3d[blocksCount];
-   int ic = 0;
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	all the blocks of the current level
-      {
-         // save data describing the block
-         block3dArray[ic].x1 = block->getX1();
-         block3dArray[ic].x2 = block->getX2();
-         block3dArray[ic].x3 = block->getX3();
-         block3dArray[ic].bundle = block->getBundle();
-         block3dArray[ic].rank = block->getRank();
-         block3dArray[ic].lrank = block->getLocalRank();
-         block3dArray[ic].part = block->getPart();
-         block3dArray[ic].globalID = block->getGlobalID();
-         block3dArray[ic].localID = block->getLocalID();
-         block3dArray[ic].level = block->getLevel();
-         block3dArray[ic].interpolationFlagCF = block->getCollectionOfInterpolationFlagCF();
-         block3dArray[ic].interpolationFlagFC = block->getCollectionOfInterpolationFlagFC();
-         block3dArray[ic].counter = block->getMaxGlobalID();
-         block3dArray[ic].active = block->isActive();
-
-         ic++;
-      }
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBlocks start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   // write to the file
-   MPI_File file_handler;
-   MPI_Info info = MPI_INFO_NULL;
-   //MPI_Info_create (&info);
-   //MPI_Info_set(info,"romio_cb_write","enable");
-   //MPI_Info_set(i nfo,"cb_buffer_size","4194304");
-   //MPI_Info_set(info,"striping_unit","4194304");
-
-   // if (comm->isRoot())
-   // {
-   UbSystem::makeDirectory(path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step));
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBlocks.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-   // }
-
-   double start, finish;
-   //MPI_Offset write_offset = (MPI_Offset)(size * sizeof(int));
-   MPI_Offset write_offset = (MPI_Offset)(sizeof(int));
-
-   if (comm->isRoot())
-   {
-      start = MPI_Wtime();
-
-      // each process writes the quantity of it's blocks
-      MPI_File_write_at(file_handler, 0, &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
-      // each process writes parameters of the grid
-      MPI_File_write_at(file_handler, write_offset, gridParameters, 1, gridParamType, MPI_STATUS_IGNORE);
-      // each process writes it's blocks
-      MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(GridParam)), &block3dArray[0], blocksCount, block3dType, MPI_STATUS_IGNORE);
-      //MPI_File_sync(file_handler);
-   }
-
-   MPI_File_close(&file_handler);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBlocks time: " << finish - start << " s");
-   }
-
-   delete[] block3dArray;
-   delete gridParameters;
+   MPIIOCoProcessor::writeBlocks(step);
 }
 
 void MPIIOMigrationBECoProcessor::writeDataSet(int step)
@@ -395,9 +99,6 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-  if (comm->isRoot())
-	std::cout << "size = "<<size<<std::endl;
-
    int blocksCount = 0; // quantity of blocks, that belong to this process 
 
    std::vector<SPtr<Block3D>> blocksVector[25];
@@ -423,14 +124,19 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
    bool firstBlock = true;
    int doubleCountInBlock = 0;
    int ic = 0;
+   SPtr< D3Q27EsoTwist3DSplittedVector > D3Q27EsoTwist3DSplittedVectorPtr;
+   CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributions;
+   CbArray4D <LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributions;
+   CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributions;
+
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
       for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
       {
-         SPtr< D3Q27EsoTwist3DSplittedVector > D3Q27EsoTwist3DSplittedVectorPtr = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getFdistributions());
-         CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getLocalDistributions();
-         CbArray4D <LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getNonLocalDistributions();
-         CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getZeroDistributions();
+         D3Q27EsoTwist3DSplittedVectorPtr = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getFdistributions());
+         localDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getLocalDistributions();
+         nonLocalDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getNonLocalDistributions();
+         zeroDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getZeroDistributions();
 
          if (firstBlock)// && block->getKernel()) // when first (any) valid block...
          {
@@ -518,6 +224,9 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
       }
    }
 
+   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
+   MPI_Type_commit(&dataSetDoubleType);
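+   // one element of dataSetDoubleType covers all doubles of a single block, so the write below can pass blocksCount as the element count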
+
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeDataSet start MPI IO rank = " << rank);
@@ -546,10 +255,11 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
    MPI_File_write_at(file_handler, (MPI_Offset)0, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE);
    MPI_File_write_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
    MPI_File_write_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
-   MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
+   MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -566,26 +276,32 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
    MPI_File_close(&file_handler1);
 
    if (arrPresence.isAverageDensityArrayPresent)
-      writeAverageDensityArray(step);
+      write4DArray(step, AverageDensity, std::string("/cpAverageDensityArray.bin"));
+      //writeAverageDensityArray(step);
 
    if (arrPresence.isAverageVelocityArrayPresent)
-      writeAverageVelocityArray(step);
+      write4DArray(step, AverageVelocity, std::string("/cpAverageVelocityArray.bin"));
+      //writeAverageVelocityArray(step);
 
    if (arrPresence.isAverageFluktuationsArrayPresent)
-      writeAverageFluktuationsArray(step);
+      write4DArray(step, AverageFluktuations, std::string("/cpAverageFluktuationsArray.bin"));
+      //writeAverageFluktuationsArray(step);
 
    if (arrPresence.isAverageTripleArrayPresent)
-      writeAverageTripleArray(step);
+      write4DArray(step, AverageTriple, std::string("/cpAverageTripleArray.bin"));
+      //writeAverageTripleArray(step);
 
    if (arrPresence.isShearStressValArrayPresent)
-      writeShearStressValArray(step);
+      write4DArray(step, ShearStressVal, std::string("/cpShearStressValArray.bin"));
+      //writeShearStressValArray(step);
 
    if (arrPresence.isRelaxationFactorPresent)
-      writeRelaxationFactor(step);
+      write3DArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin"));
+      //writeRelaxationFactor(step);
 
 }
 
-void MPIIOMigrationBECoProcessor::writeAverageDensityArray(int step)
+void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std::string fname)
 {
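+   // generic writer for the 4D kernel arrays (average density / velocity / fluctuations / triple correlations / shear stress values); replaces the former per-array write functions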
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
@@ -603,40 +319,60 @@ void MPIIOMigrationBECoProcessor::writeAverageDensityArray(int step)
    }
 
    int firstGlobalID;
-   std::vector<double> doubleValuesArray; // double-values of the AverageDensityArray in all blocks 
+   std::vector<double> doubleValuesArray; // double-values of the data array in all blocks 
    dataSetParam dataSetParamStr;
+   bool firstBlock = true;
+   int doubleCountInBlock = 0;
+   int ic = 0;
+   SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > ___Array;
 
    if (comm->isRoot())
    {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeAverageDensityArray start collect data rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write4DArray start collect data rank = " << rank);
       UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
    }
 
-   bool firstBlock = true;
-   int doubleCountInBlock = 0;
-   int ic = 0;
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
       for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
       {
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > averageDensityArray = block->getKernel()->getDataSet()->getAverageDensity();
+         switch (arrayType) {
+         case AverageDensity:
+            ___Array = block->getKernel()->getDataSet()->getAverageDensity();
+            break;
+         case AverageVelocity:
+            ___Array = block->getKernel()->getDataSet()->getAverageVelocity();
+            break;
+         case AverageFluktuations:
+            ___Array = block->getKernel()->getDataSet()->getAverageFluctuations();
+            break;
+         case AverageTriple:
+            ___Array = block->getKernel()->getDataSet()->getAverageTriplecorrelations();
+            break;
+         case ShearStressVal:
+            ___Array = block->getKernel()->getDataSet()->getShearStressValues();
+            break;
+         default:
+            UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationBECoProcessor::write4DArray : 4D array type does not exist!"));
+            break;
+         }
 
          if (firstBlock) // when first (any) valid block...
          {
             firstGlobalID = block->getGlobalID();
 
             dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-            dataSetParamStr.nx[0] = static_cast<int>(averageDensityArray->getNX1());
-            dataSetParamStr.nx[1] = static_cast<int>(averageDensityArray->getNX2());
-            dataSetParamStr.nx[2] = static_cast<int>(averageDensityArray->getNX3());
-            dataSetParamStr.nx[3] = static_cast<int>(averageDensityArray->getNX4());
+            dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1());
+            dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2());
+            dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3());
+            dataSetParamStr.nx[3] = static_cast<int>(___Array->getNX4());
             doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
             firstBlock = false;
          }
 
          if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0))
-            doubleValuesArray.insert(doubleValuesArray.end(), averageDensityArray->getDataVector().begin(), averageDensityArray->getDataVector().end());
+            doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end());
 
          ic++;
       }
@@ -644,17 +380,21 @@ void MPIIOMigrationBECoProcessor::writeAverageDensityArray(int step)
 
    if (comm->isRoot())
    {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeAverageDensityArray start MPI IO rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write4DArray start MPI IO rank = " << rank);
       UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
    }
 
+   // register an MPI type whose extent matches the block-specific amount of data
+   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
+   MPI_Type_commit(&dataSetDoubleType);
+
    double start, finish;
    if (comm->isRoot()) start = MPI_Wtime();
 
    MPI_Info info = MPI_INFO_NULL;
 
    MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageDensityArray.bin";
+   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + fname;
    int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
    if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
@@ -662,19 +402,20 @@ void MPIIOMigrationBECoProcessor::writeAverageDensityArray(int step)
 
    // each process writes common parameters of a dataSet
    MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-   MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
+   MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
       finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeAverageDensityArray time: " << finish - start << " s");
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write4DArray time: " << finish - start << " s");
    }
 }
 
-void MPIIOMigrationBECoProcessor::writeAverageVelocityArray(int step)
+void MPIIOMigrationBECoProcessor::write3DArray(int step, Arrays arrayType, std::string fname)
 {
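+   // generic writer for 3D kernel arrays (currently only the relaxation factor); replaces writeRelaxationFactor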
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
@@ -692,40 +433,48 @@ void MPIIOMigrationBECoProcessor::writeAverageVelocityArray(int step)
    }
 
    int firstGlobalID;
-   std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks 
+   std::vector<double> doubleValuesArray; // double-values of the data array in all blocks 
    dataSetParam dataSetParamStr;
+   bool firstBlock = true;
+   int doubleCountInBlock = 0;
+   int ic = 0;
+   SPtr< CbArray3D<LBMReal, IndexerX3X2X1> > ___Array;
 
    if (comm->isRoot())
    {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeAverageVelocityArray start collect data rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write3DArray start collect data rank = " << rank);
       UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
    }
 
-   bool firstBlock = true;
-   int doubleCountInBlock = 0;
-   int ic = 0;
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
       for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
       {
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > AverageVelocityArray3DPtr = block->getKernel()->getDataSet()->getAverageVelocity();
+         switch (arrayType) {
+         case RelaxationFactor:
+            ___Array = block->getKernel()->getDataSet()->getRelaxationFactor();
+            break;
+         default:
+            UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationBECoProcessor::write3DArray : 3D array type does not exist!"));
+            break;
+         }
 
          if (firstBlock) // when first (any) valid block...
          {
             firstGlobalID = block->getGlobalID();
 
             dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-            dataSetParamStr.nx[0] = static_cast<int>(AverageVelocityArray3DPtr->getNX1());
-            dataSetParamStr.nx[1] = static_cast<int>(AverageVelocityArray3DPtr->getNX2());
-            dataSetParamStr.nx[2] = static_cast<int>(AverageVelocityArray3DPtr->getNX3());
-            dataSetParamStr.nx[3] = static_cast<int>(AverageVelocityArray3DPtr->getNX4());
+            dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1());
+            dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2());
+            dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3());
+            dataSetParamStr.nx[3] = 1;
             doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
             firstBlock = false;
          }
 
-         if ((dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0))
-            doubleValuesArray.insert(doubleValuesArray.end(), AverageVelocityArray3DPtr->getDataVector().begin(), AverageVelocityArray3DPtr->getDataVector().end());
+         if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0))
+            doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end());
 
          ic++;
       }
@@ -733,17 +482,21 @@ void MPIIOMigrationBECoProcessor::writeAverageVelocityArray(int step)
 
    if (comm->isRoot())
    {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeAverageVelocityArray start MPI IO rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write3DArray start MPI IO rank = " << rank);
       UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
    }
 
+   // register an MPI type whose extent matches the block-specific amount of data
+   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
+   MPI_Type_commit(&dataSetDoubleType);
+
    double start, finish;
    if (comm->isRoot()) start = MPI_Wtime();
 
    MPI_Info info = MPI_INFO_NULL;
 
    MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageVelocityArray.bin";
+   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + fname;
    int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
    if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
@@ -751,25 +504,37 @@ void MPIIOMigrationBECoProcessor::writeAverageVelocityArray(int step)
 
    // each process writes common parameters of a dataSet
    MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-   MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
+   MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
       finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeAverageVelocityArray time: " << finish - start << " s");
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::write3DArray time: " << finish - start << " s");
    }
 }
 
-void MPIIOMigrationBECoProcessor::writeAverageFluktuationsArray(int step)
+//---------------------------------------------------------------------------------
+
+void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step) 
 {
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-   int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
+   if (comm->isRoot())
+   {
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBoundaryConds start collect data rank = " << rank);
+      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+   }
+
+   int blocksCount = 0;             // number of blocks that belong to this process
+   size_t allBytesCount = 0;        // number of bytes this process writes to the file
+   size_t count_boundCond = 0;      // number of BoundaryConditions in all blocks
+   int count_indexContainer = 0;    // number of indexContainer values in all blocks
 
    std::vector<SPtr<Block3D>> blocksVector[25];
    int minInitLevel = this->grid->getCoarsestInitializedLevel();
@@ -780,49 +545,92 @@ void MPIIOMigrationBECoProcessor::writeAverageFluktuationsArray(int step)
       blocksCount += static_cast<int>(blocksVector[level].size());
    }
 
-   int firstGlobalID;
-   std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks 
-   dataSetParam dataSetParamStr;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeAverageFluktuationsArray start collect data rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
+   BCAddMigration* bcAddArray = new BCAddMigration[blocksCount];
+   size_t* bytesCount = new size_t[blocksCount];  // number of bytes that each block contributes to the file
+   std::vector<BoundaryCondition>* bcVector = new std::vector<BoundaryCondition>[blocksCount];
+   std::vector<int>* indexContainerVector = new std::vector<int>[blocksCount];
+   std::vector<int> bcindexmatrixVector;
 
-   bool firstBlock = true;
-   int doubleCountInBlock = 0;
+   bool bcindexmatrixCountNotInit = true;
    int ic = 0;
+   SPtr<BCArray3D> bcArr;
+
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
+      for (SPtr<Block3D> block : blocksVector[level])  // all the blocks of the current level
       {
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > AverageFluctArray3DPtr = block->getKernel()->getDataSet()->getAverageFluctuations();
+         bcArr = block->getKernel()->getBCProcessor()->getBCArray();
 
-         if (firstBlock) // when first (any) valid block...
+         bcAddArray[ic].globalID = block->getGlobalID(); // id of the block needed to find it while regenerating the grid
+         bcAddArray[ic].boundCond_count = 0;             // how many BoundaryConditions in this block
+         bcAddArray[ic].indexContainer_count = 0;        // how many indexContainer-values in this block
+         bytesCount[ic] = sizeof(BCAddMigration);
+         bcVector[ic].resize(0);
+         indexContainerVector[ic].resize(0);
+
+         for (int bc = 0; bc<bcArr->getBCVectorSize(); bc++)
          {
-            firstGlobalID = block->getGlobalID();
+            BoundaryCondition* bouCond = new BoundaryCondition();
+            if (bcArr->bcvector[bc] == NULL)
+               memset(bouCond, 0, sizeof(BoundaryCondition));
+            else
+            {
+               bouCond->noslipBoundaryFlags = bcArr->bcvector[bc]->getNoSlipBoundary();
+               bouCond->slipBoundaryFlags = bcArr->bcvector[bc]->getSlipBoundary();
+               bouCond->velocityBoundaryFlags = bcArr->bcvector[bc]->getVelocityBoundary();
+               bouCond->densityBoundaryFlags = bcArr->bcvector[bc]->getDensityBoundary();
+               bouCond->wallModelBoundaryFlags = bcArr->bcvector[bc]->getWallModelBoundary();
+               bouCond->bcVelocityX1 = bcArr->bcvector[bc]->getBoundaryVelocityX1();
+               bouCond->bcVelocityX2 = bcArr->bcvector[bc]->getBoundaryVelocityX2();
+               bouCond->bcVelocityX3 = bcArr->bcvector[bc]->getBoundaryVelocityX3();
+               bouCond->bcDensity = bcArr->bcvector[bc]->getBoundaryDensity();
+               bouCond->bcLodiDensity = bcArr->bcvector[bc]->getDensityLodiDensity();
+               bouCond->bcLodiVelocityX1 = bcArr->bcvector[bc]->getDensityLodiVelocityX1();
+               bouCond->bcLodiVelocityX2 = bcArr->bcvector[bc]->getDensityLodiVelocityX2();
+               bouCond->bcLodiVelocityX3 = bcArr->bcvector[bc]->getDensityLodiVelocityX3();
+               bouCond->bcLodiLentgh = bcArr->bcvector[bc]->getDensityLodiLength();
+               bouCond->nx1 = bcArr->bcvector[bc]->nx1;
+               bouCond->nx2 = bcArr->bcvector[bc]->nx2;
+               bouCond->nx3 = bcArr->bcvector[bc]->nx3;
+               for (int iq = 0; iq<26; iq++)
+                  bouCond->q[iq] = bcArr->bcvector[bc]->getQ(iq);
+               bouCond->algorithmType = bcArr->bcvector[bc]->getBcAlgorithmType();
+            }
 
-            dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-            dataSetParamStr.nx[0] = static_cast<int>(AverageFluctArray3DPtr->getNX1());
-            dataSetParamStr.nx[1] = static_cast<int>(AverageFluctArray3DPtr->getNX2());
-            dataSetParamStr.nx[2] = static_cast<int>(AverageFluctArray3DPtr->getNX3());
-            dataSetParamStr.nx[3] = static_cast<int>(AverageFluctArray3DPtr->getNX4());
-            doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+            bcVector[ic].push_back(*bouCond);
+            delete bouCond;   // the vector stores a copy, so release the temporary to avoid leaking one BoundaryCondition per entry
+            bcAddArray[ic].boundCond_count++;
+            count_boundCond++;
+            bytesCount[ic] += sizeof(BoundaryCondition);
+         }
 
-            firstBlock = false;
+         if (bcindexmatrixCountNotInit)
+         {
+            boundCondParamStr.nx1 = static_cast<int>(bcArr->bcindexmatrix.getNX1());
+            boundCondParamStr.nx2 = static_cast<int>(bcArr->bcindexmatrix.getNX2());
+            boundCondParamStr.nx3 = static_cast<int>(bcArr->bcindexmatrix.getNX3());
+            boundCondParamStr.bcindexmatrixCount = static_cast<int>(bcArr->bcindexmatrix.getDataVector().size());
+            bcindexmatrixCountNotInit = false;
          }
 
-         if ((dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0))
-            doubleValuesArray.insert(doubleValuesArray.end(), AverageFluctArray3DPtr->getDataVector().begin(), AverageFluctArray3DPtr->getDataVector().end());
+         bcindexmatrixVector.insert(bcindexmatrixVector.end(), bcArr->bcindexmatrix.getDataVector().begin(), bcArr->bcindexmatrix.getDataVector().end());
+
+         indexContainerVector[ic].insert(indexContainerVector[ic].begin(), bcArr->indexContainer.begin(), bcArr->indexContainer.end());
+         bcAddArray[ic].indexContainer_count = static_cast<int>(bcArr->indexContainer.size());
+         count_indexContainer += bcAddArray[ic].indexContainer_count;
+         bytesCount[ic] += bcAddArray[ic].indexContainer_count * sizeof(int);
+
+         allBytesCount += bytesCount[ic];
 
          ic++;
       }
    }
 
+   MPI_Type_contiguous(boundCondParamStr.bcindexmatrixCount, MPI_INT, &bcindexmatrixType);
+   MPI_Type_commit(&bcindexmatrixType);
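+   // one element of bcindexmatrixType covers the bcindexmatrix of a single block; its size is only known at runtime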
+   
    if (comm->isRoot())
    {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeAverageFluktuationsArray start MPI IO rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBoundaryConds start MPI IO rank = " << rank);
       UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
    }
 
@@ -830,1346 +638,263 @@ void MPIIOMigrationBECoProcessor::writeAverageFluktuationsArray(int step)
    if (comm->isRoot()) start = MPI_Wtime();
 
    MPI_Info info = MPI_INFO_NULL;
+   //MPI_Info_create (&info);
+   //MPI_Info_set(info,"romio_cb_write","enable");
+   //MPI_Info_set(info,"cb_buffer_size","4194304");
+   //MPI_Info_set(info,"striping_unit","4194304");
+
+//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 
    MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageFluktuationsArray.bin";
+   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC1.bin";
    int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
    if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
-   MPI_Offset write_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
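+   // cpBC1.bin layout: one int header (bcindexmatrixCount) followed by one bcindexmatrix slot per block, addressed by globalID;
+   // this process writes its blocks as one contiguous chunk starting at the slot of its first block (global IDs are expected to be consecutive per process after renumbering)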
+   MPI_Offset write_offset = (MPI_Offset)(sizeof(int)) + (MPI_Offset)(bcAddArray[0].globalID) * (MPI_Offset)(boundCondParamStr.bcindexmatrixCount) * (MPI_Offset)(sizeof(int));
 
-   // each process writes common parameters of a dataSet
-   MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-   MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
+   MPI_File_write_at(file_handler, 0, &boundCondParamStr.bcindexmatrixCount, 1, MPI_INT, MPI_STATUS_IGNORE);
+   MPI_File_write_at(file_handler, write_offset, &bcindexmatrixVector[0], blocksCount, bcindexmatrixType, MPI_STATUS_IGNORE);
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
+   MPI_Type_free(&bcindexmatrixType);
 
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeAverageFluktuationsArray time: " << finish - start << " s");
-   }
-}
+//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+   filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC2.bin";
+   rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
-void MPIIOMigrationBECoProcessor::writeAverageTripleArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
+   MPI_File_write_at(file_handler, 0, &boundCondParamStr, 4, MPI_INT, MPI_STATUS_IGNORE);
 
-   int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
- 
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   int firstGlobalID;
-   std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks 
-   dataSetParam dataSetParamStr;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageTripleArray start collect data rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   bool firstBlock = true;
-   int doubleCountInBlock = 0;
-   int ic = 0;
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > AverageTripleArray3DPtr = block->getKernel()->getDataSet()->getAverageTriplecorrelations();
-
-         if (firstBlock) // when first (any) valid block...
-         {
-            firstGlobalID = block->getGlobalID();
-
-            dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-            dataSetParamStr.nx[0] = static_cast<int>(AverageTripleArray3DPtr->getNX1());
-            dataSetParamStr.nx[1] = static_cast<int>(AverageTripleArray3DPtr->getNX2());
-            dataSetParamStr.nx[2] = static_cast<int>(AverageTripleArray3DPtr->getNX3());
-            dataSetParamStr.nx[3] = static_cast<int>(AverageTripleArray3DPtr->getNX4());
-            doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-
-            firstBlock = false;
-         }
-
-         if ((dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0))
-            doubleValuesArray.insert(doubleValuesArray.end(), AverageTripleArray3DPtr->getDataVector().begin(), AverageTripleArray3DPtr->getDataVector().end());
-
-         ic++;
-      }
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeAverageTripleArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_Info info = MPI_INFO_NULL;
-
-#ifdef HLRN
-   MPI_Info_create(&info);
-   MPI_Info_set(info, "striping_factor", "40");
-   MPI_Info_set(info, "striping_unit", "4M");
-#endif
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageTripleArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   MPI_Offset write_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
-
-   // each process writes common parameters of a dataSet
-   MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-   MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
-
-   MPI_File_sync(file_handler);
-   MPI_File_close(&file_handler);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeAverageTripleArray time: " << finish - start << " s");
-   }
-}
-
-void MPIIOMigrationBECoProcessor::writeShearStressValArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
-
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   int firstGlobalID;
-   std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks 
-   dataSetParam dataSetParamStr;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeShearStressValArray start collect data rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   bool firstBlock = true;
-   int doubleCountInBlock = 0;
-   int ic = 0;
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > ShearStressValArray3DPtr = block->getKernel()->getDataSet()->getShearStressValues();
-
-         if (firstBlock) // when first (any) valid block...
-         {
-            firstGlobalID = block->getGlobalID();
-
-            dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-            dataSetParamStr.nx[0] = static_cast<int>(ShearStressValArray3DPtr->getNX1());
-            dataSetParamStr.nx[1] = static_cast<int>(ShearStressValArray3DPtr->getNX2());
-            dataSetParamStr.nx[2] = static_cast<int>(ShearStressValArray3DPtr->getNX3());
-            dataSetParamStr.nx[3] = static_cast<int>(ShearStressValArray3DPtr->getNX4());
-            doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-
-            firstBlock = false;
-         }
-
-         if ((dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0))
-            doubleValuesArray.insert(doubleValuesArray.end(), ShearStressValArray3DPtr->getDataVector().begin(), ShearStressValArray3DPtr->getDataVector().end());
-
-         ic++;
-      }
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeShearStressValArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_Info info = MPI_INFO_NULL;
-
-#ifdef HLRN
-   MPI_Info_create(&info);
-   MPI_Info_set(info, "striping_factor", "40");
-   MPI_Info_set(info, "striping_unit", "4M");
-#endif
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpShearStressValArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   MPI_Offset write_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
-
-   // each process writes common parameters of a dataSet
-   MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-   MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
-
-   MPI_File_sync(file_handler);
-   MPI_File_close(&file_handler);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeShearStressValArray time: " << finish - start << " s");
-   }
-}
-
-void MPIIOMigrationBECoProcessor::writeRelaxationFactor(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
- 
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   int firstGlobalID;
-   std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks 
-   dataSetParam dataSetParamStr;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeRelaxationFactor start collect data rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   bool firstBlock = true;
-   int doubleCountInBlock = 0;
-   int ic = 0;
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
-      {
-         SPtr< CbArray3D<LBMReal, IndexerX3X2X1> > relaxationFactor3DPtr = block->getKernel()->getDataSet()->getRelaxationFactor();
-
-         if (firstBlock) // when first (any) valid block...
-         {
-            firstGlobalID = block->getGlobalID();
-
-            dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-            dataSetParamStr.nx[0] = static_cast<int>(relaxationFactor3DPtr->getNX1());
-            dataSetParamStr.nx[1] = static_cast<int>(relaxationFactor3DPtr->getNX2());
-            dataSetParamStr.nx[2] = static_cast<int>(relaxationFactor3DPtr->getNX3());
-            dataSetParamStr.nx[3] = 1;
-            doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-
-            firstBlock = false;
-         }
-
-         if ((dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0))
-            doubleValuesArray.insert(doubleValuesArray.end(), relaxationFactor3DPtr->getDataVector().begin(), relaxationFactor3DPtr->getDataVector().end());
-
-         ic++;
-      }
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeRelaxationFactor start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_Info info = MPI_INFO_NULL;
-
-#ifdef HLRN
-   MPI_Info_create(&info);
-   MPI_Info_set(info, "striping_factor", "40");
-   MPI_Info_set(info, "striping_unit", "4M");
-#endif
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpRelaxationFactor.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   MPI_Offset write_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(firstGlobalID) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
-
-   // each process writes common parameters of a dataSet
-   MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-   MPI_File_write_at(file_handler, write_offset, &doubleValuesArray[0], blocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
-
-   MPI_File_sync(file_handler);
-   MPI_File_close(&file_handler);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeRelaxationFactor time: " << finish - start << " s");
-   }
-}
-
-//---------------------------------------------------------------------------------
-
-void MPIIOMigrationBECoProcessor::writeBoundaryConds(int step) 
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBoundaryConds start collect data rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   int blocksCount = 0;    // quantity of blocks, that belong to this process
-   size_t allBytesCount = 0;  // quantity of bytes, that one process writes to the file
-   size_t count_boundCond = 0;	// how many BoundaryConditions in all blocks
-   int count_indexContainer = 0;	// how many indexContainer-values in all blocks
-
-   std::vector<SPtr<Block3D>> blocksVector[25];
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      grid->getBlocks(level, rank, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   BCAddMigration* bcAddArray = new BCAddMigration[blocksCount];
-   size_t* bytesCount = new size_t[blocksCount];  // quantity of bytes, that each block writes to the file
-   std::vector<BoundaryCondition>* bcVector = new std::vector<BoundaryCondition>[blocksCount];
-   std::vector<int>* indexContainerVector = new std::vector<int>[blocksCount];
-   std::vector<int> bcindexmatrixVector;
-
-   bool bcindexmatrixCountNotInit = true;
-   int ic = 0;
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  // all the blocks of the current level
-      {
-         SPtr<BCArray3D> bcArr = block->getKernel()->getBCProcessor()->getBCArray();
-
-         bcAddArray[ic].globalID = block->getGlobalID(); // id of the block needed to find it while regenerating the grid
-         bcAddArray[ic].boundCond_count = 0;             // how many BoundaryConditions in this block
-         bcAddArray[ic].indexContainer_count = 0;        // how many indexContainer-values in this block
-         bytesCount[ic] = sizeof(BCAddMigration);
-         bcVector[ic].resize(0);
-         indexContainerVector[ic].resize(0);
-
-         for (int bc = 0; bc<bcArr->getBCVectorSize(); bc++)
-         {
-            BoundaryCondition* bouCond = new BoundaryCondition();
-            if (bcArr->bcvector[bc] == NULL)
-               memset(bouCond, 0, sizeof(BoundaryCondition));
-            else
-            {
-               bouCond->noslipBoundaryFlags = bcArr->bcvector[bc]->getNoSlipBoundary();
-               bouCond->slipBoundaryFlags = bcArr->bcvector[bc]->getSlipBoundary();
-               bouCond->velocityBoundaryFlags = bcArr->bcvector[bc]->getVelocityBoundary();
-               bouCond->densityBoundaryFlags = bcArr->bcvector[bc]->getDensityBoundary();
-               bouCond->wallModelBoundaryFlags = bcArr->bcvector[bc]->getWallModelBoundary();
-               bouCond->bcVelocityX1 = bcArr->bcvector[bc]->getBoundaryVelocityX1();
-               bouCond->bcVelocityX2 = bcArr->bcvector[bc]->getBoundaryVelocityX2();
-               bouCond->bcVelocityX3 = bcArr->bcvector[bc]->getBoundaryVelocityX3();
-               bouCond->bcDensity = bcArr->bcvector[bc]->getBoundaryDensity();
-               bouCond->bcLodiDensity = bcArr->bcvector[bc]->getDensityLodiDensity();
-               bouCond->bcLodiVelocityX1 = bcArr->bcvector[bc]->getDensityLodiVelocityX1();
-               bouCond->bcLodiVelocityX2 = bcArr->bcvector[bc]->getDensityLodiVelocityX2();
-               bouCond->bcLodiVelocityX3 = bcArr->bcvector[bc]->getDensityLodiVelocityX3();
-               bouCond->bcLodiLentgh = bcArr->bcvector[bc]->getDensityLodiLength();
-               bouCond->nx1 = bcArr->bcvector[bc]->nx1;
-               bouCond->nx2 = bcArr->bcvector[bc]->nx2;
-               bouCond->nx3 = bcArr->bcvector[bc]->nx3;
-               for (int iq = 0; iq<26; iq++)
-                  bouCond->q[iq] = bcArr->bcvector[bc]->getQ(iq);
-               bouCond->algorithmType = bcArr->bcvector[bc]->getBcAlgorithmType();
-            }
-
-            bcVector[ic].push_back(*bouCond);
-            bcAddArray[ic].boundCond_count++;
-            count_boundCond++;
-            bytesCount[ic] += sizeof(BoundaryCondition);
-         }
-
-         if (bcindexmatrixCountNotInit)
-         {
-            boundCondParamStr.nx1 = static_cast<int>(bcArr->bcindexmatrix.getNX1());
-            boundCondParamStr.nx2 = static_cast<int>(bcArr->bcindexmatrix.getNX2());
-            boundCondParamStr.nx3 = static_cast<int>(bcArr->bcindexmatrix.getNX3());
-            boundCondParamStr.bcindexmatrixCount = static_cast<int>(bcArr->bcindexmatrix.getDataVector().size());
-            bcindexmatrixCountNotInit = false;
-         }
-
-         bcindexmatrixVector.insert(bcindexmatrixVector.end(), bcArr->bcindexmatrix.getDataVector().begin(), bcArr->bcindexmatrix.getDataVector().end());
-
-         indexContainerVector[ic].insert(indexContainerVector[ic].begin(), bcArr->indexContainer.begin(), bcArr->indexContainer.end());
-         bcAddArray[ic].indexContainer_count = static_cast<int>(bcArr->indexContainer.size());
-         count_indexContainer += bcAddArray[ic].indexContainer_count;
-         bytesCount[ic] += bcAddArray[ic].indexContainer_count * sizeof(int);
-
-         allBytesCount += bytesCount[ic];
-
-         ic++;
-      }
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBoundaryConds start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_Info info = MPI_INFO_NULL;
-   //MPI_Info_create (&info);
-   //MPI_Info_set(info,"romio_cb_write","enable");
-   //MPI_Info_set(info,"cb_buffer_size","4194304");
-   //MPI_Info_set(info,"striping_unit","4194304");
-
-//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC1.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   MPI_Offset write_offset = (MPI_Offset)(sizeof(int)) + (MPI_Offset)(bcAddArray[0].globalID) * (MPI_Offset)(boundCondParamStr.bcindexmatrixCount) * (MPI_Offset)(sizeof(int));
-
-   MPI_File_write_at(file_handler, 0, &boundCondParamStr.bcindexmatrixCount, 1, MPI_INT, MPI_STATUS_IGNORE);
-   MPI_File_write_at(file_handler, write_offset, &bcindexmatrixVector[0], blocksCount * boundCondParamStr.bcindexmatrixCount, MPI_INT, MPI_STATUS_IGNORE);
-
-   MPI_File_sync(file_handler);
-   MPI_File_close(&file_handler);
-
-//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-   filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC2.bin";
-   rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   MPI_File_write_at(file_handler, 0, &boundCondParamStr, 4, MPI_INT, MPI_STATUS_IGNORE);
-
-   write_offset = (MPI_Offset)(sizeof(boundCondParam)) + (MPI_Offset)(grid->getNumberOfBlocks()) * (MPI_Offset)(sizeof(size_t));
-   size_t next_file_offset = 0;
-   if (size > 1)
-   {
-      if (rank == 0)
-      {
-         next_file_offset = write_offset + allBytesCount;
-         MPI_Send(&next_file_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-      }
-      else
-      {
-         MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-         next_file_offset = write_offset + allBytesCount;
-         if (rank < size - 1)
-            MPI_Send(&next_file_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
-      }
-   }
-
-   MPI_Offset write_offsetIndex;
-
-   for (int nb = 0; nb < blocksCount; nb++)
-   {
-      write_offsetIndex = (MPI_Offset)(sizeof(boundCondParam)) + (MPI_Offset)(bcAddArray[nb].globalID) * (MPI_Offset)(sizeof(size_t));
-      MPI_File_write_at(file_handler, write_offsetIndex, &write_offset, 1, MPI_LONG_LONG_INT, MPI_STATUS_IGNORE);
-
-      MPI_File_write_at(file_handler, write_offset, &bcAddArray[nb], 3, MPI_INT, MPI_STATUS_IGNORE);
-      if (bcVector[nb].size() > 0)
-         MPI_File_write_at(file_handler, write_offset + (MPI_Offset)(sizeof(BCAddMigration)), &bcVector[nb][0], bcAddArray[nb].boundCond_count, boundCondType, MPI_STATUS_IGNORE);
-
-      if (indexContainerVector[nb].size() > 0)
-         MPI_File_write_at(file_handler, write_offset + (MPI_Offset)(sizeof(BCAddMigration)) + (MPI_Offset)(bcAddArray[nb].boundCond_count) * (MPI_Offset)(sizeof(BoundaryCondition)),
-            &indexContainerVector[nb][0], bcAddArray[nb].indexContainer_count, MPI_INT, MPI_STATUS_IGNORE);
-
-      write_offset += bytesCount[nb];
-   }
-
-   MPI_File_sync(file_handler);
-   MPI_File_close(&file_handler);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBoundaryConds time: " << finish - start << " s");
-   }
-
-   delete[] bcAddArray;
-   delete[] bytesCount;
-   delete[] bcVector;
-   delete[] indexContainerVector;
-}
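
The rank-by-rank MPI_Send/MPI_Recv chain above hands each process the end offset of its predecessor, i.e. an exclusive prefix sum of allBytesCount over the ranks. For orientation, a hedged sketch of the same quantity computed with MPI_Exscan (an equivalent collective formulation, not what the code above does; chainedWriteOffset is a hypothetical helper):

```cpp
// Exclusive prefix sum of per-rank byte counts, equivalent to the send/recv offset chain above.
#include <mpi.h>

long long chainedWriteOffset(long long baseOffset, long long myBytesCount, MPI_Comm comm)
{
   long long bytesBeforeMe = 0; // sum of myBytesCount over all lower ranks
   MPI_Exscan(&myBytesCount, &bytesBeforeMe, 1, MPI_LONG_LONG_INT, MPI_SUM, comm);

   int rank;
   MPI_Comm_rank(comm, &rank);
   if (rank == 0) bytesBeforeMe = 0; // MPI_Exscan leaves rank 0's receive buffer undefined

   return baseOffset + bytesBeforeMe;
}
```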
-
-//------------------------------------------- READ -----------------------------------------------
-void MPIIOMigrationBECoProcessor::restart(int step)
-{
-   if (comm->isRoot()) UBLOG(logINFO, "MPIIOMigrationBECoProcessor restart step: " << step);
-   if (comm->isRoot()) UBLOG(logINFO, "Load check point - start");
-
-   readBlocks(step);
-   SPtr<Grid3DVisitor> newMetisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::BSW, MetisPartitioner::KWAY));
-   grid->accept(newMetisVisitor);
-
-   readDataSet(step);
-   readBoundaryConds(step);
-
-   grid->setTimeStep(step);
-   if (comm->isRoot()) UBLOG(logINFO, "Load check point - end");
-}
-
-void MPIIOMigrationBECoProcessor::readBlocks(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   //MPI_Comm_size(MPI_COMM_WORLD, &size);
-   size = 1;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBlocks start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBlocks.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // read count of blocks
-   int blocksCount = 0;
-   MPI_File_read_at(file_handler, 0, &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
-   Block3d* block3dArray = new Block3d[blocksCount];
-
-   GridParam* gridParameters = new GridParam;
-
-   // calculate the read offset
-   //MPI_Offset read_offset = (MPI_Offset)(size * sizeof(int));
-   MPI_Offset read_offset = (MPI_Offset)(sizeof(int));
-
-   // read parameters of the grid
-   MPI_File_read_at(file_handler, read_offset, gridParameters, 1, gridParamType, MPI_STATUS_IGNORE);
-   // read all the blocks
-   if (comm->isRoot())
-      MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(GridParam)), &block3dArray[0], blocksCount, block3dType, MPI_STATUS_IGNORE);
-
-   MPI_Bcast(block3dArray, blocksCount, block3dType, comm->getRoot(), MPI_COMM_WORLD);
-
-   MPI_File_close(&file_handler);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBlocks time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBlocks start of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   // clear the grid
-   grid->deleteBlocks();
-
-   // restore the grid
-   SPtr<CoordinateTransformation3D> trafo(new CoordinateTransformation3D());
-   trafo->Tx1 = gridParameters->trafoParams[0];
-   trafo->Tx2 = gridParameters->trafoParams[1];
-   trafo->Tx3 = gridParameters->trafoParams[2];
-   trafo->Sx1 = gridParameters->trafoParams[3];
-   trafo->Sx2 = gridParameters->trafoParams[4];
-   trafo->Sx3 = gridParameters->trafoParams[5];
-   trafo->alpha = gridParameters->trafoParams[6];
-   trafo->beta = gridParameters->trafoParams[7];
-   trafo->gamma = gridParameters->trafoParams[8];
-
-   trafo->toX1factorX1 = gridParameters->trafoParams[9];
-   trafo->toX1factorX2 = gridParameters->trafoParams[10];
-   trafo->toX1factorX3 = gridParameters->trafoParams[11];
-   trafo->toX1delta = gridParameters->trafoParams[12];
-   trafo->toX2factorX1 = gridParameters->trafoParams[13];
-   trafo->toX2factorX2 = gridParameters->trafoParams[14];
-   trafo->toX2factorX3 = gridParameters->trafoParams[15];
-   trafo->toX2delta = gridParameters->trafoParams[16];
-   trafo->toX3factorX1 = gridParameters->trafoParams[17];
-   trafo->toX3factorX2 = gridParameters->trafoParams[18];
-   trafo->toX3factorX3 = gridParameters->trafoParams[19];
-   trafo->toX3delta = gridParameters->trafoParams[20];
-
-   trafo->fromX1factorX1 = gridParameters->trafoParams[21];
-   trafo->fromX1factorX2 = gridParameters->trafoParams[22];
-   trafo->fromX1factorX3 = gridParameters->trafoParams[23];
-   trafo->fromX1delta = gridParameters->trafoParams[24];
-   trafo->fromX2factorX1 = gridParameters->trafoParams[25];
-   trafo->fromX2factorX2 = gridParameters->trafoParams[26];
-   trafo->fromX2factorX3 = gridParameters->trafoParams[27];
-   trafo->fromX2delta = gridParameters->trafoParams[28];
-   trafo->fromX3factorX1 = gridParameters->trafoParams[29];
-   trafo->fromX3factorX2 = gridParameters->trafoParams[30];
-   trafo->fromX3factorX3 = gridParameters->trafoParams[31];
-   trafo->fromX3delta = gridParameters->trafoParams[32];
-
-   trafo->active = gridParameters->active;
-   trafo->transformation = gridParameters->transformation;
-
-   grid->setCoordinateTransformator(trafo);
-
-   grid->setDeltaX(gridParameters->deltaX);
-   grid->setBlockNX(gridParameters->blockNx1, gridParameters->blockNx2, gridParameters->blockNx3);
-   grid->setNX1(gridParameters->nx1);
-   grid->setNX2(gridParameters->nx2);
-   grid->setNX3(gridParameters->nx3);
-   grid->setPeriodicX1(gridParameters->periodicX1);
-   grid->setPeriodicX2(gridParameters->periodicX2);
-   grid->setPeriodicX3(gridParameters->periodicX3);
-
-   // regenerate blocks
-   for (int n = 0; n<blocksCount; n++)
-   {
-      SPtr<Block3D> block(new Block3D(block3dArray[n].x1, block3dArray[n].x2, block3dArray[n].x3, block3dArray[n].level));
-      block->setActive(block3dArray[n].active);
-      block->setBundle(block3dArray[n].bundle);
-      block->setRank(block3dArray[n].rank);
-      block->setLocalRank(block3dArray[n].lrank);
-      block->setGlobalID(block3dArray[n].globalID);
-      block->setLocalID(block3dArray[n].localID);
-      block->setPart(block3dArray[n].part);
-      block->setLevel(block3dArray[n].level);
-      block->setCollectionOfInterpolationFlagCF(block3dArray[n].interpolationFlagCF);
-      block->setCollectionOfInterpolationFlagFC(block3dArray[n].interpolationFlagFC);
-
-      grid->addBlock(block);
-   }
-
-   delete gridParameters;
-   delete[] block3dArray;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readBlocks end of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-}
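
readBlocks above consumes GridParam::trafoParams as a flat array of 33 doubles in a fixed order (translation, scaling, rotation angles, then the toX* and fromX* factor/delta groups). For reference, a sketch of the symmetric write-side packing, assuming the writer uses exactly this index order and that the project's CoordinateTransformation3D declaration is visible:

```cpp
// Sketch of the inverse operation: packing a CoordinateTransformation3D into the
// 33-entry trafoParams layout that readBlocks() unpacks above. Illustrative only.
void packTrafoParams(double trafoParams[33], const CoordinateTransformation3D& t)
{
   trafoParams[0] = t.Tx1;   trafoParams[1] = t.Tx2;   trafoParams[2] = t.Tx3;
   trafoParams[3] = t.Sx1;   trafoParams[4] = t.Sx2;   trafoParams[5] = t.Sx3;
   trafoParams[6] = t.alpha; trafoParams[7] = t.beta;  trafoParams[8] = t.gamma;

   trafoParams[9]  = t.toX1factorX1;   trafoParams[10] = t.toX1factorX2;
   trafoParams[11] = t.toX1factorX3;   trafoParams[12] = t.toX1delta;
   trafoParams[13] = t.toX2factorX1;   trafoParams[14] = t.toX2factorX2;
   trafoParams[15] = t.toX2factorX3;   trafoParams[16] = t.toX2delta;
   trafoParams[17] = t.toX3factorX1;   trafoParams[18] = t.toX3factorX2;
   trafoParams[19] = t.toX3factorX3;   trafoParams[20] = t.toX3delta;

   // indices 21..32: the fromX1/fromX2/fromX3 factors and deltas, same pattern as above
   trafoParams[21] = t.fromX1factorX1; trafoParams[22] = t.fromX1factorX2;
   trafoParams[23] = t.fromX1factorX3; trafoParams[24] = t.fromX1delta;
   trafoParams[25] = t.fromX2factorX1; trafoParams[26] = t.fromX2factorX2;
   trafoParams[27] = t.fromX2factorX3; trafoParams[28] = t.fromX2delta;
   trafoParams[29] = t.fromX3factorX1; trafoParams[30] = t.fromX3factorX2;
   trafoParams[31] = t.fromX3factorX3; trafoParams[32] = t.fromX3delta;
}
```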
-
-void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, int doubleCountInBlock, std::vector<double>& pV, std::vector<double>* rawDataReceive)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   int indexB = ind1;
-   int indexE = ind2;
-   int myBlocksCount = indexE - indexB;
-
-   std::vector<double>* rawDataSend = new std::vector<double>[size];
-   for (int r = 0; r < size; r++)
-   {
-      rawDataSend[r].resize(0);
-      rawDataSend[r].push_back(0);
-   }
-
-   SPtr<Block3D> tempBlock;
-   int tempRank;
-   for (int ind = indexB - indexB; ind < indexE - indexB; ind++)
-   {
-      tempBlock = grid->getBlock(indexB + ind);
-      if(!tempBlock)  throw UbException(UB_EXARGS,"MPIIOMigrationBECoProcessor::blocksExchange -- null block pointer!!!" );
-
-      tempRank = tempBlock->getRank();
-
-      if (tempRank == rank) // no need to send data, the process already has it
-      {
-         rawDataReceive[tempRank][0]++;
-         rawDataReceive[tempRank].push_back(double(indexB + ind));
-         rawDataReceive[tempRank].insert(rawDataReceive[tempRank].end(), pV.begin() + ind * doubleCountInBlock,
-            pV.begin() + ind * doubleCountInBlock + doubleCountInBlock);
-      }
-      else  // we must send data to other processes
-      {
-         rawDataSend[tempRank][0]++;
-         rawDataSend[tempRank].push_back(double(indexB + ind));
-         rawDataSend[tempRank].insert(rawDataSend[tempRank].end(), pV.begin() + ind * doubleCountInBlock,
-            pV.begin() + ind * doubleCountInBlock + doubleCountInBlock);
-      }
-   }
-
-   MPI_Request* requests = new MPI_Request[size * 2]; // send + receive
-   int requestCount = 0;
-   MPI_Status status;
-   int quant;
-   int doubleBlockCount;
-   int rds;
-
-   for (int r = 0; r < size; r++)
-   {
-      if (r != rank)
-      {
-		 rds = rawDataSend[r].size();
-         doubleBlockCount = (int)(rds / SEND_BLOCK_SIZE);
-         if (doubleBlockCount * SEND_BLOCK_SIZE < rds)
-            doubleBlockCount += 1;
-
-	     for (int i = rds; i < doubleBlockCount * SEND_BLOCK_SIZE; i++)
-	         rawDataSend[r].push_back(0);
-
-         MPI_Isend(&rawDataSend[r][0], doubleBlockCount, sendBlockDoubleType, r, tagN, MPI_COMM_WORLD, &requests[requestCount]);
-         requestCount++;
-      }
-   }
-
-   for (int r = 0; r < size; r++)
-   {
-      if (r != rank)
-      {
-         MPI_Probe(r, tagN, MPI_COMM_WORLD, &status);
-         MPI_Get_count(&status, sendBlockDoubleType, &quant);
-         rawDataReceive[r].resize(quant * SEND_BLOCK_SIZE);
-         MPI_Irecv(&rawDataReceive[r][0], quant, sendBlockDoubleType, r, tagN, MPI_COMM_WORLD, &requests[requestCount]);
-         requestCount++;
-      }
-   }
-
-   MPI_Waitall(requestCount, &requests[0], MPI_STATUSES_IGNORE);
-}
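
In this (removed) variant the per-rank exchange buffers are self-describing: element 0 carries the number of blocks, followed by repeating [blockID, doubleCountInBlock values] records, which is exactly what the restore loops below walk through. A small decoding sketch under that assumption, with hypothetical names:

```cpp
// Walks one per-rank buffer in the removed wire format:
// buf = [ blockCount, blockID_0, values_0 ..., blockID_1, values_1 ..., ... ]
#include <cstddef>
#include <vector>

void decodeRawBuffer(const std::vector<double>& buf, int doubleCountInBlock)
{
   const int blockCount = (int)buf[0];
   std::size_t index = 1;                    // element 0 is the block counter

   for (int i = 0; i < blockCount; i++)
   {
      int blockID = (int)buf[index++];       // global ID of the block that follows
      const double* values = &buf[index];    // doubleCountInBlock doubles for this block
      index += doubleCountInBlock;

      (void)blockID; (void)values;           // a real caller would rebuild the block's arrays here
   }
}
```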
-
-void MPIIOMigrationBECoProcessor::readDataSet(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   if (!lbmKernel)
-      UB_THROW(UbException(UB_EXARGS, "lbmKernel does not exist!"));
-   if (!bcProcessor)
-      UB_THROW(UbException(UB_EXARGS, "bcProcessor does not exist!"));
-   if (nue == -999.999)
-      UB_THROW(UbException(UB_EXARGS, "nue is not initialised!"));
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-  if (comm->isRoot())
-	std::cout << "size = "<<size<<std::endl;
-
-   dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3;
-
-   int blocksCountAll = grid->getNumberOfBlocks(); // quantity of all blocks in the grid
-   int blocksPerProcess = blocksCountAll / size;   // how many blocks each process has
-
-   int myBlocksCount;
-   if (rank < (size - 1))
-      myBlocksCount = blocksPerProcess;
-   else
-      myBlocksCount = blocksPerProcess + (blocksCountAll - blocksPerProcess * size);
-
-   int indexB = rank * blocksPerProcess;  // the first "my" block
-   int indexE = indexB + myBlocksCount;   // the last "my" block
-
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE);
-   MPI_File_read_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
-   MPI_File_read_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   int doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
-      dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] +
-      dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
-   std::vector<double> doubleValuesArray(myBlocksCount * doubleCountInBlock); // double-values in all blocks 
-
-   MPI_Offset read_offset = (MPI_Offset)(3 * sizeof(dataSetParam)) + (MPI_Offset)(indexB * doubleCountInBlock * sizeof(double));
-   MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], myBlocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
-
-   MPI_File_close(&file_handler);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet start of exchange of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   std::vector<double>* rawDataReceive = new std::vector<double>[size];
-   for (int r = 0; r < size; r++)
-   {
-      rawDataReceive[r].resize(0);
-      rawDataReceive[r].push_back(0);
-   }
-
-   blocksExchange(MESSAGE_TAG, indexB, indexE, doubleCountInBlock, doubleValuesArray, rawDataReceive);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet end of exchange of data, rank = " << rank);
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet start of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   //-------------------------------------- restore blocks ---------------------------------
-   int blockID;
-   std::vector<double> vectorsOfValues1, vectorsOfValues2, vectorsOfValues3;
-
-   size_t vectorSize1 = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3];
-   size_t vectorSize2 = dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3];
-   size_t vectorSize3 = dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
-
-   size_t index;
-   for (int r = 0; r < size; r++)
-   {
-      index = 1;
-      for (int ii = 0; ii < rawDataReceive[r][0]; ii++)
-      {
-         blockID = (int)(rawDataReceive[r][index]);
-         index += 1;
-
-         vectorsOfValues1.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize1);
-         index += vectorSize1;
-
-         vectorsOfValues2.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize2);
-         index += vectorSize2;
-
-         vectorsOfValues3.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize3);
-         index += vectorSize3;
-
-         SPtr<DistributionArray3D> mFdistributions(new D3Q27EsoTwist3DSplittedVector());
-
-         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues1, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
-         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues2, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
-         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(vectorsOfValues3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
-
-         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX1(dataSetParamStr1.nx1);
-         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX2(dataSetParamStr1.nx2);
-         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX3(dataSetParamStr1.nx3);
-
-         // find the necessary block and fill it
-         SPtr<Block3D> block = grid->getBlock(blockID);
-         this->lbmKernel->setBlock(block);
-         SPtr<LBMKernel> kernel = this->lbmKernel->clone();
-         LBMReal collFactor = LBMSystem::calcCollisionFactor(this->nue, block->getLevel());
-         kernel->setCollisionFactor(collFactor);
-         kernel->setIndex(block->getX1(), block->getX2(), block->getX3());
-         kernel->setDeltaT(LBMSystem::getDeltaT(block->getLevel()));
-         SPtr<DataSet3D> dataSetPtr = SPtr<DataSet3D>(new DataSet3D());
-         dataSetPtr->setFdistributions(mFdistributions);
-         kernel->setDataSet(dataSetPtr);
-         block->setKernel(kernel);
-      }
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet end of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   //-------------------------------------------------------------
-
-   DSArraysPresence arrPresence;
-   MPI_File file_handler1;
-   std::string filename1 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpArrays.bin";
-   rc = MPI_File_open(MPI_COMM_WORLD, filename1.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler1);
-   if (rc != MPI_SUCCESS) return;// throw UbException(UB_EXARGS, "couldn't open file " + filename1);
-
-   MPI_File_read_at(file_handler1, (MPI_Offset)0, &arrPresence, 1, arrayPresenceType, MPI_STATUS_IGNORE);
-   MPI_File_close(&file_handler1);
-
-   if (arrPresence.isAverageDensityArrayPresent)
-   readAverageDensityArray(step);
-
-   if (arrPresence.isAverageVelocityArrayPresent)
-   readAverageVelocityArray(step);
-
-   if (arrPresence.isAverageFluktuationsArrayPresent)
-   readAverageFluktuationsArray(step);
-
-   if (arrPresence.isAverageTripleArrayPresent)
-   readAverageTripleArray(step);
-
-   if (arrPresence.isShearStressValArrayPresent)
-   readShearStressValArray(step);
-
-   if (arrPresence.isRelaxationFactorPresent)
-   readRelaxationFactor(step);
-
-}
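
For reading, the global block range is split into contiguous chunks of blocksPerProcess = blocksCountAll / size IDs per rank, with the last rank absorbing the remainder; [indexB, indexE) is then the ID range this rank reads from the file. A small sketch of that arithmetic (myBlockRange is a hypothetical helper):

```cpp
// Contiguous block-range partitioning used by readDataSet() and the read*Array() routines.
// Returns the half-open global-ID range [indexB, indexE) owned by `rank` for reading.
#include <utility>

std::pair<int, int> myBlockRange(int blocksCountAll, int rank, int size)
{
   const int blocksPerProcess = blocksCountAll / size;
   const int indexB = rank * blocksPerProcess;
   const int myBlocksCount = (rank < size - 1)
      ? blocksPerProcess
      : blocksPerProcess + (blocksCountAll - blocksPerProcess * size);
   return { indexB, indexB + myBlocksCount };
}
```

For example, 10 blocks on 4 ranks yields the ranges [0,2), [2,4), [4,6) and [6,10).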
-
-void MPIIOMigrationBECoProcessor::readAverageDensityArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageDensityArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   dataSetParam dataSetParamStr;
-   memset(&dataSetParamStr, 0, sizeof(dataSetParam));
-
-   int blocksCountAll = grid->getNumberOfBlocks(); // quantity of all blocks in the grid
-   int blocksPerProcess = blocksCountAll / size;   // how many blocks each process has
-
-   int myBlocksCount;
-   if (rank < (size - 1))
-      myBlocksCount = blocksPerProcess;
-   else
-      myBlocksCount = blocksPerProcess + (blocksCountAll - blocksPerProcess * size);
-
-   int indexB = rank * blocksPerProcess;  // the first "my" block
-   int indexE = indexB + myBlocksCount;   // the last "my" block
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageDensityArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> doubleValuesArray(myBlocksCount * doubleCountInBlock); // double-values in all blocks
-
-   MPI_Offset read_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(indexB) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
-   MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], myBlocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
-
-   MPI_File_close(&file_handler);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageDensityArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageDensityArray start of exchange of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   std::vector<double>* rawDataReceive = new std::vector<double>[size];
-   for (int r = 0; r < size; r++)
-   {
-      rawDataReceive[r].resize(0);
-      rawDataReceive[r].push_back(0);
-   }
-
-   blocksExchange(MESSAGE_TAG + 1, indexB, indexE, doubleCountInBlock, doubleValuesArray, rawDataReceive);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageDensityArray end of exchange of data, rank = " << rank);
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageDensityArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageDensityArray start of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   //----------------------------- restore data ---------------------------------
-   int blockID;
-   std::vector<double> vectorsOfValues;
-   size_t index;
-   size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-
-   for (int r = 0; r < size; r++)
+   write_offset = (MPI_Offset)(sizeof(boundCondParam)) + (MPI_Offset)(grid->getNumberOfBlocks()) * (MPI_Offset)(sizeof(size_t));
+   size_t next_file_offset = 0;
+   if (size > 1)
    {
-      index = 1;
-      for(int ii = 0; ii < rawDataReceive[r][0]; ii++)
+      if (rank == 0)
       {
-         blockID = (int)(rawDataReceive[r][index]);
-         index += 1;
-
-         vectorsOfValues.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + nextVectorSize);
-         index += nextVectorSize;
-
-         // fill mAverageDensity arrays
-         SPtr<AverageValuesArray3D> mAverageDensity;
-         mAverageDensity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
-
-         // find the necessary block and fill it
-         SPtr<Block3D> block = grid->getBlock(blockID);
-         block->getKernel()->getDataSet()->setAverageDensity(mAverageDensity);
+         next_file_offset = write_offset + allBytesCount;
+         MPI_Send(&next_file_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
       }
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageDensityArray end of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-}
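
The restore loop above repeats for every averaged quantity: decode one [blockID, values] record, wrap the values in a CbArray4D with the extents from dataSetParamStr, and attach the array to the block's data set. A compact sketch of that single step under the same assumptions (project headers visible; restoreAverageDensity is a hypothetical helper):

```cpp
// One restore step, factored out for clarity; mirrors the loop body above.
// vectorsOfValues is assumed to hold nx[0]*nx[1]*nx[2]*nx[3] doubles for this block.
#include <vector>

void restoreAverageDensity(SPtr<Grid3D> grid, int blockID,
                           std::vector<double>& vectorsOfValues, const int nx[4])
{
   SPtr<AverageValuesArray3D> mAverageDensity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
      new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, nx[0], nx[1], nx[2], nx[3]));

   SPtr<Block3D> block = grid->getBlock(blockID);   // look the block up by its global ID
   block->getKernel()->getDataSet()->setAverageDensity(mAverageDensity);
}
```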
-
-void MPIIOMigrationBECoProcessor::readAverageVelocityArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageVelocityArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   dataSetParam dataSetParamStr;
-   memset(&dataSetParamStr, 0, sizeof(dataSetParam));
-
-   int blocksCountAll = grid->getNumberOfBlocks(); // quantity of all blocks in the grid
-   int blocksPerProcess = blocksCountAll / size;   // how many blocks each process has
-
-   int myBlocksCount;
-   if (rank < (size - 1))
-      myBlocksCount = blocksPerProcess;
-   else
-      myBlocksCount = blocksPerProcess + (blocksCountAll - blocksPerProcess * size);
-
-   int indexB = rank * blocksPerProcess;  // the first "my" block
-   int indexE = indexB + myBlocksCount;   // the last "my" block
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageVelocityArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> doubleValuesArray(myBlocksCount * doubleCountInBlock); // double-values in all blocks
-
-   MPI_Offset read_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(indexB) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
-   MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], myBlocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
-
-   MPI_File_close(&file_handler);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageVelocityArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageVelocityArray start of exchange of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   std::vector<double>* rawDataReceive = new std::vector<double>[size];
-   for (int r = 0; r < size; r++)
-   {
-      rawDataReceive[r].resize(0);
-      rawDataReceive[r].push_back(0);
-   }
-
-   blocksExchange(MESSAGE_TAG + 2, indexB, indexE, doubleCountInBlock, doubleValuesArray, rawDataReceive);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageVelocityArray end of exchange of data, rank = " << rank);
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageVelocityArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageVelocityArray start of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   int blockID;
-   std::vector<double> vectorsOfValues;
-
-   size_t index;
-   size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-
-   for (int r = 0; r < size; r++)
-   {
-      index = 1;
-      for(int ii = 0; ii < rawDataReceive[r][0]; ii++)
+      else
       {
-         blockID = (int)(rawDataReceive[r][index]);
-         index += 1;
-
-         vectorsOfValues.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + nextVectorSize);
-         index += nextVectorSize;
-
-         // fill mAverageVelocity array
-         SPtr<AverageValuesArray3D> mAverageVelocity;
-         mAverageVelocity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
-
-         // find the necessary block and fill it
-         SPtr<Block3D> block = grid->getBlock(blockID);
-         block->getKernel()->getDataSet()->setAverageVelocity(mAverageVelocity);
+         MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+         next_file_offset = write_offset + allBytesCount;
+         if (rank < size - 1)
+            MPI_Send(&next_file_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
       }
    }
 
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageVelocityArray end of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-}
-
-void MPIIOMigrationBECoProcessor::readAverageFluktuationsArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageFluktuationsArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   dataSetParam dataSetParamStr;
-   memset(&dataSetParamStr, 0, sizeof(dataSetParam));
-
-   int blocksCountAll = grid->getNumberOfBlocks(); // quantity of all blocks in the grid
-   int blocksPerProcess = blocksCountAll / size;   // how many blocks each process has
-
-   int myBlocksCount;
-   if (rank < (size - 1))
-      myBlocksCount = blocksPerProcess;
-   else
-      myBlocksCount = blocksPerProcess + (blocksCountAll - blocksPerProcess * size);
-
-   int indexB = rank * blocksPerProcess;  // the first "my" block
-   int indexE = indexB + myBlocksCount;   // the last "my" block
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageFluktuationsArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
-
-   int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> doubleValuesArray(myBlocksCount * doubleCountInBlock); // double-values in all blocks
-
-   MPI_Offset read_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(indexB) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
-   MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], myBlocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
-
-   MPI_File_close(&file_handler);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageFluktuationsArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageFluktuationsArray start of exchange of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   std::vector<double>* rawDataReceive = new std::vector<double>[size];
-   for (int r = 0; r < size; r++)
-   {
-      rawDataReceive[r].resize(0);
-      rawDataReceive[r].push_back(0);
-   }
-
-   blocksExchange(MESSAGE_TAG + 3, indexB, indexE, doubleCountInBlock, doubleValuesArray, rawDataReceive);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageFluktuationsArray end of exchange of data, rank = " << rank);
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageFluktuationsArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageFluktuationsArray start of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   int blockID;
-   std::vector<double> vectorsOfValues;
-
-   size_t index;
-   size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+   MPI_Offset write_offsetIndex;
 
-   for (int r = 0; r < size; r++)
+   for (int nb = 0; nb < blocksCount; nb++)
    {
-      index = 1;
-      for(int ii = 0; ii < rawDataReceive[r][0]; ii++)
-      {
-         blockID = (int)(rawDataReceive[r][index]);
-         index += 1;
-
-         vectorsOfValues.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + nextVectorSize);
-         index += nextVectorSize;
-
-         // fill AverageFluktuations array
-         SPtr<AverageValuesArray3D> mAverageFluktuations;
-         mAverageFluktuations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+      write_offsetIndex = (MPI_Offset)(sizeof(boundCondParam)) + (MPI_Offset)(bcAddArray[nb].globalID) * (MPI_Offset)(sizeof(size_t));
+      MPI_File_write_at(file_handler, write_offsetIndex, &write_offset, 1, MPI_LONG_LONG_INT, MPI_STATUS_IGNORE);
 
-         // find the necessary block and fill it
-         SPtr<Block3D> block = grid->getBlock(blockID);
-         block->getKernel()->getDataSet()->setAverageFluctuations(mAverageFluktuations);
-      }
-   }
+      MPI_File_write_at(file_handler, write_offset, &bcAddArray[nb], 3, MPI_INT, MPI_STATUS_IGNORE);
+      if (bcVector[nb].size() > 0)
+         MPI_File_write_at(file_handler, write_offset + (MPI_Offset)(sizeof(BCAddMigration)), &bcVector[nb][0], bcAddArray[nb].boundCond_count, boundCondType, MPI_STATUS_IGNORE);
 
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageFluktuationsArray end of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+      if (indexContainerVector[nb].size() > 0)
+         MPI_File_write_at(file_handler, write_offset + (MPI_Offset)(sizeof(BCAddMigration)) + (MPI_Offset)(bcAddArray[nb].boundCond_count) * (MPI_Offset)(sizeof(BoundaryCondition)),
+            &indexContainerVector[nb][0], bcAddArray[nb].indexContainer_count, MPI_INT, MPI_STATUS_IGNORE);
+
+      write_offset += bytesCount[nb];
    }
-}
 
-void MPIIOMigrationBECoProcessor::readAverageTripleArray(int step)
-{
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   MPI_Comm_size(MPI_COMM_WORLD, &size);
+   MPI_File_sync(file_handler);
+   MPI_File_close(&file_handler);
 
    if (comm->isRoot())
    {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageTripleArray start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+      finish = MPI_Wtime();
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::writeBoundaryConds time: " << finish - start << " s");
    }
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   dataSetParam dataSetParamStr;
-   memset(&dataSetParamStr, 0, sizeof(dataSetParam));
 
-   int myBlocksCount;
-   int blocksCountAll = grid->getNumberOfBlocks(); // quantity of all blocks in the grid
-   int blocksPerProcess = blocksCountAll / size;   // how many blocks each process has
+   delete[] bcAddArray;
+   delete[] bytesCount;
+   delete[] bcVector;
+   delete[] indexContainerVector;
+}
 
-   if (rank < (size - 1))
-      myBlocksCount = blocksPerProcess;
-   else
-      myBlocksCount = blocksPerProcess + (blocksCountAll - blocksPerProcess * size);
+//------------------------------------------- READ -----------------------------------------------
+void MPIIOMigrationBECoProcessor::restart(int step)
+{
+   if (comm->isRoot()) UBLOG(logINFO, "MPIIOMigrationBECoProcessor restart step: " << step);
+   if (comm->isRoot()) UBLOG(logINFO, "Load check point - start");
 
-   int indexB = rank * blocksPerProcess;  // the first "my" block
-   int indexE = indexB + myBlocksCount;   // the last "my" block
+   readBlocks(step);
+   SPtr<Grid3DVisitor> newMetisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::BSW, MetisPartitioner::KWAY));
+   grid->accept(newMetisVisitor);
 
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageTripleArray.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
+   readDataSet(step);
+   readBoundaryConds(step);
 
-   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
+   grid->setTimeStep(step);
+   if (comm->isRoot()) UBLOG(logINFO, "Load check point - end");
+}
 
-   int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> doubleValuesArray(myBlocksCount * doubleCountInBlock); // double-values in all blocks
+void MPIIOMigrationBECoProcessor::readBlocks(int step)
+{
+   MPIIOCoProcessor::readBlocks(step);
+}
 
-   MPI_Offset read_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(indexB) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
-   MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], myBlocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
+void MPIIOMigrationBECoProcessor::blocksExchange(int tagN, int ind1, int ind2, int doubleCountInBlock, std::vector<double>& pV, std::vector<double>* rawDataReceive)
+{
+   int rank, size;
+   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+   MPI_Comm_size(MPI_COMM_WORLD, &size);
 
-   MPI_File_close(&file_handler);
+   int indexB = ind1;
+   int indexE = ind2;
+   int myBlocksCount = indexE - indexB;
 
-   if (comm->isRoot())
+   int* blocksCounterSend = new int[size];
+   int* blocksCounterRec = new int[size];
+  
+   std::vector<double>* rawDataSend = new std::vector<double>[size];
+   for (int r = 0; r < size; r++)
    {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageTripleArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageTripleArray start of exchange of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+      rawDataSend[r].resize(0);
+      blocksCounterSend[r] = 0;
+      blocksCounterRec[r] = 0;
    }
 
-   std::vector<double>* rawDataReceive = new std::vector<double>[size];
-   for (int r = 0; r < size; r++)
+   SPtr<Block3D> tempBlock;
+   int tempRank;
+   
+   for (size_t ind = 0; ind < size_t(indexE - indexB); ind++)
    {
-      rawDataReceive[r].resize(0);
-      rawDataReceive[r].push_back(0);
-   }
+      tempBlock = grid->getBlock(indexB + ind);
+      if(!tempBlock)  throw UbException(UB_EXARGS,"MPIIOMigrationBECoProcessor::blocksExchange -- null block pointer!!!" );
 
-   blocksExchange(MESSAGE_TAG + 4, indexB, indexE, doubleCountInBlock, doubleValuesArray, rawDataReceive);
+      tempRank = tempBlock->getRank();
+     
+      if (tempRank == rank) // no need to send data, the process already has it
+      {
+         blocksCounterRec[tempRank]++;
+         rawDataReceive[tempRank].push_back(double(indexB + ind));
+         rawDataReceive[tempRank].insert(rawDataReceive[tempRank].end(), pV.begin() + ind * size_t(doubleCountInBlock), pV.begin() + ind * size_t(doubleCountInBlock) + size_t(doubleCountInBlock));
+      }
+      else  // we must send data to other processes
+      {
+         blocksCounterSend[tempRank]++;
+         rawDataSend[tempRank].push_back(double(indexB + ind));
+         rawDataSend[tempRank].insert(rawDataSend[tempRank].end(), pV.begin() + ind * size_t(doubleCountInBlock), pV.begin() + ind * size_t(doubleCountInBlock) + size_t(doubleCountInBlock));
+      }
+   }
+
+   MPI_Request* requests = new MPI_Request[size * 2]; // send + receive
+   int requestCount = 0;
+   MPI_Status status;
 
-   if (comm->isRoot())
+   for (int r = 0; r < size; r++)
    {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageTripleArray end of exchange of data, rank = " << rank);
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageTripleArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageTripleArray start of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+      if (r != rank)
+      {
+         MPI_Irecv(&blocksCounterRec[r], 1, MPI_INT, r, tagN, MPI_COMM_WORLD, &requests[requestCount]);
+         requestCount++;
+      }
    }
-
-   int blockID;
-   std::vector<double> vectorsOfValues;
-
-   size_t index;
-   size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-
+  
    for (int r = 0; r < size; r++)
    {
-      index = 1;
-      for(int ii = 0; ii < rawDataReceive[r][0]; ii++)
+      if (r != rank)
       {
-         blockID = (int)(rawDataReceive[r][index]);
-         index += 1;
-
-         vectorsOfValues.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + nextVectorSize);
-         index += nextVectorSize;
+         MPI_Isend(&blocksCounterSend[r], 1, MPI_INT, r, tagN, MPI_COMM_WORLD, &requests[requestCount]);
+         requestCount++;
+      }
+   }
 
-         // fill AverageTriplecorrelations array
-         SPtr<AverageValuesArray3D> mAverageTriplecorrelations;
-         mAverageTriplecorrelations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+   MPI_Waitall(requestCount, &requests[0], MPI_STATUSES_IGNORE);
+
+   MPI_Type_contiguous(doubleCountInBlock + 1, MPI_DOUBLE, &sendBlockDoubleType);
+   MPI_Type_commit(&sendBlockDoubleType);
 
-         // find the necessary block and fill it
-         SPtr<Block3D> block = grid->getBlock(blockID);
-         block->getKernel()->getDataSet()->setAverageTriplecorrelations(mAverageTriplecorrelations);
+   for (int r = 0; r < size; r++)
+   {
+      if (r != rank)
+         rawDataReceive[r].resize(size_t(blocksCounterRec[r]) * size_t(doubleCountInBlock + 1));
+   }
+
+   requestCount = 0;
+   int sendRecCount = 0;
+   size_t sendRecOffset = 0;
+   const int maxQuant = 400;
+   int restQuant;
+
+   for (int r = 0; r < size; r++)
+   {
+      if (r != rank)
+      {
+         sendRecCount = int(blocksCounterRec[r] / maxQuant);
+         if (sendRecCount * maxQuant < blocksCounterRec[r])
+            sendRecCount++;
+         requests = (MPI_Request*)realloc(requests, (requestCount + sendRecCount) * sizeof(MPI_Request));
+
+         for (int sc = 0; sc < sendRecCount; sc++)
+         {
+            restQuant = (sc < sendRecCount - 1) ? maxQuant : blocksCounterRec[r] - sc * maxQuant;
+            sendRecOffset = size_t(sc) * size_t(maxQuant) * size_t(doubleCountInBlock + 1);
+            MPI_Irecv(&rawDataReceive[r][sendRecOffset], restQuant, sendBlockDoubleType, r, tagN, MPI_COMM_WORLD, &requests[requestCount]);
+            requestCount++;
+         }
+      }
+   }
+
+   for (int r = 0; r < size; r++)
+   {
+      if (r != rank)
+      {
+         sendRecCount = int(blocksCounterSend[r] / maxQuant);
+         if (sendRecCount * maxQuant < blocksCounterSend[r])
+            sendRecCount++;
+         requests = (MPI_Request*)realloc(requests, (requestCount + sendRecCount) * sizeof(MPI_Request));
+
+         for (int sc = 0; sc < sendRecCount; sc++)
+         {
+            restQuant = (sc < sendRecCount - 1) ? maxQuant : blocksCounterSend[r] - sc * maxQuant;
+            sendRecOffset = size_t(sc) * size_t(maxQuant) * size_t(doubleCountInBlock + 1);
+            MPI_Isend(&rawDataSend[r][sendRecOffset], restQuant, sendBlockDoubleType, r, tagN, MPI_COMM_WORLD, &requests[requestCount]);
+            requestCount++;
+         }
       }
-   }
+   }
+
+   MPI_Waitall(requestCount, &requests[0], MPI_STATUSES_IGNORE);
+
+   delete [] blocksCounterSend;
+   delete [] blocksCounterRec;
+   delete [] rawDataSend;
+   free(requests); // matches the malloc/realloc allocation above
 
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readAverageTripleArray end of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
 }
 
-void MPIIOMigrationBECoProcessor::readShearStressValArray(int step)
+void MPIIOMigrationBECoProcessor::readDataSet(int step)
 {
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
 
+   if (!lbmKernel)
+      UB_THROW(UbException(UB_EXARGS, "lbmKernel does not exist!"));
+   if (!bcProcessor)
+      UB_THROW(UbException(UB_EXARGS, "bcProcessor does not exist!"));
+   if (nue == -999.999)
+      UB_THROW(UbException(UB_EXARGS, "nue is not initialised!"));
+
    if (comm->isRoot())
    {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readShearStressValArray start MPI IO rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet start MPI IO rank = " << rank);
       UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
    }
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
 
-   dataSetParam dataSetParamStr;
-   memset(&dataSetParamStr, 0, sizeof(dataSetParam));
+   dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3;
 
    int blocksCountAll = grid->getNumberOfBlocks(); // quantity of all blocks in the grid
    int blocksPerProcess = blocksCountAll / size;   // how many blocks has each process
 
-   int myBlocksCount;
+   size_t myBlocksCount;
    if (rank < (size - 1))
       myBlocksCount = blocksPerProcess;
    else
@@ -2178,82 +903,148 @@ void MPIIOMigrationBECoProcessor::readShearStressValArray(int step)
    int indexB = rank * blocksPerProcess;  // the first "my" block
    int indexE = indexB + myBlocksCount;   // the latest "my" block
 
+   double start, finish;
+   if (comm->isRoot()) start = MPI_Wtime();
+
    MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpShearStressValArray.bin";
+   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
    int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
    if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
-   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
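+   // the three headers describe the local, non-local and zero distributions of the split EsoTwist vector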
+   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE);
+   MPI_File_read_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
+   MPI_File_read_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
-   int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-   std::vector<double> doubleValuesArray(myBlocksCount * doubleCountInBlock); // double-values in all blocks
+   size_t doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
+      dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] +
+      dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
+   std::vector<double> doubleValuesArray(myBlocksCount * doubleCountInBlock); // double-values in all blocks 
 
-   MPI_Offset read_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(indexB) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
-   MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], myBlocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
+   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
+   MPI_Type_commit(&dataSetDoubleType);
+
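+   // skip the three headers, then seek to the first block owned by this rank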
+   MPI_Offset read_offset = (MPI_Offset)(3 * sizeof(dataSetParam)) + (MPI_Offset)(indexB * doubleCountInBlock * sizeof(double));
+   MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], myBlocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
       finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readShearStressValArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readShearStressValArray start of exchange of data, rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet time: " << finish - start << " s");
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet start of exchange of data, rank = " << rank);
       UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
    }
 
    std::vector<double>* rawDataReceive = new std::vector<double>[size];
    for (int r = 0; r < size; r++)
-   {
       rawDataReceive[r].resize(0);
-      rawDataReceive[r].push_back(0);
-   }
-
-   blocksExchange(MESSAGE_TAG + 5, indexB, indexE, doubleCountInBlock, doubleValuesArray, rawDataReceive);
 
+   blocksExchange(MESSAGE_TAG, indexB, indexE, doubleCountInBlock, doubleValuesArray, rawDataReceive);
+   
    if (comm->isRoot())
    {
       finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readShearStressValArray end of exchange of data, rank = " << rank);
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readShearStressValArray time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readShearStressValArray start of restore of data, rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet time: " << finish - start << " s");
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet start of restore of data, rank = " << rank);
       UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
    }
-
+     
+   //-------------------------------------- restore blocks ---------------------------------
    int blockID;
-   std::vector<double> vectorsOfValues;
+   std::vector<double> vectorsOfValues1, vectorsOfValues2, vectorsOfValues3;
 
+   size_t vectorSize1 = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3];
+   size_t vectorSize2 = dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3];
+   size_t vectorSize3 = dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
+ 
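+   // each received record is: blockID, then the three distribution vectors back to back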
    size_t index;
-   size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-
    for (int r = 0; r < size; r++)
    {
-      index = 1;
-      for(int ii = 0; ii < rawDataReceive[r][0]; ii++)
+      index = 0;
+      for (int ii = 0; ii < int(rawDataReceive[r].size() / doubleCountInBlock); ii++)
       {
          blockID = (int)(rawDataReceive[r][index]);
          index += 1;
 
-         vectorsOfValues.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + nextVectorSize);
-         index += nextVectorSize;
+         vectorsOfValues1.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize1);
+         index += vectorSize1;
+
+         vectorsOfValues2.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize2);
+         index += vectorSize2;
 
-         // fill ShearStressValuesArray array
-         SPtr<ShearStressValuesArray3D> mShearStressValues;
-         mShearStressValues = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+         vectorsOfValues3.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + vectorSize3);
+         index += vectorSize3;
+         
+         SPtr<DistributionArray3D> mFdistributions(new D3Q27EsoTwist3DSplittedVector());
+         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues1, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues2, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(vectorsOfValues3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
+
+         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX1(dataSetParamStr1.nx1);
+         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX2(dataSetParamStr1.nx2);
+         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX3(dataSetParamStr1.nx3);
 
          // find the nesessary block and fill it
          SPtr<Block3D> block = grid->getBlock(blockID);
-         block->getKernel()->getDataSet()->setShearStressValues(mShearStressValues);
+         this->lbmKernel->setBlock(block);
+         SPtr<LBMKernel> kernel = this->lbmKernel->clone();
+         LBMReal collFactor = LBMSystem::calcCollisionFactor(this->nue, block->getLevel());
+         kernel->setCollisionFactor(collFactor);
+         kernel->setIndex(block->getX1(), block->getX2(), block->getX3());
+         kernel->setDeltaT(LBMSystem::getDeltaT(block->getLevel()));
+         SPtr<DataSet3D> dataSetPtr = SPtr<DataSet3D>(new DataSet3D());
+         dataSetPtr->setFdistributions(mFdistributions);
+         kernel->setDataSet(dataSetPtr);
+         block->setKernel(kernel);
       }
    }
-
    if (comm->isRoot())
    {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readShearStressValArray end of restore of data, rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readDataSet end of restore of data, rank = " << rank);
       UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
    }
+
+   //-------------------------------------------------------------
+
+   DSArraysPresence arrPresence;
+   MPI_File file_handler1;
+   std::string filename1 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpArrays.bin";
+   rc = MPI_File_open(MPI_COMM_WORLD, filename1.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler1);
+   if (rc != MPI_SUCCESS) return; // throw UbException(UB_EXARGS, "couldn't open file " + filename1);
+
+   MPI_File_read_at(file_handler1, (MPI_Offset)0, &arrPresence, 1, arrayPresenceType, MPI_STATUS_IGNORE);
+   MPI_File_close(&file_handler1);
+
+   if (arrPresence.isAverageDensityArrayPresent)
+      readArray(step, AverageDensity, std::string("/cpAverageDensityArray.bin"));
+   //readAverageDensityArray(step);
+
+   if (arrPresence.isAverageVelocityArrayPresent)
+      readArray(step, AverageVelocity, std::string("/cpAverageVelocityArray.bin"));
+   //   readAverageVelocityArray(step);
+
+   if (arrPresence.isAverageFluktuationsArrayPresent)
+      readArray(step, AverageFluktuations, std::string("/cpAverageFluktuationsArray.bin"));
+   //   readAverageFluktuationsArray(step);
+
+   if (arrPresence.isAverageTripleArrayPresent)
+      readArray(step, AverageTriple, std::string("/cpAverageTripleArray.bin"));
+   //  readAverageTripleArray(step);
+
+   if (arrPresence.isShearStressValArrayPresent)
+      readArray(step, ShearStressVal, std::string("/cpShearStressValArray.bin"));
+   //   readShearStressValArray(step);
+
+   if (arrPresence.isRelaxationFactorPresent)
+      readArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin"));
+   //   readRelaxationFactor(step);
+   
+   delete [] rawDataReceive;
 }
 
-void MPIIOMigrationBECoProcessor::readRelaxationFactor(int step)
+void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::string fname)
 {
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
@@ -2261,7 +1052,7 @@ void MPIIOMigrationBECoProcessor::readRelaxationFactor(int step)
 
    if (comm->isRoot())
    {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readRelaxationFactor start MPI IO rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray start MPI IO rank = " << rank);
       UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
    }
    double start, finish;
@@ -2273,7 +1064,7 @@ void MPIIOMigrationBECoProcessor::readRelaxationFactor(int step)
    int blocksCountAll = grid->getNumberOfBlocks(); // quantity of all blocks in the grid
    int blocksPerProcess = blocksCountAll / size;   // how many blocks has each process
 
-   int myBlocksCount;
+   size_t myBlocksCount;
    if (rank < (size - 1))
       myBlocksCount = blocksPerProcess;
    else
@@ -2283,76 +1074,106 @@ void MPIIOMigrationBECoProcessor::readRelaxationFactor(int step)
    int indexE = indexB + myBlocksCount;   // the latest "my" block
 
    MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpRelaxationFactor.bin";
+   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + fname;
    int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
    if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
    MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
-   int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+   size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
    std::vector<double> doubleValuesArray(myBlocksCount * doubleCountInBlock); // double-values in all blocks
 
+   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
+   MPI_Type_commit(&dataSetDoubleType);
+
    MPI_Offset read_offset = (MPI_Offset)(sizeof(dataSetParam)) + (MPI_Offset)(indexB) * (MPI_Offset)(doubleCountInBlock) * (MPI_Offset)(sizeof(double));
-   MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], myBlocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
+   MPI_File_read_at(file_handler, read_offset, &doubleValuesArray[0], myBlocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
       finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readRelaxationFactor time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readRelaxationFactor start of exchange of data, rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray time: " << finish - start << " s");
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray start of exchange of data, rank = " << rank);
       UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
    }
 
    std::vector<double>* rawDataReceive = new std::vector<double>[size];
    for (int r = 0; r < size; r++)
-   {
-      rawDataReceive[r].resize(0);
-      rawDataReceive[r].push_back(0);
-   }
+      rawDataReceive[r].resize(0);
 
-   blocksExchange(MESSAGE_TAG + 6, indexB, indexE, doubleCountInBlock, doubleValuesArray, rawDataReceive);
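+   // each array type keeps its own message tag, as the separate read functions did before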
+   blocksExchange(MESSAGE_TAG + int(arrType), indexB, indexE, doubleCountInBlock, doubleValuesArray, rawDataReceive);
 
    if (comm->isRoot())
    {
       finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readRelaxationFactor end of exchange of data, rank = " << rank);
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readRelaxationFactor time: " << finish - start << " s");
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readRelaxationFactor start of restore of data, rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray end of exchange of data, rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray time: " << finish - start << " s");
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray start of restore of data, rank = " << rank);
       UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
    }
 
+   //----------------------------- restore data ---------------------------------
    int blockID;
    std::vector<double> vectorsOfValues;
-
    size_t index;
    size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
    for (int r = 0; r < size; r++)
    {
-      index = 1;
-      for(int ii = 0; ii < rawDataReceive[r][0]; ii++)
+      index = 0;
+      for (int ii = 0; ii < int(rawDataReceive[r].size() / doubleCountInBlock); ii++)
       {
          blockID = (int)(rawDataReceive[r][index]);
+         SPtr<Block3D> block = grid->getBlock(blockID);
          index += 1;
 
          vectorsOfValues.assign(rawDataReceive[r].data() + index, rawDataReceive[r].data() + index + nextVectorSize);
          index += nextVectorSize;
 
-         // fill RelaxationFactor array
-         SPtr<RelaxationFactorArray3D> mRelaxationFactor;
-         mRelaxationFactor = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
-
-         // find the nesessary block and fill it
-         SPtr<Block3D> block = grid->getBlock(blockID);
-         block->getKernel()->getDataSet()->setRelaxationFactor(mRelaxationFactor);
+         // fill arrays
+         SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___4DArray;
+         SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___3DArray;
+
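+         // all averaged quantities are 4D arrays; the relaxation factor is the only 3D array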
+         switch (arrType) {
+         case AverageDensity:
+            ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+            block->getKernel()->getDataSet()->setAverageDensity(___4DArray);
+            break;
+         case AverageVelocity:
+            ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+            block->getKernel()->getDataSet()->setAverageVelocity(___4DArray);
+            break;
+         case AverageFluktuations:
+            ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+            block->getKernel()->getDataSet()->setAverageFluctuations(___4DArray);
+            break;
+         case AverageTriple:
+            ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+            block->getKernel()->getDataSet()->setAverageTriplecorrelations(___4DArray);
+            break;
+         case ShearStressVal:
+            ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+            block->getKernel()->getDataSet()->setShearStressValues(___4DArray);
+            break;
+         case RelaxationFactor:
+            ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
+            block->getKernel()->getDataSet()->setRelaxationFactor(___3DArray);
+            break;
+         default:
+            UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationBECoProcessor::readArray : array type does not exist!"));
+            break;
+         }
       }
    }
 
+   delete [] rawDataReceive;
+
    if (comm->isRoot())
    {
-      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readRelaxationFactor end of restore of data, rank = " << rank);
+      UBLOG(logINFO, "MPIIOMigrationBECoProcessor::readArray end of restore of data, rank = " << rank);
       UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
    }
 }
@@ -2373,14 +1194,14 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
    if (comm->isRoot()) start = MPI_Wtime();
 
    int blocksCountAll = grid->getNumberOfBlocks(); // quantity of all blocks in the grid
-   int myBlocksCount;
+   size_t myBlocksCount;
    int blocksPerProcess = blocksCountAll / size;   // how many blocks has each process
    
    if (rank < (size - 1))
       myBlocksCount = blocksPerProcess;
    else
       myBlocksCount = blocksPerProcess + (blocksCountAll - blocksPerProcess * size);
-   
+  
    int indexB = rank * blocksPerProcess;  // the first "my" block
    int indexE = indexB + myBlocksCount;   // the latest "my" block
    
@@ -2395,10 +1216,14 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
    MPI_File_read_at(file_handler, (MPI_Offset)0, &sizeOfBIM, 1, MPI_INT, MPI_STATUS_IGNORE);
    bcindexmatrixVAll.resize(myBlocksCount * sizeOfBIM);
    
+   MPI_Type_contiguous(sizeOfBIM, MPI_INT, &bcindexmatrixType);
+   MPI_Type_commit(&bcindexmatrixType);
+
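+   // one element of bcindexmatrixType covers the whole bcindexmatrix of a block (sizeOfBIM ints)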
    MPI_Offset read_offset = (MPI_Offset)(sizeof(int)) + (MPI_Offset)(indexB) * (MPI_Offset)(sizeOfBIM) * (MPI_Offset)(sizeof(int));
-   MPI_File_read_at(file_handler, read_offset, &bcindexmatrixVAll[0], myBlocksCount * sizeOfBIM, MPI_INT, MPI_STATUS_IGNORE);
+   MPI_File_read_at(file_handler, read_offset, &bcindexmatrixVAll[0], myBlocksCount, bcindexmatrixType, MPI_STATUS_IGNORE);
 
    MPI_File_close(&file_handler);
+   MPI_Type_free(&bcindexmatrixType);
 
    if (comm->isRoot())
    {
@@ -2440,7 +1265,7 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
             bcindexmatrixVAll.begin() + ind * sizeOfBIM + sizeOfBIM);
       }
    }
-
+ 
    MPI_Request* requests = new MPI_Request[size * 2]; // send + receive
    int requestCount = 0;
    MPI_Status status;
@@ -2591,6 +1416,10 @@ void MPIIOMigrationBECoProcessor::readBoundaryConds(int step)
    MPI_File_close(&file_handler);
 
    delete nullBouCond;
+   delete bcArray;
+   delete [] rawDataReceive;
+   delete [] rawDataSend;
+   delete [] requests;
 
    if (comm->isRoot())
    {
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h
index 30d5b4b37789d521316888eb0a7b69ba67e9be2f..d5ecf88a870d845d3ae25ef6f1c3625e1ac38bca 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h
@@ -5,7 +5,7 @@
 #include <string>
 #include <vector>
 
-#include "CoProcessor.h"
+#include "MPIIOCoProcessor.h"
 #include "MPIIODataStructures.h"
 
 class Grid3D;
@@ -16,9 +16,12 @@ class LBMKernel;
 
 //! \class MPIWriteBlocksBECoProcessor 
 //! \brief Writes the grid each timestep into the files and reads the grip from the files before regenerating  
-class MPIIOMigrationBECoProcessor : public CoProcessor
+class MPIIOMigrationBECoProcessor : public MPIIOCoProcessor
 {
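+  //! Identifies which block array is handled by write4DArray, write3DArray and readArray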
+  enum Arrays { AverageDensity = 1, AverageVelocity = 2, AverageFluktuations = 3, AverageTriple = 4, ShearStressVal = 5, RelaxationFactor = 6};
+
 public:
+
    MPIIOMigrationBECoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string& path, SPtr<Communicator> comm);
    virtual ~MPIIOMigrationBECoProcessor();
    //! Each timestep writes the grid into the files
@@ -29,12 +32,14 @@ public:
    void writeBlocks(int step);
    //! Writes the datasets of the blocks into the file cpDataSet.bin
    void writeDataSet(int step);
-   void writeAverageDensityArray(int step);
-   void writeAverageVelocityArray(int step);
-   void writeAverageFluktuationsArray(int step);
-   void writeAverageTripleArray(int step);
-   void writeShearStressValArray(int step);
-   void writeRelaxationFactor(int step);
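+   //! Writes a 4D/3D array of block data (e.g. averaged values, relaxation factor) into the given file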
+   void write4DArray(int step, Arrays arrType, std::string fname);
+   void write3DArray(int step, Arrays arrType, std::string fname);
+   //void writeAverageDensityArray(int step);
+   //void writeAverageVelocityArray(int step);
+   //void writeAverageFluktuationsArray(int step);
+   //void writeAverageTripleArray(int step);
+   //void writeShearStressValArray(int step);
+   //void writeRelaxationFactor(int step);
    //! Writes the boundary conditions of the blocks into the file cpBC.bin
    void writeBoundaryConds(int step);
 
@@ -42,12 +47,13 @@ public:
    void readBlocks(int step);
    //! Reads the datasets of the blocks from the file cpDataSet.bin
    void readDataSet(int step);
-   void readAverageDensityArray(int step);
-   void readAverageVelocityArray(int step);
-   void readAverageFluktuationsArray(int step);
-   void readAverageTripleArray(int step);
-   void readShearStressValArray(int step);
-   void readRelaxationFactor(int step);
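+   //! Reads an array of block data of the given type from the given file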
+   void readArray(int step, Arrays arrType, std::string fname);
+//   void readAverageDensityArray(int step);
+//   void readAverageVelocityArray(int step);
+//   void readAverageFluktuationsArray(int step);
+//   void readAverageTripleArray(int step);
+//   void readShearStressValArray(int step);
+//   void readRelaxationFactor(int step);
    //! Reads the boundary conditions of the blocks from the file cpBC.bin
    void readBoundaryConds(int step);
    //! The function sets LBMKernel
@@ -57,21 +63,17 @@ public:
    //!The function truncates the data files
    void clearAllFiles(int step);
    void setNu(double nu);
-   //!The function write a time step of last check point
-   void writeCpTimeStep(int step);
-   //!The function read a time step of last check point
-   int readCpTimeStep();
 
    void blocksExchange(int tagN, int ind1, int ind2, int doubleCountInBlock, std::vector<double>& pV, std::vector<double>* rawDataReceive);
 
 protected:
-   std::string path;
-   SPtr<Communicator> comm;
+   //std::string path;
+   //SPtr<Communicator> comm;
 
 private:
-   MPI_Datatype gridParamType, block3dType, arrayPresenceType;
-   MPI_Datatype dataSetParamType, dataSetType, dataSetSmallType, dataSetDoubleType;
-   MPI_Datatype boundCondParamType, boundCondType, boundCondTypeAdd, bcindexmatrixType;
+   //MPI_Datatype gridParamType, block3dType;
+   MPI_Datatype dataSetType, dataSetSmallType, dataSetDoubleType;
+   MPI_Datatype boundCondParamType, boundCondTypeAdd, bcindexmatrixType;
    MPI_Datatype sendBlockDoubleType, sendBlockIntType;
 
    MPIIODataStructures::boundCondParam boundCondParamStr;
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
index 9e7684cb5633890a2f7db295d26e083168999b99..40e6541a00427571c61da1bd2cde4073ab52f8bc 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
@@ -9,7 +9,6 @@
 #include "CoordinateTransformation3D.h"
 #include "DataSet3D.h"
 #include "Grid3D.h"
-#include "Grid3DSystem.h"
 #include "BCArray3D.h"
 #include "Communicator.h"
 #include "WbWriter.h"
@@ -30,11 +29,6 @@ MPIIOMigrationCoProcessor::MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbS
 
    //-------------------------   define MPI types  ---------------------------------
 
-   MPI_Type_contiguous(7, MPI_INT, &dataSetParamType);
-   MPI_Type_commit(&dataSetParamType);
-
-   //-----------------------------------------------------------------------
-
    MPI_Datatype typesDataSet[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR };
    int blocksDataSet[3] = { 2, 2, 2 };
    MPI_Aint offsetsDatatSet[3], lbDataSet, extentDataSet;
@@ -59,42 +53,19 @@ MPIIOMigrationCoProcessor::MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbS
    MPI_Type_contiguous(4, MPI_INT, &boundCondParamType);
    MPI_Type_commit(&boundCondParamType);
 
-   //-----------------------------------------------------------------------
-
-   MPI_Datatype typesBC[3] = { MPI_LONG_LONG_INT, MPI_FLOAT, MPI_CHAR };
-   int blocksBC[3] = { 5, 38, 1 };
-   MPI_Aint offsetsBC[3], lbBC, extentBC;
-
-   offsetsBC[0] = 0;
-   MPI_Type_get_extent(MPI_LONG_LONG_INT, &lbBC, &extentBC);
-   offsetsBC[1] = blocksBC[0] * extentBC;
-
-   MPI_Type_get_extent(MPI_FLOAT, &lbBC, &extentBC);
-   offsetsBC[2] = offsetsBC[1] + blocksBC[1] * extentBC;
-
-   MPI_Type_create_struct(3, blocksBC, offsetsBC, typesBC, &boundCondType);
-   MPI_Type_commit(&boundCondType);
-
    //---------------------------------------
 
    MPI_Type_contiguous(3, MPI_INT, &boundCondTypeAdd);
    MPI_Type_commit(&boundCondTypeAdd);
-   //---------------------------------------
-
-   MPI_Type_contiguous(6, MPI_CHAR, &arrayPresenceType);
-   MPI_Type_commit(&arrayPresenceType);
 
 }
 //////////////////////////////////////////////////////////////////////////
 MPIIOMigrationCoProcessor::~MPIIOMigrationCoProcessor()
 {
-   MPI_Type_free(&dataSetParamType);
    MPI_Type_free(&dataSetType);
    MPI_Type_free(&dataSetSmallType);
    MPI_Type_free(&boundCondParamType);
-   MPI_Type_free(&boundCondType);
    MPI_Type_free(&boundCondTypeAdd);
-   MPI_Type_free(&arrayPresenceType);
 }
 
 //////////////////////////////////////////////////////////////////////////
@@ -115,23 +86,24 @@ void MPIIOMigrationCoProcessor::process(double step)
       if (comm->isRoot()) UBLOG(logINFO, "Save check point - end");
    }
 }
-//////////////////////////////////////////////////////////////////////////
-void MPIIOMigrationCoProcessor::writeCpTimeStep(int step)
-{
-   if (comm->isRoot())
-   {
-      UbFileOutputASCII f(path + "/mpi_io_cp/cp.txt");
-      f.writeInteger(step);
-   }
-}
-//////////////////////////////////////////////////////////////////////////
-int MPIIOMigrationCoProcessor::readCpTimeStep()
+
+void MPIIOMigrationCoProcessor::clearAllFiles(int step)
 {
-   UbFileInputASCII f(path + "/mpi_io_cp/cp.txt");
-   int step = f.readInteger();
-   return step;
+   MPI_File file_handler;
+   MPI_Info info = MPI_INFO_NULL;
+   MPI_Offset new_size = 0;
+
+   MPIIOCoProcessor::clearAllFiles(step);
+   
+   UbSystem::makeDirectory(path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step));
+
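+   // the base class truncates the common checkpoint files; additionally truncate cpBC.bin here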
+   std::string filename10 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC.bin";
+   int rc10 = MPI_File_open(MPI_COMM_WORLD, filename10.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+   if (rc10 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename10);
+   MPI_File_set_size(file_handler, new_size);
+   MPI_File_close(&file_handler);
 }
-//////////////////////////////////////////////////////////////////////////
+
 void MPIIOMigrationCoProcessor::writeBlocks(int step)
 {
    grid->renumberBlockIDs();
@@ -167,8 +139,13 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
 
    DSArraysPresence arrPresence;
    bool firstBlock = true;
-   int doubleCountInBlock = 0;
+   size_t doubleCountInBlock = 0;
    int ic = 0;
+   SPtr< D3Q27EsoTwist3DSplittedVector > D3Q27EsoTwist3DSplittedVectorPtr;
+   CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributions;
+   CbArray4D <LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributions;
+   CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributions;
+
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
       for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
@@ -180,10 +157,10 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
          dataSetArray[ic].compressible = block->getKernel()->getCompressible();
          dataSetArray[ic].withForcing = block->getKernel()->getWithForcing();
 
-         SPtr< D3Q27EsoTwist3DSplittedVector > D3Q27EsoTwist3DSplittedVectorPtr = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getFdistributions());
-         CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getLocalDistributions();
-         CbArray4D <LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getNonLocalDistributions();
-         CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getZeroDistributions();
+         D3Q27EsoTwist3DSplittedVectorPtr = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getFdistributions());
+         localDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getLocalDistributions();
+         nonLocalDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getNonLocalDistributions();
+         zeroDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getZeroDistributions();
 
          if (firstBlock)// && block->getKernel()) // when first (any) valid block...
          {
@@ -302,7 +279,7 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
    MPI_Offset write_offset;
    size_t sizeofOneDataSet = sizeof(DataSetMigration) + doubleCountInBlock * sizeof(double);
 
-   for (int nb = 0; nb < blocksCount; nb++)
+   for (size_t nb = 0; nb < blocksCount; nb++)
    {
       write_offset = (MPI_Offset)(3 * sizeof(dataSetParam) + dataSetArray[nb].globalID * sizeofOneDataSet);
       MPI_File_write_at(file_handler, write_offset, &dataSetArray[nb], 1, dataSetType, MPI_STATUS_IGNORE);
@@ -311,7 +288,6 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
@@ -331,25 +307,272 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
    MPI_File_close(&file_handler1);
 
    if (arrPresence.isAverageDensityArrayPresent)
-      writeAverageDensityArray(step);
+      write4DArray(step, AverageDensity, std::string("/cpAverageDensityArray.bin"));
+   //writeAverageDensityArray(step);
 
    if (arrPresence.isAverageVelocityArrayPresent)
-      writeAverageVelocityArray(step);
+      write4DArray(step, AverageVelocity, std::string("/cpAverageVelocityArray.bin"));
+   //writeAverageVelocityArray(step);
 
    if (arrPresence.isAverageFluktuationsArrayPresent)
-      writeAverageFluktuationsArray(step);
+      write4DArray(step, AverageFluktuations, std::string("/cpAverageFluktuationsArray.bin"));
+   //writeAverageFluktuationsArray(step);
 
    if (arrPresence.isAverageTripleArrayPresent)
-      writeAverageTripleArray(step);
+      write4DArray(step, AverageTriple, std::string("/cpAverageTripleArray.bin"));
+   //writeAverageTripleArray(step);
 
    if (arrPresence.isShearStressValArrayPresent)
-      writeShearStressValArray(step);
+      write4DArray(step, ShearStressVal, std::string("/cpShearStressValArray.bin"));
+   //writeShearStressValArray(step);
 
    if (arrPresence.isRelaxationFactorPresent)
-      writeRelaxationFactor(step);
+      write3DArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin"));
+}
+
+void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::string fname)
+{
+   int rank, size;
+   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+   MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+   int blocksCount = 0; // quantity of blocks in the grid, max 2147483647 blocks!
+
+   std::vector<SPtr<Block3D>> blocksVector[25];
+   int minInitLevel = this->grid->getCoarsestInitializedLevel();
+   int maxInitLevel = this->grid->getFinestInitializedLevel();
+   for (int level = minInitLevel; level <= maxInitLevel; level++)
+   {
+      grid->getBlocks(level, rank, blocksVector[level]);
+      blocksCount += static_cast<int>(blocksVector[level].size());
+   }
+
+   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
+   std::vector<double> doubleValuesArray; // double-values of the chosen 4D array in all blocks 
+   dataSetParam dataSetParamStr;
+
+   if (comm->isRoot())
+   {
+      UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageDensityArray start collect data rank = " << rank);
+      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+   }
+
+   bool firstBlock = true;
+   size_t doubleCountInBlock = 0;
+   int ic = 0;
+   SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > ___Array;
+
+   for (int level = minInitLevel; level <= maxInitLevel; level++)
+   {
+      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
+      {
+         dataSetSmallArray[ic].globalID = block->getGlobalID();     // id of the block needed to find it while regenerating the grid
+
+         switch (arrayType) {
+         case AverageDensity:
+            ___Array = block->getKernel()->getDataSet()->getAverageDensity();
+            break;
+         case AverageVelocity:
+            ___Array = block->getKernel()->getDataSet()->getAverageVelocity();
+            break;
+         case AverageFluktuations:
+            ___Array = block->getKernel()->getDataSet()->getAverageFluctuations();
+            break;
+         case AverageTriple:
+            ___Array = block->getKernel()->getDataSet()->getAverageTriplecorrelations();
+            break;
+         case ShearStressVal:
+            ___Array = block->getKernel()->getDataSet()->getShearStressValues();
+            break;
+         default:
+            UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationCoProcessor::write4DArray : 4D array type does not exist!"));
+            break;
+         }
+
+         if (firstBlock) // when first (any) valid block...
+         {
+            dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
+            dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1());
+            dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2());
+            dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3());
+            dataSetParamStr.nx[3] = static_cast<int>(___Array->getNX4());
+            doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+
+            firstBlock = false;
+         }
+
+         if (___Array && (dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0))
+            doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end());
+
+         ic++;
+      }
+   }
 
+   // register new MPI-types depending on the block-specific information
+   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
+   MPI_Type_commit(&dataSetDoubleType);
+
+   if (comm->isRoot())
+   {
+      UBLOG(logINFO, "MPIIOMigrationCoProcessor::write4DArray start MPI IO rank = " << rank);
+      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+   }
+
+   double start, finish;
+   if (comm->isRoot()) start = MPI_Wtime();
+
+   MPI_Info info = MPI_INFO_NULL;
+
+   MPI_File file_handler;
+   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + fname;
+   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+   // each process writes common parameters of a dataSet
+   MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
+
+   MPI_Offset write_offset;
+   size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
+
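+   // each block is written at a fixed offset derived from its global ID, so no inter-process coordination is needed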
+   for (size_t nb = 0; nb < blocksCount; nb++)
+   {
+      write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
+      MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
+      MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
+   }
+
+   MPI_File_sync(file_handler);
+   MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
+
+   if (comm->isRoot())
+   {
+      finish = MPI_Wtime();
+      UBLOG(logINFO, "MPIIOMigrationCoProcessor::write4DArray time: " << finish - start << " s");
+   }
+
+   delete[] dataSetSmallArray;
 }
 
+void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::string fname)
+{
+   int rank, size;
+   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+   MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+   int blocksCount = 0; // quantity of blocks in the grid, max 2147483647 blocks!
+
+   std::vector<SPtr<Block3D>> blocksVector[25];
+   int minInitLevel = this->grid->getCoarsestInitializedLevel();
+   int maxInitLevel = this->grid->getFinestInitializedLevel();
+   for (int level = minInitLevel; level <= maxInitLevel; level++)
+   {
+      grid->getBlocks(level, rank, blocksVector[level]);
+      blocksCount += static_cast<int>(blocksVector[level].size());
+   }
+
+   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
+   std::vector<double> doubleValuesArray; // double-values of the chosen 3D array in all blocks 
+   dataSetParam dataSetParamStr;
+
+   if (comm->isRoot())
+   {
+      UBLOG(logINFO, "MPIIOMigrationCoProcessor::write3DArray start collect data rank = " << rank);
+      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+   }
+
+   bool firstBlock = true;
+   size_t doubleCountInBlock = 0;
+   int ic = 0;
+   SPtr< CbArray3D<LBMReal, IndexerX3X2X1> > ___Array;
+
+   for (int level = minInitLevel; level <= maxInitLevel; level++)
+   {
+      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
+      {
+         dataSetSmallArray[ic].globalID = block->getGlobalID();     // id of the block needed to find it while regenerating the grid
+
+         switch (arrayType) {
+         case RelaxationFactor:
+            ___Array = block->getKernel()->getDataSet()->getRelaxationFactor();
+            break;
+         default:
+            UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationCoProcessor::write3DArray : 3D array type does not exist!"));
+            break;
+         }
+
+         if (firstBlock) // when first (any) valid block...
+         {
+            dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
+            dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1());
+            dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2());
+            dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3());
+            dataSetParamStr.nx[3] = 1;
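+            // nx[3] stays 1 so a 3D array can reuse the 4D dataSetParam layout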
+            doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+
+            firstBlock = false;
+         }
+
+         if (___Array && (dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0))
+            doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end());
+
+         ic++;
+      }
+   }
+
+   // register new MPI-types depending on the block-specific information
+   MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
+   MPI_Type_commit(&dataSetDoubleType);
+
+   if (comm->isRoot())
+   {
+      UBLOG(logINFO, "MPIIOMigrationCoProcessor::write3DArray start MPI IO rank = " << rank);
+      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+   }
+
+   double start, finish;
+   if (comm->isRoot()) start = MPI_Wtime();
+
+   MPI_Info info = MPI_INFO_NULL;
+
+#ifdef HLRN_LUSTRE
+   MPI_Info_create(&info);
+   MPI_Info_set(info, "striping_factor", "40");
+   MPI_Info_set(info, "striping_unit", "4M");
+#endif
+
+   MPI_File file_handler;
+   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + fname;
+   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+   // each process writes common parameters of a dataSet
+   MPI_File_write_at(file_handler, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
+
+   size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
+
+   MPI_Offset write_offset;
+   for (size_t nb = 0; nb < blocksCount; nb++)
+   {
+      write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
+      MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
+      MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[nb * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
+   }
+
+   MPI_File_sync(file_handler);
+   MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
+
+   if (comm->isRoot())
+   {
+      finish = MPI_Wtime();
+      UBLOG(logINFO, "MPIIOMigrationCoProcessor::write3DArray time: " << finish - start << " s");
+   }
+
+   delete[] dataSetSmallArray;
+}
+
+/*
 void MPIIOMigrationCoProcessor::writeAverageDensityArray(int step)
 {
    int rank, size;
@@ -378,7 +601,7 @@ void MPIIOMigrationCoProcessor::writeAverageDensityArray(int step)
    }
 
    bool firstBlock = true;
-   int doubleCountInBlock = 0;
+   size_t doubleCountInBlock = 0;
    int ic = 0;
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
@@ -438,7 +661,7 @@ void MPIIOMigrationCoProcessor::writeAverageDensityArray(int step)
    MPI_Offset write_offset;
    size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
 
-   for (int nb = 0; nb < blocksCount; nb++)
+   for (size_t nb = 0; nb < blocksCount; nb++)
    {
       write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
       MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
@@ -447,7 +670,6 @@ void MPIIOMigrationCoProcessor::writeAverageDensityArray(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
@@ -487,7 +709,7 @@ void MPIIOMigrationCoProcessor::writeAverageVelocityArray(int step)
    }
 
    bool firstBlock = true;
-   int doubleCountInBlock = 0;
+   size_t doubleCountInBlock = 0;
    int ic = 0;
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
@@ -547,7 +769,7 @@ void MPIIOMigrationCoProcessor::writeAverageVelocityArray(int step)
    MPI_Offset write_offset;
    size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
 
-   for (int nb = 0; nb < blocksCount; nb++)
+   for (size_t nb = 0; nb < blocksCount; nb++)
    {
       write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
       MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
@@ -556,7 +778,6 @@ void MPIIOMigrationCoProcessor::writeAverageVelocityArray(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
@@ -596,7 +817,7 @@ void MPIIOMigrationCoProcessor::writeAverageFluktuationsArray(int step)
    }
 
    bool firstBlock = true;
-   int doubleCountInBlock = 0;
+   size_t doubleCountInBlock = 0;
    int ic = 0;
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
@@ -661,7 +882,7 @@ void MPIIOMigrationCoProcessor::writeAverageFluktuationsArray(int step)
    MPI_Offset write_offset;
    size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
 
-   for (int nb = 0; nb < blocksCount; nb++)
+   for (size_t nb = 0; nb < blocksCount; nb++)
    {
       write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
       MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
@@ -670,7 +891,6 @@ void MPIIOMigrationCoProcessor::writeAverageFluktuationsArray(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
@@ -710,7 +930,7 @@ void MPIIOMigrationCoProcessor::writeAverageTripleArray(int step)
    }
 
    bool firstBlock = true;
-   int doubleCountInBlock = 0;
+   size_t doubleCountInBlock = 0;
    int ic = 0;
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
@@ -776,7 +996,7 @@ void MPIIOMigrationCoProcessor::writeAverageTripleArray(int step)
    MPI_Offset write_offset;
    size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
 
-   for (int nb = 0; nb < blocksCount; nb++)
+   for (size_t nb = 0; nb < blocksCount; nb++)
    {
       write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
       MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
@@ -785,7 +1005,6 @@ void MPIIOMigrationCoProcessor::writeAverageTripleArray(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
@@ -825,7 +1044,7 @@ void MPIIOMigrationCoProcessor::writeShearStressValArray(int step)
    }
 
    bool firstBlock = true;
-   int doubleCountInBlock = 0;
+   size_t doubleCountInBlock = 0;
    int ic = 0;
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
@@ -891,7 +1110,7 @@ void MPIIOMigrationCoProcessor::writeShearStressValArray(int step)
    MPI_Offset write_offset;
    size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
 
-   for (int nb = 0; nb < blocksCount; nb++)
+   for (size_t nb = 0; nb < blocksCount; nb++)
    {
       write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
       MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
@@ -900,7 +1119,6 @@ void MPIIOMigrationCoProcessor::writeShearStressValArray(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
@@ -940,7 +1158,7 @@ void MPIIOMigrationCoProcessor::writeRelaxationFactor(int step)
    }
 
    bool firstBlock = true;
-   int doubleCountInBlock = 0;
+   size_t doubleCountInBlock = 0;
    int ic = 0;
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
@@ -1006,7 +1224,7 @@ void MPIIOMigrationCoProcessor::writeRelaxationFactor(int step)
    MPI_Offset write_offset;
    size_t sizeofOneDataSet = sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double);
 
-   for (int nb = 0; nb < blocksCount; nb++)
+   for (size_t nb = 0; nb < blocksCount; nb++)
    {
       write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallArray[nb].globalID * sizeofOneDataSet);
       MPI_File_write_at(file_handler, write_offset, &dataSetSmallArray[nb], 1, dataSetSmallType, MPI_STATUS_IGNORE);
@@ -1015,7 +1233,6 @@ void MPIIOMigrationCoProcessor::writeRelaxationFactor(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
@@ -1026,7 +1243,7 @@ void MPIIOMigrationCoProcessor::writeRelaxationFactor(int step)
 
    delete[] dataSetSmallArray;
 }
-
+*/
 void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
 {
    int rank, size;
@@ -1061,11 +1278,13 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
 
    bool bcindexmatrixCountNotInit = true;
    int ic = 0;
+   SPtr<BCArray3D> bcArr;
+
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
       for (SPtr<Block3D> block : blocksVector[level])  // all the blocks of the current level
       {
-         SPtr<BCArray3D> bcArr = block->getKernel()->getBCProcessor()->getBCArray();
+         bcArr = block->getKernel()->getBCProcessor()->getBCArray();
 
          bcAddArray[ic].globalID = block->getGlobalID(); // id of the block needed to find it while regenerating the grid
          bcAddArray[ic].boundCond_count = 0;             // how many BoundaryConditions in this block
@@ -1201,7 +1420,6 @@ void MPIIOMigrationCoProcessor::writeBoundaryConds(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&bcindexmatrixType);
 
    if (comm->isRoot())
@@ -1234,7 +1452,6 @@ void MPIIOMigrationCoProcessor::restart(int step)
    grid->setTimeStep(step);
 
    if (comm->isRoot()) UBLOG(logINFO, "Load check point - end");
-   //this->reconnect(grid);
 }
 
 void MPIIOMigrationCoProcessor::readBlocks(int step)
@@ -1256,7 +1473,7 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
    double start, finish;
    if (comm->isRoot()) start = MPI_Wtime();
 
-   int blocksCount = 0; // quantity of the blocks, that belong to this process
+   size_t blocksCount = 0; // quantity of the blocks, that belong to this process
    dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3;
 
    // read from the grid the blocks, that belong to this process
@@ -1280,7 +1497,7 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
    MPI_File_read_at(file_handler, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
    MPI_File_read_at(file_handler, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
-   double doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
+   size_t doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
       dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] +
       dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
    std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks 
@@ -1289,7 +1506,7 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
    MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
    MPI_Type_commit(&dataSetDoubleType);
 
-   int ic = 0;
+   size_t ic = 0;
    MPI_Offset read_offset;
    size_t sizeofOneDataSet = size_t(sizeof(DataSetMigration) + doubleCountInBlock * sizeof(double));
 
@@ -1305,6 +1522,7 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
    }
 
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -1343,8 +1561,6 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
 
       // find the nesessary block and fill it
       SPtr<Block3D> block = grid->getBlock(dataSetArray[n].globalID);
-      //std::cout << "rank="<<rank<<", dataSetArray[n].globalID=" << dataSetArray[n].globalID << std::endl;
-
       this->lbmKernel->setBlock(block);
       SPtr<LBMKernel> kernel = this->lbmKernel->clone();
       kernel->setGhostLayerWidth(dataSetArray[n].ghostLayerWidth);
@@ -1358,7 +1574,6 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
       block->setKernel(kernel);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -1379,26 +1594,155 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
    MPI_File_close(&file_handler1);
 
    if (arrPresence.isAverageDensityArrayPresent)
-      readAverageDensityArray(step);
+      readArray(step, AverageDensity, std::string("/cpAverageDensityArray.bin"));
+   //readAverageDensityArray(step);
 
    if (arrPresence.isAverageVelocityArrayPresent)
-      readAverageVelocityArray(step);
+      readArray(step, AverageVelocity, std::string("/cpAverageVelocityArray.bin"));
+   //   readAverageVelocityArray(step);
 
    if (arrPresence.isAverageFluktuationsArrayPresent)
-      readAverageFluktuationsArray(step);
+      readArray(step, AverageFluktuations, std::string("/cpAverageFluktuationsArray.bin"));
+   //   readAverageFluktuationsArray(step);
 
    if (arrPresence.isAverageTripleArrayPresent)
-      readAverageTripleArray(step);
+      readArray(step, AverageTriple, std::string("/cpAverageTripleArray.bin"));
+   //  readAverageTripleArray(step);
 
    if (arrPresence.isShearStressValArrayPresent)
-      readShearStressValArray(step);
+      readArray(step, ShearStressVal, std::string("/cpShearStressValArray.bin"));
+   //   readShearStressValArray(step);
 
    if (arrPresence.isRelaxationFactorPresent)
-      readRelaxationFactor(step);
+      readArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin"));
+   //   readRelaxationFactor(step);
+}
+
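+// Replaces the six specialized read functions: restores one optional per-block
+// array (averages, shear stress values or relaxation factor) from a checkpoint
+// file. File layout: one dataSetParam header followed by one record
+// (DataSetSmallMigration + doubleCountInBlock doubles) per block, addressed by
+// the block's global ID.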
+void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string fname)
+{
+   int rank, size;
+   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+   MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+   if (comm->isRoot())
+   {
+      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray start MPI IO rank = " << rank);
+      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+   }
+   double start, finish;
+   if (comm->isRoot()) start = MPI_Wtime();
+
+   MPI_File file_handler;
+   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + fname;
+   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
+   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+   size_t blocksCount = 0;
+   dataSetParam dataSetParamStr;
+   memset(&dataSetParamStr, 0, sizeof(dataSetParam));
+
+   // read from the grid the blocks that belong to this process
+   std::vector<SPtr<Block3D>> blocksVector[25];
+   int minInitLevel = this->grid->getCoarsestInitializedLevel();
+   int maxInitLevel = this->grid->getFinestInitializedLevel();
+   for (int level = minInitLevel; level <= maxInitLevel; level++)
+   {
+      grid->getBlocks(level, rank, blocksVector[level]);
+      blocksCount += blocksVector[level].size();
+   }
+
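+   // read the common array dimensions stored once at the beginning of the file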
+   MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
+
+   DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
+   size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+   std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
+
+   // define MPI_types depending on the block-specific information
+   MPI_Type_contiguous(static_cast<int>(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
+   MPI_Type_commit(&dataSetDoubleType);
+
+   size_t ic = 0;
+   MPI_Offset read_offset;
+   size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
+
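+   // every block reads its own record; the file offset is derived from the block's global ID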
+   for (int level = minInitLevel; level <= maxInitLevel; level++)
+   {
+      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
+      {
+         read_offset = (MPI_Offset)(sizeof(dataSetParam) + block->getGlobalID() * sizeofOneDataSet);
+         MPI_File_read_at(file_handler, read_offset, &dataSetSmallArray[ic], 1, dataSetSmallType, MPI_STATUS_IGNORE);
+         MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(DataSetSmallMigration)), &doubleValuesArray[ic * doubleCountInBlock], 1, dataSetDoubleType, MPI_STATUS_IGNORE);
+         ic++;
+      }
+   }
+
+   MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
+
+   if (comm->isRoot())
+   {
+      finish = MPI_Wtime();
+      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray time: " << finish - start << " s");
+      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray start of restore of data, rank = " << rank);
+      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+   }
+
+   //----------------------------- restore data ---------------------------------
+   size_t index = 0;
+   size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+   std::vector<double> vectorsOfValues;
+   for (size_t n = 0; n < blocksCount; n++)
+   {
+      SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
 
+      vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
+      index += nextVectorSize;
+
+      // fill arrays
+      SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___4DArray;
+      SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___3DArray;
+
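+      // arrType selects which field of the block's DataSet3D is rebuilt from the values just read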
+      switch (arrType) {
+      case AverageDensity:
+         ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+         block->getKernel()->getDataSet()->setAverageDensity(___4DArray);
+         break;
+      case AverageVelocity:
+         ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+         block->getKernel()->getDataSet()->setAverageVelocity(___4DArray);
+         break;
+      case AverageFluktuations:
+         ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+         block->getKernel()->getDataSet()->setAverageFluctuations(___4DArray);
+         break;
+      case AverageTriple:
+         ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+         block->getKernel()->getDataSet()->setAverageTriplecorrelations(___4DArray);
+         break;
+      case ShearStressVal:
+         ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+         block->getKernel()->getDataSet()->setShearStressValues(___4DArray);
+         break;
+      case RelaxationFactor:
+         ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
+         block->getKernel()->getDataSet()->setRelaxationFactor(___3DArray);
+         break;
+      default:
+         UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationCoProcessor::readArray : array type does not exist!"));
+         break;
+      }
+   }
+
+   if (comm->isRoot())
+   {
+      UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray end of restore of data, rank = " << rank);
+      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+   }
+
+   delete[] dataSetSmallArray;
 }
 
-void MPIIOMigrationCoProcessor::readAverageDensityArray(int step)
+/*void MPIIOMigrationCoProcessor::readAverageDensityArray(int step)
 {
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
@@ -1418,7 +1762,7 @@ void MPIIOMigrationCoProcessor::readAverageDensityArray(int step)
    if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
    // read count of blocks
-   int blocksCount = 0;
+   size_t blocksCount = 0;
    dataSetParam dataSetParamStr;
    memset(&dataSetParamStr, 0, sizeof(dataSetParam));
 
@@ -1435,14 +1779,14 @@ void MPIIOMigrationCoProcessor::readAverageDensityArray(int step)
    MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
    DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+   size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
    std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
 
    // define MPI_types depending on the block-specific information
    MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
    MPI_Type_commit(&dataSetDoubleType);
 
-   int ic = 0;
+   size_t ic = 0;
    MPI_Offset read_offset;
    size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
 
@@ -1458,6 +1802,7 @@ void MPIIOMigrationCoProcessor::readAverageDensityArray(int step)
    }
 
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -1488,8 +1833,6 @@ void MPIIOMigrationCoProcessor::readAverageDensityArray(int step)
       block->getKernel()->getDataSet()->setAverageDensity(mAverageDensity);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageDensityArray end of restore of data, rank = " << rank);
@@ -1519,7 +1862,7 @@ void MPIIOMigrationCoProcessor::readAverageVelocityArray(int step)
    if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
    // read count of blocks
-   int blocksCount = 0;
+   size_t blocksCount = 0;
    dataSetParam dataSetParamStr;
    memset(&dataSetParamStr, 0, sizeof(dataSetParam));
 
@@ -1536,14 +1879,14 @@ void MPIIOMigrationCoProcessor::readAverageVelocityArray(int step)
    MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
    DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+   size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
    std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
 
    // define MPI_types depending on the block-specific information
    MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
    MPI_Type_commit(&dataSetDoubleType);
 
-   int ic = 0;
+   size_t ic = 0;
    MPI_Offset read_offset;
    size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
 
@@ -1559,6 +1902,7 @@ void MPIIOMigrationCoProcessor::readAverageVelocityArray(int step)
    }
 
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -1588,8 +1932,6 @@ void MPIIOMigrationCoProcessor::readAverageVelocityArray(int step)
       block->getKernel()->getDataSet()->setAverageVelocity(mAverageVelocity);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageVelocityArray end of restore of data, rank = " << rank);
@@ -1619,7 +1961,7 @@ void MPIIOMigrationCoProcessor::readAverageFluktuationsArray(int step)
    if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
    // read count of blocks
-   int blocksCount = 0;
+   size_t blocksCount = 0;
    dataSetParam dataSetParamStr;
    memset(&dataSetParamStr, 0, sizeof(dataSetParam));
 
@@ -1643,7 +1985,7 @@ void MPIIOMigrationCoProcessor::readAverageFluktuationsArray(int step)
    MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
    MPI_Type_commit(&dataSetDoubleType);
 
-   int ic = 0;
+   size_t ic = 0;
    MPI_Offset read_offset;
    size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
 
@@ -1659,6 +2001,7 @@ void MPIIOMigrationCoProcessor::readAverageFluktuationsArray(int step)
    }
 
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -1688,8 +2031,6 @@ void MPIIOMigrationCoProcessor::readAverageFluktuationsArray(int step)
       block->getKernel()->getDataSet()->setAverageFluctuations(mAverageFluktuations);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageFluktuationsArray end of restore of data, rank = " << rank);
@@ -1719,7 +2060,7 @@ void MPIIOMigrationCoProcessor::readAverageTripleArray(int step)
    if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
    // read count of blocks
-   int blocksCount = 0;
+   size_t blocksCount = 0;
    dataSetParam dataSetParamStr;
    memset(&dataSetParamStr, 0, sizeof(dataSetParam));
 
@@ -1736,14 +2077,14 @@ void MPIIOMigrationCoProcessor::readAverageTripleArray(int step)
    MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
    DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+   size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
    std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
 
    // define MPI_types depending on the block-specific information
    MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
    MPI_Type_commit(&dataSetDoubleType);
 
-   int ic = 0;
+   size_t ic = 0;
    MPI_Offset read_offset;
    size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
 
@@ -1759,6 +2100,7 @@ void MPIIOMigrationCoProcessor::readAverageTripleArray(int step)
    }
 
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -1788,8 +2130,6 @@ void MPIIOMigrationCoProcessor::readAverageTripleArray(int step)
       block->getKernel()->getDataSet()->setAverageTriplecorrelations(mAverageTriplecorrelations);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIOMigrationCoProcessor::readAverageTripleArray end of restore of data, rank = " << rank);
@@ -1819,7 +2159,7 @@ void MPIIOMigrationCoProcessor::readShearStressValArray(int step)
    if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
    // read count of blocks
-   int blocksCount = 0;
+   size_t blocksCount = 0;
    dataSetParam dataSetParamStr;
    memset(&dataSetParamStr, 0, sizeof(dataSetParam));
 
@@ -1836,14 +2176,14 @@ void MPIIOMigrationCoProcessor::readShearStressValArray(int step)
    MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
    DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+   size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
    std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
 
    // define MPI_types depending on the block-specific information
    MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
    MPI_Type_commit(&dataSetDoubleType);
 
-   int ic = 0;
+   size_t ic = 0;
    MPI_Offset read_offset;
    size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
 
@@ -1859,6 +2199,7 @@ void MPIIOMigrationCoProcessor::readShearStressValArray(int step)
    }
 
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -1888,8 +2229,6 @@ void MPIIOMigrationCoProcessor::readShearStressValArray(int step)
       block->getKernel()->getDataSet()->setShearStressValues(mShearStressValues);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIOMigrationCoProcessor::readShearStressValArray end of restore of data, rank = " << rank);
@@ -1919,7 +2258,7 @@ void MPIIOMigrationCoProcessor::readRelaxationFactor(int step)
    if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
 
    // read count of blocks
-   int blocksCount = 0;
+   size_t blocksCount = 0;
    dataSetParam dataSetParamStr;
    memset(&dataSetParamStr, 0, sizeof(dataSetParam));
 
@@ -1936,14 +2275,14 @@ void MPIIOMigrationCoProcessor::readRelaxationFactor(int step)
    MPI_File_read_at(file_handler, (MPI_Offset)0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
 
    DataSetSmallMigration* dataSetSmallArray = new DataSetSmallMigration[blocksCount];
-   int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+   size_t doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
    std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
 
    // define MPI_types depending on the block-specific information
    MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
    MPI_Type_commit(&dataSetDoubleType);
 
-   int ic = 0;
+   size_t ic = 0;
    MPI_Offset read_offset;
    size_t sizeofOneDataSet = size_t(sizeof(DataSetSmallMigration) + doubleCountInBlock * sizeof(double));
 
@@ -1959,6 +2298,7 @@ void MPIIOMigrationCoProcessor::readRelaxationFactor(int step)
    }
 
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -1988,8 +2328,6 @@ void MPIIOMigrationCoProcessor::readRelaxationFactor(int step)
       block->getKernel()->getDataSet()->setRelaxationFactor(mRelaxationFactor);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIOMigrationCoProcessor::readRelaxationFactor end of restore of data, rank = " << rank);
@@ -1998,6 +2336,7 @@ void MPIIOMigrationCoProcessor::readRelaxationFactor(int step)
 
    delete[] dataSetSmallArray;
 }
+*/
 
 void MPIIOMigrationCoProcessor::readBoundaryConds(int step)
 {
@@ -2142,8 +2481,8 @@ void MPIIOMigrationCoProcessor::readBoundaryConds(int step)
          ic++;
       }
    }
-   MPI_File_close(&file_handler);
 
+   MPI_File_close(&file_handler);
    MPI_Type_free(&bcindexmatrixType);
 
    delete nullBouCond;
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h
index 8644e37ef2e9ee0c632b6411005dc1a2102bac28..2db01731d221be5889c4e691a42cb21966efcc29 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.h
@@ -4,8 +4,8 @@
 #include <mpi.h>
 #include <string>
 
-#include "MPIIODataStructures.h"
 #include "MPIIOCoProcessor.h"
+#include "MPIIODataStructures.h"
 
 class Grid3D;
 class UbScheduler;
@@ -14,11 +14,13 @@ class BCProcessor;
 class LBMKernel;
 
 
-//! \class MPIIOMigrationCoProcessor 
+//! \class MPIIOMigrationCoProcessor
 //! \brief Writes the grid each timestep into the files and reads the grid from the files before regenerating
 class MPIIOMigrationCoProcessor : public MPIIOCoProcessor
 {
 public:
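+   //! Identifies the optional per-block array handled by write4DArray, write3DArray and readArray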
+   enum Arrays { AverageDensity = 1, AverageVelocity = 2, AverageFluktuations = 3, AverageTriple = 4, ShearStressVal = 5, RelaxationFactor = 6 };
+
    MPIIOMigrationCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string& path, SPtr<Communicator> comm);
    virtual ~MPIIOMigrationCoProcessor();
    //! Each timestep writes the grid into the files
@@ -29,12 +31,14 @@ public:
    void writeBlocks(int step);
    //! Writes the datasets of the blocks into the file cpDataSet.bin
    void writeDataSet(int step);
-   void writeAverageDensityArray(int step);
-   void writeAverageVelocityArray(int step);
-   void writeAverageFluktuationsArray(int step);
-   void writeAverageTripleArray(int step);
-   void writeShearStressValArray(int step);
-   void writeRelaxationFactor(int step);
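+   //! Writes one optional per-block array, selected by arrType, into the file fname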
+   void write4DArray(int step, Arrays arrType, std::string fname);
+   void write3DArray(int step, Arrays arrType, std::string fname);
+//   void writeAverageDensityArray(int step);
+//   void writeAverageVelocityArray(int step);
+//   void writeAverageFluktuationsArray(int step);
+//   void writeAverageTripleArray(int step);
+//   void writeShearStressValArray(int step);
+//   void writeRelaxationFactor(int step);
    //! Writes the boundary conditions of the blocks into the file cpBC.bin
    void writeBoundaryConds(int step);
 
@@ -42,32 +46,31 @@ public:
    void readBlocks(int step);
    //! Reads the datasets of the blocks from the file cpDataSet.bin
    void readDataSet(int step);
-   void readAverageDensityArray(int step);
-   void readAverageVelocityArray(int step);
-   void readAverageFluktuationsArray(int step);
-   void readAverageTripleArray(int step);
-   void readShearStressValArray(int step);
-   void readRelaxationFactor(int step);
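+   //! Reads one optional per-block array, selected by arrType, from the file fname and restores it into the kernels' data sets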
+   void readArray(int step, Arrays arrType, std::string fname);
+//   void readAverageDensityArray(int step);
+//   void readAverageVelocityArray(int step);
+//   void readAverageFluktuationsArray(int step);
+//   void readAverageTripleArray(int step);
+//   void readShearStressValArray(int step);
+//   void readRelaxationFactor(int step);
    //! Reads the boundary conditions of the blocks from the file cpBC.bin
    void readBoundaryConds(int step);
    //! The function sets LBMKernel
    void setLBMKernel(SPtr<LBMKernel> kernel);
    //!The function sets BCProcessor
    void setBCProcessor(SPtr<BCProcessor> bcProcessor);
-   //!The function write a time step of last check point
-   void writeCpTimeStep(int step);
-   //!The function read a time step of last check point
-   int readCpTimeStep();
+   //!The function truncates the data files
+   void clearAllFiles(int step);
+   //void setNu(double nu);
 
 protected:
    //std::string path;
    //SPtr<Communicator> comm;
 
 private:
-   //MPI_Datatype gridParamType, block3dType, 
-   MPI_Datatype arrayPresenceType;
-   MPI_Datatype dataSetParamType, dataSetType, dataSetSmallType, dataSetDoubleType;
-   MPI_Datatype boundCondParamType, boundCondType, boundCondTypeAdd, bcindexmatrixType;
+   //MPI_Datatype gridParamType, block3dType;
+   MPI_Datatype dataSetType, dataSetSmallType, dataSetDoubleType;
+   MPI_Datatype boundCondParamType, boundCondTypeAdd, bcindexmatrixType;
 
    MPIIODataStructures::boundCondParam boundCondParamStr;
    SPtr<LBMKernel> lbmKernel;
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
index e7fc67de60b0101982e7b0f7d0a62794190f4014..6e93228099f7add8a00e861cdba75adf8d3ef889 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
@@ -9,6 +9,7 @@
 #include "CoordinateTransformation3D.h"
 #include "DataSet3D.h"
 #include "Grid3D.h"
+#include "Grid3DSystem.h"
 #include "BCArray3D.h"
 #include "Communicator.h"
 #include "WbWriter.h"
@@ -25,53 +26,12 @@
 
 using namespace MPIIODataStructures;
 
-MPIIORestartCoProcessor::MPIIORestartCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s,
-   const std::string& path,
-   SPtr<Communicator> comm) :
-   CoProcessor(grid, s),
-   path(path),
-   comm(comm)
+MPIIORestartCoProcessor::MPIIORestartCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string& path, SPtr<Communicator> comm) : MPIIOCoProcessor(grid, s, path, comm)
 {
-   UbSystem::makeDirectory(path + "/mpi_io_cp");
-
    memset(&boundCondParamStr, 0, sizeof(boundCondParamStr));
 
    //-------------------------   define MPI types  ---------------------------------
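+   // gridParamType, block3dType, dataSetParamType, boundCondType and arrayPresenceType are now created by the MPIIOCoProcessor base class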
 
-   MPI_Datatype typesGP[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR };
-   int blocksGP[3] = { 34, 6, 5 };
-   MPI_Aint offsetsGP[3], lbGP, extentGP;
-
-   offsetsGP[0] = 0;
-   MPI_Type_get_extent(MPI_DOUBLE, &lbGP, &extentGP);
-   offsetsGP[1] = blocksGP[0] * extentGP;
-
-   MPI_Type_get_extent(MPI_INT, &lbGP, &extentGP);
-   offsetsGP[2] = offsetsGP[1] + blocksGP[1] * extentGP;
-
-   MPI_Type_create_struct(3, blocksGP, offsetsGP, typesGP, &gridParamType);
-   MPI_Type_commit(&gridParamType);
-
-   //-----------------------------------------------------------------------
-
-   MPI_Datatype typesBlock[2] = { MPI_INT, MPI_CHAR };
-   int blocksBlock[2] = { 13, 1 };
-   MPI_Aint offsetsBlock[2], lbBlock, extentBlock;
-
-   offsetsBlock[0] = 0;
-   MPI_Type_get_extent(MPI_INT, &lbBlock, &extentBlock);
-   offsetsBlock[1] = blocksBlock[0] * extentBlock;
-
-   MPI_Type_create_struct(2, blocksBlock, offsetsBlock, typesBlock, &block3dType);
-   MPI_Type_commit(&block3dType);
-
-   //-----------------------------------------------------------------------
-
-   MPI_Type_contiguous(7, MPI_INT, &dataSetParamType);
-   MPI_Type_commit(&dataSetParamType);
-
-   //-----------------------------------------------------------------------
-
    MPI_Datatype typesDataSet[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR };
    int blocksDataSet[3] = { 2, 5, 2 };
    MPI_Aint offsetsDatatSet[3], lbDataSet, extentDataSet;
@@ -96,22 +56,6 @@ MPIIORestartCoProcessor::MPIIORestartCoProcessor(SPtr<Grid3D> grid, SPtr<UbSched
    MPI_Type_contiguous(4, MPI_INT, &boundCondParamType);
    MPI_Type_commit(&boundCondParamType);
 
-   //-----------------------------------------------------------------------
-
-   MPI_Datatype typesBC[3] = { MPI_LONG_LONG_INT, MPI_FLOAT, MPI_CHAR };
-   int blocksBC[3] = { 5, 38, 1 };
-   MPI_Aint offsetsBC[3], lbBC, extentBC;
-
-   offsetsBC[0] = 0;
-   MPI_Type_get_extent(MPI_LONG_LONG_INT, &lbBC, &extentBC);
-   offsetsBC[1] = blocksBC[0] * extentBC;
-
-   MPI_Type_get_extent(MPI_FLOAT, &lbBC, &extentBC);
-   offsetsBC[2] = offsetsBC[1] + blocksBC[1] * extentBC;
-
-   MPI_Type_create_struct(3, blocksBC, offsetsBC, typesBC, &boundCondType);
-   MPI_Type_commit(&boundCondType);
-
    //---------------------------------------
 
    MPI_Type_contiguous(BLOCK_SIZE, boundCondType, &boundCondType1000);
@@ -122,25 +66,15 @@ MPIIORestartCoProcessor::MPIIORestartCoProcessor(SPtr<Grid3D> grid, SPtr<UbSched
    MPI_Type_contiguous(6, MPI_INT, &boundCondTypeAdd);
    MPI_Type_commit(&boundCondTypeAdd);
 
-   //---------------------------------------
-
-   MPI_Type_contiguous(6, MPI_CHAR, &arrayPresenceType);
-   MPI_Type_commit(&arrayPresenceType);
-
 }
 //////////////////////////////////////////////////////////////////////////
 MPIIORestartCoProcessor::~MPIIORestartCoProcessor()
 {
-   MPI_Type_free(&gridParamType);
-   MPI_Type_free(&block3dType);
-   MPI_Type_free(&dataSetParamType);
    MPI_Type_free(&dataSetType);
    MPI_Type_free(&dataSetSmallType);
    MPI_Type_free(&boundCondParamType);
-   MPI_Type_free(&boundCondType);
    MPI_Type_free(&boundCondType1000);
    MPI_Type_free(&boundCondTypeAdd);
-   MPI_Type_free(&arrayPresenceType);
 }
 
 //////////////////////////////////////////////////////////////////////////
@@ -170,65 +104,7 @@ void MPIIORestartCoProcessor::clearAllFiles(int step)
 
    UbSystem::makeDirectory(path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step));
 
-   std::string filename1 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBlocks.bin";
-   int rc1 = MPI_File_open(MPI_COMM_WORLD, filename1.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handler);
-   if (rc1 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename1);
-   MPI_File_set_size(file_handler, new_size);
-   MPI_File_close(&file_handler);
-
-   std::string filename2 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
-   int rc2 = MPI_File_open(MPI_COMM_WORLD, filename2.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc2 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename2);
-   MPI_File_set_size(file_handler, new_size);
-   MPI_File_close(&file_handler);
-
-   std::string filename3 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpArrays.bin";
-   int rc3 = MPI_File_open(MPI_COMM_WORLD, filename3.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   if (rc3 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename3);
-   MPI_File_set_size(file_handler, new_size);
-   MPI_File_close(&file_handler);
-
-   std::string filename4 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageDensityArray.bin";
-   MPI_File_delete(filename4.c_str(), info);
-   //int rc4 = MPI_File_open(MPI_COMM_WORLD, filename4.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   //if (rc4 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename4);
-   //MPI_File_set_size(file_handler, new_size);
-   //MPI_File_close(&file_handler);
-
-   std::string filename5 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageVelocityArray.bin";
-   MPI_File_delete(filename5.c_str(), info);
-   //int rc5 = MPI_File_open(MPI_COMM_WORLD, filename5.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   //if (rc5 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename5);
-   //MPI_File_set_size(file_handler, new_size);
-   //MPI_File_close(&file_handler);
-
-   std::string filename6 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageFluktuationsArray.bin";
-   MPI_File_delete(filename6.c_str(), info);
-   //int rc6 = MPI_File_open(MPI_COMM_WORLD, filename6.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   //if (rc6 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename6);
-   //MPI_File_set_size(file_handler, new_size);
-   //MPI_File_close(&file_handler);
-
-   std::string filename7 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpAverageTripleArray.bin";
-   MPI_File_delete(filename7.c_str(), info);
-   //int rc7 = MPI_File_open(MPI_COMM_WORLD, filename7.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   //if (rc7 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename7);
-   //MPI_File_set_size(file_handler, new_size);
-   //MPI_File_close(&file_handler);
-
-   std::string filename8 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpShearStressValArray.bin";
-   MPI_File_delete(filename8.c_str(), info);
-   //int rc8 = MPI_File_open(MPI_COMM_WORLD, filename8.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   //if (rc8 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename8);
-   //MPI_File_set_size(file_handler, new_size);
-   //MPI_File_close(&file_handler);
-
-   std::string filename9 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpRelaxationFactor.bin";
-   MPI_File_delete(filename9.c_str(), info);
-   //int rc9 = MPI_File_open(MPI_COMM_WORLD, filename9.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
-   //if (rc9 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename9);
-   //MPI_File_set_size(file_handler, new_size);
-   //MPI_File_close(&file_handler);
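+   // the shared checkpoint files (blocks, data sets, optional arrays) are truncated or deleted by the base class; only cpBC.bin is handled below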
+   MPIIOCoProcessor::clearAllFiles(step);
 
    std::string filename10 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC.bin";
    int rc10 = MPI_File_open(MPI_COMM_WORLD, filename10.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
@@ -237,174 +113,9 @@ void MPIIORestartCoProcessor::clearAllFiles(int step)
    MPI_File_close(&file_handler);
 }
 //////////////////////////////////////////////////////////////////////////
-void MPIIORestartCoProcessor::writeCpTimeStep(int step)
-{
-   if (comm->isRoot())
-   {
-      UbFileOutputASCII f(path + "/mpi_io_cp/cp.txt");
-      f.writeInteger(step);
-   }
-}
-//////////////////////////////////////////////////////////////////////////
-int MPIIORestartCoProcessor::readCpTimeStep()
-{
-   UbFileInputASCII f(path + "/mpi_io_cp/cp.txt");
-   int step = f.readInteger();
-   return step;
-}
-//////////////////////////////////////////////////////////////////////////
 void MPIIORestartCoProcessor::writeBlocks(int step)
 {
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   //MPI_Comm_size(MPI_COMM_WORLD, &size);
-   size = 1;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIORestartCoProcessor::writeBlocks start collect data rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
-   int minInitLevel = this->grid->getCoarsestInitializedLevel();
-   int maxInitLevel = this->grid->getFinestInitializedLevel();
-
-   std::vector<SPtr<Block3D>> blocksVector[25]; // max 25 levels
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      //grid->getBlocks(level, rank, blockVector[level]);
-      grid->getBlocks(level, blocksVector[level]);
-      blocksCount += static_cast<int>(blocksVector[level].size());
-   }
-
-   GridParam* gridParameters = new GridParam;
-   gridParameters->trafoParams[0] = grid->getCoordinateTransformator()->Tx1;
-   gridParameters->trafoParams[1] = grid->getCoordinateTransformator()->Tx2;
-   gridParameters->trafoParams[2] = grid->getCoordinateTransformator()->Tx3;
-   gridParameters->trafoParams[3] = grid->getCoordinateTransformator()->Sx1;
-   gridParameters->trafoParams[4] = grid->getCoordinateTransformator()->Sx2;
-   gridParameters->trafoParams[5] = grid->getCoordinateTransformator()->Sx3;
-   gridParameters->trafoParams[6] = grid->getCoordinateTransformator()->alpha;
-   gridParameters->trafoParams[7] = grid->getCoordinateTransformator()->beta;
-   gridParameters->trafoParams[8] = grid->getCoordinateTransformator()->gamma;
-
-   gridParameters->trafoParams[9] = grid->getCoordinateTransformator()->toX1factorX1;
-   gridParameters->trafoParams[10] = grid->getCoordinateTransformator()->toX1factorX2;
-   gridParameters->trafoParams[11] = grid->getCoordinateTransformator()->toX1factorX3;
-   gridParameters->trafoParams[12] = grid->getCoordinateTransformator()->toX1delta;
-   gridParameters->trafoParams[13] = grid->getCoordinateTransformator()->toX2factorX1;
-   gridParameters->trafoParams[14] = grid->getCoordinateTransformator()->toX2factorX2;
-   gridParameters->trafoParams[15] = grid->getCoordinateTransformator()->toX2factorX3;
-   gridParameters->trafoParams[16] = grid->getCoordinateTransformator()->toX2delta;
-   gridParameters->trafoParams[17] = grid->getCoordinateTransformator()->toX3factorX1;
-   gridParameters->trafoParams[18] = grid->getCoordinateTransformator()->toX3factorX2;
-   gridParameters->trafoParams[19] = grid->getCoordinateTransformator()->toX3factorX3;
-   gridParameters->trafoParams[20] = grid->getCoordinateTransformator()->toX3delta;
-
-   gridParameters->trafoParams[21] = grid->getCoordinateTransformator()->fromX1factorX1;
-   gridParameters->trafoParams[22] = grid->getCoordinateTransformator()->fromX1factorX2;
-   gridParameters->trafoParams[23] = grid->getCoordinateTransformator()->fromX1factorX3;
-   gridParameters->trafoParams[24] = grid->getCoordinateTransformator()->fromX1delta;
-   gridParameters->trafoParams[25] = grid->getCoordinateTransformator()->fromX2factorX1;
-   gridParameters->trafoParams[26] = grid->getCoordinateTransformator()->fromX2factorX2;
-   gridParameters->trafoParams[27] = grid->getCoordinateTransformator()->fromX2factorX3;
-   gridParameters->trafoParams[28] = grid->getCoordinateTransformator()->fromX2delta;
-   gridParameters->trafoParams[29] = grid->getCoordinateTransformator()->fromX3factorX1;
-   gridParameters->trafoParams[30] = grid->getCoordinateTransformator()->fromX3factorX2;
-   gridParameters->trafoParams[31] = grid->getCoordinateTransformator()->fromX3factorX3;
-   gridParameters->trafoParams[32] = grid->getCoordinateTransformator()->fromX3delta;
-
-   gridParameters->active = grid->getCoordinateTransformator()->active;
-   gridParameters->transformation = grid->getCoordinateTransformator()->transformation;
-
-   gridParameters->deltaX = grid->getDeltaX(minInitLevel);
-   UbTupleInt3 blocknx = grid->getBlockNX();
-   gridParameters->blockNx1 = val<1>(blocknx);
-   gridParameters->blockNx2 = val<2>(blocknx);
-   gridParameters->blockNx3 = val<3>(blocknx);
-   gridParameters->nx1 = grid->getNX1();
-   gridParameters->nx2 = grid->getNX2();
-   gridParameters->nx3 = grid->getNX3();
-   gridParameters->periodicX1 = grid->isPeriodicX1();
-   gridParameters->periodicX2 = grid->isPeriodicX2();
-   gridParameters->periodicX3 = grid->isPeriodicX3();
-
-   //----------------------------------------------------------------------
-
-   Block3d* block3dArray = new Block3d[blocksCount];
-   int ic = 0;
-   for (int level = minInitLevel; level <= maxInitLevel; level++)
-   {
-      for (SPtr<Block3D> block : blocksVector[level])  //	all the blocks of the current level
-      {
-         // save data describing the block
-         block3dArray[ic].x1 = block->getX1();
-         block3dArray[ic].x2 = block->getX2();
-         block3dArray[ic].x3 = block->getX3();
-         block3dArray[ic].bundle = block->getBundle();
-         block3dArray[ic].rank = block->getRank();
-         block3dArray[ic].lrank = block->getLocalRank();
-         block3dArray[ic].part = block->getPart();
-         block3dArray[ic].globalID = block->getGlobalID();
-         block3dArray[ic].localID = block->getLocalID();
-         block3dArray[ic].level = block->getLevel();
-         block3dArray[ic].interpolationFlagCF = block->getCollectionOfInterpolationFlagCF();
-         block3dArray[ic].interpolationFlagFC = block->getCollectionOfInterpolationFlagFC();
-         block3dArray[ic].counter = block->getMaxGlobalID();
-         block3dArray[ic].active = block->isActive();
-
-         ic++;
-      }
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIORestartCoProcessor::writeBlocks start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   MPI_File file_handler;
-   MPI_Info info = MPI_INFO_NULL;
-   //MPI_Info_create (&info);
-   //MPI_Info_set(info,"romio_cb_write","enable");
-   //MPI_Info_set(info,"cb_buffer_size","4194304");
-   //MPI_Info_set(info,"striping_unit","4194304");
-
-   // if (comm->isRoot())
-   // {
-   UbSystem::makeDirectory(path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step));
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBlocks.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-   // }
-
-   double start, finish;
-   MPI_Offset write_offset = (MPI_Offset)(size * sizeof(int));
-
-   if (comm->isRoot())
-   {
-      start = MPI_Wtime();
-
-      // each process writes the quantity of it's blocks
-      MPI_File_write_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
-      // each process writes parameters of the grid
-      MPI_File_write_at(file_handler, write_offset, gridParameters, 1, gridParamType, MPI_STATUS_IGNORE);
-      // each process writes it's blocks
-      MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(GridParam)), &block3dArray[0], blocksCount, block3dType, MPI_STATUS_IGNORE);
-   }
-
-   MPI_File_sync(file_handler);
-   MPI_File_close(&file_handler);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIORestartCoProcessor::writeBlocks time: " << finish - start << " s");
-   }
-
-   delete[] block3dArray;
-   delete gridParameters;
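+   // collecting the grid parameters and block metadata is delegated to the common base-class implementation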
+   MPIIOCoProcessor::writeBlocks(step);
 }
 
 void MPIIORestartCoProcessor::writeDataSet(int step)
@@ -438,6 +149,11 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
    bool firstBlock = true;
    int doubleCountInBlock = 0;
    int ic = 0;
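+   // the distribution pointers are declared once here and reused for every block below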
+   SPtr< D3Q27EsoTwist3DSplittedVector > D3Q27EsoTwist3DSplittedVectorPtr;
+   CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributions;
+   CbArray4D <LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributions;
+   CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributions;
+
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
       for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
@@ -452,10 +168,10 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
          dataSetArray[ic].compressible = block->getKernel()->getCompressible();
          dataSetArray[ic].withForcing = block->getKernel()->getWithForcing();
 
-         SPtr< D3Q27EsoTwist3DSplittedVector > D3Q27EsoTwist3DSplittedVectorPtr = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getFdistributions());
-         CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getLocalDistributions();
-         CbArray4D <LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getNonLocalDistributions();
-         CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr zeroDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getZeroDistributions();
+         D3Q27EsoTwist3DSplittedVectorPtr = dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(block->getKernel()->getDataSet()->getFdistributions());
+         localDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getLocalDistributions();
+         nonLocalDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getNonLocalDistributions();
+         zeroDistributions = D3Q27EsoTwist3DSplittedVectorPtr->getZeroDistributions();
 
          if (firstBlock) // when first (any) valid block...
          {
@@ -671,6 +387,8 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step)
    bool firstBlock = true;
    int doubleCountInBlock = 0;
    int ic = 0;
+   SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > averageDensityArray;
+
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
       for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
@@ -680,26 +398,21 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step)
          dataSetSmallArray[ic].x3 = block->getX3();
          dataSetSmallArray[ic].level = block->getLevel();
 
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > averageDensityArray = block->getKernel()->getDataSet()->getAverageDensity();
+         averageDensityArray = block->getKernel()->getDataSet()->getAverageDensity();
 
          if (firstBlock) // when first (any) valid block...
          {
-            //if (averageDensityArray)
-            //{
             dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
             dataSetParamStr.nx[0] = static_cast<int>(averageDensityArray->getNX1());
             dataSetParamStr.nx[1] = static_cast<int>(averageDensityArray->getNX2());
             dataSetParamStr.nx[2] = static_cast<int>(averageDensityArray->getNX3());
             dataSetParamStr.nx[3] = static_cast<int>(averageDensityArray->getNX4());
             doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-            //}
-            //else
-            //   break;
 
             firstBlock = false;
          }
 
-         if (averageDensityArray && (dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0))
+         if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0))
             doubleValuesArray.insert(doubleValuesArray.end(), averageDensityArray->getDataVector().begin(), averageDensityArray->getDataVector().end());
 
          ic++;
@@ -766,7 +479,6 @@ void MPIIORestartCoProcessor::writeAverageDensityArray(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
@@ -808,6 +520,8 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step)
    bool firstBlock = true;
    int doubleCountInBlock = 0;
    int ic = 0;
+   SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > AverageVelocityArray3DPtr;
+
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
       for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
@@ -817,26 +531,21 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step)
          dataSetSmallArray[ic].x3 = block->getX3();
          dataSetSmallArray[ic].level = block->getLevel();
 
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > AverageVelocityArray3DPtr = block->getKernel()->getDataSet()->getAverageVelocity();
+         AverageVelocityArray3DPtr = block->getKernel()->getDataSet()->getAverageVelocity();
 
          if (firstBlock) // when first (any) valid block...
          {
-            //if (AverageVelocityArray3DPtr)
-            //{
             dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
             dataSetParamStr.nx[0] = static_cast<int>(AverageVelocityArray3DPtr->getNX1());
             dataSetParamStr.nx[1] = static_cast<int>(AverageVelocityArray3DPtr->getNX2());
             dataSetParamStr.nx[2] = static_cast<int>(AverageVelocityArray3DPtr->getNX3());
             dataSetParamStr.nx[3] = static_cast<int>(AverageVelocityArray3DPtr->getNX4());
             doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-            //}
-            //else
-            //   break;
 
             firstBlock = false;
          }
 
-         if (AverageVelocityArray3DPtr && (dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0))
+         if ((dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0))
             doubleValuesArray.insert(doubleValuesArray.end(), AverageVelocityArray3DPtr->getDataVector().begin(), AverageVelocityArray3DPtr->getDataVector().end());
 
          ic++;
@@ -905,7 +614,6 @@ void MPIIORestartCoProcessor::writeAverageVelocityArray(int step)
    MPI_File_close(&file_handler);
 
    MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       finish = MPI_Wtime();
@@ -945,6 +653,8 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step)
    bool firstBlock = true;
    int doubleCountInBlock = 0;
    int ic = 0;
+   SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > AverageFluctArray3DPtr;
+
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
       for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
@@ -954,26 +664,21 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step)
          dataSetSmallArray[ic].x3 = block->getX3();
          dataSetSmallArray[ic].level = block->getLevel();
 
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > AverageFluctArray3DPtr = block->getKernel()->getDataSet()->getAverageFluctuations();
+         AverageFluctArray3DPtr = block->getKernel()->getDataSet()->getAverageFluctuations();
 
          if (firstBlock) // when first (any) valid block...
          {
-            //if (AverageFluctArray3DPtr)
-            //{
             dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
             dataSetParamStr.nx[0] = static_cast<int>(AverageFluctArray3DPtr->getNX1());
             dataSetParamStr.nx[1] = static_cast<int>(AverageFluctArray3DPtr->getNX2());
             dataSetParamStr.nx[2] = static_cast<int>(AverageFluctArray3DPtr->getNX3());
             dataSetParamStr.nx[3] = static_cast<int>(AverageFluctArray3DPtr->getNX4());
             doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-            //}
-            //else
-            //   break;
 
             firstBlock = false;
          }
 
-         if (AverageFluctArray3DPtr && (dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0))
+         if ((dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0))
             doubleValuesArray.insert(doubleValuesArray.end(), AverageFluctArray3DPtr->getDataVector().begin(), AverageFluctArray3DPtr->getDataVector().end());
 
          ic++;
@@ -1040,7 +745,6 @@ void MPIIORestartCoProcessor::writeAverageFluktuationsArray(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
@@ -1082,6 +786,8 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step)
    bool firstBlock = true;
    int doubleCountInBlock = 0;
    int ic = 0;
+   SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > AverageTripleArray3DPtr;
+
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
       for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
@@ -1091,26 +797,21 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step)
          dataSetSmallArray[ic].x3 = block->getX3();
          dataSetSmallArray[ic].level = block->getLevel();
 
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > AverageTripleArray3DPtr = block->getKernel()->getDataSet()->getAverageTriplecorrelations();
+         AverageTripleArray3DPtr = block->getKernel()->getDataSet()->getAverageTriplecorrelations();
 
          if (firstBlock) // when first (any) valid block...
          {
-            //if (AverageTripleArray3DPtr)
-            //{
             dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
             dataSetParamStr.nx[0] = static_cast<int>(AverageTripleArray3DPtr->getNX1());
             dataSetParamStr.nx[1] = static_cast<int>(AverageTripleArray3DPtr->getNX2());
             dataSetParamStr.nx[2] = static_cast<int>(AverageTripleArray3DPtr->getNX3());
             dataSetParamStr.nx[3] = static_cast<int>(AverageTripleArray3DPtr->getNX4());
             doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-            //}
-            //else
-            //   break;
 
             firstBlock = false;
          }
 
-         if (AverageTripleArray3DPtr && (dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0))
+         if ((dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0))
             doubleValuesArray.insert(doubleValuesArray.end(), AverageTripleArray3DPtr->getDataVector().begin(), AverageTripleArray3DPtr->getDataVector().end());
 
          ic++;
@@ -1177,7 +878,6 @@ void MPIIORestartCoProcessor::writeAverageTripleArray(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
@@ -1219,6 +919,8 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step)
    bool firstBlock = true;
    int doubleCountInBlock = 0;
    int ic = 0;
+   SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > ShearStressValArray3DPtr;
+
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
       for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
@@ -1228,26 +930,21 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step)
          dataSetSmallArray[ic].x3 = block->getX3();
          dataSetSmallArray[ic].level = block->getLevel();
 
-         SPtr< CbArray4D<LBMReal, IndexerX4X3X2X1> > ShearStressValArray3DPtr = block->getKernel()->getDataSet()->getShearStressValues();
+         ShearStressValArray3DPtr = block->getKernel()->getDataSet()->getShearStressValues();
 
          if (firstBlock) // when first (any) valid block...
          {
-            //if (ShearStressValArray3DPtr)
-            //{
             dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
             dataSetParamStr.nx[0] = static_cast<int>(ShearStressValArray3DPtr->getNX1());
             dataSetParamStr.nx[1] = static_cast<int>(ShearStressValArray3DPtr->getNX2());
             dataSetParamStr.nx[2] = static_cast<int>(ShearStressValArray3DPtr->getNX3());
             dataSetParamStr.nx[3] = static_cast<int>(ShearStressValArray3DPtr->getNX4());
             doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-            //}
-            //else
-            //   break;
 
             firstBlock = false;
          }
 
-         if (ShearStressValArray3DPtr && (dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0))
+         if ((dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0) && (dataSetParamStr.nx[3]>0))
             doubleValuesArray.insert(doubleValuesArray.end(), ShearStressValArray3DPtr->getDataVector().begin(), ShearStressValArray3DPtr->getDataVector().end());
 
          ic++;
@@ -1314,7 +1011,6 @@ void MPIIORestartCoProcessor::writeShearStressValArray(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
@@ -1356,6 +1052,8 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
    bool firstBlock = true;
    int doubleCountInBlock = 0;
    int ic = 0;
+   SPtr< CbArray3D<LBMReal, IndexerX3X2X1> > RelaxationFactor3DPtr;
+
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
       for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
@@ -1365,26 +1063,21 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
          dataSetSmallArray[ic].x3 = block->getX3();
          dataSetSmallArray[ic].level = block->getLevel();
 
-         SPtr< CbArray3D<LBMReal, IndexerX3X2X1> > RelaxationFactor3DPtr = block->getKernel()->getDataSet()->getRelaxationFactor();
+         RelaxationFactor3DPtr = block->getKernel()->getDataSet()->getRelaxationFactor();
 
          if (firstBlock) // when first (any) valid block...
          {
-            //if (relaxationFactor3DPtr)
-            //{
             dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
             dataSetParamStr.nx[0] = static_cast<int>(RelaxationFactor3DPtr->getNX1());
             dataSetParamStr.nx[1] = static_cast<int>(RelaxationFactor3DPtr->getNX2());
             dataSetParamStr.nx[2] = static_cast<int>(RelaxationFactor3DPtr->getNX3());
             dataSetParamStr.nx[3] = 1;
             doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-            //}
-            //else
-            //   break;
 
             firstBlock = false;
          }
 
-         if (RelaxationFactor3DPtr && (dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0))
+         if ((dataSetParamStr.nx[0]>0) && (dataSetParamStr.nx[1]>0) && (dataSetParamStr.nx[2]>0))
             doubleValuesArray.insert(doubleValuesArray.end(), RelaxationFactor3DPtr->getDataVector().begin(), RelaxationFactor3DPtr->getDataVector().end());
 
          ic++;
@@ -1451,7 +1144,6 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
@@ -1494,13 +1186,14 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
    std::vector<int> bcindexmatrixV;
    std::vector<int> indexContainerV;
    bool bcindexmatrixCountNotInit = true;
-
    int ic = 0;
+   SPtr<BCArray3D> bcArr;
+
    for (int level = minInitLevel; level <= maxInitLevel; level++)
    {
       for (SPtr<Block3D> block : blocksVector[level])  // all the blocks of the current level
       {
-         SPtr<BCArray3D> bcArr = block->getKernel()->getBCProcessor()->getBCArray();
+         bcArr = block->getKernel()->getBCProcessor()->getBCArray();
 
          bcAddArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid
          bcAddArray[ic].x2 = block->getX2();
@@ -1565,7 +1258,6 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
       }
    }
 
-
    MPI_Type_contiguous(boundCondParamStr.bcindexmatrixCount, MPI_INT, &bcindexmatrixType);
    MPI_Type_commit(&bcindexmatrixType);
 
@@ -1651,7 +1343,6 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
 
    MPI_File_sync(file_handler);
    MPI_File_close(&file_handler);
-
    MPI_Type_free(&bcindexmatrixType);
 
    if (comm->isRoot())
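The bcindexmatrix block is written through a derived MPI datatype whose length is only known at runtime, so the type is created and committed just before the write and freed as soon as the file has been closed. A hedged, self-contained sketch of that lifecycle; the file name, count and offset arguments are placeholders, not the checkpoint layout used above:

#include <mpi.h>
#include <vector>

// Write one contiguous int block per grid block and release the derived datatype
// immediately after the I/O that needs it.
void writeIndexMatrix(const std::vector<int>& indexMatrix, int countPerBlock, MPI_Offset offset)
{
    MPI_Datatype matrixType;
    MPI_Type_contiguous(countPerBlock, MPI_INT, &matrixType);
    MPI_Type_commit(&matrixType);

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, "indexMatrix.bin", MPI_MODE_CREATE | MPI_MODE_WRONLY,
                  MPI_INFO_NULL, &fh);
    MPI_File_write_at(fh, offset, indexMatrix.data(),
                      static_cast<int>(indexMatrix.size()) / countPerBlock,
                      matrixType, MPI_STATUS_IGNORE);
    MPI_File_sync(fh);
    MPI_File_close(&fh);

    MPI_Type_free(&matrixType);
}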
@@ -1680,138 +1371,7 @@ void MPIIORestartCoProcessor::restart(int step)
 
 void MPIIORestartCoProcessor::readBlocks(int step)
 {
-   int rank, size;
-   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-   //MPI_Comm_size(MPI_COMM_WORLD, &size);
-   size = 1;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIORestartCoProcessor::readBlocks start MPI IO rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   double start, finish;
-   if (comm->isRoot()) start = MPI_Wtime();
-
-   MPI_File file_handler;
-   std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBlocks.bin";
-   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
-   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename);
-
-   // read count of blocks
-   int blocksCount = 0;
-   //MPI_File_read_at(file_handler, rank*sizeof(int), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
-   MPI_File_read_at(file_handler, 0, &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
-   Block3d* block3dArray = new Block3d[blocksCount];
-
-   // calculate the read offset
-   MPI_Offset read_offset = (MPI_Offset)(size * sizeof(int));
-
-   GridParam* gridParameters = new GridParam;
-
-   // read parameters of the grid
-   MPI_File_read_at(file_handler, read_offset, gridParameters, 1, gridParamType, MPI_STATUS_IGNORE);
-   // read all the blocks
-   if (comm->isRoot())
-      MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(GridParam)), &block3dArray[0], blocksCount, block3dType, MPI_STATUS_IGNORE);
-
-   MPI_Bcast(block3dArray, blocksCount, block3dType, comm->getRoot(), MPI_COMM_WORLD);
-
-   MPI_File_close(&file_handler);
-
-   if (comm->isRoot())
-   {
-      finish = MPI_Wtime();
-      UBLOG(logINFO, "MPIIORestartCoProcessor::readBlocks time: " << finish - start << " s");
-   }
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIORestartCoProcessor::readBlocks start of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
-
-   // clear the grid
-   grid->deleteBlocks();
-
-   // restore the grid
-   SPtr<CoordinateTransformation3D> trafo(new CoordinateTransformation3D());
-   trafo->Tx1 = gridParameters->trafoParams[0];
-   trafo->Tx2 = gridParameters->trafoParams[1];
-   trafo->Tx3 = gridParameters->trafoParams[2];
-   trafo->Sx1 = gridParameters->trafoParams[3];
-   trafo->Sx2 = gridParameters->trafoParams[4];
-   trafo->Sx3 = gridParameters->trafoParams[5];
-   trafo->alpha = gridParameters->trafoParams[6];
-   trafo->beta = gridParameters->trafoParams[7];
-   trafo->gamma = gridParameters->trafoParams[8];
-
-   trafo->toX1factorX1 = gridParameters->trafoParams[9];
-   trafo->toX1factorX2 = gridParameters->trafoParams[10];
-   trafo->toX1factorX3 = gridParameters->trafoParams[11];
-   trafo->toX1delta = gridParameters->trafoParams[12];
-   trafo->toX2factorX1 = gridParameters->trafoParams[13];
-   trafo->toX2factorX2 = gridParameters->trafoParams[14];
-   trafo->toX2factorX3 = gridParameters->trafoParams[15];
-   trafo->toX2delta = gridParameters->trafoParams[16];
-   trafo->toX3factorX1 = gridParameters->trafoParams[17];
-   trafo->toX3factorX2 = gridParameters->trafoParams[18];
-   trafo->toX3factorX3 = gridParameters->trafoParams[19];
-   trafo->toX3delta = gridParameters->trafoParams[20];
-
-   trafo->fromX1factorX1 = gridParameters->trafoParams[21];
-   trafo->fromX1factorX2 = gridParameters->trafoParams[22];
-   trafo->fromX1factorX3 = gridParameters->trafoParams[23];
-   trafo->fromX1delta = gridParameters->trafoParams[24];
-   trafo->fromX2factorX1 = gridParameters->trafoParams[25];
-   trafo->fromX2factorX2 = gridParameters->trafoParams[26];
-   trafo->fromX2factorX3 = gridParameters->trafoParams[27];
-   trafo->fromX2delta = gridParameters->trafoParams[28];
-   trafo->fromX3factorX1 = gridParameters->trafoParams[29];
-   trafo->fromX3factorX2 = gridParameters->trafoParams[30];
-   trafo->fromX3factorX3 = gridParameters->trafoParams[31];
-   trafo->fromX3delta = gridParameters->trafoParams[32];
-   trafo->active = gridParameters->active;
-   trafo->transformation = gridParameters->transformation;
-
-   grid->setCoordinateTransformator(trafo);
-
-   grid->setDeltaX(gridParameters->deltaX);
-   grid->setBlockNX(gridParameters->blockNx1, gridParameters->blockNx2, gridParameters->blockNx3);
-   grid->setNX1(gridParameters->nx1);
-   grid->setNX2(gridParameters->nx2);
-   grid->setNX3(gridParameters->nx3);
-   grid->setPeriodicX1(gridParameters->periodicX1);
-   grid->setPeriodicX2(gridParameters->periodicX2);
-   grid->setPeriodicX3(gridParameters->periodicX3);
-
-   // regenerate blocks
-   for (int n = 0; n<blocksCount; n++)
-   {
-      SPtr<Block3D> block(new Block3D(block3dArray[n].x1, block3dArray[n].x2, block3dArray[n].x3, block3dArray[n].level));
-      block->setActive(block3dArray[n].active);
-      block->setBundle(block3dArray[n].bundle);
-      block->setRank(block3dArray[n].rank);
-      block->setLocalRank(block3dArray[n].lrank);
-      block->setGlobalID(block3dArray[n].globalID);
-      block->setLocalID(block3dArray[n].localID);
-      block->setPart(block3dArray[n].part);
-      block->setLevel(block3dArray[n].level);
-      block->setCollectionOfInterpolationFlagCF(block3dArray[n].interpolationFlagCF);
-      block->setCollectionOfInterpolationFlagFC(block3dArray[n].interpolationFlagFC);
-
-      grid->addBlock(block);
-   }
-
-   delete gridParameters;
-   delete[] block3dArray;
-
-   if (comm->isRoot())
-   {
-      UBLOG(logINFO, "MPIIORestartCoProcessor::readBlocks end of restore of data, rank = " << rank);
-      UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
-   }
+   MPIIOCoProcessor::readBlocks(step);
 }
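readBlocks now forwards to the implementation shared through the base class, so the grid and block restore code exists in a single place. A small illustrative sketch of that delegation pattern, with hypothetical class names rather than the actual co-processor interfaces:

#include <iostream>

struct BaseCheckpointer
{
    void readBlocks(int step) { std::cout << "shared block restore for step " << step << "\n"; }
};

struct RestartCheckpointer : BaseCheckpointer
{
    void readBlocks(int step)
    {
        BaseCheckpointer::readBlocks(step);   // explicit qualification calls the shared base code
    }
};

int main()
{
    RestartCheckpointer cp;
    cp.readBlocks(42);
    return 0;
}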
 
 void MPIIORestartCoProcessor::readDataSet(int step)
@@ -1875,6 +1435,7 @@ void MPIIORestartCoProcessor::readDataSet(int step)
    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam)), dataSetArray, blocksCount, dataSetType, MPI_STATUS_IGNORE);
    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam) + blocksCount * sizeof(DataSetRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -1926,8 +1487,6 @@ void MPIIORestartCoProcessor::readDataSet(int step)
       block->setKernel(kernel);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIORestartCoProcessor::readDataSet end of restore of data, rank = " << rank);
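The same ordering change recurs in every read routine: dataSetDoubleType is only needed while MPI_File_read_at is executing, so it is now freed directly after MPI_File_close instead of at the end of the restore. A reduced sketch of that read-side lifecycle; the file name, counts and offset are placeholders:

#include <mpi.h>
#include <cstddef>
#include <vector>

// Read blocksCount fixed-size blocks of doubles starting at the given offset and
// free the derived datatype as soon as the data is in memory.
std::vector<double> readBlockValues(int blocksCount, int doublesPerBlock, MPI_Offset offset)
{
    MPI_Datatype blockType;
    MPI_Type_contiguous(doublesPerBlock, MPI_DOUBLE, &blockType);
    MPI_Type_commit(&blockType);

    std::vector<double> values(static_cast<std::size_t>(blocksCount) * doublesPerBlock);

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, "dataSet.bin", MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
    MPI_File_read_at(fh, offset, values.data(), blocksCount, blockType, MPI_STATUS_IGNORE);
    MPI_File_close(&fh);
    MPI_Type_free(&blockType);

    return values;
}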
@@ -2025,6 +1584,7 @@ void MPIIORestartCoProcessor::readAverageDensityArray(int step)
    if (doubleCountInBlock > 0)
       MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -2044,9 +1604,6 @@ void MPIIORestartCoProcessor::readAverageDensityArray(int step)
 
       // fill mAverageDensity arrays
       SPtr<AverageValuesArray3D> mAverageDensity;
-      //if ((dataSetParamStr.nx[0]==0)&&(dataSetParamStr.nx[1]==0)&&(dataSetParamStr.nx[2]==0)&&(dataSetParamStr.nx[3]==0))
-      //   mAverageDensity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr();
-      //else
       mAverageDensity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
 
       // find the necessary block and fill it
@@ -2054,8 +1611,6 @@ void MPIIORestartCoProcessor::readAverageDensityArray(int step)
       block->getKernel()->getDataSet()->setAverageDensity(mAverageDensity);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageDensityArray end of restore of data, rank = " << rank);
@@ -2122,6 +1677,7 @@ void MPIIORestartCoProcessor::readAverageVelocityArray(int step)
    if (doubleCountInBlock > 0)
       MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -2141,9 +1697,6 @@ void MPIIORestartCoProcessor::readAverageVelocityArray(int step)
 
       // fill mAverageVelocity array
       SPtr<AverageValuesArray3D> mAverageVelocity;
-      //if ((dataSetParamStr.nx[0] == 0) && (dataSetParamStr.nx[1] == 0) && (dataSetParamStr.nx[2] == 0) && (dataSetParamStr.nx[3] == 0))
-      //   mAverageVelocity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr();
-      //else
       mAverageVelocity = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
 
       // find the necessary block and fill it
@@ -2151,8 +1704,6 @@ void MPIIORestartCoProcessor::readAverageVelocityArray(int step)
       block->getKernel()->getDataSet()->setAverageVelocity(mAverageVelocity);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageVelocityArray end of restore of data, rank = " << rank);
@@ -2219,6 +1770,7 @@ void MPIIORestartCoProcessor::readAverageFluktuationsArray(int step)
    if (doubleCountInBlock > 0)
       MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -2238,9 +1790,6 @@ void MPIIORestartCoProcessor::readAverageFluktuationsArray(int step)
 
       // fill AverageFluktuations array
       SPtr<AverageValuesArray3D> mAverageFluktuations;
-      //if ((dataSetParamStr.nx[0] == 0) && (dataSetParamStr.nx[1] == 0) && (dataSetParamStr.nx[2] == 0) && (dataSetParamStr.nx[3] == 0))
-      //   mAverageFluktuations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr();
-      //else
       mAverageFluktuations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
 
       // find the necessary block and fill it
@@ -2248,8 +1797,6 @@ void MPIIORestartCoProcessor::readAverageFluktuationsArray(int step)
       block->getKernel()->getDataSet()->setAverageFluctuations(mAverageFluktuations);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageFluktuationsArray end of restore of data, rank = " << rank);
@@ -2316,6 +1863,7 @@ void MPIIORestartCoProcessor::readAverageTripleArray(int step)
    if (doubleCountInBlock > 0)
       MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -2335,9 +1883,6 @@ void MPIIORestartCoProcessor::readAverageTripleArray(int step)
 
       // fill AverageTriplecorrelations array
       SPtr<AverageValuesArray3D> mAverageTriplecorrelations;
-      //if ((dataSetParamStr.nx[0] == 0) && (dataSetParamStr.nx[1] == 0) && (dataSetParamStr.nx[2] == 0) && (dataSetParamStr.nx[3] == 0))
-      //   mAverageTriplecorrelations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr();
-      //else
       mAverageTriplecorrelations = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
 
       // find the necessary block and fill it
@@ -2345,8 +1890,6 @@ void MPIIORestartCoProcessor::readAverageTripleArray(int step)
       block->getKernel()->getDataSet()->setAverageTriplecorrelations(mAverageTriplecorrelations);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIORestartCoProcessor::readAverageTripleArray end of restore of data, rank = " << rank);
@@ -2413,6 +1956,7 @@ void MPIIORestartCoProcessor::readShearStressValArray(int step)
    if (doubleCountInBlock > 0)
       MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -2432,9 +1976,6 @@ void MPIIORestartCoProcessor::readShearStressValArray(int step)
 
       // fill ShearStressValuesArray array
       SPtr<ShearStressValuesArray3D> mShearStressValues;
-      //if ((dataSetParamStr.nx[0] == 0) && (dataSetParamStr.nx[1] == 0) && (dataSetParamStr.nx[2] == 0) && (dataSetParamStr.nx[3] == 0))
-      //   mShearStressValues = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr();
-      //else
       mShearStressValues = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
 
       // find the necessary block and fill it
@@ -2442,8 +1983,6 @@ void MPIIORestartCoProcessor::readShearStressValArray(int step)
       block->getKernel()->getDataSet()->setShearStressValues(mShearStressValues);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIORestartCoProcessor::readShearStressValArray end of restore of data, rank = " << rank);
@@ -2510,6 +2049,7 @@ void MPIIORestartCoProcessor::readRelaxationFactor(int step)
    if (doubleCountInBlock > 0)
       MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
    MPI_File_close(&file_handler);
+   MPI_Type_free(&dataSetDoubleType);
 
    if (comm->isRoot())
    {
@@ -2529,9 +2069,6 @@ void MPIIORestartCoProcessor::readRelaxationFactor(int step)
 
       // fill RelaxationFactor array
       SPtr<RelaxationFactorArray3D> mRelaxationFactor;
-      //if ((dataSetParamStr.nx[0] == 0) && (dataSetParamStr.nx[1] == 0) && (dataSetParamStr.nx[2] == 0))
-      //   mRelaxationFactor = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr();
-      //else
       mRelaxationFactor = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
 
       // find the necessary block and fill it
@@ -2539,8 +2076,6 @@ void MPIIORestartCoProcessor::readRelaxationFactor(int step)
       block->getKernel()->getDataSet()->setRelaxationFactor(mRelaxationFactor);
    }
 
-   MPI_Type_free(&dataSetDoubleType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIORestartCoProcessor::readRelaxationFactor end of restore of data, rank = " << rank);
@@ -2619,6 +2154,7 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + blocksCount * sizeof(BCAddRestart) + dataCount * sizeof(BoundaryCondition) + blocksCount * boundCondParamStr.bcindexmatrixCount * sizeof(int)), &intArray2[0], dataCount2, MPI_INT, MPI_STATUS_IGNORE);
 
    MPI_File_close(&file_handler);
+   MPI_Type_free(&bcindexmatrixType);
 
    if (comm->isRoot())
    {
@@ -2699,8 +2235,6 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
    delete[] intArray1;
    delete[] intArray2;
 
-   MPI_Type_free(&bcindexmatrixType);
-
    if (comm->isRoot())
    {
       UBLOG(logINFO, "MPIIORestartCoProcessor::readBoundaryConds end of restore of data, rank = " << rank);
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h
index 79c77481f318f81ad4cf6af8fb608b89b683d3d0..a52ebe6598fefa665455360b70b932f0a292c2d9 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h
@@ -2,11 +2,11 @@
 #define _MPIIORestartCoProcessor_H_
 
 #include <mpi.h>
-#include <PointerDefinitions.h>
+//#include <PointerDefinitions.h>
 #include <string>
 
-#include "CoProcessor.h"
 #include "MPIIODataStructures.h"
+#include "MPIIOCoProcessor.h"
 
 class Grid3D;
 class UbScheduler;
@@ -16,9 +16,9 @@ class LBMKernel;
 
 
 
-//! \class MPIWriteBlocksCoProcessor 
+//! \class MPIIORestartCoProcessor 
 //! \brief Writes the grid into the files each timestep and reads the grid back from the files before regenerating it
-class MPIIORestartCoProcessor : public CoProcessor
+class MPIIORestartCoProcessor : public MPIIOCoProcessor
 {
 public:
    MPIIORestartCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string& path, SPtr<Communicator> comm);
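With the common base class in place, only the restart-specific MPI datatypes remain in this header; the checkpoint path, the communicator and the generic grid/block/boundary-condition types are inherited. A compressed, hypothetical sketch of that split (class names are placeholders, member names follow the declarations below):

#include <mpi.h>
#include <string>

class CommonMPIIO                               // stands in for the shared base co-processor
{
protected:
    std::string path;                           // checkpoint directory used by all writers/readers
    // (the communicator handle also lives at this level in the real class)
    MPI_Datatype gridParamType, block3dType, dataSetParamType,
                 boundCondType, arrayPresenceType;
};

class RestartMPIIO : public CommonMPIIO         // stands in for the restart co-processor
{
private:
    // only the datatypes tied to the restart file layout stay in the derived class
    MPI_Datatype dataSetType, dataSetSmallType, dataSetDoubleType;
    MPI_Datatype boundCondParamType, boundCondType1000, boundCondTypeAdd, bcindexmatrixType;
};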
@@ -58,19 +58,15 @@ public:
    void setBCProcessor(SPtr<BCProcessor> bcProcessor);
    //!The function truncates the data files
    void clearAllFiles(int step);
-   //!The function write a time step of last check point
-   void writeCpTimeStep(int step);
-   //!The function read a time step of last check point
-   int readCpTimeStep();
 
 protected:
-   std::string path;
-   SPtr<Communicator> comm;
+   //std::string path;
+   //SPtr<Communicator> comm;
 
 private:
-   MPI_Datatype gridParamType, block3dType, arrayPresenceType;
-   MPI_Datatype dataSetParamType, dataSetType, dataSetSmallType, dataSetDoubleType;
-   MPI_Datatype boundCondParamType, boundCondType, boundCondType1000, boundCondTypeAdd, bcindexmatrixType;
+   //MPI_Datatype gridParamType, block3dType;
+   MPI_Datatype dataSetType, dataSetSmallType, dataSetDoubleType;
+   MPI_Datatype boundCondParamType, boundCondType1000, boundCondTypeAdd, bcindexmatrixType;
 
    MPIIODataStructures::boundCondParam boundCondParamStr;
    SPtr<LBMKernel> lbmKernel;