diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp
index 384150ed680329aaad2ecf871f57a70ee2a177e2..34c247345d2d3cf1a9c527ab7a7b15e23f7ab5da 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.cpp
@@ -245,6 +245,11 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
                 else
                     arrPresence.isPhaseField2Present = false;
 
+                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> pressureFieldPtr = block->getKernel()->getDataSet()->getPressureField();
+                if (pressureFieldPtr)
+                    arrPresence.isPressureFieldPresent = true;
+                else
+                    arrPresence.isPressureFieldPresent = false;
 
                 firstBlock = false;
             }
@@ -398,6 +403,10 @@ void MPIIOMigrationBECoProcessor::writeDataSet(int step)
 
     if (arrPresence.isPhaseField2Present)
         write3DArray(step, PhaseField2, std::string("/cpPhaseField2.bin"));
+
+    if (arrPresence.isPressureFieldPresent)
+        write3DArray(step, PressureField, std::string("/cpPressureField.bin"));
+
     }
 
 void MPIIOMigrationBECoProcessor::write4DArray(int step, Arrays arrayType, std::string fname)
@@ -565,6 +574,9 @@ void MPIIOMigrationBECoProcessor::write3DArray(int step, Arrays arrayType, std::
                 case PhaseField2:
                     ___Array = block->getKernel()->getDataSet()->getPhaseField2();
                     break;
+                case PressureField:
+                    ___Array = block->getKernel()->getDataSet()->getPressureField();
+                    break;
                 default:
                     UB_THROW(UbException(UB_EXARGS,
                     "MPIIOMigrationBECoProcessor::write3DArray : 3D array type does not exist!"));
@@ -1217,6 +1229,7 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
             // find the necessary block and fill it
             SPtr<Block3D> block = grid->getBlock(blockID);
             this->lbmKernel->setBlock(block);
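+            // restore the block dimensions recorded in the checkpoint before the kernel
+            // is cloned, so the clone matches the dimensions of the arrays read back below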
+            this->lbmKernel->setNX(std::array<int, 3>{ {dataSetParamStr1.nx1, dataSetParamStr1.nx2, dataSetParamStr1.nx3}});
             SPtr<LBMKernel> kernel = this->lbmKernel->clone();
             LBMReal collFactor = LBMSystem::calcCollisionFactor(this->nue, block->getLevel());
             LBMReal collFactorL = LBMSystem::calcCollisionFactor(this->nuL, block->getLevel());
@@ -1285,9 +1298,12 @@ void MPIIOMigrationBECoProcessor::readDataSet(int step)
     if (arrPresence.isPhaseField2Present)
         readArray(step, PhaseField2, std::string("/cpPhaseField2.bin"));
 
+    if (arrPresence.isPressureFieldPresent)
+        readArray(step, PressureField, std::string("/cpPressureField.bin"));
+
     delete[] rawDataReceiveF;
     delete[] rawDataReceiveH1;
-//    delete[] rawDataReceiveH2;
+    delete[] rawDataReceiveH2;
 }
 
 void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::string fname)
@@ -1430,6 +1446,11 @@ void MPIIOMigrationBECoProcessor::readArray(int step, Arrays arrType, std::strin
                         vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
                     block->getKernel()->getDataSet()->setPhaseField2(___3DArray);
                     break;
+                case PressureField:
+                    ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                        vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
+                    block->getKernel()->getDataSet()->setPressureField(___3DArray);
+                    break;
                 default:
                     UB_THROW(UbException(UB_EXARGS, "MPIIOMigrationBECoProcessor::readArray : array type does not exist!"));
                     break;
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h
index 5fbfea3717a2a8f30b9e22dca1e8f0c5e8729a2b..c60800ccd18e5ac523c5c85ea47219a96f8a69c5 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationBECoProcessor.h
@@ -27,7 +27,8 @@ class MPIIOMigrationBECoProcessor : public MPIIOCoProcessor
         ShearStressVal      = 5,
         RelaxationFactor    = 6,
         PhaseField1         = 7,
-        PhaseField2 = 8
+        PhaseField2         = 8,
+        PressureField       = 9
     };
 
 public:
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
index ca64b0e48c9a6fa4a159e73eb66e8d97159ffd88..285d6c28ae92b3bad7fb6b1171f3a09a637e0729 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
@@ -313,7 +313,7 @@ void MPIIOMigrationCoProcessor::writeDataSet(int step)
                 if (zeroDistributionsH2 && (dataSetParamStr3.nx[0] > 0) && (dataSetParamStr3.nx[1] > 0) && (dataSetParamStr3.nx[2] > 0))
                     doubleValuesArrayH2.insert(doubleValuesArrayH2.end(), zeroDistributionsH2->getDataVector().begin(), zeroDistributionsH2->getDataVector().end());
             }
-
+            
             ic++;
         }
     }
@@ -476,7 +476,7 @@ void MPIIOMigrationCoProcessor::write4DArray(int step, Arrays arrayType, std::st
 
     if (comm->isRoot()) 
     {
-        UBLOG(logINFO, "MPIIOMigrationCoProcessor::writeAverageDensityArray start collect data rank = " << rank);
+        UBLOG(logINFO, "MPIIOMigrationCoProcessor::write4DArray start collect data rank = " << rank);
         UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
@@ -606,7 +606,7 @@ void MPIIOMigrationCoProcessor::write3DArray(int step, Arrays arrayType, std::st
 
     if (comm->isRoot()) 
     {
-        UBLOG(logINFO, "MPIIOMigrationCoProcessor::write3DArray start collect data rank = " << rank);
+        UBLOG(logINFO, "MPIIOMigrationCoProcessor::write3DArray start collect data to file = " << fname);
         UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
@@ -1030,7 +1030,6 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
 
     }
     MPI_File_close(&file_handler);
-
     //----------------------------------------- H2 ----------------------------------------------------
     ic = 0;
     filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSetH2.bin";
@@ -1100,19 +1099,16 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
         if (multiPhase2)
             vectorsOfValuesH23.assign(doubleValuesArrayH2.data() + index, doubleValuesArrayH2.data() + index + vectorSize3);
         index += vectorSize3;
-
+ 
         SPtr<DistributionArray3D> mFdistributions(new D3Q27EsoTwist3DSplittedVector());
         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
+            new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF1, dataSetParamStr1.nx[0], dataSetParamStr1.nx[1], dataSetParamStr1.nx[2], dataSetParamStr1.nx[3])));
         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNonLocalDistributions(CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(
-                new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
+            new CbArray4D<LBMReal, IndexerX4X3X2X1>(vectorsOfValuesF2, dataSetParamStr2.nx[0], dataSetParamStr2.nx[1], dataSetParamStr2.nx[2], dataSetParamStr2.nx[3])));
         dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setZeroDistributions(CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
-                    vectorsOfValuesF3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
-
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX1(dataSetParamStr1.nx1);
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX2(dataSetParamStr1.nx2);
-        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX3(dataSetParamStr1.nx3);
-
+            vectorsOfValuesF3, dataSetParamStr3.nx[0], dataSetParamStr3.nx[1], dataSetParamStr3.nx[2])));
+        
+        //----------------------------------------- H1 ----------------------------------------------------
        SPtr<DistributionArray3D> mH1distributions(new D3Q27EsoTwist3DSplittedVector());
        if (multiPhase1)
         {
@@ -1143,9 +1139,14 @@ void MPIIOMigrationCoProcessor::readDataSet(int step)
             dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mH2distributions)->setNX3(dataSetParamStr1.nx3);
         }
 
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX1(dataSetParamStr1.nx1);
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX2(dataSetParamStr1.nx2);
+        dynamicPointerCast<D3Q27EsoTwist3DSplittedVector>(mFdistributions)->setNX3(dataSetParamStr1.nx3);
+
         // find the necessary block and fill it
         SPtr<Block3D> block = grid->getBlock(dataSetArray[n].globalID);
         this->lbmKernel->setBlock(block);
+        this->lbmKernel->setNX(std::array<int, 3>{ {dataSetParamStr1.nx1, dataSetParamStr1.nx2, dataSetParamStr1.nx3}});
-        UbTupleInt3 blockNX = grid->getBlockNX();
-        this->lbmKernel->setNX(std::array<int, 3>{ { val<1>(blockNX), val<2>(blockNX), val<3>(blockNX) } });
         SPtr<LBMKernel> kernel = this->lbmKernel->clone();
@@ -1223,7 +1224,7 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string
 
     if (comm->isRoot()) 
     {
-        UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray start MPI IO rank = " << rank);
+        UBLOG(logINFO, "MPIIOMigrationCoProcessor::readArray start fname = " << fname);
         UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
     
@@ -1293,7 +1294,10 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string
     size_t index = 0;
     size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
     std::vector<double> vectorsOfValues;
-    for (std::size_t n = 0; n < blocksCount; n++) 
+    SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___4DArray;
+    SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___3DArray;
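+    // declared once outside the loop; each case below still allocates a fresh array per block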
+
+    for (std::size_t n = 0; n < blocksCount; n++)
     {
         SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
 
@@ -1301,9 +1305,6 @@ void MPIIOMigrationCoProcessor::readArray(int step, Arrays arrType, std::string
         index += nextVectorSize;
 
         // fill arrays
-        SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___4DArray;
-        SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___3DArray;
-
         switch (arrType) 
         {
             case AverageDensity:
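
Both co-processors settle their file offsets with the same rank-ordered hand-off that recurs in every write*/read* function of this patch: rank 0 starts right behind the per-rank block-count header, and each rank forwards its end offset to rank + 1. A minimal, self-contained sketch of that pattern follows; the payload size and the file name are illustrative, not the real dataSetParam/DataSetSmallRestart accounting.

    // offset_handoff.cpp -- build: mpic++ offset_handoff.cpp, run: mpirun -np 4 ./a.out
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);
        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        long long myBytes = 100 * (rank + 1);                     // stand-in for this rank's payload
        long long writeOffset = (long long)(size * sizeof(int));  // header: one int per rank

        if (rank > 0)  // wait until the previous rank has fixed its end offset
            MPI_Recv(&writeOffset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

        long long nextOffset = writeOffset + myBytes;
        if (rank < size - 1)  // hand our end offset on as the next rank's start
            MPI_Send(&nextOffset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);

        std::printf("rank %d writes [%lld, %lld)\n", rank, writeOffset, nextOffset);
        MPI_Finalize();
        return 0;
    }
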
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
index 90f393d0f83ad610da21b803a8c15a445a7811dc..1798bc68d26797833d371ae86435351c123b236a 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
@@ -286,6 +286,12 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
                 else
                     arrPresence.isPhaseField2Present = false;
 
+                SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> pressureFieldPtr = block->getKernel()->getDataSet()->getPressureField();
+                if (pressureFieldPtr)
+                    arrPresence.isPressureFieldPresent = true;
+                else
+                    arrPresence.isPressureFieldPresent = false;
+
                 firstBlock = false;
             }
 
@@ -440,6 +446,33 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
     MPI_File_close(&file_handler1);
 
     if (arrPresence.isAverageDensityArrayPresent)
+        write4DArray(step, AverageDensity, std::string("/cpAverageDensityArray.bin"));
+
+    if (arrPresence.isAverageVelocityArrayPresent)
+        write4DArray(step, AverageVelocity, std::string("/cpAverageVelocityArray.bin"));
+
+    if (arrPresence.isAverageFluktuationsArrayPresent)
+        write4DArray(step, AverageFluktuations, std::string("/cpAverageFluktuationsArray.bin"));
+
+    if (arrPresence.isAverageTripleArrayPresent)
+        write4DArray(step, AverageTriple, std::string("/cpAverageTripleArray.bin"));
+
+    if (arrPresence.isShearStressValArrayPresent)
+        write4DArray(step, ShearStressVal, std::string("/cpShearStressValArray.bin"));
+
+    if (arrPresence.isRelaxationFactorPresent)
+        write3DArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin"));
+
+    if (arrPresence.isPhaseField1Present)
+        write3DArray(step, PhaseField1, std::string("/cpPhaseField1.bin"));
+
+    if (arrPresence.isPhaseField2Present)
+        write3DArray(step, PhaseField2, std::string("/cpPhaseField2.bin"));
+
+    if (arrPresence.isPressureFieldPresent)
+        write3DArray(step, PressureField, std::string("/cpPressureField.bin"));
+
+    /*if (arrPresence.isAverageDensityArrayPresent)
         writeAverageDensityArray(step);
 
     if (arrPresence.isAverageVelocityArrayPresent)
@@ -462,9 +495,327 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
 
     if (arrPresence.isPhaseField2Present)
         writePhaseField(step, 2);
+
+    if (arrPresence.isPressureFieldPresent)
+        writePressureField(step);*/
+
+}
+
+void MPIIORestartCoProcessor::write4DArray(int step, Arrays arrayType, std::string fname)
+{
+    int rank, size;
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+    int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
+
+    std::vector<SPtr<Block3D>> blocksVector[25];
+    int minInitLevel = this->grid->getCoarsestInitializedLevel();
+    int maxInitLevel = this->grid->getFinestInitializedLevel();
+    for (int level = minInitLevel; level <= maxInitLevel; level++)
+    {
+        grid->getBlocks(level, rank, blocksVector[level]);
+        blocksCount += static_cast<int>(blocksVector[level].size());
+    }
+
+    DataSetSmallRestart* dataSetSmallArray = new DataSetSmallRestart[blocksCount];
+    std::vector<double> doubleValuesArray; // double-values of the selected array in all blocks
+    dataSetParam dataSetParamStr;
+
+    if (comm->isRoot())
+    {
+        UBLOG(logINFO, "MPIIORestartCoProcessor::writeAverageDensityArray start collect data to file = " << fname);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    }
+
+    bool firstBlock = true;
+    int doubleCountInBlock = 0;
+    int ic = 0;
+    SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___Array;
+
+    for (int level = minInitLevel; level <= maxInitLevel; level++)
+    {
+        for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
+        {
+            dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid
+            dataSetSmallArray[ic].x2 = block->getX2();
+            dataSetSmallArray[ic].x3 = block->getX3();
+            dataSetSmallArray[ic].level = block->getLevel();
+
+            switch (arrayType)
+            {
+            case AverageDensity:
+                ___Array = block->getKernel()->getDataSet()->getAverageDensity();
+                break;
+            case AverageVelocity:
+                ___Array = block->getKernel()->getDataSet()->getAverageVelocity();
+                break;
+            case AverageFluktuations:
+                ___Array = block->getKernel()->getDataSet()->getAverageFluctuations();
+                break;
+            case AverageTriple:
+                ___Array = block->getKernel()->getDataSet()->getAverageTriplecorrelations();
+                break;
+            case ShearStressVal:
+                ___Array = block->getKernel()->getDataSet()->getShearStressValues();
+                break;
+            default:
+                UB_THROW(UbException(UB_EXARGS, "MPIIORestartCoProcessor::write4DArray : 4D array type does not exist!"));
+                break;
+            }
+
+            if (firstBlock) // when first (any) valid block...
+            {
+                dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
+                dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1());
+                dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2());
+                dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3());
+                dataSetParamStr.nx[3] = static_cast<int>(___Array->getNX4());
+                doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+
+                firstBlock = false;
+            }
+
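+            // dimensions come from the first block and are assumed identical for every block
+            // this rank writes; one contiguous MPI type per block is built from them below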
+            if (___Array && (dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0) && (dataSetParamStr.nx[3] > 0))
+                doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end());
+
+            ic++;
+        }
+    }
+
+    // register new MPI-types depending on the block-specific information
+    MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
+    MPI_Type_commit(&dataSetDoubleType);
+
+    if (comm->isRoot())
+    {
+        UBLOG(logINFO, "MPIIORestartCoProcessor::write4DArray start MPI IO rank = " << rank);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    }
+
+    // write to the file
+    // all processes calculate their offsets (quantity of bytes that the process is going to write)
+    // and notify the next process (with the rank = rank + 1)
+    MPI_Offset write_offset = (MPI_Offset)(size * sizeof(int));
+    size_t next_write_offset = 0;
+
+    if (size > 1)
+    {
+        if (rank == 0)
+        {
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
+        }
+        else
+        {
+            MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            if (rank < size - 1)
+                MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
+        }
+    }
+
+    double start{ 0. };
+    double finish{ 0. };
+    if (comm->isRoot())
+        start = MPI_Wtime();
+
+    MPI_Info info = MPI_INFO_NULL;
+
+#ifdef HLRN_LUSTRE
+    MPI_Info_create(&info);
+    MPI_Info_set(info, "striping_factor", "40");
+    MPI_Info_set(info, "striping_unit", "4M");
+#endif
+
+    MPI_File file_handler;
+    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + fname;
+    int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+    if (rc != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
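+    // file layout: one int of blocksCount per rank, then one region per rank holding
+    // [dataSetParam | blocksCount * DataSetSmallRestart | the blocks' double values]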
+    // each process writes the quantity of its blocks
+    MPI_File_write_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
+    // each process writes common parameters of a dataSet
+    MPI_File_write_at(file_handler, write_offset, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
+    // each process writes data identifying blocks
+    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE);
+    // each process writes the dataSet arrays
+    if (doubleValuesArray.size() > 0)
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+            &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+
+    MPI_File_sync(file_handler);
+    MPI_File_close(&file_handler);
+    MPI_Type_free(&dataSetDoubleType);
+
+    if (comm->isRoot())
+    {
+        finish = MPI_Wtime();
+        UBLOG(logINFO, "MPIIORestartCoProcessor::write4DArray time: " << finish - start << " s");
+    }
+
+    delete[] dataSetSmallArray;
+}
+
+void MPIIORestartCoProcessor::write3DArray(int step, Arrays arrayType, std::string fname)
+{
+    int rank, size;
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+    int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
+
+    std::vector<SPtr<Block3D>> blocksVector[25];
+    int minInitLevel = this->grid->getCoarsestInitializedLevel();
+    int maxInitLevel = this->grid->getFinestInitializedLevel();
+    for (int level = minInitLevel; level <= maxInitLevel; level++)
+    {
+        grid->getBlocks(level, rank, blocksVector[level]);
+        blocksCount += static_cast<int>(blocksVector[level].size());
+    }
+
+    DataSetSmallRestart* dataSetSmallArray = new DataSetSmallRestart[blocksCount];
+    std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
+    dataSetParam dataSetParamStr;
+
+    if (comm->isRoot())
+    {
+        UBLOG(logINFO, "MPIIORestartCoProcessor::write3DArray start collect data to file = " << fname);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    }
+
+    bool firstBlock = true;
+    size_t doubleCountInBlock = 0;
+    int ic = 0;
+    SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___Array;
+
+    for (int level = minInitLevel; level <= maxInitLevel; level++)
+    {
+        for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
+        {
+            dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid
+            dataSetSmallArray[ic].x2 = block->getX2();
+            dataSetSmallArray[ic].x3 = block->getX3();
+            dataSetSmallArray[ic].level = block->getLevel();
+
+            switch (arrayType)
+            {
+            case RelaxationFactor:
+                ___Array = block->getKernel()->getDataSet()->getRelaxationFactor();
+                break;
+            case PhaseField1:
+                ___Array = block->getKernel()->getDataSet()->getPhaseField();
+                break;
+            case PhaseField2:
+                ___Array = block->getKernel()->getDataSet()->getPhaseField2();
+                break;
+            case PressureField:
+                ___Array = block->getKernel()->getDataSet()->getPressureField();
+                break;
+            default:
+                UB_THROW(UbException(UB_EXARGS, "MPIIORestartCoProcessor::write3DArray : 3D array type does not exist!"));
+                break;
+            }
+
+            if (firstBlock) // when first (any) valid block...
+            {
+                dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
+                dataSetParamStr.nx[0] = static_cast<int>(___Array->getNX1());
+                dataSetParamStr.nx[1] = static_cast<int>(___Array->getNX2());
+                dataSetParamStr.nx[2] = static_cast<int>(___Array->getNX3());
+                dataSetParamStr.nx[3] = 1;
+                doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+
+                firstBlock = false;
+            }
+
+            if (___Array && (dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0))
+                doubleValuesArray.insert(doubleValuesArray.end(), ___Array->getDataVector().begin(), ___Array->getDataVector().end());
+
+            ic++;
+        }
+    }
+
+    // register new MPI-types depending on the block-specific information
+    MPI_Type_contiguous(int(doubleCountInBlock), MPI_DOUBLE, &dataSetDoubleType);
+    MPI_Type_commit(&dataSetDoubleType);
+
+    if (comm->isRoot())
+    {
+        UBLOG(logINFO, "MPIIORestartCoProcessor::write3DArray start MPI IO rank = " << rank);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    }
+
+    // write to the file
+    // all processes calculate their offsets (quantity of bytes that the process is going to write)
+    // and notify the next process (with the rank = rank + 1)
+    MPI_Offset write_offset = (MPI_Offset)(size * sizeof(int));
+    size_t next_write_offset = 0;
+
+    if (size > 1)
+    {
+        if (rank == 0)
+        {
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
+        }
+        else
+        {
+            MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            if (rank < size - 1)
+                MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
+        }
+    }
+
+    double start{ 0. };
+    double finish{ 0. };
+    if (comm->isRoot())
+        start = MPI_Wtime();
+
+    MPI_Info info = MPI_INFO_NULL;
+
+#ifdef HLRN_LUSTRE
+    MPI_Info_create(&info);
+    MPI_Info_set(info, "striping_factor", "40");
+    MPI_Info_set(info, "striping_unit", "4M");
+#endif
+
+    MPI_File file_handler;
+    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + fname;
+    int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+    if (rc != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+    // each process writes the quantity of its blocks
+    MPI_File_write_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
+    // each process writes common parameters of a dataSet
+    MPI_File_write_at(file_handler, write_offset, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
+    // each process writes data identifying blocks
+    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount,
+        dataSetSmallType, MPI_STATUS_IGNORE);
+    // each process writes the dataSet arrays
+    if (doubleValuesArray.size() > 0)
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+            &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+
+    MPI_File_sync(file_handler);
+    MPI_File_close(&file_handler);
+    MPI_Type_free(&dataSetDoubleType);
+
+    if (comm->isRoot())
+    {
+        finish = MPI_Wtime();
+        UBLOG(logINFO, "MPIIORestartCoProcessor ::write3DArray time: " << finish - start << " s");
+    }
+
+    delete[] dataSetSmallArray;
 }
 
-void MPIIORestartCoProcessor::writeAverageDensityArray(int step)
+/*void MPIIORestartCoProcessor::writeAverageDensityArray(int step)
 {
     int rank, size;
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
@@ -1203,30 +1554,171 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
             if (firstBlock) // when first (any) valid block...
             {
                 dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-                dataSetParamStr.nx[0] = static_cast<int>(RelaxationFactor3DPtr->getNX1());
-                dataSetParamStr.nx[1] = static_cast<int>(RelaxationFactor3DPtr->getNX2());
-                dataSetParamStr.nx[2] = static_cast<int>(RelaxationFactor3DPtr->getNX3());
+                dataSetParamStr.nx[0] = static_cast<int>(RelaxationFactor3DPtr->getNX1());
+                dataSetParamStr.nx[1] = static_cast<int>(RelaxationFactor3DPtr->getNX2());
+                dataSetParamStr.nx[2] = static_cast<int>(RelaxationFactor3DPtr->getNX3());
+                dataSetParamStr.nx[3] = 1;
+                doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+
+                firstBlock = false;
+            }
+
+            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0))
+                doubleValuesArray.insert(doubleValuesArray.end(), RelaxationFactor3DPtr->getDataVector().begin(),
+                                         RelaxationFactor3DPtr->getDataVector().end());
+
+            ic++;
+        }
+    }
+
+    // register new MPI-types depending on the block-specific information
+    MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
+    MPI_Type_commit(&dataSetDoubleType);
+
+    if (comm->isRoot()) 
+    {
+        UBLOG(logINFO, "MPIIORestartCoProcessor::writeRelaxationFactor start MPI IO rank = " << rank);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    }
+
+    // write to the file
+    // all processes calculate their offsets (quantity of bytes that the process is going to write)
+    // and notify the next process (with the rank = rank + 1)
+    MPI_Offset write_offset  = (MPI_Offset)(size * sizeof(int));
+    size_t next_write_offset = 0;
+
+    if (size > 1) 
+    {
+        if (rank == 0) 
+        {
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
+        } 
+        else 
+        {
+            MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+            next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            if (rank < size - 1)
+                MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
+        }
+    }
+
+    
+    double start {0.};
+    double finish {0.};
+    if (comm->isRoot())
+        start = MPI_Wtime();
+
+    MPI_Info info = MPI_INFO_NULL;
+
+#ifdef HLRN_LUSTRE
+    MPI_Info_create(&info);
+    MPI_Info_set(info, "striping_factor", "40");
+    MPI_Info_set(info, "striping_unit", "4M");
+#endif
+
+    MPI_File file_handler;
+    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpRelaxationFactor.bin";
+    int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
+    if (rc != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+    // each process writes the quantity of its blocks
+    MPI_File_write_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
+    // each process writes common parameters of a dataSet
+    MPI_File_write_at(file_handler, write_offset, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
+    // each process writes data identifying blocks
+    MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount,
+                      dataSetSmallType, MPI_STATUS_IGNORE);
+    // each process writes the dataSet arrays
+    if (doubleValuesArray.size() > 0)
+        MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+                          &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+
+    MPI_File_sync(file_handler);
+    MPI_File_close(&file_handler);
+    MPI_Type_free(&dataSetDoubleType);
+
+    if (comm->isRoot()) 
+    {
+        finish = MPI_Wtime();
+        UBLOG(logINFO, "MPIIORestartCoProcessor::writeRelaxationFactor time: " << finish - start << " s");
+    }
+
+    delete[] dataSetSmallArray;
+}
+
+void MPIIORestartCoProcessor::writePhaseField(int step, int fieldN)
+{
+    int rank, size;
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+    int blocksCount = 0; // quantity of blocks in the grid, max 2147483648 blocks!
+
+    std::vector<SPtr<Block3D>> blocksVector[25];
+    int minInitLevel = this->grid->getCoarsestInitializedLevel();
+    int maxInitLevel = this->grid->getFinestInitializedLevel();
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
+        grid->getBlocks(level, rank, blocksVector[level]);
+        blocksCount += static_cast<int>(blocksVector[level].size());
+    }
+
+    DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount];
+    std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
+    dataSetParam dataSetParamStr;
+
+    if (comm->isRoot()) 
+    {
+        UBLOG(logINFO, "MPIIORestartCoProcessor::writePhaseField start collect data rank = " << rank);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    }
+
+    bool firstBlock        = true;
+    int doubleCountInBlock = 0;
+    int ic                 = 0;
+    SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> PhaseField3DPtr;
+
+    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    {
+        for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
+        {
+            dataSetSmallArray[ic].x1 = block->getX1(); // coordinates of the block needed to find it while regenerating the grid
+            dataSetSmallArray[ic].x2 = block->getX2();
+            dataSetSmallArray[ic].x3 = block->getX3();
+            dataSetSmallArray[ic].level = block->getLevel();
+
+            if(fieldN == 1)
+                PhaseField3DPtr = block->getKernel()->getDataSet()->getPhaseField();
+            else
+                PhaseField3DPtr = block->getKernel()->getDataSet()->getPhaseField2();
+
+            if (firstBlock) // when first (any) valid block...
+            {
+                dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
+                dataSetParamStr.nx[0] = static_cast<int>(PhaseField3DPtr->getNX1());
+                dataSetParamStr.nx[1] = static_cast<int>(PhaseField3DPtr->getNX2());
+                dataSetParamStr.nx[2] = static_cast<int>(PhaseField3DPtr->getNX3());
                 dataSetParamStr.nx[3] = 1;
                 doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
-
+                std::cout << "writePhaseField"<<fieldN<< " = " << dataSetParamStr.nx[0] << " " << dataSetParamStr.nx[1] << " " << dataSetParamStr.nx[2] << std::endl;
                 firstBlock = false;
             }
-
             if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), RelaxationFactor3DPtr->getDataVector().begin(),
-                                         RelaxationFactor3DPtr->getDataVector().end());
+                doubleValuesArray.insert(doubleValuesArray.end(), PhaseField3DPtr->getDataVector().begin(), PhaseField3DPtr->getDataVector().end());
 
             ic++;
         }
     }
-
+        
     // register new MPI-types depending on the block-specific information
     MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
     if (comm->isRoot()) 
     {
-        UBLOG(logINFO, "MPIIORestartCoProcessor::writeRelaxationFactor start MPI IO rank = " << rank);
+        UBLOG(logINFO, "MPIIORestartCoProcessor::writePhaseField start MPI IO rank = " << rank);
         UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
@@ -1267,7 +1759,9 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
 #endif
 
     MPI_File file_handler;
-    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpRelaxationFactor.bin";
+    std::string filename;
+    if(fieldN == 1) filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField1.bin";
+    else filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField2.bin";
     int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
@@ -1291,13 +1785,13 @@ void MPIIORestartCoProcessor::writeRelaxationFactor(int step)
     if (comm->isRoot()) 
     {
         finish = MPI_Wtime();
-        UBLOG(logINFO, "MPIIORestartCoProcessor::writeRelaxationFactor time: " << finish - start << " s");
+        UBLOG(logINFO, "MPIIORestartCoProcessor::writePhaseField time: " << finish - start << " s");
     }
 
     delete[] dataSetSmallArray;
 }
 
-void MPIIORestartCoProcessor::writePhaseField(int step, int fieldN)
+void MPIIORestartCoProcessor::writePressureField(int step)
 {
     int rank, size;
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
@@ -1308,28 +1802,28 @@ void MPIIORestartCoProcessor::writePhaseField(int step, int fieldN)
     std::vector<SPtr<Block3D>> blocksVector[25];
     int minInitLevel = this->grid->getCoarsestInitializedLevel();
     int maxInitLevel = this->grid->getFinestInitializedLevel();
-    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    for (int level = minInitLevel; level <= maxInitLevel; level++)
     {
         grid->getBlocks(level, rank, blocksVector[level]);
         blocksCount += static_cast<int>(blocksVector[level].size());
     }
 
-    DataSetSmallRestart *dataSetSmallArray = new DataSetSmallRestart[blocksCount];
+    DataSetSmallRestart* dataSetSmallArray = new DataSetSmallRestart[blocksCount];
     std::vector<double> doubleValuesArray; // double-values (arrays of f's) in all blocks
     dataSetParam dataSetParamStr;
 
-    if (comm->isRoot()) 
+    if (comm->isRoot())
     {
-        UBLOG(logINFO, "MPIIORestartCoProcessor::writePhaseField start collect data rank = " << rank);
+        UBLOG(logINFO, "MPIIORestartCoProcessor::writePressureField start collect data rank = " << rank);
         UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
-    bool firstBlock        = true;
+    bool firstBlock = true;
     int doubleCountInBlock = 0;
-    int ic                 = 0;
-    SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> PhaseField3DPtr;
+    int ic = 0;
+    SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> PressureField3DPtr;
 
-    for (int level = minInitLevel; level <= maxInitLevel; level++) 
+    for (int level = minInitLevel; level <= maxInitLevel; level++)
     {
         for (SPtr<Block3D> block : blocksVector[level]) //	blocks of the current level
         {
@@ -1338,53 +1832,53 @@ void MPIIORestartCoProcessor::writePhaseField(int step, int fieldN)
             dataSetSmallArray[ic].x3 = block->getX3();
             dataSetSmallArray[ic].level = block->getLevel();
 
-            if(fieldN == 1)
-                PhaseField3DPtr = block->getKernel()->getDataSet()->getPhaseField();
-            else
-                PhaseField3DPtr = block->getKernel()->getDataSet()->getPhaseField2();
+            PressureField3DPtr = block->getKernel()->getDataSet()->getPressureField();
 
             if (firstBlock) // when first (any) valid block...
             {
                 dataSetParamStr.nx1 = dataSetParamStr.nx2 = dataSetParamStr.nx3 = 0;
-                dataSetParamStr.nx[0] = static_cast<int>(PhaseField3DPtr->getNX1());
-                dataSetParamStr.nx[1] = static_cast<int>(PhaseField3DPtr->getNX2());
-                dataSetParamStr.nx[2] = static_cast<int>(PhaseField3DPtr->getNX3());
+                dataSetParamStr.nx[0] = static_cast<int>(PressureField3DPtr->getNX1());
+                dataSetParamStr.nx[1] = static_cast<int>(PressureField3DPtr->getNX2());
+                dataSetParamStr.nx[2] = static_cast<int>(PressureField3DPtr->getNX3());
                 dataSetParamStr.nx[3] = 1;
                 doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
 
                 firstBlock = false;
             }
-            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0))
-                doubleValuesArray.insert(doubleValuesArray.end(), PhaseField3DPtr->getDataVector().begin(), PhaseField3DPtr->getDataVector().end());
 
+            if ((dataSetParamStr.nx[0] > 0) && (dataSetParamStr.nx[1] > 0) && (dataSetParamStr.nx[2] > 0))
+                doubleValuesArray.insert(doubleValuesArray.end(), PressureField3DPtr->getDataVector().begin(),
+                    PressureField3DPtr->getDataVector().end());
+ 
             ic++;
         }
     }
-        
-    // register new MPI-types depending on the block-specific information
+    // register new MPI-types depending on the block-specific information
     MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
     MPI_Type_commit(&dataSetDoubleType);
 
-    if (comm->isRoot()) 
+    if (comm->isRoot())
     {
-        UBLOG(logINFO, "MPIIORestartCoProcessor::writePhaseField start MPI IO rank = " << rank);
+        UBLOG(logINFO, "MPIIORestartCoProcessor::writePressureField start MPI IO rank = " << rank);
         UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
     }
 
     // write to the file
     // all processes calculate their offsets (quantity of bytes that the process is going to write)
     // and notify the next process (with the rank = rank + 1)
-    MPI_Offset write_offset  = (MPI_Offset)(size * sizeof(int));
+    MPI_Offset write_offset = (MPI_Offset)(size * sizeof(int));
     size_t next_write_offset = 0;
 
-    if (size > 1) 
+    if (size > 1)
     {
-        if (rank == 0) 
+        if (rank == 0)
         {
             next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
             MPI_Send(&next_write_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
-        } 
-        else 
+        }
+        else
         {
             MPI_Recv(&write_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
             next_write_offset = write_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
@@ -1393,9 +1887,9 @@ void MPIIORestartCoProcessor::writePhaseField(int step, int fieldN)
         }
     }
 
-    
-    double start {0.};
-    double finish {0.};
+
+    double start{ 0. };
+    double finish{ 0. };
     if (comm->isRoot())
         start = MPI_Wtime();
 
@@ -1408,9 +1902,7 @@ void MPIIORestartCoProcessor::writePhaseField(int step, int fieldN)
 #endif
 
     MPI_File file_handler;
-    std::string filename;
-    if(fieldN == 1) filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField1.bin";
-    else filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPhaseField2.bin";
+    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPressureField.bin";
     int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &file_handler);
     if (rc != MPI_SUCCESS)
         throw UbException(UB_EXARGS, "couldn't open file " + filename);
@@ -1421,24 +1913,24 @@ void MPIIORestartCoProcessor::writePhaseField(int step, int fieldN)
     MPI_File_write_at(file_handler, write_offset, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
     // each process writes data identifying blocks
     MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount,
-                      dataSetSmallType, MPI_STATUS_IGNORE);
+        dataSetSmallType, MPI_STATUS_IGNORE);
     // each process writes the dataSet arrays
     if (doubleValuesArray.size() > 0)
         MPI_File_write_at(file_handler, (MPI_Offset)(write_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
-                          &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+            &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
 
     MPI_File_sync(file_handler);
     MPI_File_close(&file_handler);
     MPI_Type_free(&dataSetDoubleType);
 
-    if (comm->isRoot()) 
+    if (comm->isRoot())
     {
         finish = MPI_Wtime();
-        UBLOG(logINFO, "MPIIORestartCoProcessor::writePhaseField time: " << finish - start << " s");
+        UBLOG(logINFO, "MPIIORestartCoProcessor::writePressureField time: " << finish - start << " s");
     }
 
     delete[] dataSetSmallArray;
-}
+}*/
 
 void MPIIORestartCoProcessor::writeBoundaryConds(int step)
 {
@@ -1854,6 +2346,7 @@ void MPIIORestartCoProcessor::readDataSet(int step)
         SPtr<Block3D> block = grid->getBlock(dataSetArray[n].x1, dataSetArray[n].x2, dataSetArray[n].x3, dataSetArray[n].level);
    
         this->lbmKernel->setBlock(block);
+        this->lbmKernel->setNX(std::array<int, 3>{{dataSetParamStr1.nx1, dataSetParamStr1.nx2, dataSetParamStr1.nx3}});
         SPtr<LBMKernel> kernel = this->lbmKernel->clone();
         kernel->setGhostLayerWidth(dataSetArray[n].ghostLayerWidth);
         kernel->setCollisionFactor(dataSetArray[n].collFactor);
@@ -1892,7 +2385,7 @@ void MPIIORestartCoProcessor::readDataSet(int step)
     MPI_File_read_at(file_handler1, (MPI_Offset)0, &arrPresence, 1, arrayPresenceType, MPI_STATUS_IGNORE);
     MPI_File_close(&file_handler1);
 
-    if (arrPresence.isAverageDensityArrayPresent)
+    /*if (arrPresence.isAverageDensityArrayPresent)
         readAverageDensityArray(step);
 
     if (arrPresence.isAverageVelocityArrayPresent)
@@ -1915,9 +2408,192 @@ void MPIIORestartCoProcessor::readDataSet(int step)
 
     if (arrPresence.isPhaseField2Present)
         readPhaseField(step, 2);
+
+    if (arrPresence.isPressureFieldPresent)
+        readPressureField(step);*/
+
+    if (arrPresence.isAverageDensityArrayPresent)
+        readArray(step, AverageDensity, std::string("/cpAverageDensityArray.bin"));
+
+    if (arrPresence.isAverageVelocityArrayPresent)
+        readArray(step, AverageVelocity, std::string("/cpAverageVelocityArray.bin"));
+
+    if (arrPresence.isAverageFluktuationsArrayPresent)
+        readArray(step, AverageFluktuations, std::string("/cpAverageFluktuationsArray.bin"));
+
+    if (arrPresence.isAverageTripleArrayPresent)
+        readArray(step, AverageTriple, std::string("/cpAverageTripleArray.bin"));
+
+    if (arrPresence.isShearStressValArrayPresent)
+        readArray(step, ShearStressVal, std::string("/cpShearStressValArray.bin"));
+
+    if (arrPresence.isRelaxationFactorPresent)
+        readArray(step, RelaxationFactor, std::string("/cpRelaxationFactor.bin"));
+
+    if (arrPresence.isPhaseField1Present)
+        readArray(step, PhaseField1, std::string("/cpPhaseField1.bin"));
+
+    if (arrPresence.isPhaseField2Present)
+        readArray(step, PhaseField2, std::string("/cpPhaseField2.bin"));
+
+    if (arrPresence.isPressureFieldPresent)
+        readArray(step, PressureField, std::string("/cpPressureField.bin"));
+
+}
+
+void MPIIORestartCoProcessor::readArray(int step, Arrays arrType, std::string fname)
+{
+    int rank, size;
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+    if (comm->isRoot())
+    {
+        UBLOG(logINFO, "MPIIORestartCoProcessor::readArray start fname = " << fname);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    }
+
+    double start{ 0. };
+    double finish{ 0. };
+    if (comm->isRoot())
+        start = MPI_Wtime();
+
+    MPI_File file_handler;
+    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + fname;
+    int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
+    if (rc != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+    // read count of blocks
+    int blocksCount = 0;
+    dataSetParam dataSetParamStr;
+    memset(&dataSetParamStr, 0, sizeof(dataSetParam));
+
+    MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
+
+    DataSetSmallRestart* dataSetSmallArray = new DataSetSmallRestart[blocksCount];
+    int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
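+    // nx[3] was written as 1 for 3D arrays (see write3DArray), so this product covers
+    // both the 3D and the 4D cases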
+    std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
+
+    // define MPI_types depending on the block-specific information
+    MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
+    MPI_Type_commit(&dataSetDoubleType);
+
+    // calculate the read offset
+    MPI_Offset read_offset = (MPI_Offset)(size * sizeof(int));
+    size_t next_read_offset = 0;
+
+    if (size > 1)
+    {
+        if (rank == 0)
+        {
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
+        }
+        else
+        {
+            MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            if (rank < size - 1)
+                MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
+        }
+    }
+
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE);
+    if (doubleCountInBlock > 0)
+        MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+            &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+    MPI_File_close(&file_handler);
+    MPI_Type_free(&dataSetDoubleType);
+
+    if (comm->isRoot())
+    {
+        finish = MPI_Wtime();
+        UBLOG(logINFO, "MPIIORestartCoProcessor::readArray time: " << finish - start << " s");
+        UBLOG(logINFO, "MPIIORestartCoProcessor::readArray start of restore of data, rank = " << rank);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    }
+
+    //----------------------------- restore data ---------------------------------
+    SPtr<CbArray4D<LBMReal, IndexerX4X3X2X1>> ___4DArray;
+    SPtr<CbArray3D<LBMReal, IndexerX3X2X1>> ___3DArray;
+
+    size_t index = 0;
+    size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    std::vector<double> vectorsOfValues;
+    for (int n = 0; n < blocksCount; n++)
+    {
+        vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
+        index += nextVectorSize;
+
+        // find the necessary block and fill it
+        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
+
+        // fill arrays
+        switch (arrType)
+        {
+        case AverageDensity:
+            ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
+                vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+            block->getKernel()->getDataSet()->setAverageDensity(___4DArray);
+            break;
+        case AverageVelocity:
+            ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
+                vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+            block->getKernel()->getDataSet()->setAverageVelocity(___4DArray);
+            break;
+        case AverageFluktuations:
+            ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
+                vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+            block->getKernel()->getDataSet()->setAverageFluctuations(___4DArray);
+            break;
+        case AverageTriple:
+            ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
+                vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+            block->getKernel()->getDataSet()->setAverageTriplecorrelations(___4DArray);
+            break;
+        case ShearStressVal:
+            ___4DArray = CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr(new CbArray4D<LBMReal, IndexerX4X3X2X1>(
+                vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2], dataSetParamStr.nx[3]));
+            block->getKernel()->getDataSet()->setShearStressValues(___4DArray);
+            break;
+        case RelaxationFactor:
+            ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
+            block->getKernel()->getDataSet()->setRelaxationFactor(___3DArray);
+            break;
+        case PhaseField1:
+            ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
+            block->getKernel()->getDataSet()->setPhaseField(___3DArray);
+            break;
+        case PhaseField2:
+            ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
+            block->getKernel()->getDataSet()->setPhaseField2(___3DArray);
+            break;
+        case PressureField:
+            ___3DArray = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+                vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
+            block->getKernel()->getDataSet()->setPressureField(___3DArray);
+            break;
+        default:
+            UB_THROW(UbException(UB_EXARGS, "MPIIORestartCoProcessor::readArray : array type does not exist!"));
+            break;
+        }
+    }
+
+    if (comm->isRoot())
+    {
+        UBLOG(logINFO, "MPIIORestartCoProcessor::readArray end of restore of data, rank = " << rank);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    }
+
+    delete[] dataSetSmallArray;
 }
 
-void MPIIORestartCoProcessor::readAverageDensityArray(int step)
+/*void MPIIORestartCoProcessor::readAverageDensityArray(int step)
 {
     int rank, size;
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
@@ -2621,6 +3304,106 @@ void MPIIORestartCoProcessor::readPhaseField(int step, int fieldN)
     delete[] dataSetSmallArray;
 }
 
+void MPIIORestartCoProcessor::readPressureField(int step)
+{
+    int rank, size;
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+    if (comm->isRoot())
+    {
+        UBLOG(logINFO, "MPIIORestartCoProcessor::readPressureField start MPI IO rank = " << rank);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    }
+
+    double start{ 0. };
+    double finish{ 0. };
+    if (comm->isRoot())
+        start = MPI_Wtime();
+
+    MPI_File file_handler;
+    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpPressureField.bin";
+    int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
+    if (rc != MPI_SUCCESS)
+        throw UbException(UB_EXARGS, "couldn't open file " + filename);
+
+    // read count of blocks
+    int blocksCount = 0;
+    dataSetParam dataSetParamStr;
+    MPI_File_read_at(file_handler, (MPI_Offset)(rank * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
+    MPI_File_read_at(file_handler, (MPI_Offset)(size * sizeof(int)), &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
+
+    DataSetSmallRestart* dataSetSmallArray = new DataSetSmallRestart[blocksCount];
+    int doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    std::vector<double> doubleValuesArray(blocksCount * doubleCountInBlock); // double-values in all blocks
+
+    // define MPI_types depending on the block-specific information
+    MPI_Type_contiguous(doubleCountInBlock, MPI_DOUBLE, &dataSetDoubleType);
+    MPI_Type_commit(&dataSetDoubleType);
+
+    // calculate the read offset
+    MPI_Offset read_offset = (MPI_Offset)(size * sizeof(int));
+    size_t next_read_offset = 0;
+
+    if (size > 1)
+    {
+        if (rank == 0)
+        {
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, 1, 5, MPI_COMM_WORLD);
+        }
+        else
+        {
+            MPI_Recv(&read_offset, 1, MPI_LONG_LONG_INT, rank - 1, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+            next_read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRestart) + doubleCountInBlock * sizeof(double));
+            if (rank < size - 1)
+                MPI_Send(&next_read_offset, 1, MPI_LONG_LONG_INT, rank + 1, 5, MPI_COMM_WORLD);
+        }
+    }
+
+    MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallArray, blocksCount, dataSetSmallType, MPI_STATUS_IGNORE);
+    if (doubleCountInBlock > 0)
+        MPI_File_read_at(file_handler, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRestart)),
+            &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+    MPI_File_close(&file_handler);
+    MPI_Type_free(&dataSetDoubleType);
+
+    if (comm->isRoot())
+    {
+        finish = MPI_Wtime();
+        UBLOG(logINFO, "MPIIORestartCoProcessor::readPressureField time: " << finish - start << " s");
+        UBLOG(logINFO, "MPIIORestartCoProcessor::readPressureField start of restore of data, rank = " << rank);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    }
+
+    size_t index = 0;
+    size_t nextVectorSize = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+    std::vector<double> vectorsOfValues;
+
+    for (int n = 0; n < blocksCount; n++)
+    {
+        vectorsOfValues.assign(doubleValuesArray.data() + index, doubleValuesArray.data() + index + nextVectorSize);
+        index += nextVectorSize;
+
+        // fill Pressure array
+        SPtr<PressureFieldArray3D> mPressureField;
+        mPressureField = CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr(new CbArray3D<LBMReal, IndexerX3X2X1>(
+            vectorsOfValues, dataSetParamStr.nx[0], dataSetParamStr.nx[1], dataSetParamStr.nx[2]));
+
+        // find the necessary block and fill it
+        SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
+        block->getKernel()->getDataSet()->setPressureField(mPressureField);
+    }
+
+    if (comm->isRoot())
+    {
+        UBLOG(logINFO, "MPIIORestartCoProcessor::readPressureField end of restore of data, rank = " << rank);
+        UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+    }
+
+    delete[] dataSetSmallArray;
+}*/
+
 void MPIIORestartCoProcessor::readBoundaryConds(int step)
 {
     int rank, size;
diff --git a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h
index beae84af94809e4ffb84fffa19d637afec53f188..1a1e1fb4d45066a93826fe7a819b056e10544036 100644
--- a/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h
+++ b/src/cpu/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.h
@@ -20,6 +20,18 @@ class LBMKernel;
 class MPIIORestartCoProcessor : public MPIIOCoProcessor
 {
 public:
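+    //! Identifies the data arrays handled by the generic write/read array functions;
+    //! the average and shear-stress arrays are 4D fields, the remaining ones 3D fields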
+    enum Arrays {
+        AverageDensity = 1,
+        AverageVelocity = 2,
+        AverageFluktuations = 3,
+        AverageTriple = 4,
+        ShearStressVal = 5,
+        RelaxationFactor = 6,
+        PhaseField1 = 7,
+        PhaseField2 = 8,
+        PressureField = 9
+    };
+
     MPIIORestartCoProcessor(SPtr<Grid3D> grid, SPtr<UbScheduler> s, const std::string &path, std::shared_ptr<vf::mpi::Communicator> comm);
     ~MPIIORestartCoProcessor() override;
     //! Each timestep writes the grid into the files
@@ -30,13 +42,16 @@ public:
     void writeBlocks(int step);
     //! Writes the datasets of the blocks into the file cpDataSet.bin
     void writeDataSet(int step);
-    void writeAverageDensityArray(int step);
-    void writeAverageVelocityArray(int step);
-    void writeAverageFluktuationsArray(int step);
-    void writeAverageTripleArray(int step);
-    void writeShearStressValArray(int step);
-    void writeRelaxationFactor(int step);
-    void writePhaseField(int step, int num);
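+    //! Writes a four-dimensional data array (average values, shear stress) of all blocks into the file fname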
+    void write4DArray(int step, Arrays arrType, std::string fname);
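+    //! Writes a three-dimensional data array (relaxation factor, phase fields, pressure field) of all blocks into the file fname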
+    void write3DArray(int step, Arrays arrType, std::string fname);
+    //void writeAverageDensityArray(int step);
+    //void writeAverageVelocityArray(int step);
+    //void writeAverageFluktuationsArray(int step);
+    //void writeAverageTripleArray(int step);
+    //void writeShearStressValArray(int step);
+    //void writeRelaxationFactor(int step);
+    //void writePhaseField(int step, int num);
+    //void writePressureField(int step);
     //! Writes the boundary conditions of the blocks into the file cpBC.bin
     void writeBoundaryConds(int step);
 
@@ -44,14 +59,18 @@ public:
     void readBlocks(int step);
     //! Reads the datasets of the blocks from the file cpDataSet.bin
     void readDataSet(int step);
-    void readAverageDensityArray(int step);
-    void readAverageVelocityArray(int step);
-    void readAverageFluktuationsArray(int step);
-    void readAverageTripleArray(int step);
-    void readShearStressValArray(int step);
-    void readRelaxationFactor(int step);
-    void readPhaseField(int step, int num);
-    //! Reads the boundary conditions of the blocks from the file cpBC.bin
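+    //! Reads a data array of the given type from the file fname and restores it into the block data sets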
+    void readArray(int step, Arrays arrType, std::string fname);
+
+    //void readAverageDensityArray(int step);
+    //void readAverageVelocityArray(int step);
+    //void readAverageFluktuationsArray(int step);
+    //void readAverageTripleArray(int step);
+    //void readShearStressValArray(int step);
+    //void readRelaxationFactor(int step);
+    //void readPhaseField(int step, int num);
+    //void readPressureField(int step);
+    //! Reads the boundary conditions of the blocks from the file cpBC.bin
     void readBoundaryConds(int step);
     //! The function sets LBMKernel
     void setLBMKernel(SPtr<LBMKernel> kernel);
@@ -68,6 +87,8 @@ private:
     MPIIODataStructures::boundCondParam boundCondParamStr;
     SPtr<LBMKernel> lbmKernel;
     SPtr<BCProcessor> bcProcessor;
+
+    //std::vector<double> doubleValuesArrayRW;
 };
 
 #endif
diff --git a/src/cpu/VirtualFluidsCore/LBM/MultiphasePressureFilterLBMKernel.cpp b/src/cpu/VirtualFluidsCore/LBM/MultiphasePressureFilterLBMKernel.cpp
index 995dd814d2779608076e27528078876b1767a952..f86e008961fd3843ba6b6ed7d7fc3b9b18a180ef 100644
--- a/src/cpu/VirtualFluidsCore/LBM/MultiphasePressureFilterLBMKernel.cpp
+++ b/src/cpu/VirtualFluidsCore/LBM/MultiphasePressureFilterLBMKernel.cpp