diff --git a/source/Applications/DLR-F16/F16BombadilTest10e-6.cfg b/source/Applications/DLR-F16/F16BombadilTest10e-6.cfg
index cb62161f5b894002642bc1eefe613efcf94486c2..60ee8161cf664626441f253ccd72ba2878d95ab1 100644
--- a/source/Applications/DLR-F16/F16BombadilTest10e-6.cfg
+++ b/source/Applications/DLR-F16/F16BombadilTest10e-6.cfg
@@ -9,7 +9,7 @@ zigZagTape = 2zackenbaender0.stl
 
 numOfThreads = 4
 availMem = 13e9
-refineLevel = 1 #10
+refineLevel = 2 #10
 #blockNx = 7 8 8
 #blockNx = 7 6 7
 blockNx = 21 6 13
@@ -38,8 +38,8 @@ boundingBox = -0.90 1.20 0.035 0.065 -0.65 0.65
 #deltaXfine = 0.000009765625
 
 #deltaXfine = 0.005 #level 0
-deltaXfine = 0.0025 #level 1
-#deltaXfine = 0.00125 #level 2
+#deltaXfine = 0.0025 #level 1
+deltaXfine = 0.00125 #level 2
 #deltaXfine = 0.000625 #level 3
 #deltaXfine = 0.0003125 #level 4
 #deltaXfine = 0.00015625 #level 5
@@ -52,12 +52,14 @@ deltaXfine = 0.0025 #level 1
 startDistance = -1.0
 refineDistance = 0.3
 
-newStart = false
+newStart = true
 restartStep = 10
-restartStepStart = 50
 
-outTime = 60
-endTime = 60
+cpStart = 10
+cpStep = 10
+
+outTime = 10
+endTime = 10
 
 logToFile = false
 
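
Note on the checkpoint settings above: the run is switched to a fresh start, with checkpoints written from step 10 on, every 10 steps (cpStart/cpStep), while restartStep names the checkpoint a later run resumes from. A minimal sketch of that schedule, assuming UbScheduler(step, begin) fires every step timesteps from begin onward (the loop below is illustrative C++, not VirtualFluids code):

    // Prints the steps at which a cpStart=10, cpStep=10 schedule fires.
    #include <iostream>

    int main()
    {
       const int cpStart = 10, cpStep = 10, endTime = 60;
       for (int t = 0; t <= endTime; ++t)
          if (t >= cpStart && (t - cpStart) % cpStep == 0)
             std::cout << "checkpoint at step " << t << "\n";
       return 0;
    }

With endTime = 10 as configured above, exactly one checkpoint, at step 10, is written.
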
diff --git a/source/Applications/DLR-F16/f16.cpp b/source/Applications/DLR-F16/f16.cpp
index bcb150b5253a79343cc4d56c1fb60848999e2ccf..e7becc6be67a63d9e71ece308763ffdee1c80ea0 100644
--- a/source/Applications/DLR-F16/f16.cpp
+++ b/source/Applications/DLR-F16/f16.cpp
@@ -30,7 +30,8 @@ void run(string configname)
       vector<double>  boundingBox = config.getVector<double>("boundingBox");
       double          uLB = config.getDouble("uLB");
       double          restartStep = config.getDouble("restartStep");
-      double          restartStepStart = config.getDouble("restartStepStart");
+      double          cpStart = config.getDouble("cpStart");
+      double          cpStep = config.getDouble("cpStep");
       double          endTime = config.getDouble("endTime");
       double          outTime = config.getDouble("outTime");
       double          availMem = config.getDouble("availMem");
@@ -152,7 +153,7 @@ void run(string configname)
 
       //////////////////////////////////////////////////////////////////////////
       //restart
-      UbSchedulerPtr rSch(new UbScheduler(restartStep, restartStepStart));
+      UbSchedulerPtr rSch(new UbScheduler(cpStep, cpStart));
       //RestartCoProcessor rp(grid, rSch, comm, pathOut, RestartCoProcessor::BINARY);
       MPIIORestartCoProcessor rcp(grid, rSch, pathOut, comm);
       //////////////////////////////////////////////////////////////////////////
@@ -504,20 +505,20 @@ void run(string configname)
          intHelper.addInteractor(outflowIntr);
          intHelper.addInteractor(addWallZminInt);
          intHelper.addInteractor(addWallZmaxInt);
-         //intHelper.addInteractor(triBand1Interactor);
-         //intHelper.addInteractor(triBand2Interactor);
-         //intHelper.addInteractor(triBand3Interactor);
-         //intHelper.addInteractor(triBand4Interactor);
-         //
-         //if (porousTralingEdge)
-         //{
-         //   intHelper.addInteractor(fngIntrBodyPart);
-         //   //intHelper.addInteractor(fngIntrTrailingEdge);
-         //} 
-         //else
-         //{
-         //   intHelper.addInteractor(fngIntrWhole);
-         //}
+         intHelper.addInteractor(triBand1Interactor);
+         intHelper.addInteractor(triBand2Interactor);
+         intHelper.addInteractor(triBand3Interactor);
+         intHelper.addInteractor(triBand4Interactor);
+
+         if (porousTralingEdge)
+         {
+            intHelper.addInteractor(fngIntrBodyPart);
+            //intHelper.addInteractor(fngIntrTrailingEdge);
+         } 
+         else
+         {
+            intHelper.addInteractor(fngIntrWhole);
+         }
          
          //////////////////////////////////////////////////////////////////////////
          intHelper.selectBlocks();
@@ -626,8 +627,8 @@ void run(string configname)
       }
       else
       {
-         rcp.restart(restartStepStart);
-         grid->setTimeStep(restartStepStart);
+         rcp.restart((int)restartStep);
+         grid->setTimeStep(restartStep);
 
          {
             WriteBlocksCoProcessor ppblocks(grid, UbSchedulerPtr(new UbScheduler(1)), pathOut, WbWriterVtkXmlASCII::getInstance(), comm);
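
The restart branch above now resumes from restartStep, replacing the removed restartStepStart, while checkpoint writing is driven by cpStart/cpStep. A minimal sketch of that control flow; the Mock types are stand-ins for the VirtualFluids classes, not the real API:

    #include <iostream>

    struct MockRestartCoProcessor {
       void restart(int step) { std::cout << "restore checkpoint " << step << "\n"; }
    };
    struct MockGrid {
       void setTimeStep(double t) { std::cout << "resume at step " << t << "\n"; }
    };

    int main()
    {
       bool   newStart    = false; // config key: newStart
       double restartStep = 10;    // config key: restartStep
       MockRestartCoProcessor rcp;
       MockGrid grid;

       if (newStart)
          std::cout << "fresh start: build grid, interactors, initial field\n";
       else
       {
          rcp.restart((int)restartStep); // config doubles are narrowed to int here
          grid.setTimeStep(restartStep);
       }
       return 0;
    }
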
diff --git a/source/Applications/mpi_benchmark/mpib.cfg b/source/Applications/mpi_benchmark/mpib.cfg
index da53366e9d172fd7d1b84a71ef3aa739d16cee49..39d73af8a9c747c7b9b164a0daf609fe9161428d 100644
--- a/source/Applications/mpi_benchmark/mpib.cfg
+++ b/source/Applications/mpi_benchmark/mpib.cfg
@@ -7,4 +7,4 @@ logToFile = false
 oneD = true
 priorityQueue = false
 nupsStep = 10 10 100
-endTime = 100
\ No newline at end of file
+endTime = 10
\ No newline at end of file
diff --git a/source/Applications/mpi_benchmark/mpib.cpp b/source/Applications/mpi_benchmark/mpib.cpp
index 9292bc0a1e35e98782f67210f75cc613a0596d4f..a52e29f6a1a3b090fbf47f8bca400c1fae5b161d 100644
--- a/source/Applications/mpi_benchmark/mpib.cpp
+++ b/source/Applications/mpi_benchmark/mpib.cpp
@@ -207,6 +207,13 @@ void run(string configname)
       initVisitor.setVx1(uLB);
       grid->accept(initVisitor);
 
+      //////////////////////////////////////////////////////////////////////////
+      //restart
+      UbSchedulerPtr rSch(new UbScheduler(20, 20));
+      //RestartCoProcessor rp(grid, rSch, comm, pathOut, RestartCoProcessor::BINARY);
+      MPIIORestartCoProcessor rcp(grid, rSch, pathOut, comm);
+      rcp.restart(10);
+      //////////////////////////////////////////////////////////////////////////
 
       UbSchedulerPtr nupsSch(new UbScheduler(nupsStep[0], nupsStep[1], nupsStep[2]));
       NUPSCounterCoProcessor npr(grid, nupsSch, numOfThreads, comm);
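
The hard-coded rcp.restart(10) above expects a checkpoint written at step 10, i.e. the directory layout produced by MPIIORestartCoProcessor (the path strings appear in the next file). A small helper that reproduces that layout, assuming only those strings:

    #include <initializer_list>
    #include <iostream>
    #include <string>

    // e.g. <pathOut>/mpi_io_cp/mpi_io_cp_10/cpBlocks.bin
    std::string checkpointFile(const std::string& pathOut, int step, const std::string& part)
    {
       return pathOut + "/mpi_io_cp/mpi_io_cp_" + std::to_string(step) + "/" + part;
    }

    int main()
    {
       for (const char* part : { "cpBlocks.bin", "cpDataSet.bin", "cpBC.bin" })
          std::cout << checkpointFile("out", 10, part) << "\n";
       return 0;
    }
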
diff --git a/source/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp b/source/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
index 80f9cf48e4e861a0ee0b8c96e5b57368fc6b3cda..fca82f26ca4c6c661ea4990ae21fdd79bc0ae6b0 100644
--- a/source/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
+++ b/source/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
@@ -128,8 +128,8 @@ void MPIIORestartCoProcessor::writeBlocks(int step)
 
    UbSystem::makeDirectory(path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step));
    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBlocks.bin";
-	int error = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_RDWR , MPI_INFO_NULL, &file_handler);
-   if(error) throw UbException(UB_EXARGS,"couldn't open file "+filename);
+	int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handler);
+   if(rc != MPI_SUCCESS) throw UbException(UB_EXARGS,"couldn't open file "+filename);
 
    int blocksCount = 0; // quantity of blocks in the grid, max 2147483647 blocks!
    int minInitLevel = this->grid->getCoarsestInitializedLevel();
@@ -371,14 +371,14 @@ void MPIIORestartCoProcessor::writeBlocks(int step)
 	}
 
    // each process writes the quantity of its blocks
-	MPI_File_write_at(file_handler, rank * sizeof(int), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
+	MPI_File_write_at_all(file_handler, rank * sizeof(int), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
    // each process writes parameters of the grid
-   MPI_File_write_at(file_handler, view_offset, gridParameters, 1, gridParamType, MPI_STATUS_IGNORE);
+   MPI_File_write_at_all(file_handler, view_offset, gridParameters, 1, gridParamType, MPI_STATUS_IGNORE);
    // each process writes common parameters of a block
-   MPI_File_write_at(file_handler, view_offset + sizeof(GridParam), &blockParamStr, 1, blockParamType, MPI_STATUS_IGNORE);
+   MPI_File_write_at_all(file_handler, view_offset + sizeof(GridParam), &blockParamStr, 1, blockParamType, MPI_STATUS_IGNORE);
    // each process writes its blocks
-   MPI_File_write_at(file_handler, view_offset + sizeof(GridParam) + sizeof(blockParam), &block3dArray[0], blocksCount, block3dType, MPI_STATUS_IGNORE);
-	MPI_File_sync(file_handler);
+   MPI_File_write_at_all(file_handler, view_offset + sizeof(GridParam) + sizeof(blockParam), &block3dArray[0], blocksCount, block3dType, MPI_STATUS_IGNORE);
+	//MPI_File_sync(file_handler);
 	MPI_File_close(&file_handler);
 
    // register new MPI-types depending on the block-specific information
@@ -487,16 +487,16 @@ void MPIIORestartCoProcessor::writeDataSet(int step)
 
 	MPI_File file_handler;
    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
-	int error = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_RDWR , MPI_INFO_NULL, &file_handler);
-   if(error) throw UbException(UB_EXARGS,"couldn't open file "+filename);
+	int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handler);
+   if(rc != MPI_SUCCESS) throw UbException(UB_EXARGS,"couldn't open file "+filename);
 
    // each process writes the quantity of its blocks
-   MPI_File_write_at(file_handler, rank * sizeof(int), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
+   MPI_File_write_at_all(file_handler, rank * sizeof(int), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
    // each process writes data identifying blocks
-   MPI_File_write_at(file_handler, view_offset, &dataSetArray[0], blocksCount, dataSetType, MPI_STATUS_IGNORE);
+   MPI_File_write_at_all(file_handler, view_offset, &dataSetArray[0], blocksCount, dataSetType, MPI_STATUS_IGNORE);
    // each process writes the dataSet arrays
-	MPI_File_write_at(file_handler, view_offset + blocksCount * sizeof(dataSet), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
-	MPI_File_sync(file_handler);
+	MPI_File_write_at_all(file_handler, view_offset + blocksCount * sizeof(dataSet), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+	//MPI_File_sync(file_handler);
 	MPI_File_close(&file_handler);
 
 	delete[] dataSetArray;
@@ -624,25 +624,28 @@ void MPIIORestartCoProcessor::writeBoundaryConds(int step)
 
 	MPI_File file_handler;
    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC.bin";
-	int error = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_RDWR , MPI_INFO_NULL, &file_handler);
-   if(error) throw UbException(UB_EXARGS,"couldn't open file "+filename);
+	int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handler);
+   if(rc != MPI_SUCCESS) throw UbException(UB_EXARGS,"couldn't open file "+filename);
 
    // each process writes the quantity of its blocks
-   MPI_File_write_at(file_handler, rank * sizeof(int), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);	//	blocks quantity
+   MPI_File_write_at_all(file_handler, rank * sizeof(int), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);	//	blocks quantity
    // each process writes the quantity of "big blocks" (groups of BLOCK_SIZE boundary conditions) it holds
-   MPI_File_write_at(file_handler, (rank + size) * sizeof(int), &bcBlockCount, 1, MPI_INT, MPI_STATUS_IGNORE); // quantity of BoundConds / BLOCK_SIZE
+   MPI_File_write_at_all(file_handler, (rank + size) * sizeof(int), &bcBlockCount, 1, MPI_INT, MPI_STATUS_IGNORE); // quantity of BoundConds / BLOCK_SIZE
    // each process writes the quantity of indexContainer elements in all blocks
-	MPI_File_write_at(file_handler, (rank + 2 * size) * sizeof(int), &count_indexContainer, 1, MPI_INT, MPI_STATUS_IGNORE); // quantity of indexContainer	
+	MPI_File_write_at_all(file_handler, (rank + 2 * size) * sizeof(int), &count_indexContainer, 1, MPI_INT, MPI_STATUS_IGNORE); // quantity of indexContainer	
 
    // each process writes data identifying the blocks
-   MPI_File_write_at(file_handler, view_offset, &bcAddArray[0], blocksCount, boundCondTypeAdd, MPI_STATUS_IGNORE);
+   MPI_File_write_at_all(file_handler, view_offset, bcAddArray, blocksCount, boundCondTypeAdd, MPI_STATUS_IGNORE);
    // each process writes boundary conditions
-	MPI_File_write_at(file_handler, view_offset + blocksCount * sizeof(BCAdd), &bcVector[0], bcBlockCount, boundCondType1000, MPI_STATUS_IGNORE);
+   // MPI_File_write_at_all is collective, so every rank has to enter the call;
+   // guard only the buffer pointer (bcBlockCount is 0 when bcVector is empty)
+   // instead of skipping the write on ranks that hold no boundary conditions.
+   MPI_File_write_at_all(file_handler, view_offset + blocksCount * sizeof(BCAdd), bcVector.empty() ? NULL : &bcVector[0], bcBlockCount, boundCondType1000, MPI_STATUS_IGNORE);
    // each process writes bcindexmatrix values
-	MPI_File_write_at(file_handler, view_offset + blocksCount * sizeof(BCAdd) + bcBlockCount * BLOCK_SIZE * sizeof(BoundaryCondition), &bcindexmatrixV[0], blocksCount, bcindexmatrixType, MPI_STATUS_IGNORE);
+	MPI_File_write_at_all(file_handler, view_offset + blocksCount * sizeof(BCAdd) + bcBlockCount * BLOCK_SIZE * sizeof(BoundaryCondition), &bcindexmatrixV[0], blocksCount, bcindexmatrixType, MPI_STATUS_IGNORE);
    // each process writes indexContainer values
-	MPI_File_write_at(file_handler, view_offset + blocksCount * sizeof(BCAdd) + bcBlockCount * BLOCK_SIZE * sizeof(BoundaryCondition) + blocksCount * blockParamStr.bcindexmatrix_count * sizeof(int), &indexContainerV[0], count_indexContainer, MPI_INT, MPI_STATUS_IGNORE);
-	MPI_File_sync(file_handler);
+	MPI_File_write_at_all(file_handler, view_offset + blocksCount * sizeof(BCAdd) + bcBlockCount * BLOCK_SIZE * sizeof(BoundaryCondition) + blocksCount * blockParamStr.bcindexmatrix_count * sizeof(int), &indexContainerV[0], count_indexContainer, MPI_INT, MPI_STATUS_IGNORE);
+	//MPI_File_sync(file_handler);
 	MPI_File_close(&file_handler);
 
 	delete [] bcAddArray;
@@ -668,8 +671,8 @@ void MPIIORestartCoProcessor::readBlocks(int step)
 
 	MPI_File file_handler;
    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBlocks.bin";
-	int error = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_RDWR , MPI_INFO_NULL, &file_handler);
-   if(error) throw UbException(UB_EXARGS,"couldn't open file "+filename);
+	int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
+   if(rc != MPI_SUCCESS) throw UbException(UB_EXARGS,"couldn't open file "+filename);
 
    // read count of blocks
    int blocksCount = 0;
@@ -699,12 +702,12 @@ void MPIIORestartCoProcessor::readBlocks(int step)
    GridParam* gridParameters = new GridParam;
 
    // read parameters of the grid
-   MPI_File_read_at(file_handler, read_offset, gridParameters, 1, gridParamType, MPI_STATUS_IGNORE);
+   MPI_File_read_at_all(file_handler, read_offset, gridParameters, 1, gridParamType, MPI_STATUS_IGNORE);
    // read parameters of a block
-   MPI_File_read_at(file_handler, read_offset + sizeof(GridParam), &blockParamStr, 1, blockParamType, MPI_STATUS_IGNORE);
+   MPI_File_read_at_all(file_handler, read_offset + sizeof(GridParam), &blockParamStr, 1, blockParamType, MPI_STATUS_IGNORE);
    // read all the blocks
-	MPI_File_read_at(file_handler, read_offset + sizeof(GridParam) + sizeof(blockParam), &block3dArray[0], blocksCount, block3dType, MPI_STATUS_IGNORE);
-	MPI_File_sync(file_handler);
+	MPI_File_read_at_all(file_handler, read_offset + sizeof(GridParam) + sizeof(blockParam), &block3dArray[0], blocksCount, block3dType, MPI_STATUS_IGNORE);
+	//MPI_File_sync(file_handler);
 
 	MPI_File_close(&file_handler);
 
@@ -819,8 +822,8 @@ void MPIIORestartCoProcessor::readDataSet(int step)
 
 	MPI_File file_handler;
    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
-	int error = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_RDWR , MPI_INFO_NULL, &file_handler);
-   if(error) throw UbException(UB_EXARGS,"couldn't open file "+filename);
+	int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
+   if(rc != MPI_SUCCESS) throw UbException(UB_EXARGS,"couldn't open file "+filename);
 
    // read count of blocks
    int blocksCount = 0;
@@ -849,9 +852,9 @@ void MPIIORestartCoProcessor::readDataSet(int step)
 		}
 	}
 
-   MPI_File_read_at(file_handler, read_offset, &dataSetArray[0], blocksCount, dataSetType, MPI_STATUS_IGNORE);
-	MPI_File_read_at(file_handler, read_offset + blocksCount * sizeof(dataSet), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
-	MPI_File_sync(file_handler);
+   MPI_File_read_at_all(file_handler, read_offset, &dataSetArray[0], blocksCount, dataSetType, MPI_STATUS_IGNORE);
+	MPI_File_read_at_all(file_handler, read_offset + blocksCount * sizeof(dataSet), &doubleValuesArray[0], blocksCount, dataSetDoubleType, MPI_STATUS_IGNORE);
+	//MPI_File_sync(file_handler);
 	MPI_File_close(&file_handler);
 
 	size_t index = 0, nextVectorSize = 0;
@@ -937,8 +940,8 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
 
 	MPI_File file_handler;
    std::string filename = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC.bin";
-   int error = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_RDWR , MPI_INFO_NULL, &file_handler);
-   if(error) throw UbException(UB_EXARGS,"couldn't open file "+filename);
+   int rc = MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler);
+   if(rc != MPI_SUCCESS) throw UbException(UB_EXARGS,"couldn't open file "+filename);
 
 	int blocksCount = 0;
    int dataCount1000 = 0;
@@ -977,11 +980,11 @@ void MPIIORestartCoProcessor::readBoundaryConds(int step)
 		}
 	}
 
-   MPI_File_read_at(file_handler, read_offset, &bcAddArray[0], blocksCount, boundCondTypeAdd, MPI_STATUS_IGNORE);
-	MPI_File_read_at(file_handler, read_offset + blocksCount * sizeof(BCAdd), &bcArray[0], dataCount1000, boundCondType1000, MPI_STATUS_IGNORE);
-	MPI_File_read_at(file_handler, read_offset + blocksCount * sizeof(BCAdd) + dataCount * sizeof(BoundaryCondition), &intArray1[0], blocksCount, bcindexmatrixType, MPI_STATUS_IGNORE);
-	MPI_File_read_at(file_handler, read_offset + blocksCount * sizeof(BCAdd) + dataCount * sizeof(BoundaryCondition) + blocksCount * blockParamStr.bcindexmatrix_count * sizeof(int), &intArray2[0], dataCount2, MPI_INT, MPI_STATUS_IGNORE);
-	MPI_File_sync(file_handler);
+   MPI_File_read_at_all(file_handler, read_offset, &bcAddArray[0], blocksCount, boundCondTypeAdd, MPI_STATUS_IGNORE);
+	MPI_File_read_at_all(file_handler, read_offset + blocksCount * sizeof(BCAdd), &bcArray[0], dataCount1000, boundCondType1000, MPI_STATUS_IGNORE);
+	MPI_File_read_at_all(file_handler, read_offset + blocksCount * sizeof(BCAdd) + dataCount * sizeof(BoundaryCondition), &intArray1[0], blocksCount, bcindexmatrixType, MPI_STATUS_IGNORE);
+	MPI_File_read_at_all(file_handler, read_offset + blocksCount * sizeof(BCAdd) + dataCount * sizeof(BoundaryCondition) + blocksCount * blockParamStr.bcindexmatrix_count * sizeof(int), &intArray2[0], dataCount2, MPI_INT, MPI_STATUS_IGNORE);
+	//MPI_File_sync(file_handler);
 
 	MPI_File_close(&file_handler);
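
The writes in this file were switched from independent MPI_File_write_at to collective MPI_File_write_at_all (reads likewise), which lets the MPI-IO layer aggregate the requests of all ranks; the explicit MPI_File_sync calls could be dropped because MPI_File_close already carries sync semantics. The one subtlety with collectives is that every rank of the communicator must enter the call, even with a count of zero. A standalone sketch of the per-rank offset pattern used throughout this file (the demo file name and counts are made up):

    #include <mpi.h>

    int main(int argc, char** argv)
    {
       MPI_Init(&argc, &argv);
       int rank, size;
       MPI_Comm_rank(MPI_COMM_WORLD, &rank);
       MPI_Comm_size(MPI_COMM_WORLD, &size);

       MPI_File fh;
       int rc = MPI_File_open(MPI_COMM_WORLD, "cp_demo.bin",
                              MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
       if (rc != MPI_SUCCESS) MPI_Abort(MPI_COMM_WORLD, 1);

       // Header: rank i stores its element count at offset i*sizeof(int).
       int myCount = rank % 2; // pretend only odd ranks own an element
       MPI_File_write_at_all(fh, rank * (MPI_Offset)sizeof(int), &myCount, 1,
                             MPI_INT, MPI_STATUS_IGNORE);

       // Payload: ranks with nothing to write still enter the collective with
       // count 0; skipping the call on one rank would hang the other ranks.
       int payload = 42;
       MPI_File_write_at_all(fh, (size + rank) * (MPI_Offset)sizeof(int),
                             myCount > 0 ? &payload : NULL,
                             myCount > 0 ? 1 : 0, MPI_INT, MPI_STATUS_IGNORE);

       MPI_File_close(&fh); // close flushes; no separate MPI_File_sync needed
       MPI_Finalize();
       return 0;
    }
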