diff --git a/source/Applications/Applications.cmake b/source/Applications/Applications.cmake
index c307637124ebc82d3f22a7b337908be53f7a8df3..e81477c548f5e3dbb6943b123cc6ad6bcb5bd5cd 100644
--- a/source/Applications/Applications.cmake
+++ b/source/Applications/Applications.cmake
@@ -58,3 +58,4 @@ add_subdirectory(Applications/PoiseuilleFlow)
 add_subdirectory(Applications/InterfaceTest)
 add_subdirectory(Applications/teperm)
 add_subdirectory(Applications/Thermoplast)
+add_subdirectory(Applications/bChannelA)
diff --git a/source/Applications/Thermoplast/config.txt b/source/Applications/Thermoplast/config.txt
index c9e2ee4114cc6c607bb238bb0992a506b1e4287c..7cfa58a4aff25edcde5b644af68c0eeb8673bc31 100644
--- a/source/Applications/Thermoplast/config.txt
+++ b/source/Applications/Thermoplast/config.txt
@@ -29,7 +29,7 @@ pathGeo = d:/Projects/ThermoPlast/SimGeo
 michel = /michel.stl
 plexiglas = /plexiglas.stl
 
-pathOut = g:/temp/thermoplast9
+pathOut = g:/temp/thermoplast10
 
 logToFile = false
 
diff --git a/source/Applications/Thermoplast/thermoplast.cpp b/source/Applications/Thermoplast/thermoplast.cpp
index 548ccd779161f4e6947b87627226c29a3815628f..3057b49883c99c5d18ca702eff56e6ac42584a84 100644
--- a/source/Applications/Thermoplast/thermoplast.cpp
+++ b/source/Applications/Thermoplast/thermoplast.cpp
@@ -149,18 +149,18 @@ std::shared_ptr<DemCoProcessor> makePeCoProcessor(SPtr<Grid3D> grid, SPtr<Commun
    return std::make_shared<DemCoProcessor>(grid, peScheduler, comm, forceCalculator, peSolver);
 }
 
-void createSpheres(double radius,  Vector3D origin, double maxX2, double maxX3, double uLB, SPtr<CreateDemObjectsCoProcessor> createSphereCoProcessor)
+void createSpheres(double radius,  Vector3D origin, int maxX2, int maxX3, double uLB, SPtr<CreateDemObjectsCoProcessor> createSphereCoProcessor)
 {
    double d = 2.0*radius;
-   double dividerX2 = maxX2/2.0;
-   double dividerX3 = maxX3/2.0;
+   double dividerX2 = (double)maxX2/2.0;
+   double dividerX3 = (double)maxX3/2.0;
    for (int x3 = 0; x3 < maxX3; x3++)
       for (int x2 = 0; x2 < maxX2; x2++)
          //for (int x1 = 0; x1 < 1; x1++)
          {
             //SPtr<GbObject3D> sphere(new GbSphere3D(origin[0]+2.0*d*(double)x1, origin[1]+(double)x2*1.0*d, origin[2]+(double)x3*1.0*d, radius));
             SPtr<GbObject3D> sphere(new GbSphere3D(origin[0]+2.0*d, origin[1]+(double)x2*1.0*d, origin[2]+(double)x3*1.0*d, radius));
-            createSphereCoProcessor->addGeoObject(sphere, Vector3D(uLB, -uLB+uLB/dividerX2*(double)x2, -uLB+uLB/dividerX2*(double)x3));
+            createSphereCoProcessor->addGeoObject(sphere, Vector3D(uLB, -uLB+uLB/dividerX2*(double)x2, -uLB+uLB/dividerX3*(double)x3));
          }
 }
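
Note on the hunk above: the fix replaces dividerX2 with dividerX3 in the third velocity component, so the seeding velocity fan is now symmetric in both cross-stream directions. A minimal standalone sketch of the corrected assignment (illustrative values only, not the real Vector3D/co-processor types):

// Sketch of the corrected seeding velocities; before the fix the x3 component
// was divided by dividerX2 as well.
#include <cstdio>

int main()
{
   const double uLB = 0.01;
   const int maxX2 = 5, maxX3 = 5;
   const double dividerX2 = maxX2 / 2.0;
   const double dividerX3 = maxX3 / 2.0;
   for (int x3 = 0; x3 < maxX3; x3++)
      for (int x2 = 0; x2 < maxX2; x2++)
      {
         double v2 = -uLB + uLB / dividerX2 * (double)x2;
         double v3 = -uLB + uLB / dividerX3 * (double)x3;
         std::printf("sphere(%d,%d): v = (%g, %g, %g)\n", x2, x3, uLB, v2, v3);
      }
   return 0;
}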
 
@@ -357,26 +357,10 @@ void thermoplast(string configname)
       if (myid==0) UBLOG(logINFO, "Read plexiglasGeo:end");
       if (myid==0) GbSystem3D::writeGeoObject(plexiglasGeo.get(), pathOut+"/geo/plexiglasGeo", WbWriterVtkXmlBinary::getInstance());
 
-      ////Duese
-      //if (myid==0) UBLOG(logINFO, "Read Duese:start");
-      //SPtr<GbTriFaceMesh3D> s1Geo = SPtr<GbTriFaceMesh3D>(GbTriFaceMesh3DCreator::getInstance()->readMeshFromSTLFile2(pathGeo+"/Duese/s1.stl", "plexiglasGeo", GbTriFaceMesh3D::KDTREE_SAHPLIT, false));
-      //SPtr<GbTriFaceMesh3D> b1Geo = SPtr<GbTriFaceMesh3D>(GbTriFaceMesh3DCreator::getInstance()->readMeshFromSTLFile2(pathGeo+"/Duese/b1.stl", "plexiglasGeo", GbTriFaceMesh3D::KDTREE_SAHPLIT, false));
-      //SPtr<GbTriFaceMesh3D> p1Geo = SPtr<GbTriFaceMesh3D>(GbTriFaceMesh3DCreator::getInstance()->readMeshFromSTLFile2(pathGeo+"/Duese/p1.stl", "plexiglasGeo", GbTriFaceMesh3D::KDTREE_SAHPLIT, false));
-      //SPtr<GbTriFaceMesh3D> p2Geo = SPtr<GbTriFaceMesh3D>(GbTriFaceMesh3DCreator::getInstance()->readMeshFromSTLFile2(pathGeo+"/Duese/p2.stl", "plexiglasGeo", GbTriFaceMesh3D::KDTREE_SAHPLIT, false));
-      //if (myid==0) UBLOG(logINFO, "Read Duese:end");
-
-
-
       //inflow
       GbCuboid3DPtr geoOutflowMichel(new GbCuboid3D(g_minX1-blockLength, g_minX2 - blockLength, g_minX3 - blockLength, g_minX1, g_maxX2 + blockLength, g_maxX3 + blockLength));
       if (myid == 0) GbSystem3D::writeGeoObject(geoOutflowMichel.get(), pathOut + "/geo/geoOutflowMichel", WbWriterVtkXmlASCII::getInstance());
 
-      //GbCuboid3DPtr geoInflow3(new GbCuboid3D(g_minX1-blockLength, g_minX2-radius, g_maxX3-4.0*radius-1, g_minX1+1, g_maxX2+radius, g_maxX3-4.0*radius));
-      //if (myid == 0) GbSystem3D::writeGeoObject(geoInflow3.get(), pathOut + "/geo/geoInflow3", WbWriterVtkXmlASCII::getInstance());
-
-      //GbCuboid3DPtr geoInflow4(new GbCuboid3D(g_minX1-blockLength, g_minX2+4.0*radius, g_maxX3-4.0*radius-1.0, g_minX1+1, g_minX2+4.0*radius+1.0, g_maxX3+radius));
-      //if (myid == 0) GbSystem3D::writeGeoObject(geoInflow4.get(), pathOut + "/geo/geoInflow4", WbWriterVtkXmlASCII::getInstance());
-
       //outflow
       GbCuboid3DPtr geoOutflowPlexiglas(new GbCuboid3D(g_maxX1, g_minX2 - blockLength, g_minX3 - blockLength, g_maxX1 + blockLength, g_maxX2 + blockLength, g_maxX3 + blockLength));
       if (myid == 0) GbSystem3D::writeGeoObject(geoOutflowPlexiglas.get(), pathOut + "/geo/geoOutflowPlexiglas", WbWriterVtkXmlASCII::getInstance());
@@ -391,9 +375,6 @@ void thermoplast(string configname)
       
       SPtr<D3Q27Interactor> outflowMichelInt = SPtr<D3Q27Interactor>(new D3Q27Interactor(geoOutflowMichel, grid, outflowAdapter, Interactor3D::SOLID));
 
-      //SPtr<D3Q27Interactor> inflowInt3 = SPtr<D3Q27Interactor>(new D3Q27Interactor(geoInflow3, grid, noSlipBCAdapter, Interactor3D::SOLID));
-      //SPtr<D3Q27Interactor> inflowInt4 = SPtr<D3Q27Interactor>(new D3Q27Interactor(geoInflow4, grid, noSlipBCAdapter, Interactor3D::SOLID));
-
       //outflow
       SPtr<D3Q27Interactor> outflowPlexiglasInt = SPtr<D3Q27Interactor>(new D3Q27Interactor(geoOutflowPlexiglas, grid, outflowAdapter, Interactor3D::SOLID));
 
@@ -403,12 +384,6 @@ void thermoplast(string configname)
       //plexiglas
       SPtr<Interactor3D> plexiglasInt = SPtr<D3Q27TriFaceMeshInteractor>(new D3Q27TriFaceMeshInteractor(plexiglasGeo, grid, noSlipBCAdapter, Interactor3D::SOLID));
 
-      ////Duese
-      //SPtr<Interactor3D> s1Int = SPtr<D3Q27TriFaceMeshInteractor>(new D3Q27TriFaceMeshInteractor(s1Geo, grid, noSlipBCAdapter, Interactor3D::SOLID));
-      //SPtr<Interactor3D> b1Int = SPtr<D3Q27TriFaceMeshInteractor>(new D3Q27TriFaceMeshInteractor(b1Geo, grid, noSlipBCAdapter, Interactor3D::SOLID));
-      //SPtr<Interactor3D> p1Int = SPtr<D3Q27TriFaceMeshInteractor>(new D3Q27TriFaceMeshInteractor(p1Geo, grid, noSlipBCAdapter, Interactor3D::SOLID));
-      //SPtr<Interactor3D> p2Int = SPtr<D3Q27TriFaceMeshInteractor>(new D3Q27TriFaceMeshInteractor(p2Geo, grid, noSlipBCAdapter, Interactor3D::SOLID));
-
       SPtr<D3Q27Interactor> testWallInt = SPtr<D3Q27Interactor>(new D3Q27Interactor(testWallGeo, grid, inflowAdapter, Interactor3D::SOLID));
 
       //////////////////////////////////////////////////////////////////////////
@@ -418,11 +393,6 @@ void thermoplast(string configname)
       intHelper.addInteractor(boxInt);
       intHelper.addInteractor(michelInt);
       intHelper.addInteractor(plexiglasInt);
-      //addNozzle(grid,comm,noSlipBCAdapter,intHelper);
-      //////intHelper.addInteractor(s1Int);
-      //////intHelper.addInteractor(b1Int);
-      //////intHelper.addInteractor(p1Int);
-      //////intHelper.addInteractor(p2Int);
       intHelper.addInteractor(inflowInjector5Int);
       intHelper.addInteractor(inflowInjector4Int);
       intHelper.addInteractor(inflowInjector7Int);
@@ -519,7 +489,7 @@ void thermoplast(string configname)
    ////generating spheres 
    //UBLOG(logINFO, "generating spheres - start, rank="<<myid);
    SPtr<UbScheduler> sphereScheduler(new UbScheduler(sphereTime/*10,10,10*/));
-   double toleranz = 0.05;
+   double toleranz = 0.0;
    SPtr<CreateDemObjectsCoProcessor> createSphereCoProcessor(new CreateDemObjectsCoProcessor(grid, sphereScheduler, comm, demCoProcessor, sphereMaterial, toleranz));
    //UBLOG(logINFO, "generating spheres - stop, rank="<<myid);
 
@@ -535,9 +505,11 @@ void thermoplast(string configname)
 
    if (restart)
    {
+      createSphereCoProcessor->setToleranz(0.05); // relax tolerance while restoring DEM objects
       restartDemObjectsCoProcessor->restart(restartStep);
+      createSphereCoProcessor->setToleranz(toleranz);
    }
-
+ 
    //set connectors
    //UBLOG(logINFO, "set connectors - start, rank="<<myid);
    InterpolationProcessorPtr iProcessor(new IncompressibleOffsetInterpolationProcessor());
@@ -553,19 +525,9 @@ void thermoplast(string configname)
    //sphere prototypes
    //UBLOG(logINFO, "sphere prototypes - start, rank="<<myid);
    double d = 2.0*radiusLB;
-   double maxX2 = 5;
-   double maxX3 = 5;
-   //Vector3D origin1(g_minX1+peMinOffset[0]+radiusLB, geoInjector5->getX2Minimum()+1.4*d, geoInjector5->getX3Minimum()+1.5*d);
-   Vector3D origin1(g_minX1+peMinOffset[0]-1.5*d, geoInjector5->getX2Minimum()+1.4*d, geoInjector5->getX3Minimum()+1.5*d);
-   createSpheres(radiusLB,origin1,maxX2,maxX3,uLB,createSphereCoProcessor);
-
-   //Vector3D origin2(g_minX1+peMinOffset[0]+radiusLB, geoInjector4->getX2Minimum()+3.0*d, geoInjector4->getX3Minimum()+2.0*d);
-   //createSpheres(radiusLB, origin2, uLB, createSphereCoProcessor);
-
-   //maxX2 = 7;
-   //maxX3 = 7;
-   //Vector3D origin3(g_minX1+peMinOffset[0]+radiusLB, geoInjector7->getX2Minimum()+2.0*d, geoInjector7->getX3Minimum()+2.0*d);
-   //createSpheres(radiusLB, origin3, uLB, createSphereCoProcessor);
+   int maxX2 = 5;
+   int maxX3 = 5;
+   Vector3D origin1(g_minX1+peMinOffset[0]-1.5*d, geoInjector5->getX2Minimum()+1.4*d, geoInjector5->getX3Minimum()+1.5*d);
+   createSpheres(radiusLB, origin1, maxX2, maxX3, uLB, createSphereCoProcessor);
+   Vector3D origin2(g_minX1+peMinOffset[0]-1.5*d, geoInjector4->getX2Minimum()+2.2*d, geoInjector4->getX3Minimum()+1.5*d);
+   createSpheres(radiusLB, origin2, maxX2, maxX3, uLB, createSphereCoProcessor);
+   maxX2 = 7;
+   maxX3 = 7;
+   Vector3D origin3(g_minX1+peMinOffset[0]-1.5*d, geoInjector7->getX2Minimum()+0.5*d, geoInjector7->getX3Minimum()+0.5*d);
+   createSpheres(radiusLB, origin3, maxX2, maxX3, uLB, createSphereCoProcessor);
 
    //for (int x3 = 0; x3 < 6; x3++)
    //   for (int x2 = 0; x2 < 5; x2++)
@@ -592,7 +554,7 @@ void thermoplast(string configname)
    //         createSphereCoProcessor->addGeoObject(sphere, Vector3D(uLB, 0.0, 0.0));
    //      }
 
-   //createSphereCoProcessor->process(0);
+   createSphereCoProcessor->process(0);
 
    //write data for visualization of macroscopic quantities
    SPtr<UbScheduler> visSch(new UbScheduler(outTime));
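
The restart branch above temporarily relaxes the sphere-creation tolerance to 0.05 while DEM objects are restored, then restores the strict configured value (now 0.0). A standalone model of the toggle, with stand-ins for the real co-processors:

// Standalone model of the restart-tolerance toggle (illustrative stand-ins,
// not the real CreateDemObjectsCoProcessor/RestartDemObjectsCoProcessor).
#include <cstdio>

struct Creator { double tol = 0.0; void setToleranz(double t) { tol = t; } };

int main()
{
   Creator createSphereCoProcessor;
   double toleranz = 0.0;                        // strict value for normal stepping
   bool restart = true;
   if (restart)
   {
      createSphereCoProcessor.setToleranz(0.05); // relaxed while restoring DEM objects
      // restartDemObjectsCoProcessor->restart(restartStep) would run here
      createSphereCoProcessor.setToleranz(toleranz);
   }
   std::printf("tolerance after restart: %g\n", createSphereCoProcessor.tol);
   return 0;
}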
diff --git a/source/Applications/bChannelA/CMakeLists.txt b/source/Applications/bChannelA/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..51f2c4be524155746529538ee10d9511c3dcbdc7
--- /dev/null
+++ b/source/Applications/bChannelA/CMakeLists.txt
@@ -0,0 +1,25 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+
+########################################################
+## C++ PROJECT                                       ###
+########################################################
+PROJECT(bchannel)
+
+INCLUDE(${APPS_ROOT}/IncludsList.cmake) 
+
+#################################################################
+###   LOCAL FILES                                             ###
+#################################################################
+FILE(GLOB SPECIFIC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.h
+                         ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
+                         ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp  )
+ 
+SET(ALL_SOURCES ${ALL_SOURCES} ${SPECIFIC_FILES})
+SOURCE_GROUP(src FILES ${SPECIFIC_FILES})
+  
+SET(CAB_ADDITIONAL_LINK_LIBRARIES VirtualFluids)
+
+#################################################################
+###   CREATE PROJECT                                          ###
+#################################################################
+CREATE_CAB_PROJECT(bchannel BINARY)
diff --git a/source/Applications/bChannelA/bChannelA.cpp b/source/Applications/bChannelA/bChannelA.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..83337ba23f8b8de705449c8c09cb9c50808cdec6
--- /dev/null
+++ b/source/Applications/bChannelA/bChannelA.cpp
@@ -0,0 +1,431 @@
+#include <iostream>
+#include <string>
+#include "VirtualFluids.h"
+
+using namespace std;
+
+//////////////////////////////////////////////////////////////////////////
+void run(string configname)
+{
+   try
+   {
+      ConfigurationFile   config;
+      config.load(configname);
+
+      string          pathOut           = config.getValue<string>("pathOut");
+      //string          pathGeo           = config.getValue<string>("pathGeo");
+      int             numOfThreads      = config.getValue<int>("numOfThreads");
+      vector<int>     blocknx           = config.getVector<int>("blocknx");
+      double          u_LB              = config.getValue<double>("u_LB");
+      double          restartStep       = config.getValue<double>("restartStep");
+      double          cpStep            = config.getValue<double>("cpStep");
+      double          cpStart           = config.getValue<double>("cpStart");
+      double          endTime           = config.getValue<double>("endTime");
+      double          outTime           = config.getValue<double>("outTime");
+      double          availMem          = config.getValue<double>("availMem");
+      bool            logToFile         = config.getValue<bool>("logToFile");
+      double          deltaXfine        = config.getValue<double>("deltaXfine");
+      int             refineLevel       = config.getValue<int>("refineLevel");
+      double          Re                = config.getValue<double>("Re");
+      double          timeAvStart       = config.getValue<double>("timeAvStart");
+      double          timeAvStop        = config.getValue<double>("timeAvStop");
+      bool            newStart          = config.getValue<bool>("newStart");
+      vector<double>  nupsStep          = config.getVector<double>("nupsStep");
+      vector<double>  boundingBox       = config.getVector<double>("boundingBox");
+
+      SPtr<Communicator> comm = MPICommunicator::getInstance();
+      int myid = comm->getProcessID();
+
+      if (logToFile)
+      {
+#if defined(__unix__)
+         if (myid == 0)
+         {
+            const char* str = pathOut.c_str();
+            mkdir(str, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
+         }
+#endif 
+
+         if (myid == 0)
+         {
+            stringstream logFilename;
+            logFilename << pathOut + "/logfile" + UbSystem::toString(UbSystem::getTimeStamp()) + ".txt";
+            UbLog::output_policy::setStream(logFilename.str());
+         }
+      }
+
+      //Sleep(30000);
+
+      if (myid == 0) UBLOG(logINFO, "Testcase porous channel");
+
+      SPtr<LBMUnitConverter> conv = SPtr<LBMUnitConverter>(new LBMUnitConverter());
+
+      const int baseLevel = 0;
+      double deltaXcoarse = deltaXfine*(double)(1<<refineLevel);
+
+      LBMReal rho_LB = 0.0;
+      double rhoReal = 1.2041; //(kg/m3)
+      double uReal = 48; //m/s
+      double lReal = 0.008;//m
+      double hLB = lReal / deltaXcoarse;
+      double Ma = 0.13;//Ma-Real!
+      double csReal = uReal / Ma;
+      LBMUnitConverter unitConverter(lReal, csReal, rhoReal, hLB);
+      if (myid==0) UBLOG(logINFO, unitConverter.toString());
+
+      //double coord[6];
+
+      vector<double> origin(3);
+      origin[0] = 0;
+      origin[1] = 0;
+      origin[2] = 0;
+
+      //real velocity is 49.63 m/s
+
+      SPtr<Grid3D> grid(new Grid3D(comm));
+
+      //BC adapters
+      SPtr<BCAdapter> noSlipBCAdapter(new NoSlipBCAdapter());
+      noSlipBCAdapter->setBcAlgorithm(SPtr<BCAlgorithm>(new NoSlipBCAlgorithm()));
+
+      BoundaryConditionsBlockVisitor bcVisitor;
+      bcVisitor.addBC(noSlipBCAdapter);
+
+      SPtr<BCProcessor> bcProc;
+      bcProc = SPtr<BCProcessor>(new BCProcessor());
+
+      SPtr<LBMKernel> kernel = SPtr<LBMKernel>(new IncompressibleCumulantLBMKernel());
+      
+      mu::Parser fctForcingX1;
+      fctForcingX1.SetExpr("Fx1");
+      fctForcingX1.DefineConst("Fx1", 1.0e-6);
+      kernel->setWithForcing(true);
+      kernel->setForcingX1(fctForcingX1); // attach the expression; otherwise the parser above is defined but unused
+
+      kernel->setBCProcessor(bcProc);
+
+      //////////////////////////////////////////////////////////////////////////
+      //restart
+      SPtr<UbScheduler> rSch(new UbScheduler(cpStep, cpStart));
+      SPtr<MPIIORestartCoProcessor> restartCoProcessor(new MPIIORestartCoProcessor(grid, rSch, pathOut, comm));
+      restartCoProcessor->setLBMKernel(kernel);
+      restartCoProcessor->setBCProcessor(bcProc);
+
+      SPtr<UbScheduler> mSch(new UbScheduler(cpStep, cpStart));
+      SPtr<MPIIOMigrationCoProcessor> migCoProcessor(new MPIIOMigrationCoProcessor(grid, mSch, pathOut+"/mig", comm));
+      migCoProcessor->setLBMKernel(kernel);
+      migCoProcessor->setBCProcessor(bcProc);
+      //////////////////////////////////////////////////////////////////////////
+
+      //bounding box
+      double g_minX1 = boundingBox[0];
+      double g_minX2 = boundingBox[1];
+      double g_minX3 = boundingBox[2];
+
+      double g_maxX1 = boundingBox[3];
+      double g_maxX2 = boundingBox[4];
+      double g_maxX3 = boundingBox[5];
+
+      double blockLength = (double)blocknx[0]*deltaXcoarse;
+
+      double channel_hight = (g_maxX3-g_minX3)/2.0;
+      double channel_hight_LB = channel_hight/deltaXcoarse;
+      double d_p = channel_hight/20.0;
+      //////////////////////////////////////////////////////////////////////////
+      double nu_LB = (u_LB*channel_hight_LB)/Re;
+      //////////////////////////////////////////////////////////////////////////
+      if (myid == 0)
+      {
+         UBLOG(logINFO, "Parameters:");
+         UBLOG(logINFO, "Re                  = " << Re);
+         UBLOG(logINFO, "u_LB                = " << u_LB);
+         UBLOG(logINFO, "rho_LB              = " << rho_LB);
+         UBLOG(logINFO, "nu_LB               = " << nu_LB);
+         UBLOG(logINFO, "dx coarse           = " << deltaXcoarse << " m");
+         UBLOG(logINFO, "dx fine             = " << deltaXfine << " m");
+         UBLOG(logINFO, "channel_high        = " << channel_hight << " m");
+         UBLOG(logINFO, "channel_high_LB     = " << channel_hight_LB);
+         UBLOG(logINFO, "number of levels    = " << refineLevel + 1);
+         UBLOG(logINFO, "number of processes = " << comm->getNumberOfProcesses());
+         UBLOG(logINFO, "number of threads   = " << numOfThreads);
+         UBLOG(logINFO, "path = " << pathOut);
+         UBLOG(logINFO, "Preprocess - start");
+      }
+
+
+      if (newStart)
+      {
+         if (myid == 0) UBLOG(logINFO, "new start...");
+
+         
+
+         grid->setPeriodicX1(true);
+         grid->setPeriodicX2(true);
+         grid->setPeriodicX3(false);
+         grid->setDeltaX(deltaXcoarse);
+         grid->setBlockNX(blocknx[0], blocknx[1], blocknx[2]);
+
+         SPtr<GbObject3D> gridCube(new GbCuboid3D(g_minX1, g_minX2, g_minX3, g_maxX1, g_maxX2, g_maxX3));
+         if (myid == 0) GbSystem3D::writeGeoObject(gridCube.get(), pathOut + "/geo/gridCube", WbWriterVtkXmlBinary::getInstance());
+
+
+         GenBlocksGridVisitor genBlocks(gridCube);
+         grid->accept(genBlocks);
+
+
+         //////////////////////////////////////////////////////////////////////////
+         //refinement
+         double blockLengthX3Fine = grid->getDeltaX(refineLevel) * blocknx[2];
+         double refHight = 0.002;
+
+         GbCuboid3DPtr refineBoxTop(new GbCuboid3D(g_minX1-blockLength, g_minX2-blockLength, g_maxX3-refHight, g_maxX1+blockLength, g_maxX2+blockLength, g_maxX3));
+         if (myid == 0) GbSystem3D::writeGeoObject(refineBoxTop.get(), pathOut + "/geo/refineBoxTop", WbWriterVtkXmlASCII::getInstance());
+
+         //GbCuboid3DPtr refineBoxBottom(new GbCuboid3D(g_minX1-blockLength, g_minX2-blockLength, g_minX3, g_maxX1+blockLength, g_maxX2+blockLength, g_minX3+offsetMinX3+blockLengthX3Fine));
+         GbCuboid3DPtr refineBoxBottom(new GbCuboid3D(g_minX1-blockLength, g_minX2-blockLength, g_minX3, g_maxX1+blockLength, g_maxX2+blockLength, g_minX3+refHight));
+         if (myid == 0) GbSystem3D::writeGeoObject(refineBoxBottom.get(), pathOut + "/geo/refineBoxBottom", WbWriterVtkXmlASCII::getInstance());
+
+         if (refineLevel > 0)
+         {
+            if (myid == 0) UBLOG(logINFO, "Refinement - start");
+            RefineCrossAndInsideGbObjectHelper refineHelper(grid, refineLevel, comm);
+            refineHelper.addGbObject(refineBoxTop, refineLevel);
+            refineHelper.addGbObject(refineBoxBottom, refineLevel);
+            refineHelper.refine();
+            if (myid == 0) UBLOG(logINFO, "Refinement - end");
+         }
+         //////////////////////////////////////////////////////////////////////////
+
+         //walls
+         GbCuboid3DPtr addWallZmin(new GbCuboid3D(g_minX1-blockLength, g_minX2-blockLength, g_minX3-blockLength, g_maxX1+blockLength, g_maxX2+blockLength, g_minX3));
+         if (myid == 0) GbSystem3D::writeGeoObject(addWallZmin.get(), pathOut+"/geo/addWallZmin", WbWriterVtkXmlASCII::getInstance());
+
+         GbCuboid3DPtr addWallZmax(new GbCuboid3D(g_minX1-blockLength, g_minX2-blockLength, g_maxX3, g_maxX1+blockLength, g_maxX2+blockLength, g_maxX3+blockLength));
+         if (myid == 0) GbSystem3D::writeGeoObject(addWallZmax.get(), pathOut+"/geo/addWallZmax", WbWriterVtkXmlASCII::getInstance());
+
+
+         //wall interactors
+         SPtr<D3Q27Interactor> addWallZminInt(new D3Q27Interactor(addWallZmin, grid, noSlipBCAdapter, Interactor3D::SOLID));
+         SPtr<D3Q27Interactor> addWallZmaxInt(new D3Q27Interactor(addWallZmax, grid, noSlipBCAdapter, Interactor3D::SOLID));
+
+         ////////////////////////////////////////////
+         //METIS
+         SPtr<Grid3DVisitor> metisVisitor(new MetisPartitioningGridVisitor(comm, MetisPartitioningGridVisitor::LevelBased, D3Q27System::BSW, MetisPartitioner::KWAY));
+         ////////////////////////////////////////////
+         if (myid == 0) UBLOG(logINFO, "deleteSolidBlocks - start");
+         InteractorsHelper intHelper(grid, metisVisitor);
+         intHelper.addInteractor(addWallZminInt);
+         intHelper.addInteractor(addWallZmaxInt);
+         intHelper.selectBlocks();
+         if (myid == 0) UBLOG(logINFO, "deleteSolidBlocks - end");
+         //////////////////////////////////////
+
+         {
+            WriteBlocksCoProcessor ppblocks(grid, SPtr<UbScheduler>(new UbScheduler(1)), pathOut, WbWriterVtkXmlBinary::getInstance(), comm);
+            ppblocks.process(0);
+         }
+
+         unsigned long long numberOfBlocks = (unsigned long long)grid->getNumberOfBlocks();
+         int ghostLayer = 3;
+         unsigned long long numberOfNodesPerBlock = (unsigned long long)(blocknx[0])* (unsigned long long)(blocknx[1])* (unsigned long long)(blocknx[2]);
+         unsigned long long numberOfNodes = numberOfBlocks * numberOfNodesPerBlock;
+         unsigned long long numberOfNodesPerBlockWithGhostLayer = numberOfBlocks * (blocknx[0] + ghostLayer) * (blocknx[1] + ghostLayer) * (blocknx[2] + ghostLayer);
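+         // rough per-node memory estimate: 27 distributions (double), one
+         // geometry/BC flag (int) and 4 float fields (assumed layout)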
+         double needMemAll = double(numberOfNodesPerBlockWithGhostLayer*(27 * sizeof(double) + sizeof(int) + sizeof(float) * 4));
+         double needMem = needMemAll / double(comm->getNumberOfProcesses());
+
+         if (myid == 0)
+         {
+            UBLOG(logINFO, "Number of blocks = " << numberOfBlocks);
+            UBLOG(logINFO, "Number of nodes  = " << numberOfNodes);
+            int minInitLevel = grid->getCoarsestInitializedLevel();
+            int maxInitLevel = grid->getFinestInitializedLevel();
+            for (int level = minInitLevel; level <= maxInitLevel; level++)
+            {
+               int nobl = grid->getNumberOfBlocks(level);
+               UBLOG(logINFO, "Number of blocks for level " << level << " = " << nobl);
+               UBLOG(logINFO, "Number of nodes for level " << level << " = " << nobl*numberOfNodesPerBlock);
+            }
+            UBLOG(logINFO, "Necessary memory  = " << needMemAll << " bytes");
+            UBLOG(logINFO, "Necessary memory per process = " << needMem << " bytes");
+            UBLOG(logINFO, "Available memory per process = " << availMem << " bytes");
+         }
+
+
+         SetKernelBlockVisitor kernelVisitor(kernel, nu_LB, availMem, needMem);
+         grid->accept(kernelVisitor);
+
+         //////////////////////////////////
+         //undef nodes for refinement
+         if (refineLevel > 0)
+         {
+            SetUndefinedNodesBlockVisitor undefNodesVisitor;
+            grid->accept(undefNodesVisitor);
+         }
+
+
+         //BC
+         intHelper.setBC();
+
+
+
+         grid->accept(bcVisitor);
+
+         mu::Parser inflowProfileVx1, inflowProfileVx2, inflowProfileVx3, inflowProfileRho;
+         inflowProfileVx1.SetExpr("x3 < h ? 0.0 : uLB+1*x1");
+         inflowProfileVx1.DefineConst("uLB", u_LB);
+         inflowProfileVx1.DefineConst("h", channel_hight-d_p);
+
+         InitDistributionsBlockVisitor initVisitor;
+         initVisitor.setVx1(inflowProfileVx1);
+         grid->accept(initVisitor);
+
+         ////set connectors
+         InterpolationProcessorPtr iProcessor(new IncompressibleOffsetInterpolationProcessor());
+         SetConnectorsBlockVisitor setConnsVisitor(comm, true, D3Q27System::ENDDIR, nu_LB, iProcessor);
+         grid->accept(setConnsVisitor);
+
+         //domain decomposition for threads
+         PQueuePartitioningGridVisitor pqPartVisitor(numOfThreads);
+         grid->accept(pqPartVisitor);
+
+         //Postprocess
+         {
+            SPtr<UbScheduler> geoSch(new UbScheduler(1));
+            WriteBoundaryConditionsCoProcessor ppgeo(grid, geoSch, pathOut, WbWriterVtkXmlBinary::getInstance(), comm);
+            ppgeo.process(0);
+         }
+
+         if (myid == 0) UBLOG(logINFO, "Preprozess - end");
+      }
+      else
+      {
+         //restartCoProcessor->restart((int)restartStep);
+         migCoProcessor->restart((int)restartStep);
+         grid->setTimeStep(restartStep);
+         ////////////////////////////////////////////////////////////////////////////
+         InterpolationProcessorPtr iProcessor(new CompressibleOffsetInterpolationProcessor());
+         SetConnectorsBlockVisitor setConnsVisitor(comm, true, D3Q27System::ENDDIR, nu_LB, iProcessor);
+         grid->accept(setConnsVisitor);
+
+         grid->accept(bcVisitor);
+
+         if (myid == 0) UBLOG(logINFO, "Restart - end");
+      }
+     
+      SPtr<UbScheduler> nupsSch(new UbScheduler(nupsStep[0], nupsStep[1], nupsStep[2]));
+      std::shared_ptr<NUPSCounterCoProcessor> nupsCoProcessor(new NUPSCounterCoProcessor(grid, nupsSch, numOfThreads, comm));
+
+      SPtr<UbScheduler> stepSch(new UbScheduler(outTime));
+
+      SPtr<WriteMacroscopicQuantitiesCoProcessor> writeMQCoProcessor(new WriteMacroscopicQuantitiesCoProcessor(grid, stepSch, pathOut, WbWriterVtkXmlBinary::getInstance(), conv, comm));
+
+      SPtr<GbObject3D> bbBox(new GbCuboid3D(g_minX1-blockLength, (g_maxX2-g_minX2)/2.0, g_minX3-blockLength, g_maxX1+blockLength, (g_maxX2-g_minX2)/2.0+deltaXcoarse, g_maxX3+blockLength));
+      if (myid==0) GbSystem3D::writeGeoObject(bbBox.get(), pathOut+"/geo/bbBox", WbWriterVtkXmlASCII::getInstance());
+      SPtr<WriteMQFromSelectionCoProcessor> writeMQSelectCoProcessor(new WriteMQFromSelectionCoProcessor(grid, stepSch, bbBox, pathOut, WbWriterVtkXmlBinary::getInstance(), conv, comm));
+
+
+      SPtr<UbScheduler> AdjForcSch(new UbScheduler());
+      AdjForcSch->addSchedule(10, 0, 10000000);
+      SPtr<IntegrateValuesHelper> intValHelp(new IntegrateValuesHelper(grid, comm, g_minX1, g_minX2, channel_hight-d_p, g_maxX1, g_maxX2, g_maxX3));
+      if (myid == 0) GbSystem3D::writeGeoObject(intValHelp->getBoundingBox().get(), pathOut + "/geo/IntValHelp", WbWriterVtkXmlBinary::getInstance());
+
+      double vxTarget=u_LB;
+      SPtr<AdjustForcingCoProcessor> AdjForcCoProcessor(new AdjustForcingCoProcessor(grid, AdjForcSch, pathOut, intValHelp, vxTarget, comm));
+
+
+      std::vector<double> levelCoords;
+      std::vector<int> levels;
+      std::vector<double> bounds;
+      //bounds.push_back(0);
+      //bounds.push_back(0);
+      //bounds.push_back(0);
+      //bounds.push_back(0.004);
+      //bounds.push_back(0.002);
+      //bounds.push_back(0.003);
+      //levels.push_back(1);
+      //levels.push_back(0);
+      //levels.push_back(1);
+      //levelCoords.push_back(0);
+      //levelCoords.push_back(0.0016);
+      //levelCoords.push_back(0.0024);
+      //levelCoords.push_back(0.003);
+      bounds.push_back(0);
+      bounds.push_back(0);
+      bounds.push_back(0);
+      bounds.push_back(0.004);
+      bounds.push_back(0.002);
+      bounds.push_back(0.002);
+      levels.push_back(0);
+      levelCoords.push_back(0);
+      levelCoords.push_back(0.002);
+      //SPtr<UbScheduler> tavSch(new UbScheduler(1, timeAvStart, timeAvStop));
+      //SPtr<CoProcessor> tav(new TimeAveragedValuesCoProcessor(grid, pathOut, WbWriterVtkXmlBinary::getInstance(), tavSch, comm,
+      //   TimeAveragedValuesCoProcessor::Velocity | TimeAveragedValuesCoProcessor::Fluctuations | TimeAveragedValuesCoProcessor::Triplecorrelations,
+      //   levels, levelCoords, bounds));
+      
+      
+      //create line time series
+      SPtr<UbScheduler> tpcSch(new UbScheduler(1,1,3));
+      //GbPoint3DPtr p1(new GbPoint3D(0.0,0.005,0.01));
+      //GbPoint3DPtr p2(new GbPoint3D(0.064,0.005,0.01));
+      //SPtr<GbLine3D> line(new GbLine3D(p1.get(),p2.get()));
+      SPtr<GbLine3D> line(new GbLine3D(new GbPoint3D(0.0,0.005,0.01),new GbPoint3D(0.064,0.005,0.01)));
+      LineTimeSeriesCoProcessor lineTs(grid, tpcSch,pathOut+"/TimeSeries/line1.csv",line, 0,comm);
+      if (myid==0) lineTs.writeLine(pathOut+"/geo/line1");
+
+      if (myid == 0)
+      {
+         UBLOG(logINFO, "PID = " << myid << " Total Physical Memory (RAM): " << Utilities::getTotalPhysMem());
+         UBLOG(logINFO, "PID = " << myid << " Physical Memory currently used: " << Utilities::getPhysMemUsed());
+         UBLOG(logINFO, "PID = " << myid << " Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe());
+      }
+
+      omp_set_num_threads(numOfThreads);
+      SPtr<UbScheduler> stepGhostLayer(new UbScheduler(1));
+      SPtr<Calculator> calculator(new BasicCalculator(grid, stepGhostLayer, endTime));
+      calculator->addCoProcessor(nupsCoProcessor);
+      calculator->addCoProcessor(AdjForcCoProcessor);
+      calculator->addCoProcessor(migCoProcessor);
+      //calculator->addCoProcessor(restartCoProcessor);
+      calculator->addCoProcessor(writeMQSelectCoProcessor);
+      calculator->addCoProcessor(writeMQCoProcessor);
+
+      if (myid == 0) UBLOG(logINFO, "Simulation-start");
+      calculator->calculate();
+      if (myid == 0) UBLOG(logINFO, "Simulation-end");
+   }
+   catch (exception& e)
+   {
+      cerr << e.what() << endl << flush;
+   }
+   catch (string& s)
+   {
+      cerr << s << endl;
+   }
+   catch (mu::Parser::exception_type &e)
+   {
+      std::cout << e.GetMsg() << std::endl;
+   }
+   catch (...)
+   {
+      cerr << "unknown exception" << endl;
+   }
+
+}
+//////////////////////////////////////////////////////////////////////////
+int main(int argc, char* argv[])
+{
+
+   if (argc > 1)
+   {
+      run(string(argv[1]));
+   }
+   else
+   {
+      cout << "Configuration file is missing!" << endl;
+   }
+
+   return 0;
+}
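
AdjustForcingCoProcessor (scheduled every 10 steps above) adjusts the body force until the mean velocity measured by IntegrateValuesHelper reaches vxTarget. A hypothetical, self-contained sketch of that control idea (the real co-processor's update law may differ):

// Hypothetical sketch of the adjust-forcing idea: nudge a body force so the
// measured mean velocity approaches a target. Gain and plant response are
// assumptions for illustration only.
#include <cstdio>

int main()
{
   double forcing = 1.0e-6;          // current body force (LB units)
   const double vxTarget = 0.1;      // u_LB from the config
   const double gain = 1.0e-7;       // proportional gain (assumed)
   double vxMeasured = 0.08;         // would come from IntegrateValuesHelper
   for (int step = 0; step < 5; step++)
   {
      forcing += gain * (vxTarget - vxMeasured);   // proportional update
      vxMeasured += 0.5 * (vxTarget - vxMeasured); // toy plant response
      std::printf("step %d: forcing=%g vx=%g\n", step, forcing, vxMeasured);
   }
   return 0;
}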
diff --git a/source/Applications/bChannelA/configBombadilpChannel.cfg b/source/Applications/bChannelA/configBombadilpChannel.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..390aebfb0e517913da4972e5f24d44c281884845
--- /dev/null
+++ b/source/Applications/bChannelA/configBombadilpChannel.cfg
@@ -0,0 +1,32 @@
+#
+#Simulation parameters for porous channel
+#
+
+pathOut = d:/temp/BreugemChannelAnisotrop
+numOfThreads = 1
+availMem = 14e9
+logToFile = false
+
+#grid
+boundingBox = 0 0 0 600 400 400
+refineLevel = 0
+deltaXfine  = 1
+blocknx = 20 20 20
+u_LB = 0.1
+Re = 5500
+
+newStart = true
+restartStep = 230000
+cpStep = 100000
+cpStart = 100000
+
+timeAvStart = 21000000
+timeAvStop = 2100010000
+
+nupsStep = 10 10 10000000 
+outTime = 100
+endTime = 100
+
+ 
+
+
diff --git a/source/Applications/pChannel/configBombadilpChannel.cfg b/source/Applications/pChannel/configBombadilpChannel.cfg
index d255acfe806a9155e11821e10c6a0e6dcf53e8dd..c94f7669ad153820f6a88bdcf841446dc2d90d66 100644
--- a/source/Applications/pChannel/configBombadilpChannel.cfg
+++ b/source/Applications/pChannel/configBombadilpChannel.cfg
@@ -58,8 +58,8 @@ pmL = 1e-3 1e-3 1e-3
 #grid
 refineLevel = 0
 #deltaXfine  = 10e-6
-#deltaXfine  = 20e-6
-deltaXfine  = 80e-6 #level 2
+deltaXfine  = 20e-6
+#deltaXfine  = 80e-6 #level 2
 
 blocknx = 10 10 10
 
diff --git a/source/Applications/pChannel/pChannel.cpp b/source/Applications/pChannel/pChannel.cpp
index 56a519953ac3ed8e7da662aa8038d2fa8d4d0cc6..e7fa6abcf13f93748f0619eabacd5658fab55e8e 100644
--- a/source/Applications/pChannel/pChannel.cpp
+++ b/source/Applications/pChannel/pChannel.cpp
@@ -90,13 +90,21 @@ void run(string configname)
 
       if (myid == 0) UBLOG(logINFO, "Testcase porous channel");
 
-      LBMReal rho_LB = 0.0;
-
       SPtr<LBMUnitConverter> conv = SPtr<LBMUnitConverter>(new LBMUnitConverter());
 
       const int baseLevel = 0;
       double deltaXcoarse = deltaXfine*(double)(1<<refineLevel);
 
+      LBMReal rho_LB = 0.0;
+      double rhoReal = 1.2041; //(kg/m3)
+      double uReal = 48; //m/s
+      double lReal = 0.008;//m
+      double hLB = lReal / deltaXcoarse;
+      double Ma = 0.13;//Ma-Real!
+      double csReal = uReal / Ma;
+      LBMUnitConverter unitConverter(lReal, csReal, rhoReal, hLB);
+      if (myid==0) UBLOG(logINFO, unitConverter.toString());
+
       //double coord[6];
 
       vector<double> origin(3);
@@ -393,7 +401,7 @@ void run(string configname)
          //Postrozess
          {
             SPtr<UbScheduler> geoSch(new UbScheduler(1));
-            WriteBoundaryConditionsCoProcessor ppgeo(grid, geoSch, pathOut, WbWriterVtkXmlBinary::getInstance(), conv, comm);
+            WriteBoundaryConditionsCoProcessor ppgeo(grid, geoSch, pathOut, WbWriterVtkXmlBinary::getInstance(), comm);
             ppgeo.process(0);
          }
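
For reference, the converter inputs added above evaluate, with the config values deltaXfine = 20e-6 and refineLevel = 0 (so deltaXcoarse = 20e-6), to csReal = 48/0.13 ≈ 369.2 m/s and hLB = 0.008/20e-6 = 400:

// Worked numbers for the LBMUnitConverter inputs added in this hunk.
#include <cstdio>

int main()
{
   const double uReal = 48.0, Ma = 0.13, lReal = 0.008, deltaXcoarse = 20e-6;
   std::printf("csReal = %.1f m/s\n", uReal / Ma);             // ~369.2
   std::printf("hLB    = %.0f nodes\n", lReal / deltaXcoarse); // 400
   return 0;
}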
 
diff --git a/source/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp b/source/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
index a1e8404ecd6e628577d7b72e7d6df1552441fb80..71ba9ea381cb147eb6b51c384a04032842f265ac 100644
--- a/source/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
+++ b/source/VirtualFluidsCore/CoProcessors/MPIIOMigrationCoProcessor.cpp
@@ -344,8 +344,8 @@ void MPIIOMigrationCoProcessor::writeBlocks(int step)
          block3dArray[ic].globalID = block->getGlobalID();
          block3dArray[ic].localID = block->getLocalID();
          block3dArray[ic].level = block->getLevel();
-         block3dArray[ic].interpolationFlagCF = block->getInterpolationFlagCF();
-         block3dArray[ic].interpolationFlagFC = block->getInterpolationFlagFC();
+         block3dArray[ic].interpolationFlagCF = block->getCollectionOfInterpolationFlagCF();
+         block3dArray[ic].interpolationFlagFC = block->getCollectionOfInterpolationFlagFC();
          block3dArray[ic].counter = block->getMaxGlobalID();
          block3dArray[ic].active = block->isActive();
 
@@ -1626,8 +1626,8 @@ void MPIIOMigrationCoProcessor::readBlocks(int step)
       block->setLocalID(block3dArray[n].localID);
       block->setPart(block3dArray[n].part);
       block->setLevel(block3dArray[n].level);
-      block->interpolationFlagCF = block3dArray[n].interpolationFlagCF;
-      block->interpolationFlagFC = block3dArray[n].interpolationFlagFC;
+      block->setCollectionOfInterpolationFlagCF(block3dArray[n].interpolationFlagCF);
+      block->setCollectionOfInterpolationFlagFC(block3dArray[n].interpolationFlagFC);
 
       grid->addBlock(block);
    }
@@ -1884,7 +1884,7 @@ void MPIIOMigrationCoProcessor::readAverageDensityArray(int step)
       //std::cout << "rank=" << rank << ", dataSetArray[n].globalID=" << dataSetSmallArray[n].globalID << std::endl;
       // find the nesessary block and fill it
       SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
-      block->kernel->getDataSet()->setAverageDencity(mAverageDensity);
+      block->getKernel()->getDataSet()->setAverageDencity(mAverageDensity);
    }
 
    MPI_Type_free(&dataSetDoubleType);
@@ -1984,7 +1984,7 @@ void MPIIOMigrationCoProcessor::readAverageVelocityArray(int step)
 
       // find the nesessary block and fill it
       SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
-      block->kernel->getDataSet()->setAverageVelocity(mAverageVelocity);
+      block->getKernel()->getDataSet()->setAverageVelocity(mAverageVelocity);
    }
 
    MPI_Type_free(&dataSetDoubleType);
@@ -2084,7 +2084,7 @@ void MPIIOMigrationCoProcessor::readAverageFluktuationsArray(int step)
 
       // find the nesessary block and fill it
       SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
-      block->kernel->getDataSet()->setAverageFluctuations(mAverageFluktuations);
+      block->getKernel()->getDataSet()->setAverageFluctuations(mAverageFluktuations);
    }
 
    MPI_Type_free(&dataSetDoubleType);
@@ -2184,7 +2184,7 @@ void MPIIOMigrationCoProcessor::readAverageTripleArray(int step)
 
       // find the nesessary block and fill it
       SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
-      block->kernel->getDataSet()->setAverageTriplecorrelations(mAverageTriplecorrelations);
+      block->getKernel()->getDataSet()->setAverageTriplecorrelations(mAverageTriplecorrelations);
    }
 
    MPI_Type_free(&dataSetDoubleType);
@@ -2284,7 +2284,7 @@ void MPIIOMigrationCoProcessor::readShearStressValArray(int step)
 
       // find the nesessary block and fill it
       SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
-      block->kernel->getDataSet()->setShearStressValues(mShearStressValues);
+      block->getKernel()->getDataSet()->setShearStressValues(mShearStressValues);
    }
 
    MPI_Type_free(&dataSetDoubleType);
@@ -2384,7 +2384,7 @@ void MPIIOMigrationCoProcessor::readRelaxationFactor(int step)
 
       // find the nesessary block and fill it
       SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].globalID);
-      block->kernel->getDataSet()->setRelaxationFactor(mRelaxationFactor);
+      block->getKernel()->getDataSet()->setRelaxationFactor(mRelaxationFactor);
    }
 
    MPI_Type_free(&dataSetDoubleType);
diff --git a/source/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp b/source/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
index f31ca569e9596b81f8cbdf4c433014392624e7ac..cbafb11f5d35dfec0f224f9146d2b66969eadfd6 100644
--- a/source/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
+++ b/source/VirtualFluidsCore/CoProcessors/MPIIORestartCoProcessor.cpp
@@ -347,8 +347,8 @@ void MPIIORestartCoProcessor::writeBlocks(int step)
          block3dArray[ic].globalID = block->getGlobalID();
          block3dArray[ic].localID = block->getLocalID();
          block3dArray[ic].level = block->getLevel();
-         block3dArray[ic].interpolationFlagCF = block->getInterpolationFlagCF();
-         block3dArray[ic].interpolationFlagFC = block->getInterpolationFlagFC();
+         block3dArray[ic].interpolationFlagCF = block->getCollectionOfInterpolationFlagCF();
+         block3dArray[ic].interpolationFlagFC = block->getCollectionOfInterpolationFlagFC();
          block3dArray[ic].counter = block->getMaxGlobalID();
          block3dArray[ic].active = block->isActive();
 
@@ -1801,8 +1801,8 @@ void MPIIORestartCoProcessor::readBlocks(int step)
       block->setLocalID(block3dArray[n].localID);
       block->setPart(block3dArray[n].part);
       block->setLevel(block3dArray[n].level);
-      block->interpolationFlagCF = block3dArray[n].interpolationFlagCF;
-      block->interpolationFlagFC = block3dArray[n].interpolationFlagFC;
+      block->setCollectionOfInterpolationFlagCF(block3dArray[n].interpolationFlagCF);
+      block->setCollectionOfInterpolationFlagFC(block3dArray[n].interpolationFlagFC);
 
       grid->addBlock(block);
    }
@@ -2053,7 +2053,7 @@ void MPIIORestartCoProcessor::readAverageDensityArray(int step)
 
       // find the nesessary block and fill it
       SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
-      block->kernel->getDataSet()->setAverageDencity(mAverageDensity);
+      block->getKernel()->getDataSet()->setAverageDencity(mAverageDensity);
    }
 
    MPI_Type_free(&dataSetDoubleType);
@@ -2150,7 +2150,7 @@ void MPIIORestartCoProcessor::readAverageVelocityArray(int step)
 
       // find the nesessary block and fill it
       SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
-      block->kernel->getDataSet()->setAverageVelocity(mAverageVelocity);
+      block->getKernel()->getDataSet()->setAverageVelocity(mAverageVelocity);
    }
 
    MPI_Type_free(&dataSetDoubleType);
@@ -2247,7 +2247,7 @@ void MPIIORestartCoProcessor::readAverageFluktuationsArray(int step)
 
       // find the nesessary block and fill it
       SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
-      block->kernel->getDataSet()->setAverageFluctuations(mAverageFluktuations);
+      block->getKernel()->getDataSet()->setAverageFluctuations(mAverageFluktuations);
    }
 
    MPI_Type_free(&dataSetDoubleType);
@@ -2344,7 +2344,7 @@ void MPIIORestartCoProcessor::readAverageTripleArray(int step)
 
       // find the nesessary block and fill it
       SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
-      block->kernel->getDataSet()->setAverageTriplecorrelations(mAverageTriplecorrelations);
+      block->getKernel()->getDataSet()->setAverageTriplecorrelations(mAverageTriplecorrelations);
    }
 
    MPI_Type_free(&dataSetDoubleType);
@@ -2441,7 +2441,7 @@ void MPIIORestartCoProcessor::readShearStressValArray(int step)
 
       // find the nesessary block and fill it
       SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
-      block->kernel->getDataSet()->setShearStressValues(mShearStressValues);
+      block->getKernel()->getDataSet()->setShearStressValues(mShearStressValues);
    }
 
    MPI_Type_free(&dataSetDoubleType);
@@ -2538,7 +2538,7 @@ void MPIIORestartCoProcessor::readRelaxationFactor(int step)
 
       // find the nesessary block and fill it
       SPtr<Block3D> block = grid->getBlock(dataSetSmallArray[n].x1, dataSetSmallArray[n].x2, dataSetSmallArray[n].x3, dataSetSmallArray[n].level);
-      block->kernel->getDataSet()->setRelaxationFactor(mRelaxationFactor);
+      block->getKernel()->getDataSet()->setRelaxationFactor(mRelaxationFactor);
    }
 
    MPI_Type_free(&dataSetDoubleType);
diff --git a/source/VirtualFluidsCore/Grid/Block3D.cpp b/source/VirtualFluidsCore/Grid/Block3D.cpp
index fb95b36fc8be9c3975588ed5556970235094cc9a..3bf805945bc10f81bb58f3fdbc9cc0344b982e19 100644
--- a/source/VirtualFluidsCore/Grid/Block3D.cpp
+++ b/source/VirtualFluidsCore/Grid/Block3D.cpp
@@ -363,13 +363,17 @@ int Block3D::getNumberOfRemoteConnectorsForSurfaces()
    }
    return count;
 }
+void Block3D::setCollectionOfInterpolationFlagCF(int flags)
+{
+   interpolationFlagCF = flags;
+}
 //////////////////////////////////////////////////////////////////////////
 void Block3D::setInterpolationFlagCF(int dir)
 {
    UbSystem::setBit(interpolationFlagCF, 1<<dir);
 }
 //////////////////////////////////////////////////////////////////////////
-int Block3D::getInterpolationFlagCF()
+int Block3D::getCollectionOfInterpolationFlagCF()
 {
    return interpolationFlagCF;
 }
@@ -378,13 +382,17 @@ bool Block3D::hasInterpolationFlagCF(int dir)
 {
    return UbSystem::bitCheck( interpolationFlagCF, 1<<dir );
 }
+void Block3D::setCollectionOfInterpolationFlagFC(int flags)
+{
+   interpolationFlagFC = flags;
+}
 //////////////////////////////////////////////////////////////////////////
 void Block3D::setInterpolationFlagFC(int dir)
 {
    UbSystem::setBit(interpolationFlagFC, 1<<dir);
 }
 //////////////////////////////////////////////////////////////////////////
-int Block3D::getInterpolationFlagFC()
+int Block3D::getCollectionOfInterpolationFlagFC()
 {
    return interpolationFlagFC;
 }
diff --git a/source/VirtualFluidsCore/Grid/Block3D.h b/source/VirtualFluidsCore/Grid/Block3D.h
index 85887aeacca4f44688f4b78854395f52a0206bb0..795d85f6be046fd74575552ef784d7c0f3d09418 100644
--- a/source/VirtualFluidsCore/Grid/Block3D.h
+++ b/source/VirtualFluidsCore/Grid/Block3D.h
@@ -83,13 +83,17 @@ public:
    bool hasInterpolationFlag(int dir);
    void deleteInterpolationFlag();
 
+   int  getCollectionOfInterpolationFlagCF();
+   void setCollectionOfInterpolationFlagCF(int flags);
+   
    void setInterpolationFlagCF(int dir);
-   int  getInterpolationFlagCF();
    bool hasInterpolationFlagCF(int dir);
    bool hasInterpolationFlagCF();
 
+   int  getCollectionOfInterpolationFlagFC();
+   void setCollectionOfInterpolationFlagFC(int flags);
+   
    void setInterpolationFlagFC(int dir);
-   int  getInterpolationFlagFC();
    bool hasInterpolationFlagFC(int dir);
    bool hasInterpolationFlagFC();
 
@@ -123,9 +127,6 @@ private:
   int level;
   static int counter;
 
-  friend class MPIIORestartCoProcessor;
-  friend class MPIIOMigrationCoProcessor;
-
 };
 
 #endif  //BLOCK3D_H
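
With the friend declarations gone, checkpointing goes through the new accessors: the per-direction setInterpolationFlag* still set single bits, while the "collection" getters/setters expose the whole word. A standalone model of the round trip (illustrative struct, not the real Block3D):

// Standalone model of the new flag accessors: per-direction setters OR in
// single bits, the collection accessors expose the whole word for checkpoints.
#include <cstdio>

struct BlockFlags
{
   int interpolationFlagCF = 0;
   void setInterpolationFlagCF(int dir)      { interpolationFlagCF |= (1 << dir); }
   int  getCollectionOfInterpolationFlagCF() { return interpolationFlagCF; }
   void setCollectionOfInterpolationFlagCF(int flags) { interpolationFlagCF = flags; }
};

int main()
{
   BlockFlags a;
   a.setInterpolationFlagCF(2);
   a.setInterpolationFlagCF(5);
   BlockFlags b; // "restored" block
   b.setCollectionOfInterpolationFlagCF(a.getCollectionOfInterpolationFlagCF());
   std::printf("flags = 0x%x\n", b.getCollectionOfInterpolationFlagCF());
   return 0;
}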
diff --git a/source/VirtualFluidsCore/Parallel/Communicator.h b/source/VirtualFluidsCore/Parallel/Communicator.h
index 3d07e06d41366c268e3d4d78e7d4ff7761e3fe70..d75ae2241084ea4588fe89c735f277ce99e1d81c 100644
--- a/source/VirtualFluidsCore/Parallel/Communicator.h
+++ b/source/VirtualFluidsCore/Parallel/Communicator.h
@@ -52,7 +52,6 @@ protected:
    Communicator(){}
    Communicator( const Communicator& ){}
    static SPtr<Communicator> instance;
-private:
 };
 
 #endif
diff --git a/source/VirtualFluidsCore/Parallel/MPICommunicator.h b/source/VirtualFluidsCore/Parallel/MPICommunicator.h
index 9338a29259f6df3d71881a8095df2a4ff368b626..041029e045f97f7285fee8f4b2fcbf2e4b89e9aa 100644
--- a/source/VirtualFluidsCore/Parallel/MPICommunicator.h
+++ b/source/VirtualFluidsCore/Parallel/MPICommunicator.h
@@ -20,6 +20,7 @@ class MPICommunicator : public Communicator
 {
 private:
    MPICommunicator();
+   MPICommunicator( const MPICommunicator& ){}
 public:
    ~MPICommunicator();
    static SPtr<Communicator> getInstance();
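
Making the copy constructor private closes the last way to duplicate the communicator singleton; all access now goes through getInstance(). The idiom in miniature (generic sketch; in C++11 one would write "= delete" instead):

// Generic singleton idiom illustrated by the change above: private default
// and copy constructors force all access through getInstance().
#include <memory>

class Singleton
{
private:
   Singleton() {}
   Singleton(const Singleton&) {} // copying now impossible from outside
   static std::shared_ptr<Singleton> instance;
public:
   static std::shared_ptr<Singleton> getInstance()
   {
      if (!instance) instance.reset(new Singleton());
      return instance;
   }
};
std::shared_ptr<Singleton> Singleton::instance;

int main() { auto s = Singleton::getInstance(); return s ? 0 : 1; }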
diff --git a/source/VirtualFluidsCore/Utilities/CheckpointConverter.cpp b/source/VirtualFluidsCore/Utilities/CheckpointConverter.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0d2550a3745355684ed5e2c8211e41ceaca5a895
--- /dev/null
+++ b/source/VirtualFluidsCore/Utilities/CheckpointConverter.cpp
@@ -0,0 +1,536 @@
+#include "CheckpointConverter.h"
+#include <MemoryUtil.h>
+#include "BoundaryConditions.h"
+#include "Block3D.h"
+#include "DataSet3D.h"
+#include "Grid3D.h"
+#include "Communicator.h"
+#include <stdio.h>
+
+#define BLOCK_SIZE 1024
+
+CheckpointConverter::CheckpointConverter(SPtr<Grid3D> grid, const std::string& path, SPtr<Communicator> comm) :
+   grid(grid), path(path), comm(comm)
+{
+   UbSystem::makeDirectory(path + "/mpi_io_cp");
+
+   memset(&boundCondParamStr, 0, sizeof(boundCondParamStr));
+
+   //-------------------------   define MPI types  ---------------------------------
+
+   MPI_Datatype typesGP[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR };
+   int blocksGP[3] = { 34, 6, 5 };
+   MPI_Aint offsetsGP[3], lbGP, extentGP;
+
+   offsetsGP[0] = 0;
+   MPI_Type_get_extent(MPI_DOUBLE, &lbGP, &extentGP);
+   offsetsGP[1] = blocksGP[0] * extentGP;
+
+   MPI_Type_get_extent(MPI_INT, &lbGP, &extentGP);
+   offsetsGP[2] = offsetsGP[1] + blocksGP[1] * extentGP;
+
+   MPI_Type_create_struct(3, blocksGP, offsetsGP, typesGP, &gridParamType);
+   MPI_Type_commit(&gridParamType);
+
+   //-----------------------------------------------------------------------
+
+   MPI_Datatype typesBlock[2] = { MPI_INT, MPI_CHAR };
+   int blocksBlock[2] = { 13, 1 };
+   MPI_Aint offsetsBlock[2], lbBlock, extentBlock;
+
+   offsetsBlock[0] = 0;
+   MPI_Type_get_extent(MPI_INT, &lbBlock, &extentBlock);
+   offsetsBlock[1] = blocksBlock[0] * extentBlock;
+
+   MPI_Type_create_struct(2, blocksBlock, offsetsBlock, typesBlock, &block3dType);
+   MPI_Type_commit(&block3dType);
+
+   //-----------------------------------------------------------------------
+
+   MPI_Datatype typesBC[3] = { MPI_LONG_LONG_INT, MPI_FLOAT, MPI_CHAR };
+   int blocksBC[3] = { 5, 38, 1 };
+   MPI_Aint offsetsBC[3], lbBC, extentBC;
+
+   offsetsBC[0] = 0;
+   MPI_Type_get_extent(MPI_LONG_LONG_INT, &lbBC, &extentBC);
+   offsetsBC[1] = blocksBC[0] * extentBC;
+
+   MPI_Type_get_extent(MPI_FLOAT, &lbBC, &extentBC);
+   offsetsBC[2] = offsetsBC[1] + blocksBC[1] * extentBC;
+
+   MPI_Type_create_struct(3, blocksBC, offsetsBC, typesBC, &boundCondType);
+   MPI_Type_commit(&boundCondType);
+
+   //-----------------------------------------------------------------------
+
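+   // pack BLOCK_SIZE boundary-condition records into one contiguous type so
+   // large BC arrays can be moved with fewer MPI I/O calls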
+   MPI_Type_contiguous(BLOCK_SIZE, boundCondType, &boundCondType1000);
+   MPI_Type_commit(&boundCondType1000);
+
+   //---------------------------------------
+
+   MPI_Type_contiguous(7, MPI_INT, &dataSetParamType);
+   MPI_Type_commit(&dataSetParamType);
+
+   //---------------------------------------
+
+   MPI_Datatype typesDataSetRead[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR };
+   int blocksDataSetRead[3] = { 2, 5, 2 };
+   MPI_Aint offsetsDataSetRead[3], lbDataSetRead, extentDataSetRead;
+
+   offsetsDataSetRead[0] = 0;
+   MPI_Type_get_extent(MPI_DOUBLE, &lbDataSetRead, &extentDataSetRead);
+   offsetsDataSetRead[1] = blocksDataSetRead[0] * extentDataSetRead;
+
+   MPI_Type_get_extent(MPI_INT, &lbDataSetRead, &extentDataSetRead);
+   offsetsDataSetRead[2] = offsetsDataSetRead[1] + blocksDataSetRead[1] * extentDataSetRead;
+
+   MPI_Type_create_struct(3, blocksDataSetRead, offsetsDataSetRead, typesDataSetRead, &dataSetTypeRead);
+   MPI_Type_commit(&dataSetTypeRead);
+
+   //-----------------------------------------------------------------------
+
+   MPI_Datatype typesDataSetWrite[3] = { MPI_DOUBLE, MPI_INT, MPI_CHAR };
+   int blocksDataSetWrite[3] = { 2, 2, 2 };
+   MPI_Aint offsetsDataSetWrite[3], lbDataSetWrite, extentDataSetWrite;
+
+   offsetsDataSetWrite[0] = 0;
+   MPI_Type_get_extent(MPI_DOUBLE, &lbDataSetWrite, &extentDataSetWrite);
+   offsetsDataSetWrite[1] = blocksDataSetWrite[0] * extentDataSetWrite;
+
+   MPI_Type_get_extent(MPI_INT, &lbDataSetWrite, &extentDataSetWrite);
+   offsetsDataSetWrite[2] = offsetsDataSetWrite[1] + blocksDataSetWrite[1] * extentDataSetWrite;
+
+   MPI_Type_create_struct(3, blocksDataSetWrite, offsetsDataSetWrite, typesDataSetWrite, &dataSetTypeWrite);
+   MPI_Type_commit(&dataSetTypeWrite);
+
+}
+//////////////////////////////////////////////////////////////////////////
+CheckpointConverter::~CheckpointConverter()
+{
+   MPI_Type_free(&gridParamType);
+   MPI_Type_free(&block3dType);
+   MPI_Type_free(&boundCondType);
+   MPI_Type_free(&dataSetParamType);
+   MPI_Type_free(&dataSetTypeRead);
+   MPI_Type_free(&dataSetTypeWrite);
+   MPI_Type_free(&boundCondType1000);
+}
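
Each derived type committed in the constructor is freed here; the struct layouts are computed from member extents so the file records match the in-memory structs. A minimal self-contained example of the same pattern:

// Minimal example of the derived-type pattern used above: describe a record
// of 2 doubles + 3 ints to MPI, commit it, use it for file I/O, free it.
#include <mpi.h>

int main(int argc, char** argv)
{
   MPI_Init(&argc, &argv);

   MPI_Datatype types[2] = { MPI_DOUBLE, MPI_INT };
   int blocks[2] = { 2, 3 };
   MPI_Aint offsets[2], lb, extent;

   offsets[0] = 0;
   MPI_Type_get_extent(MPI_DOUBLE, &lb, &extent);
   offsets[1] = blocks[0] * extent; // ints start right after the doubles

   MPI_Datatype recordType;
   MPI_Type_create_struct(2, blocks, offsets, types, &recordType);
   MPI_Type_commit(&recordType);
   // ... MPI_File_read_at/MPI_File_write_at with recordType ...
   MPI_Type_free(&recordType);      // paired with the commit

   MPI_Finalize();
   return 0;
}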
+
+//------------------------------------------- READ -----------------------------------------------
+void CheckpointConverter::convert(int step, int procCount)
+{
+   UBLOG(logINFO, "UtilConvertor::convert start ");
+
+   convertBlocks(step, procCount);
+   convertDataSet(step, procCount);
+   convertBC(step, procCount);
+   
+   UBLOG(logINFO, "UtilConvertor::convert finish ");
+}
+
+void CheckpointConverter::convertBlocks(int step, int procCount)
+{
+   //   UBLOG(logINFO, "Physical Memory currently used by current process: " << Utilities::getPhysMemUsedByMe() / 1073741824.0 << " GB");
+
+   double start, finish;
+   start = MPI_Wtime();
+
+   // file to read from
+   MPI_File file_handlerR;
+   std::string filenameR = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBlocks.bin";
+   int rcR = MPI_File_open(MPI_COMM_WORLD, filenameR.c_str(), MPI_MODE_RDWR, MPI_INFO_NULL, &file_handlerR);
+   if (rcR != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filenameR);
+
+   // file to write to
+   MPI_File file_handlerW;
+   UbSystem::makeDirectory(path + "/mig/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step));
+   std::string filenameW = path + "/mig/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBlocks.bin";
+   int rcW = MPI_File_open(MPI_COMM_WORLD, filenameW.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handlerW);
+   if (rcW != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filenameW);
+
+   // read count of blocks
+   int blocksCount = 0;
+   MPI_File_read_at(file_handlerR, 0, &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
+   Block3d* block3dArray = new Block3d[blocksCount];
+   GridParam* gridParameters = new GridParam;
+
+   // calculate the read offset
+   procCount = 1; // readBlocks and writeBlocks in both MPIIORestartCoProcessor and MPIIOMigrationCoProcessor have size == 1!
+   MPI_Offset read_offset = (MPI_Offset)(procCount * sizeof(int));
+
+   // read parameters of the grid and blocks
+   MPI_File_read_at(file_handlerR, read_offset, gridParameters, 1, gridParamType, MPI_STATUS_IGNORE);
+   MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + sizeof(GridParam)), &block3dArray[0], blocksCount, block3dType, MPI_STATUS_IGNORE);
+
+   // clear the grid
+   std::vector<SPtr<Block3D>> blocksVector[25];
+   int minInitLevel = this->grid->getCoarsestInitializedLevel();
+   int maxInitLevel = this->grid->getFinestInitializedLevel();
+   for (int level = minInitLevel; level <= maxInitLevel; level++)
+   {
+      grid->getBlocks(level, blocksVector[level]);
+      for (SPtr<Block3D> block : blocksVector[level])  //	blocks of the current level
+         grid->deleteBlock(block);
+   }
+
+   // regenerate blocks
+   for (int n = 0; n < blocksCount; n++)
+   {
+      SPtr<Block3D> block(new Block3D(block3dArray[n].x1, block3dArray[n].x2, block3dArray[n].x3, block3dArray[n].level));
+      block->setActive(block3dArray[n].active);
+      block->setBundle(block3dArray[n].bundle);
+      block->setRank(block3dArray[n].rank);
+      block->setLocalRank(block3dArray[n].lrank);
+      block->setGlobalID(block3dArray[n].globalID);
+      block->setLocalID(block3dArray[n].localID);
+      block->setPart(block3dArray[n].part);
+      block->setLevel(block3dArray[n].level);
+      block->setCollectionOfInterpolationFlagCF(block3dArray[n].interpolationFlagCF);
+      block->setCollectionOfInterpolationFlagFC(block3dArray[n].interpolationFlagFC);
+      
+      grid->addBlock(block);
+   }
+
+   // renumber blocks
+   grid->renumberBlockIDs();
+
+   // refresh globalID in all the blocks
+   SPtr<Block3D> block;
+   for (int n = 0; n < blocksCount; n++)
+   {
+      block = grid->getBlock(block3dArray[n].x1, block3dArray[n].x2, block3dArray[n].x3, block3dArray[n].level);
+      block3dArray[n].globalID = block->getGlobalID();
+   }
+
+   // write all data to the file
+   MPI_Offset write_offset = read_offset;
+   MPI_File_write_at(file_handlerW, 0, &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
+   MPI_File_write_at(file_handlerW, write_offset, gridParameters, 1, gridParamType, MPI_STATUS_IGNORE);
+   MPI_File_write_at(file_handlerW, (MPI_Offset)(write_offset + sizeof(GridParam)), &block3dArray[0], blocksCount, block3dType, MPI_STATUS_IGNORE);
+
+   MPI_File_close(&file_handlerR);
+   MPI_File_close(&file_handlerW);
+
+   finish = MPI_Wtime();
+   UBLOG(logINFO, "UtilConvertor::convertBlocks time: " << finish - start << " s");
+
+   delete gridParameters;
+   delete[] block3dArray;
+}
+
+void CheckpointConverter::convertDataSet(int step, int procCount)
+{
+   // file to read from
+   MPI_File file_handlerR;
+   std::string filenameR = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
+   int rcR = MPI_File_open(MPI_COMM_WORLD, filenameR.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handlerR);
+   if (rcR != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filenameR);
+
+   // file to write to
+   MPI_File file_handlerW;
+   std::string filenameW = path + "/mig/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpDataSet.bin";
+   int rcW = MPI_File_open(MPI_COMM_WORLD, filenameW.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handlerW);
+   if (rcW != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filenameW);
+
+   double start, finish;
+   start = MPI_Wtime();
+
+   int blocksCount = 0;
+   dataSetParam dataSetParamStr1, dataSetParamStr2, dataSetParamStr3;
+   DataSetRead* dataSetReadArray;
+   DataSetWrite* dataSetWriteArray;
+   size_t doubleCountInBlock;
+   std::vector<double> doubleValuesArray;
+   size_t sizeofOneDataSet;
+
+   // calculate the read offset
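+   // (the restart file begins with one block-count int per writer process, hence procCount * sizeof(int))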
+   MPI_Offset read_offset = (MPI_Offset)(procCount * sizeof(int));
+   MPI_Offset write_offset;
+
+   for (int pc = 0; pc < procCount; pc++)
+   {
+      // read count of blocks and parameters of data arrays
+      MPI_File_read_at(file_handlerR, (MPI_Offset)(pc * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
+      MPI_File_read_at(file_handlerR, read_offset, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE);
+      MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
+      MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + 2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
+      doubleCountInBlock = dataSetParamStr1.nx[0] * dataSetParamStr1.nx[1] * dataSetParamStr1.nx[2] * dataSetParamStr1.nx[3] +
+         dataSetParamStr2.nx[0] * dataSetParamStr2.nx[1] * dataSetParamStr2.nx[2] * dataSetParamStr2.nx[3] +
+         dataSetParamStr3.nx[0] * dataSetParamStr3.nx[1] * dataSetParamStr3.nx[2] * dataSetParamStr3.nx[3];
+
+      dataSetReadArray = new DataSetRead[blocksCount];
+      dataSetWriteArray = new DataSetWrite[blocksCount];
+      doubleValuesArray.resize(blocksCount * doubleCountInBlock);
+
+      // read data
+      MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam)), dataSetReadArray, blocksCount, dataSetTypeRead, MPI_STATUS_IGNORE);
+      MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + 3 * sizeof(dataSetParam) + blocksCount * sizeof(DataSetRead)), 
+         &doubleValuesArray[0], blocksCount * (int)doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
+     
+      // offset to read the data of the next process
+      read_offset = read_offset + (MPI_Offset)(3 * sizeof(dataSetParam) + blocksCount * (sizeof(DataSetRead) + doubleCountInBlock * sizeof(double)));
+
+      // write parameters of data arrays
+      MPI_File_write_at(file_handlerW, (MPI_Offset)0, &dataSetParamStr1, 1, dataSetParamType, MPI_STATUS_IGNORE);
+      MPI_File_write_at(file_handlerW, (MPI_Offset)(sizeof(dataSetParam)), &dataSetParamStr2, 1, dataSetParamType, MPI_STATUS_IGNORE);
+      MPI_File_write_at(file_handlerW, (MPI_Offset)(2 * sizeof(dataSetParam)), &dataSetParamStr3, 1, dataSetParamType, MPI_STATUS_IGNORE);
+      
+      sizeofOneDataSet = sizeof(DataSetWrite) + doubleCountInBlock * sizeof(double);
+
+      // write blocks and their data arrays
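+      // migration format: each block's record is placed at globalID * sizeofOneDataSet behind the
+      // three dataSetParam headers, so a reader can seek to any block directly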
+      for (int nb = 0; nb < blocksCount; nb++)
+      {
+         SPtr<Block3D> block = grid->getBlock(dataSetReadArray[nb].x1, dataSetReadArray[nb].x2, dataSetReadArray[nb].x3, dataSetReadArray[nb].level);
+         dataSetWriteArray[nb].globalID = block->getGlobalID();
+         dataSetWriteArray[nb].ghostLayerWidth = dataSetReadArray[nb].ghostLayerWidth;
+         dataSetWriteArray[nb].collFactor = dataSetReadArray[nb].collFactor;
+         dataSetWriteArray[nb].deltaT = dataSetReadArray[nb].deltaT;
+         dataSetWriteArray[nb].compressible = dataSetReadArray[nb].compressible;
+         dataSetWriteArray[nb].withForcing = dataSetReadArray[nb].withForcing;
+
+         write_offset = (MPI_Offset)(3 * sizeof(dataSetParam) + dataSetWriteArray[nb].globalID * sizeofOneDataSet);
+         MPI_File_write_at(file_handlerW, write_offset, &dataSetWriteArray[nb], 1, dataSetTypeWrite, MPI_STATUS_IGNORE);
+         MPI_File_write_at(file_handlerW, (MPI_Offset)(write_offset + sizeof(DataSetWrite)), &doubleValuesArray[nb * doubleCountInBlock], 
+            doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
+      }
+
+      delete[] dataSetReadArray;
+      delete[] dataSetWriteArray;
+   }
+
+   MPI_File_close(&file_handlerR);
+
+   MPI_File_sync(file_handlerW);
+   MPI_File_close(&file_handlerW);
+
+   DSArraysPresence arrPresence;
+   MPI_File file_handler1;
+   std::string filename1 = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpArrays.bin";
+   int rc = MPI_File_open(MPI_COMM_WORLD, filename1.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handler1);
+   if (rc != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename1);
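+   // the six presence flags are transferred as raw chars, which assumes sizeof(bool) == 1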
+   MPI_File_read_at(file_handler1, (MPI_Offset)0, &arrPresence, 6, MPI_CHAR, MPI_STATUS_IGNORE);
+   MPI_File_close(&file_handler1);
+
+   MPI_File file_handler2;
+   std::string filename2 = path + "/mig/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpArrays.bin";
+   int rc2 = MPI_File_open(MPI_COMM_WORLD, filename2.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handler2);
+   if (rc2 != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filename2);
+   MPI_File_write_at(file_handler2, (MPI_Offset)0, &arrPresence, 6, MPI_CHAR, MPI_STATUS_IGNORE);
+   MPI_File_sync(file_handler2);
+   MPI_File_close(&file_handler2);
+
+   std::string filenameRR = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step);
+   std::string filenameWW = path + "/mig/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step);
+
+   if (arrPresence.isAverageDensityArrayPresent)
+      convert___Array(step, procCount, filenameRR + "/cpAverageDensityArray.bin", filenameWW + "/cpAverageDensityArray.bin");
+
+   if (arrPresence.isAverageVelocityArrayPresent)
+      convert___Array(step, procCount, filenameRR + "/cpAverageVelocityArray.bin", filenameWW + "/cpAverageVelocityArray.bin");
+
+   if (arrPresence.isAverageFluktuationsArrayPresent)
+      convert___Array(step, procCount, filenameRR + "/cpAverageFluktuationsArray.bin", filenameWW + "/cpAverageFluktuationsArray.bin");
+
+   if (arrPresence.isAverageTripleArrayPresent)
+      convert___Array(step, procCount, filenameRR + "/cpAverageTripleArray.bin", filenameWW + "/cpAverageTripleArray.bin");
+
+   if (arrPresence.isShearStressValArrayPresent)
+      convert___Array(step, procCount, filenameRR + "/cpShearStressValArray.bin", filenameWW + "/cpShearStressValArray.bin");
+
+   if (arrPresence.isRelaxationFactorPresent)
+      convert___Array(step, procCount, filenameRR + "/cpRelaxationFactor.bin", filenameWW + "/cpRelaxationFactor.bin");
+
+   finish = MPI_Wtime();
+   UBLOG(logINFO, "UtilConvertor::convertDataSet time: " << finish - start << " s");
+
+}
+
+void CheckpointConverter::convert___Array(int step, int procCount, const std::string& filenameR, const std::string& filenameW)
+{
+   double start, finish;
+   start = MPI_Wtime();   // set on every rank; 'finish - start' below is evaluated unconditionally
+
+   MPI_File file_handlerR;
+   int rcR = MPI_File_open(MPI_COMM_WORLD, filenameR.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handlerR);
+   if (rcR != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filenameR);
+
+   MPI_File file_handlerW;
+   int rcW = MPI_File_open(MPI_COMM_WORLD, filenameW.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handlerW);
+   if (rcW != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filenameW);
+
+   int blocksCount = 0;
+   dataSetParam dataSetParamStr;
+   memset(&dataSetParamStr, 0, sizeof(dataSetParam));
+   DataSetSmallRead* dataSetSmallReadArray;
+   DataSetSmallWrite* dataSetSmallWriteArray;
+   int doubleCountInBlock;
+   std::vector<double> doubleValuesArray;
+
+   // calculate the read offset
+   MPI_Offset read_offset = (MPI_Offset)(procCount * sizeof(int));
+   MPI_Offset write_offset;
+   size_t sizeofOneDataSet;
+
+   for (int pc = 0; pc < procCount; pc++)
+   {
+      MPI_File_read_at(file_handlerR, (MPI_Offset)(pc * sizeof(int)), &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
+      MPI_File_read_at(file_handlerR, read_offset, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
+
+      dataSetSmallReadArray = new DataSetSmallRead[blocksCount];
+      dataSetSmallWriteArray = new DataSetSmallWrite[blocksCount];
+      doubleCountInBlock = dataSetParamStr.nx[0] * dataSetParamStr.nx[1] * dataSetParamStr.nx[2] * dataSetParamStr.nx[3];
+      doubleValuesArray.resize(blocksCount * doubleCountInBlock); 
+
+      MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + sizeof(dataSetParam)), dataSetSmallReadArray, blocksCount * 4, MPI_INT, MPI_STATUS_IGNORE);
+      if (doubleCountInBlock > 0)
+         MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + sizeof(dataSetParam) + blocksCount * sizeof(DataSetSmallRead)), 
+            &doubleValuesArray[0], blocksCount * doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
+
+      read_offset = read_offset + sizeof(dataSetParam) + blocksCount * (sizeof(DataSetSmallRead) + doubleCountInBlock * sizeof(double));
+
+      sizeofOneDataSet = sizeof(DataSetSmallWrite) + doubleCountInBlock * sizeof(double);
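+      // as in convertDataSet, every output record is addressed by globalID * sizeofOneDataSet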
+      
+      MPI_File_write_at(file_handlerW, 0, &dataSetParamStr, 1, dataSetParamType, MPI_STATUS_IGNORE);
+
+      for (int nb = 0; nb < blocksCount; nb++)
+      {
+         SPtr<Block3D> block = grid->getBlock(dataSetSmallReadArray[nb].x1, dataSetSmallReadArray[nb].x2, dataSetSmallReadArray[nb].x3, dataSetSmallReadArray[nb].level);
+         dataSetSmallWriteArray[nb].globalID = block->getGlobalID();    
+
+         write_offset = (MPI_Offset)(sizeof(dataSetParam) + dataSetSmallWriteArray[nb].globalID * sizeofOneDataSet);
+         MPI_File_write_at(file_handlerW, write_offset, &dataSetSmallWriteArray[nb], 1, MPI_INT, MPI_STATUS_IGNORE);
+         MPI_File_write_at(file_handlerW, (MPI_Offset)(write_offset + sizeof(DataSetSmallWrite)), 
+            &doubleValuesArray[nb * doubleCountInBlock], doubleCountInBlock, MPI_DOUBLE, MPI_STATUS_IGNORE);
+      }
+
+      delete[] dataSetSmallReadArray;
+      delete[] dataSetSmallWriteArray;
+   }
+   MPI_File_close(&file_handlerR);
+
+   MPI_File_sync(file_handlerW);
+   MPI_File_close(&file_handlerW);
+   
+   finish = MPI_Wtime();
+   UBLOG(logINFO, "UtilConvertor::convert___Array time: " << finish - start << " s");
+
+}
+
+void CheckpointConverter::convertBC(int step, int procCount)
+{
+   // file to read from
+   MPI_File file_handlerR;
+   std::string filenameR = path + "/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC.bin";
+   int rcR = MPI_File_open(MPI_COMM_WORLD, filenameR.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handlerR);
+   if (rcR != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filenameR);
+
+   // file to write to
+   MPI_File file_handlerW;
+   std::string filenameW = path + "/mig/mpi_io_cp/mpi_io_cp_" + UbSystem::toString(step) + "/cpBC.bin";
+   int rcW = MPI_File_open(MPI_COMM_WORLD, filenameW.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &file_handlerW);
+   if (rcW != MPI_SUCCESS) throw UbException(UB_EXARGS, "couldn't open file " + filenameW);
+
+   double start, finish;
+   start = MPI_Wtime();
+
+   int blocksCount = 0;
+   int dataCount1000 = 0;
+   int dataCount2 = 0;
+   size_t dataCount;
+   BCAddRead* bcAddReadArray;
+   BCAddWrite* bcAddWriteArray;
+   BoundaryCondition* bcArray;
+   int* intArray1;
+   int* intArray2;
+   int indexBC;
+   int indexC;
+
+   MPI_Offset read_offset;
+   MPI_Offset read_offset1 = (MPI_Offset)(procCount * (3 * sizeof(int) + sizeof(boundCondParam)));
+   MPI_Offset write_offset = (MPI_Offset)(sizeof(boundCondParam) + grid->getNumberOfBlocks() * sizeof(size_t));
+   MPI_Offset write_offsetIndex;
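+   // migration BC file layout: a boundCondParam header, then an index table holding one
+   // file offset (size_t) per block addressed by globalID, then the variable-length BC records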
+
+   for (int pc = 0; pc < procCount; pc++)
+   {
+      read_offset = (MPI_Offset)(pc * (3 * sizeof(int) + sizeof(boundCondParam)));
+
+      // read count of blocks
+      MPI_File_read_at(file_handlerR, read_offset, &blocksCount, 1, MPI_INT, MPI_STATUS_IGNORE);
+      // read count of big BoundaryCondition blocks
+      MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + sizeof(int)), &dataCount1000, 1, MPI_INT, MPI_STATUS_IGNORE);
+      // read count of indexContainer values in all blocks
+      MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + 2 * sizeof(int)), &dataCount2, 1, MPI_INT, MPI_STATUS_IGNORE);
+      // read count of bcindexmatrix values in every block
+      MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset + 3 * sizeof(int)), &boundCondParamStr, 4, MPI_INT, MPI_STATUS_IGNORE);
+
+      bcAddReadArray = new BCAddRead[blocksCount];
+      bcAddWriteArray = new BCAddWrite[blocksCount];
+      dataCount = dataCount1000 * BLOCK_SIZE;
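+      // the restart writer stored BoundaryConditions in fixed chunks of BLOCK_SIZE structures;
+      // dataCount1000 counts those chunks, so it is converted back to an element count here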
+      bcArray = new BoundaryCondition[dataCount];
+      intArray1 = new int[blocksCount * boundCondParamStr.bcindexmatrixCount];
+      intArray2 = new int[dataCount2];
+
+      // read data arrays
+      MPI_File_read_at(file_handlerR, read_offset1, bcAddReadArray, blocksCount * 6, MPI_INT, MPI_STATUS_IGNORE);
+      MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset1 + blocksCount * sizeof(BCAddRead)),
+         &bcArray[0], dataCount1000, boundCondType1000, MPI_STATUS_IGNORE);
+      MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset1 + blocksCount * sizeof(BCAddRead) + dataCount * sizeof(BoundaryCondition)),
+         &intArray1[0], blocksCount * boundCondParamStr.bcindexmatrixCount, MPI_INT, MPI_STATUS_IGNORE);
+      MPI_File_read_at(file_handlerR, (MPI_Offset)(read_offset1 + blocksCount * sizeof(BCAddRead) + dataCount * sizeof(BoundaryCondition) + blocksCount * boundCondParamStr.bcindexmatrixCount * sizeof(int)),
+         &intArray2[0], dataCount2, MPI_INT, MPI_STATUS_IGNORE);
+
+      // offset to read the data of the next process
+      read_offset1 = read_offset1 + blocksCount * sizeof(BCAddRead) + dataCount * sizeof(BoundaryCondition) + (blocksCount * boundCondParamStr.bcindexmatrixCount + dataCount2) * sizeof(int);
+
+      MPI_File_write_at(file_handlerW, 0, &boundCondParamStr, 4, MPI_INT, MPI_STATUS_IGNORE);
+
+      indexBC = 0;
+      indexC = 0;
+      // write blocks and their BC data arrays
+      for (int nb = 0; nb < blocksCount; nb++)
+      {
+         SPtr<Block3D> block = grid->getBlock(bcAddReadArray[nb].x1, bcAddReadArray[nb].x2, bcAddReadArray[nb].x3, bcAddReadArray[nb].level);
+         bcAddWriteArray[nb].globalID = block->getGlobalID(); 
+         bcAddWriteArray[nb].boundCond_count = bcAddReadArray[nb].boundCond_count;             // how many BoundaryConditions in this block
+         bcAddWriteArray[nb].indexContainer_count = bcAddReadArray[nb].indexContainer_count;        // how many indexContainer-values in this block
+
+         write_offsetIndex = (MPI_Offset)(sizeof(boundCondParam) + bcAddWriteArray[nb].globalID * sizeof(size_t));
+         MPI_File_write_at(file_handlerW, write_offsetIndex, &write_offset, 1, MPI_LONG_LONG_INT, MPI_STATUS_IGNORE);
+
+         MPI_File_write_at(file_handlerW, write_offset, &bcAddWriteArray[nb], 3, MPI_INT, MPI_STATUS_IGNORE);
+         if (bcAddWriteArray[nb].boundCond_count > 0)
+            MPI_File_write_at(file_handlerW, (MPI_Offset)(write_offset + sizeof(BCAddWrite)), &bcArray[indexBC], bcAddWriteArray[nb].boundCond_count, boundCondType, MPI_STATUS_IGNORE);
+         indexBC += bcAddWriteArray[nb].boundCond_count;
+
+         if (boundCondParamStr.bcindexmatrixCount > 0)
+            MPI_File_write_at(file_handlerW, (MPI_Offset)(write_offset + sizeof(BCAddWrite) + bcAddWriteArray[nb].boundCond_count * sizeof(BoundaryCondition)),
+               &intArray1[nb * boundCondParamStr.bcindexmatrixCount * sizeof(int)], boundCondParamStr.bcindexmatrixCount, MPI_INT, MPI_STATUS_IGNORE);
+
+         if (bcAddWriteArray[nb].indexContainer_count > 0)
+            MPI_File_write_at(file_handlerW, (MPI_Offset)(write_offset + sizeof(BCAddWrite) + bcAddWriteArray[nb].boundCond_count * sizeof(BoundaryCondition) + boundCondParamStr.bcindexmatrixCount * sizeof(int)),
+               &intArray2[indexC], bcAddWriteArray[nb].indexContainer_count, MPI_INT, MPI_STATUS_IGNORE);
+         indexC += bcAddWriteArray[nb].indexContainer_count;
+
+         write_offset += sizeof(BCAddWrite) + bcAddWriteArray[nb].boundCond_count * sizeof(BoundaryCondition) + boundCondParamStr.bcindexmatrixCount * sizeof(int) + bcAddWriteArray[nb].indexContainer_count * sizeof(int);
+      }
+
+      delete[] bcAddReadArray;
+      delete[] bcAddWriteArray;
+      delete[] bcArray;
+      delete[] intArray1;
+      delete[] intArray2;
+   }
+
+   MPI_File_close(&file_handlerR);
+
+   MPI_File_sync(file_handlerW);
+   MPI_File_close(&file_handlerW);
+
+   finish = MPI_Wtime();
+   UBLOG(logINFO, "UtilConvertor::convertBC time: " << finish - start << " s");
+
+}
diff --git a/source/VirtualFluidsCore/Utilities/CheckpointConverter.h b/source/VirtualFluidsCore/Utilities/CheckpointConverter.h
new file mode 100644
index 0000000000000000000000000000000000000000..f51a306efead14bb3749b9526f28a60c4aa3bd2c
--- /dev/null
+++ b/source/VirtualFluidsCore/Utilities/CheckpointConverter.h
@@ -0,0 +1,212 @@
+#ifndef CHECKPOINTCONVERTER_H
+#define CHECKPOINTCONVERTER_H
+
+#include <mpi.h>
+#include <PointerDefinitions.h>
+#include <string>
+#include <vector>
+
+class Grid3D;
+class Communicator;
+
+//! \class CheckpointConverter
+//! \brief Converts timestep data from MPIIORestartCoProcessor format into MPIIOMigrationCoProcessor format  
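+//! \details A minimal usage sketch (assuming the restart files for the given time step
+//! exist under <path>/mpi_io_cp; convertBlocks() rebuilds the grid from them before the
+//! data-set and BC conversions run):
+//! \code
+//! CheckpointConverter converter(grid, path, comm);
+//! converter.convert(step, procCount);   // expected to run the convertBlocks/convertDataSet/convertBC steps
+//! \endcode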
+class CheckpointConverter
+{
+   //! \struct GridParam
+   //! \brief Structure describes parameters of the grid
+   //! \details The structure is necessary to restore the grid correctly
+   struct GridParam
+   {
+      double trafoParams[33];
+      double deltaX;
+      int blockNx1;
+      int blockNx2;
+      int blockNx3;
+      int nx1;
+      int nx2;
+      int nx3;
+      bool periodicX1;
+      bool periodicX2;
+      bool periodicX3;
+      bool active;
+      bool transformation;
+   };
+
+   //! \struct Block3d
+   //! \brief Structure contains information of the block
+   //! \details The structure is used to write the data describing the block in the grid when saving the grid 
+   //! and to read it when restoring the grid
+   struct Block3d
+   {
+      int x1;
+      int x2;
+      int x3;
+      int bundle;
+      int rank;
+      int lrank;
+      int part;
+      int globalID;
+      int localID;
+      int level;
+      int interpolationFlagCF;
+      int interpolationFlagFC;
+      int counter;
+      bool active;
+   };
+
+   //! \struct dataSetParam
+   //! \brief Structure describes parameters of the dataSet that are equal in all blocks
+   //! \details The structure is used to store some parameters needed to restore the dataSet arrays
+   struct dataSetParam
+   {
+      int nx1;
+      int nx2;
+      int nx3;
+      int nx[4]; //nx1, nx2, nx3, nx4
+   };
+
+   //! \struct DataSetRead
+   //! \brief Structure describes parameters of the dataSet in MPIIORestartCoProcessor format
+   //! \details The structure is used when reading from the file
+   struct DataSetRead
+   {
+      double collFactor;
+      double deltaT;
+      int x1;
+      int x2;
+      int x3;
+      int level;
+      int ghostLayerWidth;
+      bool compressible;
+      bool withForcing;
+   };
+
+   //! \struct DataSetWrite
+   //! \brief Structure describes parameters of the dataSet in MPIIOMigrationCoProcessor format
+   //! \details The structure is used when writing to the file
+   struct DataSetWrite
+   {
+      double collFactor;
+      double deltaT;
+      int globalID;
+      int ghostLayerWidth;
+      bool compressible;
+      bool withForcing;
+   };
+
+   //! \struct DataSetSmallRead
+   //! \brief Structure describes parameters of the DataSetSmall in MPIIORestartCoProcessor format
+   //! \details The structure is used when reading from the file
+   struct DataSetSmallRead
+   {
+      int x1;
+      int x2;
+      int x3;
+      int level;
+   };
+
+   //! \struct DataSetSmallWrite
+   //! \brief Structure describes parameters of the DataSetSmall in MPIIOMigrationCoProcessor format
+   //! \details The structure is used when writing to the file
+   struct DataSetSmallWrite
+   {
+      int globalID;
+   };
+
+   //! \struct BoundaryCondition
+   //! \brief Structure contains information about the boundary conditions of a block
+   //! \details The structure is used to write data describing boundary conditions of the blocks when saving the grid 
+   //! and to read it when restoring the grid
+   struct BoundaryCondition
+   {
+      long long noslipBoundaryFlags;	//	MPI_LONG_LONG
+      long long slipBoundaryFlags;
+      long long velocityBoundaryFlags;
+      long long densityBoundaryFlags;
+      long long wallModelBoundaryFlags;
+
+      float  bcVelocityX1;
+      float  bcVelocityX2;
+      float  bcVelocityX3;
+      float  bcDensity;
+
+      float  bcLodiDensity;
+      float  bcLodiVelocityX1;
+      float  bcLodiVelocityX2;
+      float  bcLodiVelocityX3;
+      float  bcLodiLentgh;
+
+      float  nx1, nx2, nx3;
+      float q[26];					//	MPI_FLOAT
+
+      char algorithmType;
+   };
+
+   //! \struct boundCondParam
+   //! \brief Structure describes parameters of the boundaryConditions that are equal in all blocks
+   //! \details The structure is used to store some parameters needed to restore the boundaryConditions arrays
+   struct boundCondParam
+   {
+      int nx1;
+      int nx2;
+      int nx3;
+      int bcindexmatrixCount;	// how many bcindexmatrix-values in one (any) block 
+   };
+
+   //! \struct BCAddRead
+   //! \brief Structure describes parameters of the BCAdd in MPIIORestartCoProcessor format
+   //! \details The structure is used when reading from the file
+   struct BCAddRead
+   {
+      int x1;		//	to find the right block
+      int x2;
+      int x3;
+      int level;
+      int boundCond_count;		//	how many BoundaryCondition-structures are in this block
+      int indexContainer_count;	// how many indexContainer-values are in this block
+   };
+
+   //! \struct BCAddWrite
+   //! \brief Structure describes parameters of the BCAdd in MPIIOMigrationCoProcessor format
+   //! \details The structure is used when writing to the file
+   struct BCAddWrite
+   {
+      int globalID;
+      int boundCond_count;		//	how many BoundaryCondition-structures are in this block
+      int indexContainer_count;	// how many indexContainer-values are in this block
+   };
+
+   struct DSArraysPresence
+   {
+      bool isAverageDensityArrayPresent;
+      bool isAverageVelocityArrayPresent;
+      bool isAverageFluktuationsArrayPresent;
+      bool isAverageTripleArrayPresent;
+      bool isShearStressValArrayPresent;
+      bool isRelaxationFactorPresent;
+   };
+
+public:
+   CheckpointConverter(SPtr<Grid3D> grid, const std::string& path, SPtr<Communicator> comm);
+   virtual ~CheckpointConverter();
+
+   void convert(int step, int procCount);
+   void convertBlocks(int step, int procCount);
+   void convertDataSet(int step, int procCount);
+   void convertBC(int step, int procCount);
+   void convert___Array(int step, int procCount, const std::string& filenameR, const std::string& filenameW);
+
+protected:
+   std::string path;
+   SPtr<Communicator> comm;
+   SPtr<Grid3D> grid;
+
+private:
+   MPI_Datatype gridParamType, block3dType, dataSetParamType, boundCondType;
+   MPI_Datatype dataSetTypeRead, dataSetTypeWrite, boundCondType1000;
+
+   boundCondParam boundCondParamStr;
+};
+
+#endif // CHECKPOINTCONVERTER_H