From 66db3c83fa7aae3cdd6b6a6d72dd7b2a1376225b Mon Sep 17 00:00:00 2001
From: Soeren Peters <peters@irmb.tu-bs.de>
Date: Thu, 10 Sep 2020 13:42:43 +0200
Subject: [PATCH] Set CXX standard to 14. Move VirtualFluids header to src. Add
 CI file and add files that exist only in the open-source version.

---
 .gitlab-ci.yml                                |  22 +
 CMake/VirtualFluidsMacros.cmake               |  10 +-
 CMake/cmake_config_files/TESLA01.config.cmake |   2 +-
 CMakeLists.txt                                |   2 +-
 apps/cpu/LidDrivenCavity/CMakeLists.txt       |   4 +-
 apps/cpu/LidDrivenCavity/LidDrivenCavity.cpp  |  26 +-
 cpu.cmake                                     |  10 +-
 gpu.cmake                                     |   7 +
 .../geometry3d/CoordinateTransformation3D.h   |  10 +-
 {apps => src}/cpu/VirtualFluids.h             |  47 +-
 .../LBM/CumulantK17LBMKernel.cpp              | 648 ++++++++++++++++++
 .../LBM/CumulantK17LBMKernel.h                | 146 ++++
 src/gpu/GksGpu/Output/VtkWriter.cpp           | 146 ++++
 src/gpu/GksGpu/Output/VtkWriter.h             |  52 ++
 14 files changed, 1088 insertions(+), 44 deletions(-)
 create mode 100644 .gitlab-ci.yml
 rename {apps => src}/cpu/VirtualFluids.h (83%)
 create mode 100644 src/cpu/VirtualFluidsCore/LBM/CumulantK17LBMKernel.cpp
 create mode 100644 src/cpu/VirtualFluidsCore/LBM/CumulantK17LBMKernel.h
 create mode 100644 src/gpu/GksGpu/Output/VtkWriter.cpp
 create mode 100644 src/gpu/GksGpu/Output/VtkWriter.h

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 000000000..6d12cb41b
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,22 @@
+image: ubuntu
+
+stages:
+  - build
+
+build:
+  stage: build
+
+  before_script:
+    - export CAB_MACHINE=ELLADAN
+    - export DEBIAN_FRONTEND=noninteractive
+    - apt-get update
+    - apt-get install build-essential -y
+    - apt-get install cmake -y
+    - apt-get install ninja-build -y
+    - apt-get install openmpi-bin -y
+    - apt-get install libomp-dev -y
+    - apt-get install nvidia-cuda-dev  nvidia-cuda-toolkit -y
+
+  script:
+    - cmake -S . -B build -DBUILD_VF_CPU:BOOL=ON
+    - cmake --build build --target VirtualFluidsCore
\ No newline at end of file
diff --git a/CMake/VirtualFluidsMacros.cmake b/CMake/VirtualFluidsMacros.cmake
index 83274c2fb..9aeeb49c1 100644
--- a/CMake/VirtualFluidsMacros.cmake
+++ b/CMake/VirtualFluidsMacros.cmake
@@ -46,9 +46,9 @@ endfunction()
 
 #################################################################################
 ## Add a target, link the libraries and add the compiler flags to the target
-## The name of the target is vf_get_library_name().
 ##
 ## parameter:
+## NAME      - Name of the target. If not passed the name is vf_get_library_name().
 ## BUILDTYPE - STATIC; SHARED; EXECUTABLE
 ## DEPENDS   - libraries to link
 ## FILES     - adds these files to the target
@@ -63,10 +63,14 @@ function(vf_add_library)
 
     set( options )
     set( oneValueArgs )
-    set( multiValueArgs BUILDTYPE DEPENDS FILES FOLDER EXCLUDE)
+    set( multiValueArgs NAME BUILDTYPE DEPENDS FILES FOLDER EXCLUDE)
     cmake_parse_arguments( ARG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
 
-    vf_get_library_name (library_name)
+    if(${ARG_NAME})
+        set(library_name ${ARG_NAME})
+    else()
+        vf_get_library_name (library_name)
+    endif()
     status("Configuring the target: ${library_name} (type=${ARG_BUILDTYPE})...")
 
 
diff --git a/CMake/cmake_config_files/TESLA01.config.cmake b/CMake/cmake_config_files/TESLA01.config.cmake
index bc3b0e796..dcf273d03 100644
--- a/CMake/cmake_config_files/TESLA01.config.cmake
+++ b/CMake/cmake_config_files/TESLA01.config.cmake
@@ -15,4 +15,4 @@ IF(${USE_METIS})
     SET(METIS_INCLUDEDIR "C:/Libraries/metis-5.1.0//include")
     SET(METIS_DEBUG_LIBRARY "C:/Libraries/metis-5.1.0/build/libmetis/Debug/metis.lib")
     SET(METIS_RELEASE_LIBRARY "C:/Libraries/metis-5.1.0/build/libmetis/Release/metis.lib")
-ENDIF()
+ENDIF()
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3cb629f54..a167d96bd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -13,7 +13,7 @@ cmake_minimum_required(VERSION 3.13..3.18 FATAL_ERROR)
 
 project(VirtualFluids CXX)
 
-set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD 14)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
 
 set_property(GLOBAL PROPERTY USE_FOLDERS ON)
diff --git a/apps/cpu/LidDrivenCavity/CMakeLists.txt b/apps/cpu/LidDrivenCavity/CMakeLists.txt
index a2518ab60..9527e73ea 100644
--- a/apps/cpu/LidDrivenCavity/CMakeLists.txt
+++ b/apps/cpu/LidDrivenCavity/CMakeLists.txt
@@ -1,6 +1,4 @@
 
 PROJECT(LidDrivenCavity)
 
-INCLUDE(${APPS_ROOT}/IncludsList.cmake)
-
-vf_add_library(BUILDTYPE binary DEPENDS VirtualFluidsCore VirtualFluidsBasic FILES LidDrivenCavity.cpp)
+vf_add_library(NAME LidDrivenCavityCPU BUILDTYPE binary DEPENDS VirtualFluidsCore basics muparser FILES LidDrivenCavity.cpp)
diff --git a/apps/cpu/LidDrivenCavity/LidDrivenCavity.cpp b/apps/cpu/LidDrivenCavity/LidDrivenCavity.cpp
index 6eb64ffe2..bde6df56d 100644
--- a/apps/cpu/LidDrivenCavity/LidDrivenCavity.cpp
+++ b/apps/cpu/LidDrivenCavity/LidDrivenCavity.cpp
@@ -10,8 +10,8 @@
 //        \    \|    |  |  |         __          __     __     __     ______      _______    
 //         \         |  |  |_____   |  |        |  |   |  |   |  |   |   _  \    /  _____)   
 //          \        |  |   _____|  |  |        |  |   |  |   |  |   |  | \  \   \_______    
-//           \       |  |  |        |  |_____   |   \_/   |   |  |   |  |_/  /    _____  \   
-//            \ _____|  |__|        |________|   \_______/    |__|   |______/    (_______/   
+//           \       |  |  |        |  |_____   |   \_/   |   |  |   |  |_/  /    _____  |
+//            \ _____|  |__|        |________|   \_______/    |__|   |______/    (_______/
 //
 //  This file is part of VirtualFluids. VirtualFluids is free software: you can 
 //  redistribute it and/or modify it under the terms of the GNU General Public
@@ -26,7 +26,7 @@
 //  You should have received a copy of the GNU General Public License along
 //  with VirtualFluids (see COPYING.txt). If not, see <http://www.gnu.org/licenses/>.
 //
-//! \file ldc.cpp
+//! \file LidDrivenCavity.cpp
 //! \ingroup Applications
 //! \author Konstantin Kutscher
 //=======================================================================================
@@ -44,7 +44,9 @@ int main(int argc, char* argv[])
       //////////////////////////////////////////////////////////////////////////
       // Simulation parameters
       //////////////////////////////////////////////////////////////////////////
-      string path = "d:/temp/LidDrivenCavityClean";
+
+      // set your output path here
+      string path = "./output";
 
       const double L = 1.0;
       const double Re = 1000.0;
@@ -52,11 +54,11 @@ int main(int argc, char* argv[])
       const double dt = 0.5e-3;
       const unsigned int nx = 64;
 
-      const double timeStepOut = 10000;
-      const double timeStepEnd = 250000;
+      const double timeStepOut = 1000;
+      const double timeStepEnd = 25000;
 
       // Number of OpenMP threads
-      int numOfThreads = 1;
+      int numOfThreads = 4;
 
       //////////////////////////////////////////////////////////////////////////
 
@@ -84,13 +86,16 @@ int main(int argc, char* argv[])
       // set grid spacing
       grid->setDeltaX(dx);
       // set block size for three dimensions
-      grid->setBlockNX(64,64,64);
+      int blockSize = nx / 2;
+      grid->setBlockNX(blockSize,blockSize,blockSize);
       
       // Create simulation bounding box
       SPtr<GbObject3D> gridCube(new GbCuboid3D(g_minX1, g_minX2, g_minX3, g_maxX1, g_maxX2, g_maxX3));
       GbSystem3D::writeGeoObject(gridCube.get(), path + "/geo/gridCube", WbWriterVtkXmlBinary::getInstance());
 
-      UBLOG(logINFO, "Lid Driven Cavity");
+      UBLOG(logINFO, "Lid Driven Cavity:");
+      UBLOG(logINFO, "Domain size = " << nx << " x "<< nx << " x "<< nx);
+      UBLOG(logINFO, "Block size = " << blockSize << " x "<< blockSize << " x "<< blockSize);
       UBLOG(logINFO, "velocity    = " << velocity << " m/s");
       UBLOG(logINFO, "velocityLB  = " << velocityLB);
       UBLOG(logINFO, "viscosityLB = " << viscosityLB);
@@ -209,8 +214,9 @@ int main(int argc, char* argv[])
       SPtr<CoProcessor> nupsCoProcessor(new NUPSCounterCoProcessor(grid, nupsSch, numOfThreads, comm));
 
       // OpenMP threads control
+#ifdef _OPENMP
       omp_set_num_threads(numOfThreads);
-
+#endif
       // Create simulation
       SPtr<Calculator> calculator(new BasicCalculator(grid, visSch, (int)timeStepEnd));
       // Add coprocessors objects to simulation
diff --git a/cpu.cmake b/cpu.cmake
index 1b8d3b383..8a18f999b 100644
--- a/cpu.cmake
+++ b/cpu.cmake
@@ -1,5 +1,3 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 3.10)
-
 #workaround for machine with mpi compiler wrapper
 #it most define before project
 
@@ -35,14 +33,11 @@ SET(USE_BOOST OFF CACHE BOOL "include Boost support")
 #SET(USE_PYTHON OFF CACHE BOOL "include Python scripting support")
 #SET(USE_FETOL OFF CACHE BOOL "include FETOL library support")
 SET(USE_INTEL OFF CACHE BOOL "include Intel compiler support")
-SET(USE_GCC OFF CACHE BOOL "include gcc compiler support") #TODO: why do we need to set this manually?
 SET(USE_HLRN_LUSTRE OFF CACHE BOOL "include HLRN Lustre support")
 SET(USE_DEM_COUPLING OFF CACHE BOOL "PE plugin")
 
 #CAB
 include("CMake/CMakeCABMacros.cmake") #TODO: Currently we have to include the CABMacros also here, so that the USE_* are defined in the config files for the cpu version
-#include("CMake/FileUtilities.cmake")
-#include("CMake/VirtualFluidsMacros.cmake")
 
 #MPI
 IF((NOT ${CMAKE_CXX_COMPILER} MATCHES mpicxx) AND (NOT ${CMAKE_CXX_COMPILER} MATCHES mpiicpc))# OR NOT ${CMAKE_CXX_COMPILER} MATCHES cc OR NOT ${CMAKE_CXX_COMPILER} MATCHES mpiCC)
@@ -107,9 +102,6 @@ ENDIF()
 IF(${USE_MPI})
     LIST(APPEND CAB_ADDTIONAL_COMPILER_FLAGS -DVF_MPI)
 ENDIF()
-# IF(${USE_FETOL})
-# LIST(APPEND CAB_ADDTIONAL_COMPILER_FLAGS -DVF_FETOL)
-# ENDIF()
 IF(${USE_VTK})
     LIST(APPEND CAB_ADDTIONAL_COMPILER_FLAGS -DVF_VTK)
 ENDIF()
@@ -129,6 +121,8 @@ IF(${USE_INTEL})
     SET(CAB_ADDITIONAL_LINK_FLAGS ${CAB_ADDITIONAL_LINK_FLAGS} -parallel)
 ENDIF()
 
+message ("GCC IS : " ${USE_GCC})
+
 IF(${USE_GCC})
     SET(CAB_ADDITIONAL_LINK_FLAGS ${CAB_ADDITIONAL_LINK_FLAGS} -lgomp)
 ENDIF()
diff --git a/gpu.cmake b/gpu.cmake
index 78d21ec95..e9f33a5ea 100644
--- a/gpu.cmake
+++ b/gpu.cmake
@@ -4,6 +4,13 @@ if(UNIX)
     set(CMAKE_CXX_STANDARD 14)
 endif()
 
+#############################################################
+###                     CUDAPATH                          ###
+#############################################################
+
+# if CMake cannot find CUDA by itself, set the correct paths manually:
+#SET(CUDA_CUT_INCLUDE_DIR    "/cluster/cuda/9.0/include;/cluster/cuda/9.0/samples/common/inc" CACHE PATH "CUDA_CUT_INCLUDE_DIR")
+#SET(CUDA_SAMPLE_INCLUDE_DIR "/cluster/cuda/9.0/samples/common/inc" CACHE PATH "CUDA_CUT_INCLUDE_DIR")
 
 #############################################################
 ###                   PROJECT SETTINGS                    ###
diff --git a/src/basics/geometry3d/CoordinateTransformation3D.h b/src/basics/geometry3d/CoordinateTransformation3D.h
index 362da6b62..112f78789 100644
--- a/src/basics/geometry3d/CoordinateTransformation3D.h
+++ b/src/basics/geometry3d/CoordinateTransformation3D.h
@@ -120,11 +120,11 @@ private:
    bool   active;
    bool   transformation;
 
-    friend class MPIIOCoProcessor;
-    friend class MPIIORestartCoProcessor;
-    friend class MPIIOMigrationCoProcessor;
-    friend class MPIIOMigrationBECoProcessor;
-    friend class CheckpointConverter;
+   friend class MPIIOCoProcessor;
+   friend class MPIIORestartCoProcessor;
+   friend class MPIIOMigrationCoProcessor;
+   friend class MPIIOMigrationBECoProcessor;
+   friend class CheckpointConverter;
 
 
 };
diff --git a/apps/cpu/VirtualFluids.h b/src/cpu/VirtualFluids.h
similarity index 83%
rename from apps/cpu/VirtualFluids.h
rename to src/cpu/VirtualFluids.h
index fe92dd803..83ebf0494 100644
--- a/apps/cpu/VirtualFluids.h
+++ b/src/cpu/VirtualFluids.h
@@ -1,16 +1,44 @@
+//=======================================================================================
+// ____          ____    __    ______     __________   __      __       __        __
+// \    \       |    |  |  |  |   _   \  |___    ___| |  |    |  |     /  \      |  |
+//  \    \      |    |  |  |  |  |_)   |     |  |     |  |    |  |    /    \     |  |
+//   \    \     |    |  |  |  |   _   /      |  |     |  |    |  |   /  /\  \    |  |
+//    \    \    |    |  |  |  |  | \  \      |  |     |   \__/   |  /  ____  \   |  |____
+//     \    \   |    |  |__|  |__|  \__\     |__|      \________/  /__/    \__\  |_______|
+//      \    \  |    |   ________________________________________________________________
+//       \    \ |    |  |  ______________________________________________________________|
+//        \    \|    |  |  |         __          __     __     __     ______      _______
+//         \         |  |  |_____   |  |        |  |   |  |   |  |   |   _  \    /  _____)
+//          \        |  |   _____|  |  |        |  |   |  |   |  |   |  | \  \   \_______
+//           \       |  |  |        |  |_____   |   \_/   |   |  |   |  |_/  /    _____  \
+//            \ _____|  |__|        |________|   \_______/    |__|   |______/    (_______/
+//
+//  This file is part of VirtualFluids. VirtualFluids is free software: you can
+//  redistribute it and/or modify it under the terms of the GNU General Public
+//  License as published by the Free Software Foundation, either version 3 of
+//  the License, or (at your option) any later version.
+//
+//  VirtualFluids is distributed in the hope that it will be useful, but WITHOUT
+//  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+//  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+//  for more details.
+//
+//  You should have received a copy of the GNU General Public License along
+//  with VirtualFluids (see COPYING.txt). If not, see <http://www.gnu.org/licenses/>.
+//
+//! \file VirtualFluids.h
+//! \ingroup Applications
+//! \author Konstantin Kutscher
+//=======================================================================================
+
 #ifndef VirtualFluids_h__
 #define VirtualFluids_h__
 
 //VirtualFluids header files
- 
-#if defined VF_FETOL
-#define WIN32_LEAN_AND_MEAN
-#include <JM.h>
-#endif
 
 #ifdef _OPENMP
 #include <omp.h>
-#endif 
+#endif
 
 #include <basics/PointerDefinitions.h>
 
@@ -268,11 +296,4 @@
 #include <Visitors/RefineCrossAndInsideGbObjectHelper.h>
 #include <RefineAroundGbObjectHelper.h>
 
-#if defined VF_FETOL
-   #include <FETOL/FETOLCalculator.h>
-   #include <FETOL/FETOLCommunicator.h>
-   #include <FETOL/FETOLSetConnectorsBlockVisitor.h>
-   #include <FETOL/FETOLTransmitterBondPool.h>   
-#endif
-
 #endif // VirtualFluids_h__
diff --git a/src/cpu/VirtualFluidsCore/LBM/CumulantK17LBMKernel.cpp b/src/cpu/VirtualFluidsCore/LBM/CumulantK17LBMKernel.cpp
new file mode 100644
index 000000000..b3a3c989b
--- /dev/null
+++ b/src/cpu/VirtualFluidsCore/LBM/CumulantK17LBMKernel.cpp
@@ -0,0 +1,648 @@
+//=======================================================================================
+// ____          ____    __    ______     __________   __      __       __        __         
+// \    \       |    |  |  |  |   _   \  |___    ___| |  |    |  |     /  \      |  |        
+//  \    \      |    |  |  |  |  |_)   |     |  |     |  |    |  |    /    \     |  |        
+//   \    \     |    |  |  |  |   _   /      |  |     |  |    |  |   /  /\  \    |  |        
+//    \    \    |    |  |  |  |  | \  \      |  |     |   \__/   |  /  ____  \   |  |____    
+//     \    \   |    |  |__|  |__|  \__\     |__|      \________/  /__/    \__\  |_______|   
+//      \    \  |    |   ________________________________________________________________    
+//       \    \ |    |  |  ______________________________________________________________|   
+//        \    \|    |  |  |         __          __     __     __     ______      _______    
+//         \         |  |  |_____   |  |        |  |   |  |   |  |   |   _  \    /  _____)   
+//          \        |  |   _____|  |  |        |  |   |  |   |  |   |  | \  \   \_______    
+//           \       |  |  |        |  |_____   |   \_/   |   |  |   |  |_/  /    _____  |
+//            \ _____|  |__|        |________|   \_______/    |__|   |______/    (_______/   
+//
+//  This file is part of VirtualFluids. VirtualFluids is free software: you can 
+//  redistribute it and/or modify it under the terms of the GNU General Public
+//  License as published by the Free Software Foundation, either version 3 of 
+//  the License, or (at your option) any later version.
+//  
+//  VirtualFluids is distributed in the hope that it will be useful, but WITHOUT 
+//  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+//  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License 
+//  for more details.
+//  
+//  You should have received a copy of the GNU General Public License along
+//  with VirtualFluids (see COPYING.txt). If not, see <http://www.gnu.org/licenses/>.
+//
+//! \file CumulantK17LBMKernel.cpp
+//! \ingroup LBM
+//! \author Konstantin Kutscher, Martin Geier
+//=======================================================================================
+
+#include "CumulantK17LBMKernel.h"
+#include "D3Q27System.h"
+#include "D3Q27EsoTwist3DSplittedVector.h"
+#include <math.h>
+#include "DataSet3D.h"
+#include "LBMKernel.h"
+#include "Block3D.h"
+#include "BCArray3D.h"
+
+#define PROOF_CORRECTNESS
+
+using namespace UbMath;
+
+//////////////////////////////////////////////////////////////////////////
+CumulantK17LBMKernel::CumulantK17LBMKernel()
+{
+   this->compressible = true;
+}
+//////////////////////////////////////////////////////////////////////////
+CumulantK17LBMKernel::~CumulantK17LBMKernel(void)
+{
+
+}
+//////////////////////////////////////////////////////////////////////////
+void CumulantK17LBMKernel::initDataSet()
+{
+   SPtr<DistributionArray3D> d(new D3Q27EsoTwist3DSplittedVector(nx[0] + 2, nx[1] + 2, nx[2] + 2, -999.9));
+   dataSet->setFdistributions(d);
+}
+//////////////////////////////////////////////////////////////////////////
+SPtr<LBMKernel> CumulantK17LBMKernel::clone()
+{
+   SPtr<LBMKernel> kernel(new CumulantK17LBMKernel());
+   kernel->setNX(nx);
+   std::dynamic_pointer_cast<CumulantK17LBMKernel>(kernel)->initDataSet();
+   kernel->setCollisionFactor(this->collFactor);
+   kernel->setBCProcessor(bcProcessor->clone(kernel));
+   kernel->setWithForcing(withForcing);
+   kernel->setForcingX1(muForcingX1);
+   kernel->setForcingX2(muForcingX2);
+   kernel->setForcingX3(muForcingX3);
+   kernel->setIndex(ix1, ix2, ix3);
+   kernel->setDeltaT(deltaT);
+   kernel->setBlock(block.lock());
+
+   return kernel;
+}
+//////////////////////////////////////////////////////////////////////////
+void CumulantK17LBMKernel::calculate(int step)
+{
+   //////////////////////////////////////////////////////////////////////////
+	//! Cumulant K17 Kernel is based on
+   //! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+	//! and 
+	//! <a href="https://doi.org/10.1016/j.jcp.2017.07.004"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.07.004 ]</b></a>
+	//!
+	//! The cumulant kernel is executed in the following steps
+	//!
+	////////////////////////////////////////////////////////////////////////////////
+	//! - Get node index coordinates from thredIdx, blockIdx, blockDim and gridDim.
+	//!
+   using namespace D3Q27System;
+   using namespace std;
+
+   //initializing of forcing stuff 
+   if (withForcing)
+   {
+      muForcingX1.DefineVar("x1", &muX1); muForcingX1.DefineVar("x2", &muX2); muForcingX1.DefineVar("x3", &muX3);
+      muForcingX2.DefineVar("x1", &muX1); muForcingX2.DefineVar("x2", &muX2); muForcingX2.DefineVar("x3", &muX3);
+      muForcingX3.DefineVar("x1", &muX1); muForcingX3.DefineVar("x2", &muX2); muForcingX3.DefineVar("x3", &muX3);
+
+      muDeltaT = deltaT;
+
+      muForcingX1.DefineVar("dt", &muDeltaT);
+      muForcingX2.DefineVar("dt", &muDeltaT);
+      muForcingX3.DefineVar("dt", &muDeltaT);
+
+      muNu = (1.0 / 3.0) * (1.0 / collFactor - 1.0 / 2.0);
+
+      muForcingX1.DefineVar("nu", &muNu);
+      muForcingX2.DefineVar("nu", &muNu);
+      muForcingX3.DefineVar("nu", &muNu);
+
+      LBMReal forcingX1 = 0;
+      LBMReal forcingX2 = 0;
+      LBMReal forcingX3 = 0;
+   }
+   /////////////////////////////////////
+
+   localDistributions = dynamic_pointer_cast<D3Q27EsoTwist3DSplittedVector>(dataSet->getFdistributions())->getLocalDistributions();
+   nonLocalDistributions = dynamic_pointer_cast<D3Q27EsoTwist3DSplittedVector>(dataSet->getFdistributions())->getNonLocalDistributions();
+   restDistributions = dynamic_pointer_cast<D3Q27EsoTwist3DSplittedVector>(dataSet->getFdistributions())->getZeroDistributions();
+
+   SPtr<BCArray3D> bcArray = this->getBCProcessor()->getBCArray();
+
+   const int bcArrayMaxX1 = (int)bcArray->getNX1();
+   const int bcArrayMaxX2 = (int)bcArray->getNX2();
+   const int bcArrayMaxX3 = (int)bcArray->getNX3();
+
+   int minX1 = ghostLayerWidth;
+   int minX2 = ghostLayerWidth;
+   int minX3 = ghostLayerWidth;
+   int maxX1 = bcArrayMaxX1 - ghostLayerWidth;
+   int maxX2 = bcArrayMaxX2 - ghostLayerWidth;
+   int maxX3 = bcArrayMaxX3 - ghostLayerWidth;
+
+   LBMReal omega = collFactor;
+
+   for (int x3 = minX3; x3 < maxX3; x3++)
+   {
+      for (int x2 = minX2; x2 < maxX2; x2++)
+      {
+         for (int x1 = minX1; x1 < maxX1; x1++)
+         {
+            if (!bcArray->isSolid(x1, x2, x3) && !bcArray->isUndefined(x1, x2, x3))
+            {
+               int x1p = x1 + 1;
+               int x2p = x2 + 1;
+               int x3p = x3 + 1;
+               //////////////////////////////////////////////////////////////////////////
+               //////////////////////////////////////////////////////////////////////////
+			      //! - Read distributions: style of reading and writing the distributions from/to stored arrays dependent on timestep is based on the esoteric twist algorithm
+			      //! <a href="https://doi.org/10.3390/computation5020019"><b>[ M. Geier et al. (2017), DOI:10.3390/computation5020019 ]</b></a>
+			      //!
+               ////////////////////////////////////////////////////////////////////////////
+               //////////////////////////////////////////////////////////////////////////
+
+               //E   N  T
+               //c   c  c
+               //////////
+               //W   S  B
+               //a   a  a
+
+               //Rest is b
+
+               //mfxyz
+               //a - negative
+               //b - null
+               //c - positive
+
+               // a b c
+               //-1 0 1
+
+               LBMReal mfcbb = (*this->localDistributions)(D3Q27System::ET_E, x1, x2, x3);
+               LBMReal mfbcb = (*this->localDistributions)(D3Q27System::ET_N, x1, x2, x3);
+               LBMReal mfbbc = (*this->localDistributions)(D3Q27System::ET_T, x1, x2, x3);
+               LBMReal mfccb = (*this->localDistributions)(D3Q27System::ET_NE, x1, x2, x3);
+               LBMReal mfacb = (*this->localDistributions)(D3Q27System::ET_NW, x1p, x2, x3);
+               LBMReal mfcbc = (*this->localDistributions)(D3Q27System::ET_TE, x1, x2, x3);
+               LBMReal mfabc = (*this->localDistributions)(D3Q27System::ET_TW, x1p, x2, x3);
+               LBMReal mfbcc = (*this->localDistributions)(D3Q27System::ET_TN, x1, x2, x3);
+               LBMReal mfbac = (*this->localDistributions)(D3Q27System::ET_TS, x1, x2p, x3);
+               LBMReal mfccc = (*this->localDistributions)(D3Q27System::ET_TNE, x1, x2, x3);
+               LBMReal mfacc = (*this->localDistributions)(D3Q27System::ET_TNW, x1p, x2, x3);
+               LBMReal mfcac = (*this->localDistributions)(D3Q27System::ET_TSE, x1, x2p, x3);
+               LBMReal mfaac = (*this->localDistributions)(D3Q27System::ET_TSW, x1p, x2p, x3);
+
+               LBMReal mfabb = (*this->nonLocalDistributions)(D3Q27System::ET_W, x1p, x2, x3);
+               LBMReal mfbab = (*this->nonLocalDistributions)(D3Q27System::ET_S, x1, x2p, x3);
+               LBMReal mfbba = (*this->nonLocalDistributions)(D3Q27System::ET_B, x1, x2, x3p);
+               LBMReal mfaab = (*this->nonLocalDistributions)(D3Q27System::ET_SW, x1p, x2p, x3);
+               LBMReal mfcab = (*this->nonLocalDistributions)(D3Q27System::ET_SE, x1, x2p, x3);
+               LBMReal mfaba = (*this->nonLocalDistributions)(D3Q27System::ET_BW, x1p, x2, x3p);
+               LBMReal mfcba = (*this->nonLocalDistributions)(D3Q27System::ET_BE, x1, x2, x3p);
+               LBMReal mfbaa = (*this->nonLocalDistributions)(D3Q27System::ET_BS, x1, x2p, x3p);
+               LBMReal mfbca = (*this->nonLocalDistributions)(D3Q27System::ET_BN, x1, x2, x3p);
+               LBMReal mfaaa = (*this->nonLocalDistributions)(D3Q27System::ET_BSW, x1p, x2p, x3p);
+               LBMReal mfcaa = (*this->nonLocalDistributions)(D3Q27System::ET_BSE, x1, x2p, x3p);
+               LBMReal mfaca = (*this->nonLocalDistributions)(D3Q27System::ET_BNW, x1p, x2, x3p);
+               LBMReal mfcca = (*this->nonLocalDistributions)(D3Q27System::ET_BNE, x1, x2, x3p);
+
+               LBMReal mfbbb = (*this->restDistributions)(x1, x2, x3);
+
+               ////////////////////////////////////////////////////////////////////////////////////
+               //! - Calculate density and velocity using pyramid summation for low round-off errors as in Eq. (J1)-(J3)
+			      //! <a href="https://doi.org/10.1016/j.camwa.2015.05.001"><b>[ M. Geier et al. (2015), DOI:10.1016/j.camwa.2015.05.001 ]</b></a>
+			      //!
+               LBMReal drho = ((((mfccc + mfaaa) + (mfaca + mfcac)) + ((mfacc + mfcaa) + (mfaac + mfcca))) +
+                  (((mfbac + mfbca) + (mfbaa + mfbcc)) + ((mfabc + mfcba) + (mfaba + mfcbc)) + ((mfacb + mfcab) + (mfaab + mfccb))) +
+                  ((mfabb + mfcbb) + (mfbab + mfbcb)) + (mfbba + mfbbc)) + mfbbb;
+
+               LBMReal rho = c1 + drho;
+               LBMReal OOrho = c1 / rho;
+               ////////////////////////////////////////////////////////////////////////////////////
+               LBMReal vvx = ((((mfccc - mfaaa) + (mfcac - mfaca)) + ((mfcaa - mfacc) + (mfcca - mfaac))) +
+                  (((mfcba - mfabc) + (mfcbc - mfaba)) + ((mfcab - mfacb) + (mfccb - mfaab))) +
+                  (mfcbb - mfabb)) / rho;
+               LBMReal vvy = ((((mfccc - mfaaa) + (mfaca - mfcac)) + ((mfacc - mfcaa) + (mfcca - mfaac))) +
+                  (((mfbca - mfbac) + (mfbcc - mfbaa)) + ((mfacb - mfcab) + (mfccb - mfaab))) +
+                  (mfbcb - mfbab)) / rho;
+               LBMReal vvz = ((((mfccc - mfaaa) + (mfcac - mfaca)) + ((mfacc - mfcaa) + (mfaac - mfcca))) +
+                  (((mfbac - mfbca) + (mfbcc - mfbaa)) + ((mfabc - mfcba) + (mfcbc - mfaba))) +
+                  (mfbbc - mfbba)) / rho;
+               ////////////////////////////////////////////////////////////////////////////////////
+               //forcing 
+               ///////////////////////////////////////////////////////////////////////////////////////////
+               if (withForcing)
+               {
+                  muX1 = static_cast<double>(x1 - 1 + ix1 * maxX1);
+                  muX2 = static_cast<double>(x2 - 1 + ix2 * maxX2);
+                  muX3 = static_cast<double>(x3 - 1 + ix3 * maxX3);
+
+                  forcingX1 = muForcingX1.Eval();
+                  forcingX2 = muForcingX2.Eval();
+                  forcingX3 = muForcingX3.Eval();
+
+                  ////////////////////////////////////////////////////////////////////////////////////
+			         //! - Add half of the acceleration (body force) to the velocity as in Eq. (42)
+			         //! <a href="https://doi.org/10.1016/j.camwa.2015.05.001"><b>[ M. Geier et al. (2015), DOI:10.1016/j.camwa.2015.05.001 ]</b></a>
+			         //!
+                  vvx += forcingX1 * deltaT * c1o2; // X
+                  vvy += forcingX2 * deltaT * c1o2; // Y
+                  vvz += forcingX3 * deltaT * c1o2; // Z
+               }
+               ///////////////////////////////////////////////////////////////////////////////////////////               
+               ////////////////////////////////////////////////////////////////////////////////////
+               LBMReal oMdrho = c1;
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      // calculate the square of velocities for this lattice node
+               LBMReal vx2 = vvx * vvx;
+               LBMReal vy2 = vvy * vvy;
+               LBMReal vz2 = vvz * vvz;
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      //! - Set relaxation limiters for third order cumulants to default value \f$ \lambda=0.001 \f$ according to section 6 in
+			      //! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+			      //!
+               LBMReal wadjust;
+               LBMReal qudricLimitP = c1o100;
+               LBMReal qudricLimitM = c1o100;
+               LBMReal qudricLimitD = c1o100;
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      //! - Chimera transform from well conditioned distributions to central moments as defined in Appendix J in
+			      //! <a href="https://doi.org/10.1016/j.camwa.2015.05.001"><b>[ M. Geier et al. (2015), DOI:10.1016/j.camwa.2015.05.001 ]</b></a>
+			      //! see also Eq. (6)-(14) in
+			      //! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+			      //!
+               ////////////////////////////////////////////////////////////////////////////////////
+               // Z - Dir
+               forwardInverseChimeraWithK(mfaaa, mfaab, mfaac, vvz, vz2, c36, c1o36);
+               forwardInverseChimeraWithK(mfaba, mfabb, mfabc, vvz, vz2, c9, c1o9);
+               forwardInverseChimeraWithK(mfaca, mfacb, mfacc, vvz, vz2, c36, c1o36);
+               forwardInverseChimeraWithK(mfbaa, mfbab, mfbac, vvz, vz2, c9, c1o9);
+               forwardInverseChimeraWithK(mfbba, mfbbb, mfbbc, vvz, vz2, c9o4, c4o9);
+               forwardInverseChimeraWithK(mfbca, mfbcb, mfbcc, vvz, vz2, c9, c1o9);
+               forwardInverseChimeraWithK(mfcaa, mfcab, mfcac, vvz, vz2, c36, c1o36);
+               forwardInverseChimeraWithK(mfcba, mfcbb, mfcbc, vvz, vz2, c9, c1o9);
+               forwardInverseChimeraWithK(mfcca, mfccb, mfccc, vvz, vz2, c36, c1o36);
+
+               ////////////////////////////////////////////////////////////////////////////////////
+               // Y - Dir
+               forwardInverseChimeraWithK(mfaaa, mfaba, mfaca, vvy, vy2, c6, c1o6);
+               forwardChimera(mfaab, mfabb, mfacb, vvy, vy2);
+               forwardInverseChimeraWithK(mfaac, mfabc, mfacc, vvy, vy2, c18, c1o18);
+               forwardInverseChimeraWithK(mfbaa, mfbba, mfbca, vvy, vy2, c3o2, c2o3);
+               forwardChimera(mfbab, mfbbb, mfbcb, vvy, vy2);
+               forwardInverseChimeraWithK(mfbac, mfbbc, mfbcc, vvy, vy2, c9o2, c2o9);
+               forwardInverseChimeraWithK(mfcaa, mfcba, mfcca, vvy, vy2, c6, c1o6);
+               forwardChimera(mfcab, mfcbb, mfccb, vvy, vy2);
+               forwardInverseChimeraWithK(mfcac, mfcbc, mfccc, vvy, vy2, c18, c1o18);
+
+               ////////////////////////////////////////////////////////////////////////////////////
+               // X - Dir
+               forwardInverseChimeraWithK(mfaaa, mfbaa, mfcaa, vvx, vx2, c1, c1);
+               forwardChimera(mfaba, mfbba, mfcba, vvx, vx2);
+               forwardInverseChimeraWithK(mfaca, mfbca, mfcca, vvx, vx2, c3, c1o3);
+               forwardChimera(mfaab, mfbab, mfcab, vvx, vx2);
+               forwardChimera(mfabb, mfbbb, mfcbb, vvx, vx2);
+               forwardChimera(mfacb, mfbcb, mfccb, vvx, vx2);
+               forwardInverseChimeraWithK(mfaac, mfbac, mfcac, vvx, vx2, c3, c1o3);
+               forwardChimera(mfabc, mfbbc, mfcbc, vvx, vx2);
+               forwardInverseChimeraWithK(mfacc, mfbcc, mfccc, vvx, vx2, c9, c1o9);
+
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      //! - Setting relaxation rates for non-hydrodynamic cumulants (default values). Variable names and equations according to
+			      //! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+			      //!  => [NAME IN PAPER]=[NAME IN CODE]=[DEFAULT VALUE].
+			      //!  - Trace of second order cumulants \f$ C_{200}+C_{020}+C_{002} \f$ used to adjust bulk viscosity:\f$\omega_2=OxxPyyPzz=1.0 \f$.
+			      //!  - Third order cumulants \f$ C_{120}+C_{102} \f$, \f$ C_{210}+C_{012} \f$, \f$ C_{201}+C_{021} \f$: \f$\omega_3=OxyyPxzz\f$ set according to Eq. (111) with simplifications assuming \f$\omega_2=1.0\f$.
+			      //!  - Third order cumulants \f$ C_{120}-C_{102} \f$, \f$ C_{210}-C_{012} \f$, \f$ C_{201}-C_{021} \f$: \f$\omega_4 = OxyyMxzz\f$ set according to Eq. (112) with simplifications assuming \f$\omega_2 = 1.0\f$.
+			      //!  - Third order cumulants \f$ C_{111} \f$: \f$\omega_5 = Oxyz\f$ set according to Eq. (113) with simplifications assuming \f$\omega_2 = 1.0\f$  (modify for different bulk viscosity).
+			      //!  - Fourth order cumulants \f$ C_{220} \f$, \f$ C_{202} \f$, \f$ C_{022} \f$, \f$ C_{211} \f$, \f$ C_{121} \f$, \f$ C_{112} \f$: for simplification all set to the same default value \f$ \omega_6=\omega_7=\omega_8=O4=1.0 \f$.
+			      //!  - Fifth order cumulants \f$ C_{221}\f$, \f$C_{212}\f$, \f$C_{122}\f$: \f$\omega_9=O5=1.0\f$.
+			      //!  - Sixth order cumulant \f$ C_{222}\f$: \f$\omega_{10}=O6=1.0\f$.
+			      //!
+			      ////////////////////////////////////////////////////////////
+			      //2.
+			      LBMReal OxxPyyPzz = c1;
+			      ////////////////////////////////////////////////////////////
+			      //3.
+			      LBMReal OxyyPxzz = c8  * (-c2 + omega) * ( c1 + c2*omega) / (-c8 - c14*omega + c7*omega*omega);
+			      LBMReal OxyyMxzz = c8  * (-c2 + omega) * (-c7 + c4*omega) / (c56 - c50*omega + c9*omega*omega);
+			      LBMReal Oxyz     = c24 * (-c2 + omega) * (-c2 - c7*omega + c3*omega*omega) / (c48 + c152*omega - c130*omega*omega + c29*omega*omega*omega);
+			      ////////////////////////////////////////////////////////////
+			      //4.
+			      LBMReal O4 = c1;
+			      ////////////////////////////////////////////////////////////
+			      //5.
+			      LBMReal O5 = c1;
+			      ////////////////////////////////////////////////////////////
+			      //6.
+			      LBMReal O6 = c1;
+
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      //! - A and B: parameters for fourth order convergence of the diffusion term according to Eq. (114) and (115)
+			      //! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+			      //! with simplifications assuming \f$\omega_2 = 1.0\f$ (modify for different bulk viscosity).
+			      //!
+			      LBMReal A = (c4 + c2*omega - c3*omega*omega) / (c2 - c7*omega + c5*omega*omega);
+			      LBMReal B = (c4 + c28*omega - c14*omega*omega) / (c6 - c21*omega + c15*omega*omega);
+
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      //! - Compute cumulants from central moments according to Eq. (20)-(23) in
+			      //! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+			      //!
+			      ////////////////////////////////////////////////////////////
+               //4.
+               LBMReal CUMcbb = mfcbb - ((mfcaa + c1o3) * mfabb + c2 * mfbba * mfbab) * OOrho;
+               LBMReal CUMbcb = mfbcb - ((mfaca + c1o3) * mfbab + c2 * mfbba * mfabb) * OOrho;
+               LBMReal CUMbbc = mfbbc - ((mfaac + c1o3) * mfbba + c2 * mfbab * mfabb) * OOrho;
+
+               LBMReal CUMcca = mfcca - (((mfcaa * mfaca + c2 * mfbba * mfbba) + c1o3 * (mfcaa + mfaca)) * OOrho - c1o9 * (drho * OOrho));
+               LBMReal CUMcac = mfcac - (((mfcaa * mfaac + c2 * mfbab * mfbab) + c1o3 * (mfcaa + mfaac)) * OOrho - c1o9 * (drho * OOrho));
+               LBMReal CUMacc = mfacc - (((mfaac * mfaca + c2 * mfabb * mfabb) + c1o3 * (mfaac + mfaca)) * OOrho - c1o9 * (drho * OOrho));
+               ////////////////////////////////////////////////////////////
+               //5.
+               LBMReal CUMbcc = mfbcc - ((mfaac * mfbca + mfaca * mfbac + c4 * mfabb * mfbbb + c2 * (mfbab * mfacb + mfbba * mfabc)) + c1o3 * (mfbca + mfbac)) * OOrho;
+               LBMReal CUMcbc = mfcbc - ((mfaac * mfcba + mfcaa * mfabc + c4 * mfbab * mfbbb + c2 * (mfabb * mfcab + mfbba * mfbac)) + c1o3 * (mfcba + mfabc)) * OOrho;
+               LBMReal CUMccb = mfccb - ((mfcaa * mfacb + mfaca * mfcab + c4 * mfbba * mfbbb + c2 * (mfbab * mfbca + mfabb * mfcba)) + c1o3 * (mfacb + mfcab)) * OOrho;
+               ////////////////////////////////////////////////////////////
+               //6.
+               LBMReal CUMccc = mfccc + ((-c4 * mfbbb * mfbbb
+                     - (mfcaa * mfacc + mfaca * mfcac + mfaac * mfcca)
+                     - c4 * (mfabb * mfcbb + mfbab * mfbcb + mfbba * mfbbc)
+                     - c2 * (mfbca * mfbac + mfcba * mfabc + mfcab * mfacb)) * OOrho
+                     + (c4 * (mfbab * mfbab * mfaca + mfabb * mfabb * mfcaa + mfbba * mfbba * mfaac)
+                     + c2 * (mfcaa * mfaca * mfaac)
+                     + c16 * mfbba * mfbab * mfabb) * OOrho * OOrho
+                     - c1o3 * (mfacc + mfcac + mfcca) * OOrho
+                     - c1o9 * (mfcaa + mfaca + mfaac) * OOrho
+                     + (c2 * (mfbab * mfbab + mfabb * mfabb + mfbba * mfbba)
+                     + (mfaac * mfaca + mfaac * mfcaa + mfaca * mfcaa) + c1o3 * (mfaac + mfaca + mfcaa)) * OOrho * OOrho * c2o3
+                     + c1o27 * ((drho * drho - drho) * OOrho * OOrho));
+
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      //! - Compute linear combinations of second and third order cumulants
+			      //!
+			      ////////////////////////////////////////////////////////////
+               //2.
+               LBMReal mxxPyyPzz = mfcaa + mfaca + mfaac;
+               LBMReal mxxMyy = mfcaa - mfaca;
+               LBMReal mxxMzz = mfcaa - mfaac;
+			      ////////////////////////////////////////////////////////////
+			      //3.
+               LBMReal mxxyPyzz = mfcba + mfabc;
+               LBMReal mxxyMyzz = mfcba - mfabc;
+
+               LBMReal mxxzPyyz = mfcab + mfacb;
+               LBMReal mxxzMyyz = mfcab - mfacb;
+
+               LBMReal mxyyPxzz = mfbca + mfbac;
+               LBMReal mxyyMxzz = mfbca - mfbac;
+
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      //incl. correction
+			      ////////////////////////////////////////////////////////////
+			      //! - Compute velocity  gradients from second order cumulants according to Eq. (27)-(32)
+			      //! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+			      //! Further explanations of the correction in viscosity in Appendix H of
+			      //! <a href="https://doi.org/10.1016/j.camwa.2015.05.001"><b>[ M. Geier et al. (2015), DOI:10.1016/j.camwa.2015.05.001 ]</b></a>
+			      //! Note that the division by rho is omitted here as we need rho times the gradients later.
+			      //!
+               LBMReal Dxy = -c3 * omega * mfbba;
+               LBMReal Dxz = -c3 * omega * mfbab;
+               LBMReal Dyz = -c3 * omega * mfabb;
+               LBMReal dxux = c1o2 * (-omega) * (mxxMyy + mxxMzz) + c1o2 * OxxPyyPzz * (mfaaa - mxxPyyPzz);
+               LBMReal dyuy = dxux + omega * c3o2 * mxxMyy;
+               LBMReal dzuz = dxux + omega * c3o2 * mxxMzz;
+			      ////////////////////////////////////////////////////////////
+			      //! - Relaxation of second order cumulants with correction terms according to Eq. (33)-(35) in
+			      //! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+			      //!
+               mxxPyyPzz += OxxPyyPzz * (mfaaa - mxxPyyPzz) - c3 * (c1 - c1o2 * OxxPyyPzz) * (vx2 * dxux + vy2 * dyuy + vz2 * dzuz);
+               mxxMyy += omega * (-mxxMyy) - c3 * (c1 + c1o2 * (-omega)) * (vx2 * dxux - vy2 * dyuy);
+               mxxMzz += omega * (-mxxMzz) - c3 * (c1 + c1o2 * (-omega)) * (vx2 * dxux - vz2 * dzuz);
+
+               /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+               ////no correction
+               //mxxPyyPzz += OxxPyyPzz*(mfaaa - mxxPyyPzz);
+               //mxxMyy += -(-omega) * (-mxxMyy);
+               //mxxMzz += -(-omega) * (-mxxMzz);
+               /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+               mfabb += omega * (-mfabb);
+               mfbab += omega * (-mfbab);
+               mfbba += omega * (-mfbba);
+
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      //relax
+			      //////////////////////////////////////////////////////////////////////////
+			      // incl. limiter
+			      //! - Relaxation of third order cumulants including limiter according to Eq. (116)-(123)
+			      //! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+			      //!
+               wadjust = Oxyz + (c1 - Oxyz) * abs(mfbbb) / (abs(mfbbb) + qudricLimitD);
+               mfbbb += wadjust * (-mfbbb);
+               wadjust = OxyyPxzz + (c1 - OxyyPxzz) * abs(mxxyPyzz) / (abs(mxxyPyzz) + qudricLimitP);
+               mxxyPyzz += wadjust * (-mxxyPyzz);
+               wadjust = OxyyMxzz + (c1 - OxyyMxzz) * abs(mxxyMyzz) / (abs(mxxyMyzz) + qudricLimitM);
+               mxxyMyzz += wadjust * (-mxxyMyzz);
+               wadjust = OxyyPxzz + (c1 - OxyyPxzz) * abs(mxxzPyyz) / (abs(mxxzPyyz) + qudricLimitP);
+               mxxzPyyz += wadjust * (-mxxzPyyz);
+               wadjust = OxyyMxzz + (c1 - OxyyMxzz) * abs(mxxzMyyz) / (abs(mxxzMyyz) + qudricLimitM);
+               mxxzMyyz += wadjust * (-mxxzMyyz);
+               wadjust = OxyyPxzz + (c1 - OxyyPxzz) * abs(mxyyPxzz) / (abs(mxyyPxzz) + qudricLimitP);
+               mxyyPxzz += wadjust * (-mxyyPxzz);
+               wadjust = OxyyMxzz + (c1 - OxyyMxzz) * abs(mxyyMxzz) / (abs(mxyyMxzz) + qudricLimitM);
+               mxyyMxzz += wadjust * (-mxyyMxzz);
+               //////////////////////////////////////////////////////////////////////////
+               // no limiter
+               //mfbbb += OxyyMxzz * (-mfbbb);
+               //mxxyPyzz += OxyyPxzz * (-mxxyPyzz);
+               //mxxyMyzz += OxyyMxzz * (-mxxyMyzz);
+               //mxxzPyyz += OxyyPxzz * (-mxxzPyyz);
+               //mxxzMyyz += OxyyMxzz * (-mxxzMyyz);
+               //mxyyPxzz += OxyyPxzz * (-mxyyPxzz);
+               //mxyyMxzz += OxyyMxzz * (-mxyyMxzz);
+
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      //! - Compute inverse linear combinations of second and third order cumulants
+			      //!
+               mfcaa = c1o3 * (mxxMyy + mxxMzz + mxxPyyPzz);
+               mfaca = c1o3 * (-c2 * mxxMyy + mxxMzz + mxxPyyPzz);
+               mfaac = c1o3 * (mxxMyy - c2 * mxxMzz + mxxPyyPzz);
+
+               mfcba = (mxxyMyzz + mxxyPyzz) * c1o2;
+               mfabc = (-mxxyMyzz + mxxyPyzz) * c1o2;
+               mfcab = (mxxzMyyz + mxxzPyyz) * c1o2;
+               mfacb = (-mxxzMyyz + mxxzPyyz) * c1o2;
+               mfbca = (mxyyMxzz + mxyyPxzz) * c1o2;
+               mfbac = (-mxyyMxzz + mxyyPxzz) * c1o2;
+               //////////////////////////////////////////////////////////////////////////
+
+			      //////////////////////////////////////////////////////////////////////////
+			      //4.
+			      // no limiter
+			      //! - Relax fourth order cumulants to modified equilibrium for fourth order convergence of diffusion according to Eq. (43)-(48)
+			      //! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+			      //!
+               CUMacc = -O4 * (c1 / omega - c1o2) * (dyuy + dzuz) * c2o3 * A + (c1 - O4) * (CUMacc);
+               CUMcac = -O4 * (c1 / omega - c1o2) * (dxux + dzuz) * c2o3 * A + (c1 - O4) * (CUMcac);
+               CUMcca = -O4 * (c1 / omega - c1o2) * (dyuy + dxux) * c2o3 * A + (c1 - O4) * (CUMcca);
+               CUMbbc = -O4 * (c1 / omega - c1o2) * Dxy * c1o3 * B + (c1 - O4) * (CUMbbc);
+               CUMbcb = -O4 * (c1 / omega - c1o2) * Dxz * c1o3 * B + (c1 - O4) * (CUMbcb);
+               CUMcbb = -O4 * (c1 / omega - c1o2) * Dyz * c1o3 * B + (c1 - O4) * (CUMcbb);
+
+               //////////////////////////////////////////////////////////////////////////
+               //5.
+               CUMbcc += O5 * (-CUMbcc);
+               CUMcbc += O5 * (-CUMcbc);
+               CUMccb += O5 * (-CUMccb);
+
+               //////////////////////////////////////////////////////////////////////////
+               //6.
+               CUMccc += O6 * (-CUMccc);
+
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      //! - Compute central moments from post collision cumulants according to Eq. (53)-(56) in
+			      //! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+			      //!
+
+			      //////////////////////////////////////////////////////////////////////////
+               //4.
+               mfcbb = CUMcbb + c1o3 * ((c3 * mfcaa + c1) * mfabb + c6 * mfbba * mfbab) * OOrho;
+               mfbcb = CUMbcb + c1o3 * ((c3 * mfaca + c1) * mfbab + c6 * mfbba * mfabb) * OOrho;
+               mfbbc = CUMbbc + c1o3 * ((c3 * mfaac + c1) * mfbba + c6 * mfbab * mfabb) * OOrho;
+
+               mfcca = CUMcca + (((mfcaa * mfaca + c2 * mfbba * mfbba) * c9 + c3 * (mfcaa + mfaca)) * OOrho - (drho * OOrho)) * c1o9;
+               mfcac = CUMcac + (((mfcaa * mfaac + c2 * mfbab * mfbab) * c9 + c3 * (mfcaa + mfaac)) * OOrho - (drho * OOrho)) * c1o9;
+               mfacc = CUMacc + (((mfaac * mfaca + c2 * mfabb * mfabb) * c9 + c3 * (mfaac + mfaca)) * OOrho - (drho * OOrho)) * c1o9;
+
+               //////////////////////////////////////////////////////////////////////////
+               //5.
+               mfbcc = CUMbcc + c1o3 * (c3 * (mfaac * mfbca + mfaca * mfbac + c4 * mfabb * mfbbb + c2 * (mfbab * mfacb + mfbba * mfabc)) + (mfbca + mfbac)) * OOrho;
+               mfcbc = CUMcbc + c1o3 * (c3 * (mfaac * mfcba + mfcaa * mfabc + c4 * mfbab * mfbbb + c2 * (mfabb * mfcab + mfbba * mfbac)) + (mfcba + mfabc)) * OOrho;
+               mfccb = CUMccb + c1o3 * (c3 * (mfcaa * mfacb + mfaca * mfcab + c4 * mfbba * mfbbb + c2 * (mfbab * mfbca + mfabb * mfcba)) + (mfacb + mfcab)) * OOrho;
+
+               //////////////////////////////////////////////////////////////////////////
+               //6.
+               mfccc = CUMccc - ((-c4 * mfbbb * mfbbb
+                     - (mfcaa * mfacc + mfaca * mfcac + mfaac * mfcca)
+                     - c4 * (mfabb * mfcbb + mfbab * mfbcb + mfbba * mfbbc)
+                     - c2 * (mfbca * mfbac + mfcba * mfabc + mfcab * mfacb)) * OOrho
+                     + (c4 * (mfbab * mfbab * mfaca + mfabb * mfabb * mfcaa + mfbba * mfbba * mfaac)
+                     + c2 * (mfcaa * mfaca * mfaac)
+                     + c16 * mfbba * mfbab * mfabb) * OOrho * OOrho
+                     - c1o3 * (mfacc + mfcac + mfcca) * OOrho
+                     - c1o9 * (mfcaa + mfaca + mfaac) * OOrho
+                     + (c2 * (mfbab * mfbab + mfabb * mfabb + mfbba * mfbba)
+                     + (mfaac * mfaca + mfaac * mfcaa + mfaca * mfcaa) + c1o3 * (mfaac + mfaca + mfcaa)) * OOrho * OOrho * c2o3
+                     + c1o27 * ((drho * drho - drho) * OOrho * OOrho));
+
+
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      //! -  Add acceleration (body force) to first order cumulants according to Eq. (85)-(87) in
+			      //! <a href="https://doi.org/10.1016/j.camwa.2015.05.001"><b>[ M. Geier et al. (2015), DOI:10.1016/j.camwa.2015.05.001 ]</b></a>
+			      //!
+               mfbaa = -mfbaa;
+               mfaba = -mfaba;
+               mfaab = -mfaab;
+               ////////////////////////////////////////////////////////////////////////////////////
+
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      //! - Chimera transform from central moments to well conditioned distributions as defined in Appendix J in
+			      //! <a href="https://doi.org/10.1016/j.camwa.2015.05.001"><b>[ M. Geier et al. (2015), DOI:10.1016/j.camwa.2015.05.001 ]</b></a>
+			      //! see also Eq. (88)-(96) in
+			      //! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+			      //!
+               ////////////////////////////////////////////////////////////////////////////////////
+               // X - Dir
+               backwardInverseChimeraWithK(mfaaa, mfbaa, mfcaa, vvx, vx2, c1, c1);
+               backwardChimera(mfaba, mfbba, mfcba, vvx, vx2);
+               backwardInverseChimeraWithK(mfaca, mfbca, mfcca, vvx, vx2, c3, c1o3);
+               backwardChimera(mfaab, mfbab, mfcab, vvx, vx2);
+               backwardChimera(mfabb, mfbbb, mfcbb, vvx, vx2);
+               backwardChimera(mfacb, mfbcb, mfccb, vvx, vx2);
+               backwardInverseChimeraWithK(mfaac, mfbac, mfcac, vvx, vx2, c3, c1o3);
+               backwardChimera(mfabc, mfbbc, mfcbc, vvx, vx2);
+               backwardInverseChimeraWithK(mfacc, mfbcc, mfccc, vvx, vx2, c9, c1o9);
+
+               ////////////////////////////////////////////////////////////////////////////////////
+               // Y - Dir
+               backwardInverseChimeraWithK(mfaaa, mfaba, mfaca, vvy, vy2, c6, c1o6);
+               backwardChimera(mfaab, mfabb, mfacb, vvy, vy2);
+               backwardInverseChimeraWithK(mfaac, mfabc, mfacc, vvy, vy2, c18, c1o18);
+               backwardInverseChimeraWithK(mfbaa, mfbba, mfbca, vvy, vy2, c3o2, c2o3);
+               backwardChimera(mfbab, mfbbb, mfbcb, vvy, vy2);
+               backwardInverseChimeraWithK(mfbac, mfbbc, mfbcc, vvy, vy2, c9o2, c2o9);
+               backwardInverseChimeraWithK(mfcaa, mfcba, mfcca, vvy, vy2, c6, c1o6);
+               backwardChimera(mfcab, mfcbb, mfccb, vvy, vy2);
+               backwardInverseChimeraWithK(mfcac, mfcbc, mfccc, vvy, vy2, c18, c1o18);
+
+               ////////////////////////////////////////////////////////////////////////////////////
+               // Z - Dir
+               backwardInverseChimeraWithK(mfaaa, mfaab, mfaac, vvz, vz2, c36, c1o36);
+               backwardInverseChimeraWithK(mfaba, mfabb, mfabc, vvz, vz2, c9, c1o9);
+               backwardInverseChimeraWithK(mfaca, mfacb, mfacc, vvz, vz2, c36, c1o36);
+               backwardInverseChimeraWithK(mfbaa, mfbab, mfbac, vvz, vz2, c9, c1o9);
+               backwardInverseChimeraWithK(mfbba, mfbbb, mfbbc, vvz, vz2, c9o4, c4o9);
+               backwardInverseChimeraWithK(mfbca, mfbcb, mfbcc, vvz, vz2, c9, c1o9);
+               backwardInverseChimeraWithK(mfcaa, mfcab, mfcac, vvz, vz2, c36, c1o36);
+               backwardInverseChimeraWithK(mfcba, mfcbb, mfcbc, vvz, vz2, c9, c1o9);
+               backwardInverseChimeraWithK(mfcca, mfccb, mfccc, vvz, vz2, c36, c1o36);
+               ////////////////////////////////////////////////////////////////////////////////////
+
+               //////////////////////////////////////////////////////////////////////////
+               //proof correctness
+               //////////////////////////////////////////////////////////////////////////
+#ifdef  PROOF_CORRECTNESS
+               LBMReal drho_post = (mfaaa + mfaac + mfaca + mfcaa + mfacc + mfcac + mfccc + mfcca)
+                  + (mfaab + mfacb + mfcab + mfccb) + (mfaba + mfabc + mfcba + mfcbc) + (mfbaa + mfbac + mfbca + mfbcc)
+                  + (mfabb + mfcbb) + (mfbab + mfbcb) + (mfbba + mfbbc) + mfbbb;
+               LBMReal dif = drho - drho_post;
+#ifdef SINGLEPRECISION
+               if (dif > 10.0E-7 || dif < -10.0E-7)
+#else
+               if (dif > 10.0E-15 || dif < -10.0E-15)
+#endif
+               {
+                  UB_THROW(UbException(UB_EXARGS, "rho=" + UbSystem::toString(drho) + ", rho_post=" + UbSystem::toString(drho_post)
+                     + " dif=" + UbSystem::toString(dif)
+                     + " rho is not correct for node " + UbSystem::toString(x1) + "," + UbSystem::toString(x2) + "," + UbSystem::toString(x3)
+                     + " in " + block.lock()->toString() + " step = " + UbSystem::toString(step)));
+               }
+#endif
+			      ////////////////////////////////////////////////////////////////////////////////////
+			      //! - Write distributions: style of reading and writing the distributions from/to stored arrays dependent on timestep is based on the esoteric twist algorithm
+			      //! <a href="https://doi.org/10.3390/computation5020019"><b>[ M. Geier et al. (2017), DOI:10.3390/computation5020019 ]</b></a>
+			      //!
+               (*this->localDistributions)(D3Q27System::ET_E, x1, x2, x3) = mfabb;
+               (*this->localDistributions)(D3Q27System::ET_N, x1, x2, x3) = mfbab;
+               (*this->localDistributions)(D3Q27System::ET_T, x1, x2, x3) = mfbba;
+               (*this->localDistributions)(D3Q27System::ET_NE, x1, x2, x3) = mfaab;
+               (*this->localDistributions)(D3Q27System::ET_NW, x1p, x2, x3) = mfcab;
+               (*this->localDistributions)(D3Q27System::ET_TE, x1, x2, x3) = mfaba;
+               (*this->localDistributions)(D3Q27System::ET_TW, x1p, x2, x3) = mfcba;
+               (*this->localDistributions)(D3Q27System::ET_TN, x1, x2, x3) = mfbaa;
+               (*this->localDistributions)(D3Q27System::ET_TS, x1, x2p, x3) = mfbca;
+               (*this->localDistributions)(D3Q27System::ET_TNE, x1, x2, x3) = mfaaa;
+               (*this->localDistributions)(D3Q27System::ET_TNW, x1p, x2, x3) = mfcaa;
+               (*this->localDistributions)(D3Q27System::ET_TSE, x1, x2p, x3) = mfaca;
+               (*this->localDistributions)(D3Q27System::ET_TSW, x1p, x2p, x3) = mfcca;
+
+               (*this->nonLocalDistributions)(D3Q27System::ET_W, x1p, x2, x3) = mfcbb;
+               (*this->nonLocalDistributions)(D3Q27System::ET_S, x1, x2p, x3) = mfbcb;
+               (*this->nonLocalDistributions)(D3Q27System::ET_B, x1, x2, x3p) = mfbbc;
+               (*this->nonLocalDistributions)(D3Q27System::ET_SW, x1p, x2p, x3) = mfccb;
+               (*this->nonLocalDistributions)(D3Q27System::ET_SE, x1, x2p, x3) = mfacb;
+               (*this->nonLocalDistributions)(D3Q27System::ET_BW, x1p, x2, x3p) = mfcbc;
+               (*this->nonLocalDistributions)(D3Q27System::ET_BE, x1, x2, x3p) = mfabc;
+               (*this->nonLocalDistributions)(D3Q27System::ET_BS, x1, x2p, x3p) = mfbcc;
+               (*this->nonLocalDistributions)(D3Q27System::ET_BN, x1, x2, x3p) = mfbac;
+               (*this->nonLocalDistributions)(D3Q27System::ET_BSW, x1p, x2p, x3p) = mfccc;
+               (*this->nonLocalDistributions)(D3Q27System::ET_BSE, x1, x2p, x3p) = mfacc;
+               (*this->nonLocalDistributions)(D3Q27System::ET_BNW, x1p, x2, x3p) = mfcac;
+               (*this->nonLocalDistributions)(D3Q27System::ET_BNE, x1, x2, x3p) = mfaac;
+
+               (*this->restDistributions)(x1, x2, x3) = mfbbb;
+               //////////////////////////////////////////////////////////////////////////
+
+            }
+         }
+      }
+   }
+}
+//////////////////////////////////////////////////////////////////////////
+
diff --git a/src/cpu/VirtualFluidsCore/LBM/CumulantK17LBMKernel.h b/src/cpu/VirtualFluidsCore/LBM/CumulantK17LBMKernel.h
new file mode 100644
index 000000000..49a8da4ac
--- /dev/null
+++ b/src/cpu/VirtualFluidsCore/LBM/CumulantK17LBMKernel.h
@@ -0,0 +1,146 @@
+//=======================================================================================
+// ____          ____    __    ______     __________   __      __       __        __         
+// \    \       |    |  |  |  |   _   \  |___    ___| |  |    |  |     /  \      |  |        
+//  \    \      |    |  |  |  |  |_)   |     |  |     |  |    |  |    /    \     |  |        
+//   \    \     |    |  |  |  |   _   /      |  |     |  |    |  |   /  /\  \    |  |        
+//    \    \    |    |  |  |  |  | \  \      |  |     |   \__/   |  /  ____  \   |  |____    
+//     \    \   |    |  |__|  |__|  \__\     |__|      \________/  /__/    \__\  |_______|   
+//      \    \  |    |   ________________________________________________________________    
+//       \    \ |    |  |  ______________________________________________________________|   
+//        \    \|    |  |  |         __          __     __     __     ______      _______    
+//         \         |  |  |_____   |  |        |  |   |  |   |  |   |   _  \    /  _____)   
+//          \        |  |   _____|  |  |        |  |   |  |   |  |   |  | \  \   \_______    
+//           \       |  |  |        |  |_____   |   \_/   |   |  |   |  |_/  /    _____  |
+//            \ _____|  |__|        |________|   \_______/    |__|   |______/    (_______/   
+//
+//  This file is part of VirtualFluids. VirtualFluids is free software: you can 
+//  redistribute it and/or modify it under the terms of the GNU General Public
+//  License as published by the Free Software Foundation, either version 3 of 
+//  the License, or (at your option) any later version.
+//  
+//  VirtualFluids is distributed in the hope that it will be useful, but WITHOUT 
+//  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+//  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License 
+//  for more details.
+//  
+//  You should have received a copy of the GNU General Public License along
+//  with VirtualFluids (see COPYING.txt). If not, see <http://www.gnu.org/licenses/>.
+//
+//! \file CumulantK17LBMKernel.h
+//! \ingroup LBM
+//! \author Konstantin Kutscher, Martin Geier
+//=======================================================================================
+
+#ifndef CumulantK17LBMKernel_h__
+#define CumulantK17LBMKernel_h__
+
+#include "LBMKernel.h"
+#include "BCProcessor.h"
+#include "D3Q27System.h"
+#include "basics/utilities/UbTiming.h"
+#include "basics/container/CbArray4D.h"
+#include "basics/container/CbArray3D.h"
+
+//! \brief   Compressible cumulant LBM kernel.
+//! \details  An LBM implementation that uses the cascaded cumulant lattice Boltzmann method for the D3Q27 model.
+//!
+//! The model is published in
+//! <a href="http://dx.doi.org/10.1016/j.jcp.2017.05.040"><b>[ Geier et al., (2017), 10.1016/j.jcp.2017.05.040]</b></a>,
+//! <a href="http://dx.doi.org/10.1016/j.jcp.2017.07.004"><b>[ Geier et al., (2017), 10.1016/j.jcp.2017.07.004]</b></a> 
+//! 
+class CumulantK17LBMKernel : public LBMKernel
+{
+public:
+   CumulantK17LBMKernel();
+   virtual ~CumulantK17LBMKernel(void);
+   virtual void calculate(int step); //!< executes the cumulant collision kernel for time step \p step
+   virtual SPtr<LBMKernel> clone(); //!< creates a new kernel instance with the same configuration
+
+protected:
+   inline void forwardInverseChimeraWithK(LBMReal& mfa, LBMReal& mfb, LBMReal& mfc, LBMReal vv, LBMReal v2, LBMReal Kinverse, LBMReal K); //!< distributions -> central moments, see Doxygen block at the definition
+   inline void backwardInverseChimeraWithK(LBMReal& mfa, LBMReal& mfb, LBMReal& mfc, LBMReal vv, LBMReal v2, LBMReal Kinverse, LBMReal K); //!< central moments -> distributions, see Doxygen block at the definition
+   inline void forwardChimera(LBMReal& mfa, LBMReal& mfb, LBMReal& mfc, LBMReal vv, LBMReal v2); //!< distributions -> central moments, K=0 variant
+   inline void backwardChimera(LBMReal& mfa, LBMReal& mfb, LBMReal& mfc, LBMReal vv, LBMReal v2); //!< central moments -> distributions, K=0 variant
+
+   virtual void initDataSet(); //!< sets up the kernel's data arrays (implementation not visible here)
+   LBMReal f[D3Q27System::ENDF + 1]; //!< scratch storage for one node's distribution set
+
+   CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr localDistributions;    //!< "local" part of the distribution storage (esoteric-twist scheme); written back at the end of calculate()
+   CbArray4D<LBMReal, IndexerX4X3X2X1>::CbArray4DPtr nonLocalDistributions; //!< "non-local" part of the distribution storage (esoteric-twist scheme); written back at the end of calculate()
+   CbArray3D<LBMReal, IndexerX3X2X1>::CbArray3DPtr   restDistributions;     //!< rest (zero-velocity) distribution, holds mfbbb
+
+   mu::value_type muX1, muX2, muX3; //!< muParser variables — presumably node coordinates bound into forcing expressions; confirm in LBMKernel
+   mu::value_type muDeltaT;         //!< muParser variable — presumably the time step size; confirm in LBMKernel
+   mu::value_type muNu;             //!< muParser variable — presumably the lattice viscosity; confirm in LBMKernel
+   LBMReal forcingX1; //!< body-force component in x1 direction (lattice units assumed — TODO confirm)
+   LBMReal forcingX2; //!< body-force component in x2 direction (lattice units assumed — TODO confirm)
+   LBMReal forcingX3; //!< body-force component in x3 direction (lattice units assumed — TODO confirm)
+};
+
+////////////////////////////////////////////////////////////////////////////////
+//! \brief forward chimera transformation \ref forwardInverseChimeraWithK
+//! Transformation from distributions to central moments according to Eq. (6)-(14) in
+//! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+//! Modified for lower round-off errors. K and Kinverse are passed as a reciprocal pair (K * Kinverse == 1), see call sites.
+////////////////////////////////////////////////////////////////////////////////
+inline void CumulantK17LBMKernel::forwardInverseChimeraWithK(LBMReal& mfa, LBMReal& mfb, LBMReal& mfc, LBMReal vv, LBMReal v2, LBMReal Kinverse, LBMReal K)
+{
+   using namespace UbMath;   // provides the numeric constants c1, c2, ...
+   LBMReal m2 = mfa + mfc;   // symmetric combination of the outer distributions
+   LBMReal m1 = mfc - mfa;   // antisymmetric combination of the outer distributions
+   LBMReal m0 = m2 + mfb;    // zeroth moment: sum of all three distributions
+   mfa = m0;                 // output the unshifted zeroth moment before m0 is rescaled below
+   m0 *= Kinverse;
+   m0 += c1;                 // adding c1 here equals adding K before the rescale (K * Kinverse == 1); reduces round-off
+   mfb = (m1 * Kinverse - m0 * vv) * K;                    // first central moment
+   mfc = ((m2 - c2 * m1 * vv) * Kinverse + v2 * m0) * K;   // second central moment
+}
+////////////////////////////////////////////////////////////////////////////////
+//! \brief backward chimera transformation \ref backwardInverseChimeraWithK
+//! Transformation from central moments to distributions according to Eq. (57)-(65) in
+//! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+//! Modified for lower round-off errors. K and Kinverse are passed as a reciprocal pair (K * Kinverse == 1), see call sites.
+////////////////////////////////////////////////////////////////////////////////
+inline void CumulantK17LBMKernel::backwardInverseChimeraWithK(LBMReal& mfa, LBMReal& mfb, LBMReal& mfc, LBMReal vv, LBMReal v2, LBMReal Kinverse, LBMReal K)
+{
+   using namespace UbMath;   // provides the numeric constants c1, c2, c1o2
+   LBMReal m0 = (((mfc - mfb) * c1o2 + mfb * vv) * Kinverse + (mfa * Kinverse + c1) * (v2 - vv) * c1o2) * K;   // new mfa; buffered because mfa and mfc are still read below
+   LBMReal m1 = (((mfa - mfc) - c2 * mfb * vv) * Kinverse + (mfa * Kinverse + c1) * (-v2)) * K;                // new mfb; buffered for the same reason
+   mfc = (((mfc + mfb) * c1o2 + mfb * vv) * Kinverse + (mfa * Kinverse + c1) * (v2 + vv) * c1o2) * K;
+   mfa = m0;
+   mfb = m1;
+}
+////////////////////////////////////////////////////////////////////////////////
+//! \brief forward chimera transformation \ref forwardChimera
+//! Transformation from distributions to central moments according to Eq. (6)-(14) in
+//! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+//! for \f$ K_{abc}=0 \f$. This is to avoid unnecessary floating point operations.
+//! Modified for lower round-off errors.
+////////////////////////////////////////////////////////////////////////////////
+inline void CumulantK17LBMKernel::forwardChimera(LBMReal& mfa, LBMReal& mfb, LBMReal& mfc, LBMReal vv, LBMReal v2)
+{
+   using namespace UbMath;                         // provides the numeric constant c2
+   LBMReal m1 = (mfa + mfc) + mfb;                 // zeroth moment: sum of all three distributions
+   LBMReal m2 = mfc - mfa;                         // antisymmetric combination of the outer distributions
+   mfc = (mfc + mfa) + (v2 * m1 - c2 * vv * m2);   // second central moment
+   mfb = m2 - vv * m1;                             // first central moment
+   mfa = m1;                                       // written last so that m1/m2 above are computed from the original inputs
+}
+////////////////////////////////////////////////////////////////////////////////
+//! \brief backward chimera transformation \ref backwardChimera
+//! Transformation from central moments to distributions according to Eq. (57)-(65) in
+//! <a href="https://doi.org/10.1016/j.jcp.2017.05.040"><b>[ M. Geier et al. (2017), DOI:10.1016/j.jcp.2017.05.040 ]</b></a>
+//! for \f$ K_{abc}=0 \f$. This is to avoid unnecessary floating point operations.
+//! Modified for lower round-off errors.
+////////////////////////////////////////////////////////////////////////////////
+inline void CumulantK17LBMKernel::backwardChimera(LBMReal& mfa, LBMReal& mfb, LBMReal& mfc, LBMReal vv, LBMReal v2)
+{
+   using namespace UbMath;   // provides the numeric constants c2, c1o2
+   LBMReal ma = (mfc + mfa * (v2 - vv)) * c1o2 + mfb * (vv - c1o2);   // new mfa; buffered because mfa and mfb are still read below
+   LBMReal mb = ((mfa - mfc) - mfa * v2) - c2 * mfb * vv;             // new mfb; buffered for the same reason
+   mfc = (mfc + mfa * (v2 + vv)) * c1o2 + mfb * (vv + c1o2);
+   mfb = mb;
+   mfa = ma;
+}
+
+#endif // CumulantK17LBMKernel_h__
\ No newline at end of file
diff --git a/src/gpu/GksGpu/Output/VtkWriter.cpp b/src/gpu/GksGpu/Output/VtkWriter.cpp
new file mode 100644
index 000000000..a1a0ab9f6
--- /dev/null
+++ b/src/gpu/GksGpu/Output/VtkWriter.cpp
@@ -0,0 +1,146 @@
+//=======================================================================================
+// ____          ____    __    ______     __________   __      __       __        __         
+// \    \       |    |  |  |  |   _   \  |___    ___| |  |    |  |     /  \      |  |        
+//  \    \      |    |  |  |  |  |_)   |     |  |     |  |    |  |    /    \     |  |        
+//   \    \     |    |  |  |  |   _   /      |  |     |  |    |  |   /  /\  \    |  |        
+//    \    \    |    |  |  |  |  | \  \      |  |     |   \__/   |  /  ____  \   |  |____    
+//     \    \   |    |  |__|  |__|  \__\     |__|      \________/  /__/    \__\  |_______|   
+//      \    \  |    |   ________________________________________________________________    
+//       \    \ |    |  |  ______________________________________________________________|   
+//        \    \|    |  |  |         __          __     __     __     ______      _______    
+//         \         |  |  |_____   |  |        |  |   |  |   |  |   |   _  \    /  _____)   
+//          \        |  |   _____|  |  |        |  |   |  |   |  |   |  | \  \   \_______    
+//           \       |  |  |        |  |_____   |   \_/   |   |  |   |  |_/  /    _____  |
+//            \ _____|  |__|        |________|   \_______/    |__|   |______/    (_______/   
+//
+//  This file is part of VirtualFluids. VirtualFluids is free software: you can 
+//  redistribute it and/or modify it under the terms of the GNU General Public
+//  License as published by the Free Software Foundation, either version 3 of 
+//  the License, or (at your option) any later version.
+//  
+//  VirtualFluids is distributed in the hope that it will be useful, but WITHOUT 
+//  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+//  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License 
+//  for more details.
+//  
+//  You should have received a copy of the GNU General Public License along
+//  with VirtualFluids (see COPYING.txt). If not, see <http://www.gnu.org/licenses/>.
+//
+//! \file VtkWriter.cpp
+//! \ingroup Output
+//! \author Stephan Lenz
+//=======================================================================================
+#include "VtkWriter.h"
+
+#include <vector>
+#include <memory>
+
+#include "Core/Logger/Logger.h"
+
+#include "basics/utilities/UbTuple.h"
+#include "basics/writer/WbWriterVtkXmlBinary.h"
+
+#include "DataBase/DataBase.h"
+#include "Parameters/Parameters.h"
+
+#include "FlowStateData/FlowStateData.cuh"
+#include "FlowStateData/FlowStateDataConversion.cuh"
+#include "FlowStateData/AccessDeviceData.cuh"
+
+//! \brief Writes the current host-side flow state of \p dataBase to a binary
+//!        VTK XML unstructured-grid file ("<filename>.vtu").
+//!
+//! Converts the conserved variables stored in dataBase->dataHost to primitive
+//! variables per cell and emits pressure, density, velocity components,
+//! temperature and a ghost-cell flag (plus passive scalars if enabled).
+//! \param dataBase   simulation data; host arrays are read, nothing is modified on the device
+//! \param parameters solver parameters (only parameters.K is used here, for the primitive-variable conversion)
+//! \param filename   output path without the ".vtu" extension
+void VtkWriter::write(std::shared_ptr<DataBase> dataBase, Parameters parameters, std::string filename)
+{
+    *logging::out << logging::Logger::INFO_INTERMEDIATE << "Write " << filename << ".vtu" << " ... \n";
+
+    //////////////////////////////////////////////////////////////////////////
+    // Assemble the unstructured grid: node coordinates and, per cell, the
+    // eight node indices of a hexahedron (UbTupleInt8).
+
+    std::vector< UbTupleFloat3 > nodes;
+    std::vector< UbTupleInt8  > cells;
+
+    nodes.resize( dataBase->numberOfNodes );
+    cells.resize( dataBase->numberOfCells );
+
+    for( uint nodeIdx = 0; nodeIdx < dataBase->numberOfNodes; nodeIdx++ )
+    {
+        Vec3& node = dataBase->nodeCoordinates[ nodeIdx ];
+
+        nodes[nodeIdx] = makeUbTuple( node.x, node.y, node.z );
+    }
+    
+    for( uint cellIdx = 0; cellIdx < dataBase->numberOfCells; cellIdx++ )
+    {
+        cells[cellIdx] = makeUbTuple( (int)dataBase->cellToNode[ cellIdx ][ 0 ],
+                                      (int)dataBase->cellToNode[ cellIdx ][ 1 ],
+                                      (int)dataBase->cellToNode[ cellIdx ][ 2 ],
+                                      (int)dataBase->cellToNode[ cellIdx ][ 3 ],
+                                      (int)dataBase->cellToNode[ cellIdx ][ 4 ],
+                                      (int)dataBase->cellToNode[ cellIdx ][ 5 ],
+                                      (int)dataBase->cellToNode[ cellIdx ][ 6 ],
+                                      (int)dataBase->cellToNode[ cellIdx ][ 7 ] );
+    }
+
+    //////////////////////////////////////////////////////////////////////////
+    // Cell-data field names; the trailing index comments must stay in sync
+    // with the cellData[...] assignments in the loop below.
+
+    std::vector< std::string > cellDataNames;
+    cellDataNames.push_back("Press");       // 0
+    cellDataNames.push_back("Rho");         // 1
+    cellDataNames.push_back("Vx");          // 2
+    cellDataNames.push_back("Vy");          // 3
+    cellDataNames.push_back("Vz");          // 4
+    cellDataNames.push_back("Temperature"); // 5
+    cellDataNames.push_back("Geometry");    // 6
+#ifdef USE_PASSIVE_SCALAR
+    cellDataNames.push_back("S_1");         // 7
+    cellDataNames.push_back("S_2");         // 8
+#endif
+
+    //////////////////////////////////////////////////////////////////////////
+    // Convert conserved -> primitive variables cell by cell and fill the
+    // per-field value arrays from the host-side data buffer.
+
+    std::vector< std::vector< double > > cellData(cellDataNames.size());
+
+    for( auto& i : cellData ) i.resize( dataBase->numberOfCells );
+
+    for( uint cellIdx = 0; cellIdx < dataBase->numberOfCells; cellIdx++ )
+    {
+        ConservedVariables cons;
+
+        // RHO__/RHO_U/... appear to be indexing macros into the flat
+        // structure-of-arrays host buffer (one segment per variable).
+        cons.rho  = dataBase->dataHost[ RHO__(cellIdx, dataBase->numberOfCells) ];
+        cons.rhoU = dataBase->dataHost[ RHO_U(cellIdx, dataBase->numberOfCells) ];
+        cons.rhoV = dataBase->dataHost[ RHO_V(cellIdx, dataBase->numberOfCells) ];
+        cons.rhoW = dataBase->dataHost[ RHO_W(cellIdx, dataBase->numberOfCells) ];
+        cons.rhoE = dataBase->dataHost[ RHO_E(cellIdx, dataBase->numberOfCells) ];
+#ifdef USE_PASSIVE_SCALAR
+        cons.rhoS_1 = dataBase->dataHost[ RHO_S_1(cellIdx, dataBase->numberOfCells) ];
+        cons.rhoS_2 = dataBase->dataHost[ RHO_S_2(cellIdx, dataBase->numberOfCells) ];
+#endif // USE_PASSIVE_SCALAR
+
+        PrimitiveVariables prim = toPrimitiveVariables(cons, parameters.K);
+
+        // Ideal-gas relation in lambda form: p = rho / (2*lambda).
+        real p = 0.5 * prim.rho / prim.lambda;
+
+#ifdef USE_PASSIVE_SCALAR
+        real T = getT(prim);
+#else // USE_PASSIVE_SCALAR
+        // Without passive scalars, temperature is simply 1/lambda.
+        real T = 1.0 / prim.lambda;
+#endif // USE_PASSIVE_SCALAR
+
+        cellData[0][cellIdx] = p;
+        cellData[1][cellIdx] = prim.rho;
+        cellData[2][cellIdx] = prim.U;
+        cellData[3][cellIdx] = prim.V;
+        cellData[4][cellIdx] = prim.W;
+        cellData[5][cellIdx] = T;
+        cellData[6][cellIdx] = dataBase->isGhostCell(cellIdx);  // stored as 0/1 "Geometry" flag
+#ifdef USE_PASSIVE_SCALAR
+        cellData[7][cellIdx] = prim.S_1;
+        cellData[8][cellIdx] = prim.S_2;
+#endif
+    }
+
+    //////////////////////////////////////////////////////////////////////////
+    // Delegate the actual VTK XML (binary) serialization to the basics writer.
+
+    WbWriterVtkXmlBinary::getInstance()->writeOctsWithCellData(filename, nodes, cells, cellDataNames, cellData);
+
+    //////////////////////////////////////////////////////////////////////////
+
+    *logging::out << logging::Logger::INFO_INTERMEDIATE << "done!\n";
+}
diff --git a/src/gpu/GksGpu/Output/VtkWriter.h b/src/gpu/GksGpu/Output/VtkWriter.h
new file mode 100644
index 000000000..0596fc7bd
--- /dev/null
+++ b/src/gpu/GksGpu/Output/VtkWriter.h
@@ -0,0 +1,52 @@
+//=======================================================================================
+// ____          ____    __    ______     __________   __      __       __        __         
+// \    \       |    |  |  |  |   _   \  |___    ___| |  |    |  |     /  \      |  |        
+//  \    \      |    |  |  |  |  |_)   |     |  |     |  |    |  |    /    \     |  |        
+//   \    \     |    |  |  |  |   _   /      |  |     |  |    |  |   /  /\  \    |  |        
+//    \    \    |    |  |  |  |  | \  \      |  |     |   \__/   |  /  ____  \   |  |____    
+//     \    \   |    |  |__|  |__|  \__\     |__|      \________/  /__/    \__\  |_______|   
+//      \    \  |    |   ________________________________________________________________    
+//       \    \ |    |  |  ______________________________________________________________|   
+//        \    \|    |  |  |         __          __     __     __     ______      _______    
+//         \         |  |  |_____   |  |        |  |   |  |   |  |   |   _  \    /  _____)   
+//          \        |  |   _____|  |  |        |  |   |  |   |  |   |  | \  \   \_______    
+//           \       |  |  |        |  |_____   |   \_/   |   |  |   |  |_/  /    _____  |
+//            \ _____|  |__|        |________|   \_______/    |__|   |______/    (_______/   
+//
+//  This file is part of VirtualFluids. VirtualFluids is free software: you can 
+//  redistribute it and/or modify it under the terms of the GNU General Public
+//  License as published by the Free Software Foundation, either version 3 of 
+//  the License, or (at your option) any later version.
+//  
+//  VirtualFluids is distributed in the hope that it will be useful, but WITHOUT 
+//  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+//  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License 
+//  for more details.
+//  
+//  You should have received a copy of the GNU General Public License along
+//  with VirtualFluids (see COPYING.txt). If not, see <http://www.gnu.org/licenses/>.
+//
+//! \file VtkWriter.h
+//! \ingroup Output
+//! \author Stephan Lenz
+//=======================================================================================
+#ifndef VTK_WRITER_H
+#define VTK_WRITER_H
+
+#include <memory>
+#include <string>
+
+#include "GksGpu_export.h"
+
+// Forward declarations keep GksGpu headers out of this public interface.
+struct DataBase;
+struct Parameters;
+
+//! \brief Stateless writer that dumps a GksGpu flow field to a VTK XML
+//!        unstructured-grid file (see VtkWriter.cpp for the field list).
+class GKSGPU_EXPORT VtkWriter
+{
+public:
+    //! Write the host-side state of \p dataBase to "<filename>.vtu".
+    static void write( std::shared_ptr<DataBase> dataBase, 
+                       Parameters parameters, 
+                       std::string filename );
+};
+
+#endif
\ No newline at end of file
-- 
GitLab