Skip to content
Snippets Groups Projects
Commit 25a3f914 authored by Martin Schönherr's avatar Martin Schönherr :speech_balloon:
Browse files

Merge branch 'develop' into RefactorParameter

parents ede5c85a e37a3c8b
No related branches found
No related tags found
1 merge request!256Kernel names changed
Showing
with 336 additions and 278 deletions
{
"name": "virtual-fluids-environment",
"extensions": [
"mhutchie.git-graph",
"donjayamanne.githistory",
"twxs.cmake",
"ms-vscode.cpptools",
"visualstudioexptteam.vscodeintellicode",
"xaver.clang-format",
"notskm.clang-tidy",
"streetsidesoftware.code-spell-checker"
],
"customizations": {
"vscode": {
"extensions": [
"mhutchie.git-graph",
"donjayamanne.githistory",
"twxs.cmake",
"ms-vscode.cpptools",
"visualstudioexptteam.vscodeintellicode",
"xaver.clang-format",
"notskm.clang-tidy",
"streetsidesoftware.code-spell-checker",
"llvm-vs-code-extensions.vscode-clangd",
"jbenden.c-cpp-flylint",
"ms-python.python",
]
}
},
"containerEnv": {"TZ": "${localEnv:TZ:Europe/Berlin}"},
"runArgs": ["--gpus","all", // remove this line in case you have no gpus available
"--hostname=${localEnv:HOSTNAME}"], // HOSTNAME needs to be known by the vscode environment. It is probably necessary to add "export HOSTNAME=<hostname>" to the config file of your host machine's bash.
"image": "git.rz.tu-bs.de:4567/irmb/virtualfluids/ubuntu20_04:1.5"
"image": "git.rz.tu-bs.de:4567/irmb/virtualfluids/ubuntu22_04:1.0"
}
###############################################################################
## VirtualFluids CI Pipeline ##
###############################################################################
image: git.rz.tu-bs.de:4567/irmb/virtualfluids/ubuntu20_04:1.5
image: git.rz.tu-bs.de:4567/irmb/virtualfluids/ubuntu22_04:1.0
stages:
- build
......@@ -13,6 +13,11 @@ stages:
- deploy
- release
workflow:
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
when: never # disable detached merge request pipelines
- when: always # add all jobs to normal pipeline. This can be overwritten by the rules of the jobs.
###############################################################################
## Builds ##
......@@ -51,7 +56,7 @@ stages:
- cmake .. -LAH
--preset=make_all
-DBUILD_WARNINGS_AS_ERRORS=ON
-DCMAKE_CUDA_ARCHITECTURES=60
-DCMAKE_CUDA_ARCHITECTURES=70
-DBUILD_VF_ALL_SAMPLES=ON
- make -j4
- ccache --show-stats
......@@ -60,7 +65,7 @@ stages:
BUILD_FOLDER: "build"
###############################################################################
gcc_9:
gcc_12:
extends: .gnu_build_template
before_script:
......@@ -68,7 +73,7 @@ gcc_9:
- export CXX=g++
###############################################################################
clang_10:
clang_15:
extends: .gnu_build_template
before_script:
......@@ -112,13 +117,13 @@ msvc_17:
###############################################################################
## Build Python ##
###############################################################################
gcc_9_python:
gcc_12_python:
stage: build_python
needs: ["gcc_9"]
needs: ["gcc_12"]
cache:
key: "gcc_9-$CI_COMMIT_REF_SLUG"
key: "gcc_12-$CI_COMMIT_REF_SLUG"
paths:
- build
......@@ -149,8 +154,6 @@ build_poiseuille_test_container:
rules:
- if: $REMOTE_USER && $REMOTE_HOST && $PRIVATE_KEY && $CI_PIPELINE_SOURCE == "schedule"
when: always
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
when: never
- when: manual
allow_failure: true
......@@ -169,10 +172,10 @@ build_poiseuille_test_container:
###############################################################################
## Tests ##
###############################################################################
gcc_9_unit_tests:
gcc_12_unit_tests:
stage: test
needs: ["gcc_9"]
needs: ["gcc_12"]
before_script:
- cd $CI_PROJECT_DIR/build
......@@ -199,10 +202,10 @@ msvc_17_unit_tests:
- ctest --output-on-failure -C Release
###############################################################################
gcc_9_python_bindings_test:
gcc_12_python_bindings_test:
stage: test
needs: ["gcc_9_python"]
needs: ["gcc_12_python"]
before_script:
- export PYTHONPATH="Python"
......@@ -214,7 +217,7 @@ gcc_9_python_bindings_test:
- python3 -m unittest discover -s Python -v
###############################################################################
gcc_9_python_hpc_test:
gcc_12_python_hpc_test:
image: python:latest
stage: test
......@@ -223,8 +226,6 @@ gcc_9_python_hpc_test:
rules:
- if: $REMOTE_USER && $REMOTE_HOST && $PRIVATE_KEY && $CI_PIPELINE_SOURCE == "schedule"
when: always
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
when: never
- when: manual
allow_failure: true
......@@ -253,8 +254,6 @@ build-regression-tests-ci:
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: always
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
when: never
- when: manual
allow_failure: true
......@@ -280,8 +279,6 @@ regression_test_4gpu:
rules:
- if: $REMOTE_USER && $REMOTE_HOST && $PRIVATE_KEY && $CI_PIPELINE_SOURCE == "schedule"
when: always
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
when: never
- when: manual
allow_failure: true
......@@ -306,8 +303,6 @@ regression_test_8gpu:
rules:
- if: $REMOTE_USER && $REMOTE_HOST && $PRIVATE_KEY && $CI_PIPELINE_SOURCE == "schedule"
when: always
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
when: never
- when: manual
allow_failure: true
......@@ -323,14 +318,13 @@ regression_test_8gpu:
- cd ..
- fieldcompare dir output/8GPU test_data/regression_tests/gpu/DrivenCavity_8GPU_2Levels --include-files "DrivenCavityMultiGPU*.vtu"
- fieldcompare dir output/8GPU test_data/regression_tests/gpu/SphereScaling_8GPU_2Levels --include-files "SphereScaling*.vtu"
###############################################################################
## Benchmark ##
###############################################################################
nvidia_test:
stage: benchmark
image: nvidia/cuda:11.1.1-devel-ubuntu20.04
image: nvidia/cuda:12.1.1-devel-ubuntu22.04
needs: []
......@@ -349,8 +343,6 @@ gpu_numerical_tests:
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: always
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
when: never
- when: manual
allow_failure: true
......@@ -386,20 +378,32 @@ gpu_numerical_tests:
- $CI_PROJECT_DIR/cache
artifacts:
expire_in: 1 hrs
paths:
- $CI_PROJECT_DIR/numerical_tests_gpu_results.txt
###############################################################################
## Code analysis ##
###############################################################################
clang_build_analyzer_clang_10:
.analyze_template:
stage: analyze
only: ["schedules"]
needs: []
tags:
- gpu
- linux
rules:
- if: '$CI_PROJECT_NAMESPACE == "irmb" && $CI_COMMIT_BRANCH == "develop"'
when: always
- if: '$CI_PROJECT_NAMESPACE != "irmb"'
when: manual
allow_failure: true
clang_build_analyzer:
extends: .analyze_template
before_script:
- export CC=clang
- export CXX=clang++
......@@ -424,22 +428,19 @@ clang_build_analyzer_clang_10:
- ClangBuildAnalyzer --analyze CBA
###############################################################################
include_what_you_use_clang_10:
stage: analyze
only: ["schedules"]
needs: []
include_what_you_use_clang_15:
extends: .analyze_template
before_script:
- apt-get update && apt-get install -y libclang-15-dev llvm-15-dev
- export CC=clang
- export CXX=clang++
- $CXX --version
- cd /tmp
- git clone https://github.com/include-what-you-use/include-what-you-use.git
- cd include-what-you-use
- git checkout clang_10
- cmake . -DCMAKE_PREFIX_PATH=/usr/lib/llvm-10
- git checkout clang_15
- cmake . -DCMAKE_PREFIX_PATH=/usr/lib/llvm-15
- make
- export PATH+=:$(pwd)/bin
......@@ -453,24 +454,13 @@ include_what_you_use_clang_10:
- make
###############################################################################
.analyze_template:
rules: # dont create MR pipeline, run always on irmb/develop and manual in all other cases
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
when: never
- if: '$CI_PROJECT_NAMESPACE == "irmb" && $CI_COMMIT_BRANCH == "develop"'
when: always
- when: manual
allow_failure: true
cppcheck:
stage: analyze
extends: .analyze_template
needs: []
before_script:
- apt-get update && apt-get install -y libpcre3-dev
- chmod +x utilities/install-cppcheck.sh
- ./utilities/install-cppcheck.sh
- cppcheck --version
script:
......@@ -486,17 +476,9 @@ cppcheck:
# lizard - Cyclomatic Complexity Analyzer
# Ignore warnings is manually set to 191. This job will fail when new warnings are added.
lizard:
stage: analyze
extends: .analyze_template
needs: []
before_script:
- cd /tmp
- git clone https://github.com/terryyin/lizard.git
- cd lizard
- python3 setup.py install
- lizard --version
script:
......@@ -510,25 +492,25 @@ lizard:
###############################################################################
# code coverage
gcov_gcc_9:
gcov_gcc:
stage: analyze
extends: .analyze_template
needs: []
before_script:
- gcovr --version
- export CC=/usr/bin/gcc-11
- export CXX=/usr/bin/g++-11
script:
- mkdir -p $CI_PROJECT_DIR/build
- cd $CI_PROJECT_DIR/build
- cmake ..
--preset=make_all
-DCMAKE_CUDA_ARCHITECTURES=60
-DBUILD_VF_COVERAGE=ON
-DCMAKE_BUILD_TYPE=PROFILE
-DCMAKE_CUDA_ARCHITECTURES=70
- make -j4
- ./bin/basicsTests
- ctest --output-on-failure
- cd ..
- mkdir coverage
- gcovr -r $CI_PROJECT_DIR -k build -f "src" --print-summary --html coverage/coverage.html --html-details --xml coverage/coverage.xml
......@@ -551,16 +533,10 @@ gcov_gcc_9:
###############################################################################
# this job also produces a compile commands json file.
clang-tidy:
stage: analyze
extends: .analyze_template
needs: []
allow_failure: true
before_script:
- run-clang-tidy -h
- run-clang-tidy-15 -h
script:
- mkdir -p $CI_PROJECT_DIR/build
......@@ -570,7 +546,7 @@ clang-tidy:
-DBUILD_VF_DOUBLE_ACCURACY=ON
-DBUILD_VF_GPU=OFF
- python3 ../utilities/filterCompileCommands.py compile_commands.json
- run-clang-tidy -quiet > clangtidy.txt
- run-clang-tidy-15 -quiet > clangtidy.txt
artifacts:
when: always
......@@ -580,6 +556,33 @@ clang-tidy:
- build/compile_commands.json
###############################################################################
## Sanitizer ##
###############################################################################
sanitizer-address-leak-ub:
extends: .analyze_template
script:
- mkdir -p build && cd build
- cmake ..
--preset=make_all
-DCMAKE_CUDA_ARCHITECTURES=70
-DENABLE_SANITIZER_ADDRESS=ON
-DENABLE_SANITIZER_LEAK=ON
-DENABLE_SANITIZER_UNDEFINED_BEHAVIOR=ON
- make -j8
- ctest --output-on-failure
sanitizer-memory:
extends: .analyze_template
script:
- mkdir -p build && cd build
- cmake ..
--preset=make_all
-DCMAKE_CUDA_ARCHITECTURES=70
-DENABLE_SANITIZER_MEMORY=ON
- make -j8
- ctest --output-on-failure
###############################################################################
# doxgen
pages:
......@@ -628,7 +631,7 @@ pages:
vf_to_phoenix:
extends: .deploy_template
stage: deploy
needs: ["gcc_9_python", "gcc_9_unit_tests", "gcc_9_python_bindings_test"]
needs: ["gcc_12_python", "gcc_12_unit_tests", "gcc_12_python_bindings_test"]
when: manual
......@@ -652,7 +655,7 @@ vf_wheel_to_jupyterhub:
only: ["manual"]
needs: ["gcc_9_python", "gcc_9_unit_tests", "gcc_9_python_bindings_test"]
needs: ["gcc_12_python", "gcc_12_unit_tests", "gcc_12_python_bindings_test"]
variables:
HOST: "gitlab-runner01.irmb.bau.tu-bs.de"
......@@ -680,7 +683,7 @@ sonar-scanner:
variables:
SONAR_HOST_URL: "http://gitlab-runner01.irmb.bau.tu-bs.de/sonarqube/"
needs: ["cppcheck","clang-tidy","gcov_gcc_9"]
needs: ["cppcheck","clang-tidy","gcov_gcc"]
before_script:
- cd /tmp
......
{
"c-cpp-flylint.flexelint.enable": false,
"c-cpp-flylint.flawfinder.enable": false
}
\ No newline at end of file
......@@ -48,7 +48,7 @@
#include <vector>
// includes, timer, string parsing, image helpers
#include <helper_image.h> // helper functions for image compare, dump, data comparisons
// #include <helper_image.h> // helper functions for image compare, dump, data comparisons
#include <helper_string.h> // helper functions for string parsing
#include <helper_timer.h> // helper functions for timers
......
......@@ -52,6 +52,7 @@ function(enable_sanitizers project_name)
"${LIST_OF_SANITIZERS}"
STREQUAL
"")
message(STATUS "Enabling sanitizers: ${LIST_OF_SANITIZERS}")
target_compile_options(${project_name} INTERFACE -fsanitize=${LIST_OF_SANITIZERS})
target_link_options(${project_name} INTERFACE -fsanitize=${LIST_OF_SANITIZERS})
endif()
......
......@@ -17,10 +17,10 @@ if(NOT BUILD_VF_INCLUDE_WHAT_YOU_USE) # optimization flag '-funroll-all-loops' i
endif()
# gcov
if (BUILD_VF_COVERAGE)
list(APPEND CS_COMPILER_FLAGS_CXX "--coverage")
set(CMAKE_EXE_LINKER_FLAGS ${CMAKE_EXE_LINKER_FLAGS} " --coverage")
endif()
# According to https://gcovr.com/en/stable/cookbook.html#out-of-source-builds-with-cmake
# This flags are used if cmake is called with -DCMAKE_BUILD_TYPE=PROFILE
set(CMAKE_C_FLAGS_PROFILE --coverage)
set(CMAKE_CXX_FLAGS_PROFILE --coverage)
#############################################################################################################
# warnings
......
......@@ -58,7 +58,6 @@ option(BUILD_VF_UNIT_TESTS "Build VirtualFluids unit tests" OFF)
option(BUILD_VF_CLANG_TIDY "Add the clang tidy checks to the targets" OFF)
option(BUILD_VF_INCLUDE_WHAT_YOU_USE "Add IWYU to the targets" OFF)
option(BUILD_VF_CPPCHECK "Add cppcheck to the targets" OFF)
option(BUILD_VF_COVERAGE "Add the -coverage compiler flag." OFF)
option(BUILD_CUDA_LTO "Enables the cuda link optimization." OFF)
......
# VirtualFluids Development Image:
# Ubuntu 22.04
FROM nvidia/cuda:12.1.1-devel-ubuntu22.04
# timezone
ARG TZ
ENV TZ="$TZ"
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y \
git wget unzip software-properties-common \
build-essential g++-12 gcc-12 gdb \
ccache \
ninja-build \
openmpi-bin \
libopenmpi-dev \
libomp-15-dev \
clang-15 \
clang-format-15 \
clang-tidy-15 \
clang-tools-15 \
python3.11 \
python3-pip \
python3.11-dev \
cppcheck \
clangd-12 \
&& update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-12 100 \
&& update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 100 \
&& update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-15 100 \
&& update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 100 \
&& update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 100 \
&& update-alternatives --install /usr/bin/clangd clangd /usr/bin/clangd-12 100 \
&& pip3 install \
cmake==3.26.3 \
setuptools \
wheel \
scikit-build \
pyvista \
numpy \
ansible \
'jinja2<3.1' \
gcovr==6.0 \
lizard==1.17.10
from typing import Collection, List
# from typing import Collection, List
import pyvista as pv
from poiseuille.analytical import PoiseuilleSettings, poiseuille_at_heights
from vtk_utilities import vertical_column_from_mesh, get_values_from_indices
from SlurmTests.poiseuille.settings import Scaling
# import pyvista as pv
# from poiseuille.analytical import PoiseuilleSettings, poiseuille_at_heights
# from vtk_utilities import vertical_column_from_mesh, get_values_from_indices
# from SlurmTests.poiseuille.settings import Scaling
def get_output_file_name(output_folder, runtime_params):
timesteps = runtime_params.number_of_timesteps
file_name = f"{output_folder}/mq/mq{timesteps}/mq0_{timesteps}.bin.vtu"
# def get_output_file_name(output_folder, runtime_params):
# timesteps = runtime_params.number_of_timesteps
# file_name = f"{output_folder}/mq/mq{timesteps}/mq0_{timesteps}.bin.vtu"
return file_name
# return file_name
def get_mesh_for_last_timestep(output_folder, runtime_params):
file_name_of_last_timestep = get_output_file_name(output_folder, runtime_params)
mesh_of_last_timestep = pv.read(file_name_of_last_timestep)
return mesh_of_last_timestep
# def get_mesh_for_last_timestep(output_folder, runtime_params):
# file_name_of_last_timestep = get_output_file_name(output_folder, runtime_params)
# mesh_of_last_timestep = pv.read(file_name_of_last_timestep)
# return mesh_of_last_timestep
def get_heights_from_indices(mesh, indices):
return [mesh.points[index][2] for index in indices]
# def get_heights_from_indices(mesh, indices):
# return [mesh.points[index][2] for index in indices]
def get_heights(output_folder, runtime_params):
mesh_of_last_timestep = get_mesh_for_last_timestep(output_folder, runtime_params)
column_indices = vertical_column_from_mesh(mesh_of_last_timestep)
heights = get_heights_from_indices(mesh_of_last_timestep, column_indices)
return heights
# def get_heights(output_folder, runtime_params):
# mesh_of_last_timestep = get_mesh_for_last_timestep(output_folder, runtime_params)
# column_indices = vertical_column_from_mesh(mesh_of_last_timestep)
# heights = get_heights_from_indices(mesh_of_last_timestep, column_indices)
# return heights
def get_numerical_results(runtime_params, output_folder):
mesh_of_last_timestep = get_mesh_for_last_timestep(output_folder, runtime_params)
velocities_in_x_direction = mesh_of_last_timestep.get_array("Vx")
column_indices = vertical_column_from_mesh(mesh_of_last_timestep)
numerical_results = get_values_from_indices(velocities_in_x_direction, column_indices)
# def get_numerical_results(runtime_params, output_folder):
# mesh_of_last_timestep = get_mesh_for_last_timestep(output_folder, runtime_params)
# velocities_in_x_direction = mesh_of_last_timestep.get_array("Vx")
# column_indices = vertical_column_from_mesh(mesh_of_last_timestep)
# numerical_results = get_values_from_indices(velocities_in_x_direction, column_indices)
return numerical_results
# return numerical_results
def get_analytical_results(grid_params, physical_params, kernel, height_values):
channel_height = grid_params.number_of_nodes_per_direction[2]
settings = get_analytical_poiseuille_settings(channel_height, physical_params, kernel)
max_grid_height = channel_height * grid_params.node_distance
adjusted_height_values = [value / max_grid_height * channel_height for value in height_values]
analytical_results = poiseuille_at_heights(settings, adjusted_height_values)
return analytical_results
# def get_analytical_results(grid_params, physical_params, kernel, height_values):
# channel_height = grid_params.number_of_nodes_per_direction[2]
# settings = get_analytical_poiseuille_settings(channel_height, physical_params, kernel)
# max_grid_height = channel_height * grid_params.node_distance
# adjusted_height_values = [value / max_grid_height * channel_height for value in height_values]
# analytical_results = poiseuille_at_heights(settings, adjusted_height_values)
# return analytical_results
def get_analytical_poiseuille_settings(height, physical_params, kernel):
settings = PoiseuilleSettings()
settings.height = height
settings.viscosity = physical_params.lattice_viscosity
settings.density = 1
settings.force = kernel.forcing_in_x1
# def get_analytical_poiseuille_settings(height, physical_params, kernel):
# settings = PoiseuilleSettings()
# settings.height = height
# settings.viscosity = physical_params.lattice_viscosity
# settings.density = 1
# settings.force = kernel.forcing_in_x1
return settings
# return settings
def collect_results() -> (List[List[float]], List[List[float]]):
analytical_results = []
numerical_results = []
# def collect_results() -> (List[List[float]], List[List[float]]):
# analytical_results = []
# numerical_results = []
for simulation_run in range(0, 3):
output_folder = f"output-{simulation_run}"
grid_params, physical_params, runtime_params, kernel = Scaling.configuration_for_scale_level(simulation_run)
heights = get_heights(output_folder, runtime_params)
analytical_results.append(
get_analytical_results(grid_params, physical_params, kernel, heights))
numerical_results.append(get_numerical_results(runtime_params, output_folder))
# for simulation_run in range(0, 3):
# output_folder = f"output-{simulation_run}"
# grid_params, physical_params, runtime_params, kernel = Scaling.configuration_for_scale_level(simulation_run)
# heights = get_heights(output_folder, runtime_params)
# analytical_results.append(
# get_analytical_results(grid_params, physical_params, kernel, heights))
# numerical_results.append(get_numerical_results(runtime_params, output_folder))
return analytical_results, numerical_results
# return analytical_results, numerical_results
......@@ -32,153 +32,154 @@ r"""
! \author Sven Marcus, Henry Korb
=======================================================================================
"""
import os
import shutil
import unittest
import matplotlib.pyplot as plt
import numpy as np
import pyvista as pv
from pyfluids import cpu
from scipy import stats
# import os
# import shutil
# import unittest
from errors import normalized_l2_error
from poiseuille.analytical import poiseuille_at_heights, PoiseuilleSettings
from poiseuille.simulation import run_simulation
from vtk_utilities import vertical_column_from_mesh, get_values_from_indices
# import matplotlib.pyplot as plt
# import numpy as np
# import pyvista as pv
# from pyfluids import cpu
# from scipy import stats
# from errors import normalized_l2_error
# from poiseuille.analytical import poiseuille_at_heights, PoiseuilleSettings
# from poiseuille.simulation import run_simulation
# from vtk_utilities import vertical_column_from_mesh, get_values_from_indices
class TestPoiseuilleFlow(unittest.TestCase):
node_distances = [1, 0.5, 0.25]
number_of_nodes = [16, 32, 64]
number_of_timesteps = [2_500_000, 5_000_000, 10_000_000]
forcings = [1e-9, 5e-10, 2.5e-10]
viscosities = [1e-3, 2e-3, 4e-3]
def zipped_settings(self):
return zip(self.node_distances,
self.number_of_nodes,
self.number_of_timesteps,
self.forcings,
self.viscosities)
# class TestPoiseuilleFlow(unittest.TestCase):
# node_distances = [1, 0.5, 0.25]
# number_of_nodes = [16, 32, 64]
# number_of_timesteps = [2_500_000, 5_000_000, 10_000_000]
# forcings = [1e-9, 5e-10, 2.5e-10]
# viscosities = [1e-3, 2e-3, 4e-3]
def test_poiseuille_flow(self):
self.skipTest("This test is not implemented correctly yet")
plt.ion()
# def zipped_settings(self):
# return zip(self.node_distances,
# self.number_of_nodes,
# self.number_of_timesteps,
# self.forcings,
# self.viscosities)
physical_params = cpu.parameters.PhysicalParameters()
# def test_poiseuille_flow(self):
# self.skipTest("This test is not implemented correctly yet")
# plt.ion()
runtime_params = cpu.parameters.RuntimeParameters()
runtime_params.number_of_threads = os.cpu_count()
runtime_params.timestep_log_interval = 10000
# physical_params = cpu.parameters.PhysicalParameters()
kernel = cpu.kernel.LBMKernel(cpu.kernel.KernelType.CompressibleCumulantFourthOrderViscosity)
kernel.use_forcing = True
# runtime_params = cpu.parameters.RuntimeParameters()
# runtime_params.number_of_threads = os.cpu_count()
# runtime_params.timestep_log_interval = 10000
normalized_l2_errors = []
for delta_x, nodes, timesteps, forcing, viscosity in self.zipped_settings():
physical_params.lattice_viscosity = viscosity
runtime_params.number_of_timesteps = timesteps
kernel.forcing_in_x1 = forcing
# kernel = cpu.kernel.LBMKernel(cpu.kernel.KernelType.CompressibleCumulantFourthOrderViscosity)
# kernel.use_forcing = True
grid_params = create_grid_params_with_nodes_in_column(nodes, delta_x)
l2_error = get_l2_error_for_simulation(grid_params, physical_params, runtime_params, kernel)
normalized_l2_errors.append(l2_error)
# normalized_l2_errors = []
# for delta_x, nodes, timesteps, forcing, viscosity in self.zipped_settings():
# physical_params.lattice_viscosity = viscosity
# runtime_params.number_of_timesteps = timesteps
# kernel.forcing_in_x1 = forcing
nodes_as_log = [np.log10(node) for node in self.number_of_nodes]
l2_norms_as_log = [np.log10(l2) for l2 in normalized_l2_errors]
res = stats.linregress(nodes_as_log, l2_norms_as_log)
# grid_params = create_grid_params_with_nodes_in_column(nodes, delta_x)
# l2_error = get_l2_error_for_simulation(grid_params, physical_params, runtime_params, kernel)
# normalized_l2_errors.append(l2_error)
plt.xscale("log")
plt.yscale("log")
plt.plot(self.number_of_nodes, [np.power(10, res.intercept + res.slope * node) for node in nodes_as_log], 'r-')
plt.plot(self.number_of_nodes, normalized_l2_errors, "x:")
plt.show()
# nodes_as_log = [np.log10(node) for node in self.number_of_nodes]
# l2_norms_as_log = [np.log10(l2) for l2 in normalized_l2_errors]
# res = stats.linregress(nodes_as_log, l2_norms_as_log)
print(normalized_l2_errors)
self.assertAlmostEqual(res.slope, -2, places=2)
# plt.xscale("log")
# plt.yscale("log")
# plt.plot(self.number_of_nodes, [np.power(10, res.intercept + res.slope * node) for node in nodes_as_log], 'r-')
# plt.plot(self.number_of_nodes, normalized_l2_errors, "x:")
# plt.show()
# print(normalized_l2_errors)
# self.assertAlmostEqual(res.slope, -2, places=2)
def get_l2_error_for_simulation(grid_params, physical_params, runtime_params, kernel):
output_folder = "./output"
run_simulation_with_settings(grid_params, physical_params, runtime_params, kernel, output_folder)
heights = get_heights(output_folder, runtime_params)
numerical_results = get_numerical_results(runtime_params, output_folder)
analytical_results = get_analytical_results(grid_params, physical_params, kernel, heights)
# def get_l2_error_for_simulation(grid_params, physical_params, runtime_params, kernel):
# output_folder = "./output"
# run_simulation_with_settings(grid_params, physical_params, runtime_params, kernel, output_folder)
# heights = get_heights(output_folder, runtime_params)
plt.plot(heights, numerical_results)
plt.plot(heights, analytical_results)
plt.legend(["numerical", "analytical"])
plt.show()
# numerical_results = get_numerical_results(runtime_params, output_folder)
# analytical_results = get_analytical_results(grid_params, physical_params, kernel, heights)
return normalized_l2_error(analytical_results, numerical_results)
# plt.plot(heights, numerical_results)
# plt.plot(heights, analytical_results)
# plt.legend(["numerical", "analytical"])
# plt.show()
# return normalized_l2_error(analytical_results, numerical_results)
def run_simulation_with_settings(grid_params, physical_params, runtime_params, kernel, output_folder):
shutil.rmtree(output_folder, ignore_errors=True)
run_simulation(physical_params, grid_params, runtime_params, kernel)
# def run_simulation_with_settings(grid_params, physical_params, runtime_params, kernel, output_folder):
# shutil.rmtree(output_folder, ignore_errors=True)
# run_simulation(physical_params, grid_params, runtime_params, kernel)
def get_heights(output_folder, runtime_params):
mesh_of_last_timestep = get_mesh_for_last_timestep(output_folder, runtime_params)
column_indices = vertical_column_from_mesh(mesh_of_last_timestep)
heights = get_heights_from_indices(mesh_of_last_timestep, column_indices)
return heights
# def get_heights(output_folder, runtime_params):
# mesh_of_last_timestep = get_mesh_for_last_timestep(output_folder, runtime_params)
# column_indices = vertical_column_from_mesh(mesh_of_last_timestep)
# heights = get_heights_from_indices(mesh_of_last_timestep, column_indices)
# return heights
def get_numerical_results(runtime_params, output_folder):
mesh_of_last_timestep = get_mesh_for_last_timestep(output_folder, runtime_params)
velocities_in_x_direction = mesh_of_last_timestep.get_array("Vx")
column_indices = vertical_column_from_mesh(mesh_of_last_timestep)
numerical_results = get_values_from_indices(velocities_in_x_direction, column_indices)
return numerical_results
# def get_numerical_results(runtime_params, output_folder):
# mesh_of_last_timestep = get_mesh_for_last_timestep(output_folder, runtime_params)
# velocities_in_x_direction = mesh_of_last_timestep.get_array("Vx")
# column_indices = vertical_column_from_mesh(mesh_of_last_timestep)
# numerical_results = get_values_from_indices(velocities_in_x_direction, column_indices)
# return numerical_results
def get_analytical_results(grid_params, physical_params, kernel, height_values):
channel_height = grid_params.number_of_nodes_per_direction[2]
settings = get_analytical_poiseuille_settings(channel_height, physical_params, kernel)
max_grid_height = channel_height * grid_params.node_distance
adjusted_height_values = [value / max_grid_height * channel_height for value in height_values]
analytical_results = poiseuille_at_heights(settings, adjusted_height_values)
return analytical_results
# def get_analytical_results(grid_params, physical_params, kernel, height_values):
# channel_height = grid_params.number_of_nodes_per_direction[2]
# settings = get_analytical_poiseuille_settings(channel_height, physical_params, kernel)
# max_grid_height = channel_height * grid_params.node_distance
# adjusted_height_values = [value / max_grid_height * channel_height for value in height_values]
# analytical_results = poiseuille_at_heights(settings, adjusted_height_values)
# return analytical_results
def get_mesh_for_last_timestep(output_folder, runtime_params):
file_name_of_last_timestep = get_output_file_name(output_folder, runtime_params)
mesh_of_last_timestep = pv.read(file_name_of_last_timestep)
return mesh_of_last_timestep
# def get_mesh_for_last_timestep(output_folder, runtime_params):
# file_name_of_last_timestep = get_output_file_name(output_folder, runtime_params)
# mesh_of_last_timestep = pv.read(file_name_of_last_timestep)
# return mesh_of_last_timestep
def get_analytical_poiseuille_settings(height, physical_params, kernel):
settings = PoiseuilleSettings()
settings.height = height
settings.viscosity = physical_params.lattice_viscosity
settings.density = 1
settings.force = kernel.forcing_in_x1
return settings
# def get_analytical_poiseuille_settings(height, physical_params, kernel):
# settings = PoiseuilleSettings()
# settings.height = height
# settings.viscosity = physical_params.lattice_viscosity
# settings.density = 1
# settings.force = kernel.forcing_in_x1
# return settings
def get_output_file_name(output_folder, runtime_params):
timesteps = runtime_params.number_of_timesteps
file_name = f"{output_folder}/mq/mq{timesteps}/mq0_{timesteps}.bin.vtu"
return file_name
# def get_output_file_name(output_folder, runtime_params):
# timesteps = runtime_params.number_of_timesteps
# file_name = f"{output_folder}/mq/mq{timesteps}/mq0_{timesteps}.bin.vtu"
# return file_name
def get_heights_from_indices(mesh, indices):
return [mesh.points[index][2] for index in indices]
# def get_heights_from_indices(mesh, indices):
# return [mesh.points[index][2] for index in indices]
def create_grid_params_with_nodes_in_column(nodes_in_column, delta_x):
grid_params = cpu.parameters.GridParameters()
grid_params.node_distance = delta_x
grid_params.number_of_nodes_per_direction = [1, 1, nodes_in_column]
grid_params.blocks_per_direction = [1, 1, 8]
grid_params.periodic_boundary_in_x1 = True
grid_params.periodic_boundary_in_x2 = True
grid_params.periodic_boundary_in_x3 = False
return grid_params
# def create_grid_params_with_nodes_in_column(nodes_in_column, delta_x):
# grid_params = cpu.parameters.GridParameters()
# grid_params.node_distance = delta_x
# grid_params.number_of_nodes_per_direction = [1, 1, nodes_in_column]
# grid_params.blocks_per_direction = [1, 1, 8]
# grid_params.periodic_boundary_in_x1 = True
# grid_params.periodic_boundary_in_x2 = True
# grid_params.periodic_boundary_in_x3 = False
# return grid_params
......@@ -4,20 +4,19 @@ cycler==0.10.0
imageio==2.9.0
iniconfig==1.1.1
kiwisolver==1.3.1
matplotlib==3.3.3
matplotlib==3.7.1
meshio==4.3.8
numpy==1.19.5
numpy==1.24.0
packaging==20.8
Pillow==8.1.0
Pillow==9.5.0
pluggy==0.13.1
py==1.10.0
pyparsing==2.4.7
pytest==6.2.1
python-dateutil==2.8.1
pyvista==0.28.1
scipy==1.6.1
pyvista==0.39.1
scipy==1.10
scooby==0.5.6
six==1.15.0
toml==0.10.2
transforms3d==0.3.1
vtk==9.0.1
transforms3d==0.4.1
#include "MathematicaListOfListsImp.h"
#include <iomanip>
#include <limits>
std::shared_ptr<MathematicaListOfLists> MathematicaListOfListsImp::getNewInstance(std::string listName, std::vector<std::vector<double>> listOfLists)
{
......
#ifndef MATHEMATICA_LIST_OF_LISTS_IMP_H
#define MATHEMATICA_LIST_OF_LISTS_IM_H
#define MATHEMATICA_LIST_OF_LISTS_IMP_H
#include "MathematicaListOfLists.h"
......
......@@ -4,6 +4,7 @@
#include "MathematicaPointList.h"
#include <iomanip>
#include <limits>
std::shared_ptr<MathematicaPointList> MathematicaPointListImp::getNewInstance(std::string listName, std::vector<std::shared_ptr<DataPoint> > plotData)
{
......
......@@ -9,6 +9,7 @@ class PostProcessingConfigData;
// Abstract interface for reading a post-processing configuration file into a
// PostProcessingConfigData object. Concrete readers implement readConfigFile().
class PostProcessingConfigFileReader
{
public:
// Virtual destructor: this class is used polymorphically (deleted through a
// base pointer), so destruction must dispatch to the derived destructor.
virtual ~PostProcessingConfigFileReader() = default;
// Parse the configuration file at filePath and return the populated data.
virtual std::shared_ptr<PostProcessingConfigData> readConfigFile(std::string filePath) = 0;
};
#endif
\ No newline at end of file
......@@ -64,7 +64,3 @@ std::shared_ptr<PostProcessingConfigData> PostProcessingConfigFileReaderImp::rea
return data;
}
// Intentionally empty: the reader holds no state that needs construction.
// (Instances are obtained via the static getNewInstance() factory.)
PostProcessingConfigFileReaderImp::PostProcessingConfigFileReaderImp()
{
}
......@@ -8,10 +8,10 @@ class PostProcessingConfigFileReaderImp : public PostProcessingConfigFileReader
public:
static std::shared_ptr<PostProcessingConfigFileReader> getNewInstance();
std::shared_ptr<PostProcessingConfigData> readConfigFile(std::string filePath);
std::shared_ptr<PostProcessingConfigData> readConfigFile(std::string filePath) override;
private:
PostProcessingConfigFileReaderImp();
PostProcessingConfigFileReaderImp() = default;
};
#endif
\ No newline at end of file
......@@ -39,12 +39,12 @@ int main(int argc, char **argv)
std::shared_ptr<MathematicaAssistantFactory> assistantFactory = MathematicaAssistantFactoryImp::getNewInstance();
std::vector<std::shared_ptr<MathematicaAssistant> > mathematicaAssistants = assistantFactory->makeMathematicaAssistants(configData->getAssistants(), functionFactory);
for (int sim = 0; sim < configData->getSimulations().size(); sim++) {
for (int comb = 0; comb < configData->getDataCombinations().size(); comb++) {
for (uint sim = 0; sim < configData->getSimulations().size(); sim++) {
for (uint comb = 0; comb < configData->getDataCombinations().size(); comb++) {
std::shared_ptr<LogFileDataAssistantStrategy> strategy = assistentStrategyFactory->makeLogFileDataAssistantStrategy(configData->getSimulations().at(sim));
std::vector<std::shared_ptr<LogFileDataGroup> > logFileDataSorted = assistentLogFile->findDataCombination(logFileDataVector, strategy, configData->getDataCombinations().at(comb));
for (int i = 0; i < logFileDataSorted.size(); i++) {
for (int j = 0; j < mathematicaAssistants.size(); j++)
for (uint i = 0; i < logFileDataSorted.size(); i++) {
for (uint j = 0; j < mathematicaAssistants.size(); j++)
mathematicaAssistants.at(j)->makeMathematicaOutput(logFileDataSorted.at(i), aMathmaticaFile);
}
}
......
......@@ -17,7 +17,7 @@ std::shared_ptr<PhiTest> PhiTest::getNewInstance(std::shared_ptr<ColorConsoleOut
void PhiTest::evaluate()
{
for (int i = 0; i < postProStrategies.size(); i++)
for (uint i = 0; i < postProStrategies.size(); i++)
phiDiff.push_back(postProStrategies.at(i)->getPhiDiff(dataToCalculate));
orderOfAccuracy = calcOrderOfAccuracy(phiDiff);
......@@ -46,7 +46,7 @@ std::string PhiTest::getDataToCalculate()
std::vector<int> PhiTest::getLx()
{
std::vector<int> lxINT;
for (int i = 0; i < lx.size(); i++)
for (uint i = 0; i < lx.size(); i++)
lxINT.push_back((int)lx.at(i));
return lxINT;
}
......@@ -62,7 +62,7 @@ double PhiTest::getOrderOfAccuracy()
}
PhiTest::PhiTest(std::shared_ptr<ColorConsoleOutput> colorOutput, double viscosity, std::shared_ptr<PhiTestParameterStruct> testPara, std::string dataToCalculate)
: TestImp(colorOutput), viscosity(viscosity), dataToCalculate(dataToCalculate)
: TestImp(colorOutput), dataToCalculate(dataToCalculate)
{
minOrderOfAccuracy = testPara->minOrderOfAccuracy;
startStepCalculation = testPara->startTimeStepCalculation;
......@@ -92,7 +92,7 @@ std::vector<std::string> PhiTest::buildTestOutput()
std::vector<std::string> output = buildBasicTestOutput();
std::ostringstream oss;
for (int i = 0; i < phiDiff.size(); i++) {
for (uint i = 0; i < phiDiff.size(); i++) {
oss << "PhiDiff" << simInfos.at(i)->getLx() << ": " << phiDiff.at(i);
output.push_back(oss.str());
oss.str(std::string());
......@@ -125,7 +125,7 @@ std::vector<std::string> PhiTest::buildBasicTestOutput()
output.push_back(oss.str());
oss.str(std::string());
for (int i = 0; i < simInfos.size(); i++) {
for (uint i = 0; i < simInfos.size(); i++) {
oss << "L: " << std::setfill(' ') << std::right << std::setw(4) << simInfos.at(i)->getLx() << simInfos.at(i)->getSimulationParameterString();
output.push_back(oss.str());
oss.str(std::string());
......
......@@ -40,7 +40,6 @@ private:
std::vector<double> phiDiff;
double orderOfAccuracy;
double minOrderOfAccuracy;
double viscosity;
std::string dataToCalculate;
std::vector<std::shared_ptr<PhiTestPostProcessingStrategy> > postProStrategies;
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment