Commit a827f3ef authored by Anna Wellmann

Always upload artifacts

parent fb2c0c85
1 merge request: !244 Fix the regression tests that run on the Phoenix cluster
@@ -303,9 +303,9 @@ regression_test_4gpu:
     - hpc-rocket cancel regression-tests/multigpu_test/rocket4GPU.yml $(python regression-tests/multigpu_test/utilities/parsejobid.py hpcrocket4GPU.log)
   artifacts:
+    when: always
     paths:
       - output/4GPU/slurm4GPU.out
+      - hpcrocket4GPU.log
     expire_in: 1 week
 ###############################################################################
 regression_test_8gpu:
@@ -339,9 +339,9 @@ regression_test_8gpu:
     - hpc-rocket cancel regression-tests/multigpu_test/rocket8GPU.yml $(python regression-tests/multigpu_test/utilities/parsejobid.py hpcrocket8GPU.log)
   artifacts:
+    when: always
     paths:
       - output/8GPU/slurm8GPU.out
+      - hpcrocket8GPU.log
     expire_in: 1 week
 ###############################################################################
 ## Benchmark ##
...
@@ -14,7 +14,7 @@ echo "SLURM_JOB_NODELIST"=$SLURM_JOB_NODELIST
 echo "SLURM_NNODES"=$SLURM_NNODES
 echo "SLURM_TASKS_PER_NODE"=$SLURM_TASKS_PER_NODE
 module purge
 module load comp/gcc/10.2.0
 module load mpi/openmpi/4.0.5_gcc_9.3/openmpi
 module load cuda/11.3
...
@@ -14,7 +14,7 @@ echo "SLURM_JOB_NODELIST"=$SLURM_JOB_NODELIST
 echo "SLURM_NNODES"=$SLURM_NNODES
 echo "SLURM_TASKS_PER_NODE"=$SLURM_TASKS_PER_NODE
 module purge
 module load comp/gcc/10.2.0
 module load mpi/openmpi/4.0.5_gcc_9.3/openmpi
 module load cuda/11.3
...
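For context, "when: always" is the GitLab CI artifacts setting that uploads a job's artifacts regardless of the job's final status; with the default ("on_success"), the Slurm output and the hpc-rocket log would be discarded exactly when a regression test fails and the logs are needed most. A minimal sketch of how the artifacts block of the 4-GPU job reads after this change (content taken from the diff above, indentation assumed):

  artifacts:
    when: always                      # upload artifacts even if the job fails or times out
    paths:
      - output/4GPU/slurm4GPU.out     # Slurm output of the 4-GPU regression test
      - hpcrocket4GPU.log             # hpc-rocket log, also parsed for the job id on cancel
    expire_in: 1 week                 # keep the uploaded artifacts for one week

The 8-GPU job follows the same pattern with the corresponding 8GPU file names.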