Commit 17cf4cbd authored by Illia Kolesnik's avatar Illia Kolesnik
Browse files

Updated submitting of MPI tasks

parent 4c8bdde0
Loading
Loading
Loading
Loading
+13 −9
Original line number Diff line number Diff line
#!/bin/bash
#
# SLURM batch script for an MPI job on the qcpu partition.
# Submit with: sbatch <this file>
# (Diff residue removed: the scraped commit view interleaved old and new
#  #SBATCH lines; duplicates conflicted and sbatch silently takes the last
#  occurrence of each option. Only the updated directives are kept.)

#SBATCH --account=OPEN-28-65    # project account to charge
#SBATCH --job-name=mpi          # job name (default is the name of this file)
#SBATCH --output=log.%x.job_%j  # file name for stdout/stderr (%x will be replaced with the job name, %j with the jobid)
#SBATCH --time=24:00:00         # maximum wall time allocated for the job (D-H:MM:SS)

#SBATCH --partition=qcpu        # partition/queue name for the job submission

#SBATCH --nodes=8               # number of nodes
#SBATCH --ntasks-per-node=1     # MPI processes per node

#SBATCH --threads-per-core=1    # do not use hyperthreads (i.e. CPUs = physical cores below)
#SBATCH --cpus-per-task=8       # number of CPUs per process

# how much RAM per node can be allocated for the job
# NOTE(review): an earlier comment claimed "max: 60G" but 200G is requested —
# confirm against the qcpu partition's per-node memory limit.
#SBATCH --mem=200G

# Stop on error
set -e

# load the MPI/compiler toolchain (GCC + OpenMPI + math libs)
module load foss

# start the job in the directory it was submitted from
cd "$SLURM_SUBMIT_DIR"
+1 −1
Original line number Diff line number Diff line
@@ -9,7 +9,7 @@
#SBATCH --ntasks=1              # number of tasks/processes

# how much RAM per node can be allocated for the job
# NOTE(review): an earlier comment claimed "max: 60G" but 200G is requested —
# confirm against the partition's per-node memory limit.
# (Duplicate --mem=250G from the scraped diff removed; the commit's updated
#  value is 200G, which is also what sbatch would use since the last
#  occurrence of a repeated option wins.)
#SBATCH --mem=200G

# Stop on error
set -e