This commit is contained in:
2026-03-16 17:28:52 +03:00
parent adbad08ae2
commit 9f6793616a
7 changed files with 886 additions and 0 deletions

24
task1/scripts/build.sh Executable file
View File

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Build the CUDA LINPACK benchmark into bin/linpack_cuda.
# Override the target GPU arch with: CUDA_ARCH=sm_70 ./scripts/build.sh
set -euo pipefail

# Resolve the task root (the parent of this scripts/ directory) and work there.
ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
cd "$ROOT_DIR"

mkdir -p bin results

# Target GPU architecture (K40-era default).
CUDA_ARCH="${CUDA_ARCH:-sm_35}"

# Start from a clean module environment so the toolchain is reproducible;
# a failed 'module load' aborts the build via set -e, which is intended.
module purge
module load compiler/gcc/11
module load nvidia/cuda/11.6u2

# Keep the compiler flags in an array so quoting stays robust.
nvcc_flags=(
  -ccbin g++
  -O3
  -std=c++14
  -lineinfo
  -arch="${CUDA_ARCH}"
  -o bin/linpack_cuda
)
nvcc "${nvcc_flags[@]}" src/main.cu
echo "Built: $ROOT_DIR/bin/linpack_cuda"

54
task1/scripts/run_cuda.slurm Executable file
View File

@@ -0,0 +1,54 @@
#!/usr/bin/env bash
#SBATCH --job-name=task1-cuda
#SBATCH --partition=tornado-k40
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --time=00:20:00
#SBATCH --output=results/%x-%j.out
#SBATCH --error=results/%x-%j.err
# Build and run the custom CUDA LINPACK benchmark on a K40 node, logging
# account, Slurm-job and node information alongside the benchmark CSV.
# NOTE(review): results/ must already exist at submit time or Slurm cannot
# open the --output/--error files; the mkdir below only helps later steps.
set -euo pipefail

# Under sbatch, $0 points at Slurm's spooled copy of this script, so a
# dirname-based path would land in the spool directory. Prefer the submit
# directory (assumes sbatch is invoked from the task1 root — TODO confirm)
# and fall back to $0 for direct ./scripts/run_cuda.slurm execution.
if [ -n "${SLURM_SUBMIT_DIR:-}" ]; then
  ROOT_DIR="$SLURM_SUBMIT_DIR"
else
  ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
fi
cd "$ROOT_DIR"
mkdir -p results bin

# Rebuild so the binary always matches the sources for this run.
./scripts/build.sh

echo "===== account info ====="
whoami
hostname
date
echo
echo "===== slurm info ====="
echo "SLURM_JOB_ID=${SLURM_JOB_ID:-unknown}"
echo "SLURM_JOB_NAME=${SLURM_JOB_NAME:-unknown}"
echo "SLURM_JOB_PARTITION=${SLURM_JOB_PARTITION:-unknown}"
echo "SLURM_JOB_NUM_NODES=${SLURM_JOB_NUM_NODES:-unknown}"
echo "SLURM_NODELIST=${SLURM_NODELIST:-unknown}"
echo "CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-unset}"
# Bare ${SLURM_JOB_ID} would trip set -u outside Slurm; only query when set.
if [ -n "${SLURM_JOB_ID:-}" ]; then
  scontrol show job "${SLURM_JOB_ID}" || true
fi
echo
echo "===== node config ====="
lscpu | sed -n '1,20p'
if [ -n "${SLURMD_NODENAME:-}" ]; then
  scontrol show node "${SLURMD_NODENAME}" || true
fi
# Best-effort GPU inventory; do not abort if the driver tools are missing.
nvidia-smi -L || true
nvidia-smi || true
echo
echo "===== benchmark ====="
# 'local' suffix keeps the CSV name valid (and set -u happy) when the
# script is run outside of a Slurm allocation.
./bin/linpack_cuda \
  --start 1000 \
  --step 500 \
  --count 6 \
  --eps 1e-6 \
  --max-iters 15000 \
  --threads 256 \
  --repeat 3 \
  --warmup 1 \
  --csv "results/task1-cuda-${SLURM_JOB_ID:-local}.csv"

View File

@@ -0,0 +1,56 @@
#!/usr/bin/env bash
#SBATCH --job-name=task1-intel-linpack
#SBATCH --partition=tornado
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=56
#SBATCH --time=00:20:00
#SBATCH --output=results/%x-%j.out
#SBATCH --error=results/%x-%j.err
# Run the prebuilt Intel MKL LINPACK benchmark on a full CPU node, logging
# account, Slurm-job and node information first.
# NOTE(review): results/ must already exist at submit time or Slurm cannot
# open the --output/--error files; the mkdir below only helps later steps.
set -euo pipefail

# Under sbatch, $0 points at Slurm's spooled copy of this script, so a
# dirname-based path would land in the spool directory. Prefer the submit
# directory (assumes sbatch is invoked from the task1 root — TODO confirm)
# and fall back to $0 for direct execution.
if [ -n "${SLURM_SUBMIT_DIR:-}" ]; then
  ROOT_DIR="$SLURM_SUBMIT_DIR"
else
  ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
fi
cd "$ROOT_DIR"
mkdir -p results

# Location of the Intel LINPACK distribution; both are overridable via
# sbatch --export (see the hint printed below).
LINPACK_DIR="${LINPACK_DIR:-/linux/share/mkl/benchmarks/linpack}"
LINPACK_INPUT="${LINPACK_INPUT:-lininput_xeon64}"
if [ ! -x "${LINPACK_DIR}/xlinpack_xeon64" ]; then
  echo "Intel LINPACK binary not found: ${LINPACK_DIR}/xlinpack_xeon64"
  echo "If the path differs on the cluster, submit with:"
  echo "sbatch --export=ALL,LINPACK_DIR=/path/to/linpack scripts/run_intel_linpack.slurm"
  exit 1
fi

echo "===== account info ====="
whoami
hostname
date
echo
echo "===== slurm info ====="
echo "SLURM_JOB_ID=${SLURM_JOB_ID:-unknown}"
echo "SLURM_JOB_NAME=${SLURM_JOB_NAME:-unknown}"
echo "SLURM_JOB_PARTITION=${SLURM_JOB_PARTITION:-unknown}"
echo "SLURM_JOB_NUM_NODES=${SLURM_JOB_NUM_NODES:-unknown}"
echo "SLURM_NODELIST=${SLURM_NODELIST:-unknown}"
echo "OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-56}"
# Bare ${SLURM_JOB_ID} would trip set -u outside Slurm; only query when set.
if [ -n "${SLURM_JOB_ID:-}" ]; then
  scontrol show job "${SLURM_JOB_ID}" || true
fi
echo
echo "===== node config ====="
lscpu | sed -n '1,20p'
if [ -n "${SLURMD_NODENAME:-}" ]; then
  scontrol show node "${SLURMD_NODENAME}" || true
fi
echo
echo "===== intel linpack ====="
# Pin both OpenMP and MKL to the allocated core count (56 outside Slurm).
export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK:-56}"
export MKL_NUM_THREADS="${SLURM_CPUS_PER_TASK:-56}"
# The benchmark resolves its input file relative to its own directory.
cd "${LINPACK_DIR}"
./xlinpack_xeon64 "${LINPACK_INPUT}"