task2
This commit is contained in:
7
task2/scripts/build_cuda.sh
Executable file
7
task2/scripts/build_cuda.sh
Executable file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/env bash
# Build the CUDA wave solver into bin/wave_cuda.
# The target GPU architecture can be overridden via the CUDA_ARCH
# environment variable (default: sm_35, i.e. Kepler-class K40 nodes).
set -euo pipefail

# Always work from the task2/ project root, regardless of where the
# script was invoked from.
cd "$(dirname "$0")/.."

CUDA_ARCH="${CUDA_ARCH:-sm_35}"

mkdir -p bin
nvcc -ccbin g++ -O3 -arch="$CUDA_ARCH" -o bin/wave_cuda src/wave_cuda.cu
echo "Built bin/wave_cuda (arch=$CUDA_ARCH)"
|
||||
6
task2/scripts/build_mpi.sh
Executable file
6
task2/scripts/build_mpi.sh
Executable file
@@ -0,0 +1,6 @@
|
||||
#!/usr/bin/env bash
# Build the MPI wave solver into bin/wave_mpi using the mpicc wrapper.
set -euo pipefail

# Always work from the task2/ project root, regardless of where the
# script was invoked from.
cd "$(dirname "$0")/.."

mkdir -p bin
mpicc -O3 -std=c99 -o bin/wave_mpi src/wave_mpi.c
echo "Built bin/wave_mpi"
|
||||
73
task2/scripts/plot_task2_results.py
Executable file
73
task2/scripts/plot_task2_results.py
Executable file
@@ -0,0 +1,73 @@
|
||||
#!/usr/bin/env python3
"""
Plot computation time versus polygon size for MPI (1, 2, 4 nodes) and CUDA.

Usage:
    python3 plot_task2_results.py \
        --mpi1 results/task2-mpi-1n-XXXXX.csv \
        --mpi2 results/task2-mpi-2n-XXXXX.csv \
        --mpi4 results/task2-mpi-4n-XXXXX.csv \
        --cuda results/task2-cuda-XXXXX.csv \
        -o report/img/task2-time-comparison.png
"""
import argparse
import csv
from pathlib import Path

import matplotlib.pyplot as plt
|
||||
|
||||
|
||||
def read_mpi_csv(path: str) -> tuple[list[int], list[float]]:
    """Read an MPI benchmark CSV and return ``(sizes, times_ms)``.

    Only the ``n`` (polygon size) and ``time_ms`` columns are used;
    any other columns in the file are ignored.
    """
    with open(path) as fh:
        records = list(csv.DictReader(fh))
    sizes = [int(rec["n"]) for rec in records]
    times = [float(rec["time_ms"]) for rec in records]
    return sizes, times
|
||||
|
||||
|
||||
def read_cuda_csv(path: str) -> tuple[list[int], list[float]]:
    """Read a CUDA benchmark CSV and return ``(sizes, times_ms)``.

    The CUDA and MPI result files share the two columns this script
    needs (``n`` and ``time_ms``), so the parsing is delegated to
    :func:`read_mpi_csv` instead of duplicating it line for line.
    """
    return read_mpi_csv(path)
|
||||
|
||||
|
||||
def main() -> None:
    """Parse CLI arguments, plot all four timing series and save the figure."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--mpi1", required=True, help="CSV for MPI 1 node")
    parser.add_argument("--mpi2", required=True, help="CSV for MPI 2 nodes")
    parser.add_argument("--mpi4", required=True, help="CSV for MPI 4 nodes")
    parser.add_argument("--cuda", required=True, help="CSV for CUDA")
    parser.add_argument("-o", "--output", default="task2-time-comparison.png")
    args = parser.parse_args()

    fig, ax = plt.subplots(figsize=(10, 6))

    series = [
        ("MPI 1 node", args.mpi1),
        ("MPI 2 nodes", args.mpi2),
        ("MPI 4 nodes", args.mpi4),
        ("CUDA", args.cuda),
    ]
    for label, csv_path in series:
        # MPI labels route through the MPI reader, everything else (CUDA)
        # through the CUDA reader.
        reader = read_mpi_csv if "mpi" in label.lower() else read_cuda_csv
        sizes, times = reader(csv_path)
        ax.plot(sizes, times, marker="o", label=label)

    # Axis labels / title are intentionally in Russian (report language).
    ax.set_xlabel("Размер полигона n")
    ax.set_ylabel("Время, мс")
    ax.set_title("Зависимость времени вычисления от размера полигона")
    ax.legend()
    ax.grid(True, alpha=0.3)

    # Make sure the output directory (e.g. report/img/) exists before saving.
    Path(args.output).parent.mkdir(parents=True, exist_ok=True)
    fig.savefig(args.output, dpi=150, bbox_inches="tight")
    print(f"Saved: {args.output}")


if __name__ == "__main__":
    main()
|
||||
49
task2/scripts/run_cuda.slurm
Normal file
49
task2/scripts/run_cuda.slurm
Normal file
@@ -0,0 +1,49 @@
|
||||
#!/usr/bin/env bash
#SBATCH --job-name=task2-cuda
#SBATCH --partition=tornado-k40
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --time=00:20:00
#SBATCH --output=results/%x-%j.out
#SBATCH --error=results/%x-%j.err

# SLURM batch job: build the CUDA wave solver, log the allocation details,
# then benchmark it across a range of polygon sizes, one CSV row per run.
set -euo pipefail

# sbatch starts jobs in the submit directory's filesystem context;
# cd there explicitly so relative paths (scripts/, bin/, results/) resolve.
cd "${SLURM_SUBMIT_DIR}"

# Toolchain: GCC 11 host compiler + CUDA 11.6u2.
module purge
module load compiler/gcc/11
module load nvidia/cuda/11.6u2

mkdir -p results bin

./scripts/build_cuda.sh

echo "===== account info ====="
whoami; hostname; date

echo
echo "===== slurm info ====="
echo "SLURM_JOB_ID=${SLURM_JOB_ID:-unknown}"
echo "SLURM_JOB_PARTITION=${SLURM_JOB_PARTITION:-unknown}"
echo "SLURM_NODELIST=${SLURM_NODELIST:-unknown}"
scontrol show job "${SLURM_JOB_ID}" || true

echo
echo "===== node config ====="
lscpu | head -20
nvidia-smi -L || true
nvidia-smi || true

# Header written once here; the binary appends one data row per size.
CSV="results/task2-cuda-${SLURM_JOB_ID}.csv"
echo "n,impl,time_ms,path_len,iterations" > "$CSV"

echo
echo "===== benchmark ====="
for n in 500 1000 2000 3000 5000; do
    echo "--- n=$n ---"
    # Args: polygon size, then 256 256 (presumably CUDA block dims —
    # confirm against wave_cuda's CLI), then the output CSV path.
    ./bin/wave_cuda "$n" 256 256 "$CSV"
done

echo
echo "===== done ====="
|
||||
54
task2/scripts/run_mpi.slurm
Normal file
54
task2/scripts/run_mpi.slurm
Normal file
@@ -0,0 +1,54 @@
|
||||
#!/usr/bin/env bash
#SBATCH --job-name=task2-mpi
#SBATCH --partition=tornado
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=56
#SBATCH --time=00:20:00
#SBATCH --output=results/%x-%j.out
#SBATCH --error=results/%x-%j.err

# SLURM batch job: build the MPI wave solver, log the allocation details,
# then benchmark it with one rank per node across a range of polygon sizes.
# The node count is not fixed here — choose it at submit time (sbatch -N ...).
set -euo pipefail

# sbatch starts jobs in the submit directory's filesystem context;
# cd there explicitly so relative paths (scripts/, bin/, results/) resolve.
cd "${SLURM_SUBMIT_DIR}"

# Toolchain: GCC 11 + OpenMPI.
module purge
module load compiler/gcc/11
module load mpi/openmpi

mkdir -p results bin

./scripts/build_mpi.sh

# One MPI rank per allocated node (matches --ntasks-per-node=1 above).
RANKS=${SLURM_JOB_NUM_NODES}

echo "===== account info ====="
whoami; hostname; date

echo
echo "===== slurm info ====="
echo "SLURM_JOB_ID=${SLURM_JOB_ID:-unknown}"
echo "SLURM_JOB_PARTITION=${SLURM_JOB_PARTITION:-unknown}"
echo "SLURM_JOB_NUM_NODES=${SLURM_JOB_NUM_NODES:-unknown}"
echo "SLURM_NODELIST=${SLURM_NODELIST:-unknown}"
echo "RANKS=${RANKS}"
scontrol show job "${SLURM_JOB_ID}" || true

echo
echo "===== node config ====="
lscpu | head -20
if [ -n "${SLURMD_NODENAME:-}" ]; then
    scontrol show node "${SLURMD_NODENAME}" || true
fi

# Header written once here; the binary appends one data row per size.
CSV="results/task2-mpi-${RANKS}n-${SLURM_JOB_ID}.csv"
echo "n,procs,time_ms,path_len,iterations" > "$CSV"

echo
echo "===== benchmark (${RANKS} nodes / ${RANKS} ranks) ====="
for n in 500 1000 2000 3000 5000; do
    echo "--- n=$n ---"
    # ppr:1:node pins exactly one rank per node; --bind-to none leaves
    # thread placement to the application / OS.
    mpirun -np "${RANKS}" --map-by ppr:1:node --bind-to none ./bin/wave_mpi "$n" "$CSV"
done

echo
echo "===== done ====="
|
||||
Reference in New Issue
Block a user