-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbenchmark_gpu.sh
More file actions
executable file
·61 lines (50 loc) · 1.57 KB
/
benchmark_gpu.sh
File metadata and controls
executable file
·61 lines (50 loc) · 1.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
#!/bin/bash
#SBATCH --job-name=bench_transformers
#SBATCH --partition=agent-xlong
#SBATCH --gres=gpu:1
#SBATCH --output=slurm_out/%j.out
#SBATCH --time=1-00:00:00
# This script runs the performance_benchmark.py script to fill in the model variant performance table.
# It uses randomly initialized models and tests across multiple sequence lengths.
#
# NOTE(review): slurm_out/ must already exist at submission time — Slurm opens
# the --output file before this script runs, so it cannot be created here.
#
# To submit this job, run:
# sbatch benchmark_gpu.sh
#
# To monitor the job, use:
# squeue -u $USER

# Fail fast: abort on any command error, unset variable, or failed pipeline
# stage, instead of silently running the benchmark on a broken environment
# (e.g. after a failed `uv sync`).
set -euo pipefail

echo "--- Setting up environment for Benchmarking ---"

# Set OMP_NUM_THREADS to 1 for efficiency with torch
export OMP_NUM_THREADS=1

# Install uv (if not already installed)
if ! command -v uv >/dev/null 2>&1; then
  echo "uv could not be found, installing it now..."
  curl -LsSf https://astral.sh/uv/install.sh | sh
  # Put the freshly installed uv on PATH for the remainder of this job.
  source "$HOME/.local/bin/env"
fi

# Create and sync virtual environment
if [ ! -d ".venv" ]; then
  echo "Creating virtual environment..."
  uv venv
fi

echo "Installing dependencies with uv..."
# Generous HTTP timeout: cluster compute nodes often have slow egress.
export UV_HTTP_TIMEOUT=600
uv sync

# Activate venv
echo "Activating virtual environment..."
source .venv/bin/activate

echo "--- Starting benchmarking run ---"

# Sequence lengths to benchmark (comma-separated list consumed by
# performance_benchmark.py) and the model size to instantiate.
SEQ_LENGTHS="1024,2048,4096,8192,16384,32768"
MODEL_SIZE="0.5B"

# Execute the performance benchmark script
# It will iterate through Dense, MoD, SDT (Causal), and STT (Causal)
# and print the Markdown table to the output.
python performance_benchmark.py \
  --model_size "$MODEL_SIZE" \
  --sequence_lengths "$SEQ_LENGTHS" \
  --batch_size 1 \
  --num_runs 5 \
  --num_warmup_runs 2

echo "--- Benchmarking run finished ---"