From e428e11947b0c781becb1ed007c2d2dd45835f99 Mon Sep 17 00:00:00 2001 From: Ankit Kumar Date: Fri, 13 Mar 2026 09:16:50 +0000 Subject: [PATCH 1/6] Added recipe for Qwen3-235b --- .../tensorrt-llm/READEME.md | 156 ++++++++++++++++++ 1 file changed, 156 insertions(+) create mode 100644 inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md diff --git a/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md b/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md new file mode 100644 index 00000000..4330a547 --- /dev/null +++ b/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md @@ -0,0 +1,156 @@ +# Single host inference benchmark of Qwen3-235B with TensorRT-LLM on G4 + +This recipe shows how to serve and benchmark the Qwen3-4B model using [NVIDIA TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) on a single GCP VM with G4 GPUs. For more information on G4 machine types, see the [GCP documentation](https://cloud.google.com/compute/docs/accelerator-optimized-machines#g4-machine-types). + +## Before you begin + +### 1. Create a GCP VM with G4 GPUs + +First, we will create a Google Cloud Platform (GCP) Virtual Machine (VM) that has the necessary GPU resources. + +Make sure you have the following prerequisites: +* [Google Cloud SDK](https://cloud.google.com/sdk/docs/install) is initialized. +* You have a project with a GPU quota. See [Request a quota increase](https://cloud.google.com/docs/quota/view-request#requesting_higher_quota). +* [Enable required APIs](https://console.cloud.google.com/flows/enableapi?apiid=compute.googleapis.com). + +The following commands set up environment variables and create a GCE instance. The `MACHINE_TYPE` is set to `g4-standard-48` for a single GPU VM. The boot disk is set to 200GB to accommodate the models and dependencies. 
+ +```bash +export VM_NAME="${USER}-g4-trtllm-qwen3-235b" +export PROJECT_ID="your-project-id" +export ZONE="your-zone" +export MACHINE_TYPE="g4-standard-48" +export IMAGE_PROJECT="ubuntu-os-accelerator-images" +export IMAGE_FAMILY="ubuntu-accelerator-2404-amd64-with-nvidia-570" + +gcloud compute instances create ${VM_NAME} \ + --machine-type=${MACHINE_TYPE} \ + --project=${PROJECT_ID} \ + --zone=${ZONE} \ + --image-project=${IMAGE_PROJECT} \ + --image-family=${IMAGE_FAMILY} \ + --maintenance-policy=TERMINATE \ + --boot-disk-size=200GB +``` + +### 2. Connect to the VM + +Use `gcloud compute ssh` to connect to the newly created instance. + +```bash +gcloud compute ssh ${VM_NAME?} --project=${PROJECT_ID?} --zone=${ZONE?} +``` + +```bash +# Run NVIDIA smi to verify the driver installation and see the available GPUs. +nvidia-smi +``` + +## Serve a model + +### 1. Install Docker + +Before you can serve the model, you need to have Docker installed on your VM. You can follow the official documentation to install Docker on Ubuntu: +[Install Docker Engine on Ubuntu](https://docs.docker.com/engine/install/ubuntu/) + +After installing Docker, make sure the Docker daemon is running. + +### 2. Install NVIDIA Container Toolkit + +To enable Docker containers to access the GPU, you need to install the NVIDIA Container Toolkit. + +You can follow the official NVIDIA documentation to install the container toolkit: +[NVIDIA Container Toolkit Install Guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) + +### 3. 
Setup TensorRT-LLM + +```bash +sudo apt-get update +sudo apt-get -y install git git-lfs + +git clone https://github.com/NVIDIA/TensorRT-LLM.git +cd TensorRT-LLM +git checkout v1.2.0rc3 +git submodule update --init --recursive +git lfs install +git lfs pull + +# Build the Docker image +make -C docker release_build + +# Run the Docker container +mkdir -p /scratch/cache +make -C docker release_run DOCKER_RUN_ARGS="-v /scratch:/scratch -v /scratch/cache:/root/.cache --ipc=host" +``` + +Now you are inside the container. + +### 4. Download and Quantize the Model + +```bash +# Inside the container + +# Download the base model from Hugging Face +apt-get update && apt-get install -y huggingface-cli + +huggingface-cli download Qwen/Qwen3-235B --local-dir /scratch/models/Qwen3-235B + +# Quantize the model using FP8 +python examples/llm_ptq/hf_ptq.py \ + --pyt_ckpt_path /scratch/models/Qwen3-235B \ + --qformat fp8 \ + --export_path /scratch/models/exported_model_qwen3_235b_fp8 \ + --trust_remote_code +``` + +## Run Benchmarks + +Create a script to run the benchmarks with different configurations. 
+ +```bash +# Inside the container + +cat << 'EOF' > /scratch/run_benchmark.sh +#!/bin/bash + +# Function to run benchmarks +run_benchmark() { + local model_name=$1 + local isl=$2 + local osl=$3 + local num_requests=$4 + local tp_size=$5 + local pp_size=$6 + local ep_size=$7 + + echo "Running benchmark for $model_name with ISL=$isl, OSL=$osl, TP=$tp_size, PP=$pp_size, EP=$ep_size" + + dataset_file="/scratch/token-norm-dist_${model_name##*/}_${isl}_${osl}.json" + + python benchmarks/cpp/prepare_dataset.py --tokenizer=$model_name --stdout token-norm-dist --num-requests=$num_requests --input-mean=$isl --output-mean=$osl --input-stdev=0 --output-stdev=0 > $dataset_file + + # Save throughput output to a file + trtllm-bench --model $model_name --model_path ${model_name} throughput --concurrency 128 --dataset $dataset_file --tp $tp_size --pp $pp_size --ep $ep_size --backend pytorch > "/scratch/output_${model_name##*/}_isl${isl}_osl${osl}_tp${tp_size}_pp${pp_size}_ep${ep_size}_throughput.txt" + + rm -f $dataset_file +} + +model_name="/scratch/models/exported_model_qwen3_235b_fp8" +TP_SIZE=1 +PP_SIZE=1 +EP_SIZE=1 + +run_benchmark "$model_name" 128 128 1024 $TP_SIZE $PP_SIZE $EP_SIZE +run_benchmark "$model_name" 128 2048 1024 $TP_SIZE $PP_SIZE $EP_SIZE +run_benchmark "$model_name" 128 4096 1024 $TP_SIZE $PP_SIZE $EP_SIZE +run_benchmark "$model_name" 500 2000 1024 $TP_SIZE $PP_SIZE $EP_SIZE +run_benchmark "$model_name" 1000 1000 1024 $TP_SIZE $PP_SIZE $EP_SIZE +run_benchmark "$model_name" 2048 128 1024 $TP_SIZE $PP_SIZE $EP_SIZE +run_benchmark "$model_name" 2048 2048 1024 $TP_SIZE $PP_SIZE $EP_SIZE +run_benchmark "$model_name" 5000 500 1024 $TP_SIZE $PP_SIZE $EP_SIZE +run_benchmark "$model_name" 20000 2000 1024 $TP_SIZE $PP_SIZE $EP_SIZE +EOF + +chmod +x /scratch/run_benchmark.sh +/scratch/run_benchmark.sh + From 47f7068a5ce4ca5de03e4b260a0b25c440b2efa8 Mon Sep 17 00:00:00 2001 From: ankitkumar-quad Date: Mon, 16 Mar 2026 15:38:18 +0530 Subject: [PATCH 2/6] Update READEME.md 
--- .../single-host-serving/tensorrt-llm/READEME.md | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md b/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md index 4330a547..c770099c 100644 --- a/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md +++ b/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md @@ -1,6 +1,6 @@ # Single host inference benchmark of Qwen3-235B with TensorRT-LLM on G4 -This recipe shows how to serve and benchmark the Qwen3-4B model using [NVIDIA TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) on a single GCP VM with G4 GPUs. For more information on G4 machine types, see the [GCP documentation](https://cloud.google.com/compute/docs/accelerator-optimized-machines#g4-machine-types). +This recipe shows how to serve and benchmark the Qwen235B model using [NVIDIA TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) on a single GCP VM with G4 GPUs. For more information on G4 machine types, see the [GCP documentation](https://cloud.google.com/compute/docs/accelerator-optimized-machines#g4-machine-types). ## Before you begin @@ -13,13 +13,13 @@ Make sure you have the following prerequisites: * You have a project with a GPU quota. See [Request a quota increase](https://cloud.google.com/docs/quota/view-request#requesting_higher_quota). * [Enable required APIs](https://console.cloud.google.com/flows/enableapi?apiid=compute.googleapis.com). -The following commands set up environment variables and create a GCE instance. The `MACHINE_TYPE` is set to `g4-standard-48` for a single GPU VM. The boot disk is set to 200GB to accommodate the models and dependencies. +The following commands set up environment variables and create a GCE instance. The `MACHINE_TYPE` is set to `g4-standard-384` for a single GPU VM. The boot disk is set to 200GB to accommodate the models and dependencies. 
```bash export VM_NAME="${USER}-g4-trtllm-qwen3-235b" export PROJECT_ID="your-project-id" export ZONE="your-zone" -export MACHINE_TYPE="g4-standard-48" +export MACHINE_TYPE="g4-standard-384" export IMAGE_PROJECT="ubuntu-os-accelerator-images" export IMAGE_FAMILY="ubuntu-accelerator-2404-amd64-with-nvidia-570" @@ -95,14 +95,23 @@ apt-get update && apt-get install -y huggingface-cli huggingface-cli download Qwen/Qwen3-235B --local-dir /scratch/models/Qwen3-235B +``` + # Quantize the model using FP8 +```bash +git clone https://github.com/NVIDIA/TensorRT-Model-Optimizer.git + +pushd TensorRT-Model-Optimizer + +pip install -e . + python examples/llm_ptq/hf_ptq.py \ --pyt_ckpt_path /scratch/models/Qwen3-235B \ --qformat fp8 \ --export_path /scratch/models/exported_model_qwen3_235b_fp8 \ --trust_remote_code -``` +``` ## Run Benchmarks Create a script to run the benchmarks with different configurations. From b07a2dfaf7e65f03d24b57b76e4849beaa89a76b Mon Sep 17 00:00:00 2001 From: ankitkumar-quad Date: Mon, 16 Mar 2026 15:39:58 +0530 Subject: [PATCH 3/6] Update READEME.md --- .../g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md b/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md index c770099c..25db1dfa 100644 --- a/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md +++ b/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md @@ -13,7 +13,7 @@ Make sure you have the following prerequisites: * You have a project with a GPU quota. See [Request a quota increase](https://cloud.google.com/docs/quota/view-request#requesting_higher_quota). * [Enable required APIs](https://console.cloud.google.com/flows/enableapi?apiid=compute.googleapis.com). -The following commands set up environment variables and create a GCE instance. The `MACHINE_TYPE` is set to `g4-standard-384` for a single GPU VM. 
The boot disk is set to 200GB to accommodate the models and dependencies. +The following commands set up environment variables and create a GCE instance. The `MACHINE_TYPE` is set to `g4-standard-384` for 8 GPU VM. The boot disk is set to 200GB to accommodate the models and dependencies. ```bash export VM_NAME="${USER}-g4-trtllm-qwen3-235b" From 567597e18638bc47036af6c0e240062fd01b1920 Mon Sep 17 00:00:00 2001 From: ankitkumar-quad Date: Mon, 16 Mar 2026 15:41:19 +0530 Subject: [PATCH 4/6] Update READEME.md --- .../g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md b/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md index 25db1dfa..a7566464 100644 --- a/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md +++ b/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md @@ -97,7 +97,7 @@ huggingface-cli download Qwen/Qwen3-235B --local-dir /scratch/models/Qwen3-235B ``` -# Quantize the model using FP8 +#### Quantize the model using FP8 ```bash git clone https://github.com/NVIDIA/TensorRT-Model-Optimizer.git From 261d7fcc3f0d20f663ceeb9bcee6813b7f977b52 Mon Sep 17 00:00:00 2001 From: ankitkumar-quad Date: Mon, 16 Mar 2026 16:07:05 +0530 Subject: [PATCH 5/6] Update READEME.md --- .../single-host-serving/tensorrt-llm/READEME.md | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md b/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md index a7566464..48525372 100644 --- a/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md +++ b/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md @@ -1,6 +1,6 @@ # Single host inference benchmark of Qwen3-235B with TensorRT-LLM on G4 -This recipe shows how to serve and benchmark the Qwen235B model using [NVIDIA 
TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) on a single GCP VM with G4 GPUs. For more information on G4 machine types, see the [GCP documentation](https://cloud.google.com/compute/docs/accelerator-optimized-machines#g4-machine-types). +This recipe shows how to serve and benchmark the Qwen3-235B model using [NVIDIA TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) on a single GCP VM with G4 GPUs. For more information on G4 machine types, see the [GCP documentation](https://cloud.google.com/compute/docs/accelerator-optimized-machines#g4-machine-types). ## Before you begin @@ -13,13 +13,13 @@ Make sure you have the following prerequisites: * You have a project with a GPU quota. See [Request a quota increase](https://cloud.google.com/docs/quota/view-request#requesting_higher_quota). * [Enable required APIs](https://console.cloud.google.com/flows/enableapi?apiid=compute.googleapis.com). -The following commands set up environment variables and create a GCE instance. The `MACHINE_TYPE` is set to `g4-standard-384` for 8 GPU VM. The boot disk is set to 200GB to accommodate the models and dependencies. +The following commands set up environment variables and create a GCE instance. The `MACHINE_TYPE` is set to `g4-standard-192` for a 4-GPU VM. The boot disk is set to 200GB to accommodate the models and dependencies. ```bash export VM_NAME="${USER}-g4-trtllm-qwen3-235b" export PROJECT_ID="your-project-id" export ZONE="your-zone" -export MACHINE_TYPE="g4-standard-384" +export MACHINE_TYPE="g4-standard-192" export IMAGE_PROJECT="ubuntu-os-accelerator-images" export IMAGE_FAMILY="ubuntu-accelerator-2404-amd64-with-nvidia-570" @@ -83,10 +83,6 @@ mkdir -p /scratch/cache make -C docker release_run DOCKER_RUN_ARGS="-v /scratch:/scratch -v /scratch/cache:/root/.cache --ipc=host" ``` -Now you are inside the container. - -### 4. 
Download and Quantize the Model - ```bash # Inside the container From ab16df4e2af0fdac96f533bbf219504859a373e3 Mon Sep 17 00:00:00 2001 From: ankitkumar-quad Date: Mon, 16 Mar 2026 16:14:04 +0530 Subject: [PATCH 6/6] Update READEME.md --- .../g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md b/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md index 48525372..221083a1 100644 --- a/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md +++ b/inference/g4/qwen3_235b/single-host-serving/tensorrt-llm/READEME.md @@ -82,6 +82,7 @@ make -C docker release_build mkdir -p /scratch/cache make -C docker release_run DOCKER_RUN_ARGS="-v /scratch:/scratch -v /scratch/cache:/root/.cache --ipc=host" ``` +### 4. Download the Model ```bash # Inside the container @@ -93,7 +94,7 @@ huggingface-cli download Qwen/Qwen3-235B --local-dir /scratch/models/Qwen3-235B ``` -#### Quantize the model using FP8 +### 5. Quantize the model using FP8 ```bash git clone https://github.com/NVIDIA/TensorRT-Model-Optimizer.git