From 054cdfb82cf1c7aba4c09cae764cffb82225c629 Mon Sep 17 00:00:00 2001
From: zengxian
Date: Fri, 5 Dec 2025 16:11:46 +0800
Subject: [PATCH 1/4] skip test WIP on xpu

---
 test/quantization/test_qat.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/test/quantization/test_qat.py b/test/quantization/test_qat.py
index 27d6d8bb85..e12248601e 100644
--- a/test/quantization/test_qat.py
+++ b/test/quantization/test_qat.py
@@ -102,6 +102,7 @@
     is_fbcode,
     is_sm_at_least_89,
 )
+from torchao.testing.utils import skip_if_xpu
 
 # TODO: put this in a common test utils file
 _CUDA_IS_AVAILABLE = torch.cuda.is_available()
@@ -2015,6 +2016,7 @@ def test_quantize_api_int8_intx(self, weight_dtype, weight_granularity, dtype):
         )
 
     @unittest.skipIf(_DEVICE is None, "skipping when GPU is not available")
+    @skip_if_xpu("XPU enablement in progress")
     @parametrize(
         "weight_dtype, granularity, dtype, module_type",
         [

From bb119e13762445e11eb97ac10a003b6a75a9ae81 Mon Sep 17 00:00:00 2001
From: zengxian
Date: Fri, 5 Dec 2025 16:15:06 +0800
Subject: [PATCH 2/4] skip test WIP on xpu

---
 test/quantization/test_qat.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/test/quantization/test_qat.py b/test/quantization/test_qat.py
index e12248601e..c519cb0254 100644
--- a/test/quantization/test_qat.py
+++ b/test/quantization/test_qat.py
@@ -696,10 +696,7 @@ def test_qat_4w_quantizer_gradients(self):
         self._test_qat_quantized_gradients(quantizer)
 
     @unittest.skipIf(_DEVICE is None, "skipping when GPU is not available")
-    @unittest.skipIf(
-        _DEVICE is torch.device("xpu"),
-        "skipped due to https://github.com/intel/torch-xpu-ops/issues/1770",
-    )
+    @skip_if_xpu("skipped due to https://github.com/intel/torch-xpu-ops/issues/1770")
     def test_qat_4w_quantizer(self):
         from torchao.quantization.GPTQ import Int4WeightOnlyQuantizer
         from torchao.quantization.qat import Int4WeightOnlyQATQuantizer

From 703e7f5f3a3f9ba60d51c3a235a81a8e4b7f1af4 Mon Sep 17 00:00:00 2001
From: zengxian
Date: Fri, 5 Dec 2025 16:30:21 +0800
Subject: [PATCH 3/4] skip test WIP on xpu

---
 test/quantization/test_qat.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/quantization/test_qat.py b/test/quantization/test_qat.py
index c519cb0254..300c5e4f48 100644
--- a/test/quantization/test_qat.py
+++ b/test/quantization/test_qat.py
@@ -96,13 +96,14 @@
     get_groupwise_affine_qparams,
     groupwise_affine_quantize_tensor,
 )
+from torchao.testing.utils import skip_if_xpu
 from torchao.utils import (
     _is_fbgemm_gpu_genai_available,
     get_current_accelerator_device,
     is_fbcode,
     is_sm_at_least_89,
 )
-from torchao.testing.utils import skip_if_xpu
+
 
 # TODO: put this in a common test utils file
 _CUDA_IS_AVAILABLE = torch.cuda.is_available()

From 339a3bf74b00cc671bdfc1035d83b174829e8509 Mon Sep 17 00:00:00 2001
From: zengxian
Date: Fri, 5 Dec 2025 16:32:13 +0800
Subject: [PATCH 4/4] skip test WIP on xpu

---
 test/quantization/test_qat.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/test/quantization/test_qat.py b/test/quantization/test_qat.py
index 300c5e4f48..1ca0daee9a 100644
--- a/test/quantization/test_qat.py
+++ b/test/quantization/test_qat.py
@@ -104,7 +104,6 @@
     is_sm_at_least_89,
 )
 
-
 # TODO: put this in a common test utils file
 _CUDA_IS_AVAILABLE = torch.cuda.is_available()
 _DEVICE = get_current_accelerator_device()
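
Note: the net effect of this series is to route the XPU test skips through a single
skip_if_xpu helper instead of repeated unittest.skipIf device checks. Below is a
minimal, illustrative sketch of how such a decorator can be layered on
unittest.skipIf; it is not the actual torchao.testing.utils.skip_if_xpu
implementation, and the torch.xpu.is_available() check and the ExampleXPUSkipTest
class are assumptions for demonstration only.

    # Minimal sketch (illustrative, not the torchao implementation) of a
    # skip_if_xpu-style decorator: it skips the wrapped test only when an
    # Intel XPU accelerator is available in the current PyTorch build.
    import unittest

    import torch


    def skip_if_xpu(reason: str):
        """Skip the decorated test when running on an XPU device (assumed check)."""
        on_xpu = hasattr(torch, "xpu") and torch.xpu.is_available()
        return unittest.skipIf(on_xpu, reason)


    class ExampleXPUSkipTest(unittest.TestCase):  # hypothetical test class
        @skip_if_xpu("skipped due to https://github.com/intel/torch-xpu-ops/issues/1770")
        def test_runs_everywhere_except_xpu(self):
            self.assertTrue(True)  # placeholder test body


    if __name__ == "__main__":
        unittest.main()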