import torch

import bitsandbytes
from bitsandbytes.functional import ipex_xpu
from tests.helpers import TRUE_FALSE, get_available_devices, id_formatter

# torch.library.opcheck is only available in torch 2.4 and later.
@@ -144,7 +145,7 @@ def test_dequantize_blockwise(self, device, dtype, blocksize):
144145 assert out .device == A .device
145146
146147 # TODO: Enable it
147- if device == "xpu" :
148+ if device == "xpu" and ipex_xpu :
148149 pytest .skip ("XPU implementation have torch.op inside torch.op, it will fail on op check" )
149150
150151 opcheck (torch .ops .bitsandbytes .dequantize_blockwise .default , (A , absmax , code , blocksize , dtype ))
@@ -170,7 +171,7 @@ def test_quantize_4bit(self, device, dtype, storage_dtype, quant_type, blocksize
170171 if storage_dtype != torch .uint8 :
171172 pytest .xfail ("opcheck fails for storage_dtype != torch.uint8" )
172173
173- opcheck (torch .ops .bitsandbytes .quantize_4bit , (A , blocksize , quant_type , storage_dtype ))
174+ opcheck (torch .ops .bitsandbytes .quantize_4bit . default , (A , blocksize , quant_type , storage_dtype ))
174175
175176 @pytest .mark .parametrize ("device" , get_available_devices ())
176177 @pytest .mark .parametrize ("dtype" , [torch .float16 , torch .bfloat16 , torch .float32 ], ids = id_formatter ("dtype" ))
0 commit comments