Skip to content

Commit 30dff4f

Browse files
committed
Remove fpx
1 parent 12e2a7c commit 30dff4f

File tree

1 file changed

+0
-13
lines changed

1 file changed

+0
-13
lines changed

test/dtypes/test_affine_quantized_tensor_parallel.py

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@
1212
from torchao.quantization import (
1313
float8_dynamic_activation_float8_weight,
1414
float8_weight_only,
15-
fpx_weight_only,
1615
int4_weight_only,
1716
int8_dynamic_activation_int8_weight,
1817
int8_weight_only,
@@ -179,22 +178,10 @@ def test_tp(self, dtype):
179178
return self._test_tp(dtype)
180179

181180

182-
class TestFpxwoAffineQuantizedTensorParallel(TestAffineQuantizedTensorParallel):
183-
QUANT_METHOD_FN = staticmethod(fpx_weight_only)
184-
COMMON_DTYPES = [torch.bfloat16]
185-
186-
@common_utils.parametrize("dtype", COMMON_DTYPES)
187-
@with_comms
188-
@unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
189-
def test_tp(self, dtype):
190-
return self._test_tp(dtype)
191-
192-
193181
common_utils.instantiate_parametrized_tests(TestInt8woAffineQuantizedTensorParallel)
194182
common_utils.instantiate_parametrized_tests(TestInt4woAffineQuantizedTensorParallel)
195183
common_utils.instantiate_parametrized_tests(TestGemliteLayoutTensorParallel)
196184
common_utils.instantiate_parametrized_tests(TestInt8dqAffineQuantizedTensorParallel)
197-
common_utils.instantiate_parametrized_tests(TestFpxwoAffineQuantizedTensorParallel)
198185

199186
# Run only on H100
200187
if torch.cuda.is_available() and torch.cuda.get_device_capability() >= (9, 0):

0 commit comments

Comments (0)