diff --git a/README.md b/README.md
index a573df54c8..8da9f67da2 100644
--- a/README.md
+++ b/README.md
@@ -39,12 +39,15 @@ python setup.py develop
 If you want to install from source run
 
 ```Shell
-python setup.py install
+python setup.py install
 ```
 
 ** Note:
-Since we are building pytorch c++/cuda extensions by default, running `pip install .` will
-not work.
+If you are running into any issues while building `ao` cpp extensions you can instead build using
+
+```shell
+USE_CPP=0 python setup.py install
+```
 
 ### Quantization
 
diff --git a/setup.py b/setup.py
index f5ff1128fa..014a36832c 100644
--- a/setup.py
+++ b/setup.py
@@ -19,6 +19,8 @@ def read_requirements(file_path):
 # Determine the package name based on the presence of an environment variable
 package_name = "torchao-nightly" if os.environ.get("TORCHAO_NIGHTLY") else "torchao"
 version_suffix = os.getenv("VERSION_SUFFIX", "")
+use_cpp = os.getenv('USE_CPP')
+
 
 # Version is year.month.date if using nightlies
 version = current_date if package_name == "torchao-nightly" else "0.2.0"
@@ -92,7 +94,7 @@ def get_extensions():
     package_data={
         "torchao.kernel.configs": ["*.pkl"],
     },
-    ext_modules=get_extensions(),
+    ext_modules=get_extensions() if use_cpp != "0" else None,
     install_requires=read_requirements("requirements.txt"),
     extras_require={"dev": read_requirements("dev-requirements.txt")},
     description="Package for applying ao techniques to GPU models",
diff --git a/test/dtypes/test_float6_e3m2.py b/test/dtypes/test_float6_e3m2.py
index c3365cffeb..304a78c563 100644
--- a/test/dtypes/test_float6_e3m2.py
+++ b/test/dtypes/test_float6_e3m2.py
@@ -5,6 +5,13 @@
     parametrize,
     run_tests,
 )
+
+try:
+    import torchao.ops
+except RuntimeError:
+    pytest.skip("torchao.ops not available")
+
+
 from torchao.dtypes.float6_e3m2 import to_float6_e3m2, from_float6_e3m2
 
 
diff --git a/test/test_ops.py b/test/test_ops.py
index 4e463b4e26..b20e029380 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -7,6 +7,11 @@
 from parameterized import parameterized
 import pytest
 
+try:
+    import torchao.ops
+except RuntimeError:
+    pytest.skip("torchao.ops not available")
+
 
 # torch.testing._internal.optests.generate_tests.OpCheckError: opcheck(op, ...):
 # test_faketensor failed with module 'torch' has no attribute '_custom_ops' (scroll up for stack trace)
diff --git a/torchao/__init__.py b/torchao/__init__.py
index c8f04c1d9e..676f5bdd52 100644
--- a/torchao/__init__.py
+++ b/torchao/__init__.py
@@ -1,12 +1,18 @@
 import torch
+import logging
+
 _IS_FBCODE = (
     hasattr(torch._utils_internal, "IS_FBSOURCE") and
     torch._utils_internal.IS_FBSOURCE
 )
 
 if not _IS_FBCODE:
-    from . import _C
-    from . import ops
+    try:
+        from . import _C
+        from . import ops
+    except:
+        _C = None
+        logging.info("Skipping import of cpp extensions")
 
 from torchao.quantization import (
     apply_weight_only_int8_quant,
diff --git a/torchao/dtypes/__init__.py b/torchao/dtypes/__init__.py
index d12a6da566..44077dab65 100644
--- a/torchao/dtypes/__init__.py
+++ b/torchao/dtypes/__init__.py
@@ -1,7 +1,6 @@
 from .nf4tensor import NF4Tensor, to_nf4
 from .uint4 import UInt4Tensor
 from .aqt import AffineQuantizedTensor, to_aq
-from .float6_e3m2 import to_float6_e3m2, from_float6_e3m2
 
 __all__ = [
     "NF4Tensor",
@@ -9,6 +8,11 @@
     "UInt4Tensor"
     "AffineQuantizedTensor",
     "to_aq",
-    "to_float6_e3m2",
-    "from_float6_e3m2",
 ]
+
+# CPP extensions
+try:
+    from .float6_e3m2 import to_float6_e3m2, from_float6_e3m2
+    __all__.extend(["to_float6_e3m2", "from_float6_e3m2"])
+except RuntimeError:
+    pass