
Switch to new ao quant api for 8da4w (#8501) #8772

Merged
merged 4 commits on Mar 25, 2025
examples/models/llama/source_transformation/quantize.py (16 changes: 8 additions & 8 deletions)
@@ -136,14 +136,14 @@ def quantize( # noqa C901
     # Check for required args
     if group_size is None:
         raise Exception("For 8da4w quantization, group size must be specified.")
-    from torchao.quantization.quant_api import Int8DynActInt4WeightQuantizer
-
-    # 1. Quantize in checkpoint dtype.
-    model = Int8DynActInt4WeightQuantizer(
-        precision=checkpoint_torch_dtype, groupsize=group_size
-    ).quantize(model)
-    # 2. Set the computation dtype (what weights/acts dequantize to).
-    model = set_8da4w_computation_dtype(model, computation_torch_dtype)
+    from torchao.quantization import int8_dynamic_activation_int4_weight, quantize_
+    from torchao.utils import unwrap_tensor_subclass
+
+    quantize_(model, int8_dynamic_activation_int4_weight(group_size=group_size))
+    model = unwrap_tensor_subclass(model)
+
+    # TODO: deal with checkpoint / computation dtype decoupling.
+
     if verbose:
         print("quantized model:", model)
@@ -698,7 +698,7 @@ def convert_for_runtime(self) -> nn.Module:
     def quantized_model(self) -> nn.Module:
         model_updated_state_dict = self.create_quantized_state_dict(self.packed)
         self.convert_for_runtime()
-        self.mod.load_state_dict(model_updated_state_dict)
+        self.mod.load_state_dict(model_updated_state_dict, assign=True)
         return self.mod
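
The assign=True flag matters because the module produced by convert_for_runtime() holds placeholder parameters that need not match the quantized state dict's dtypes: a plain load_state_dict copies values into the existing tensors, while assign=True (available since PyTorch 2.1) binds the state dict tensors to the module directly, preserving their dtype and storage. A minimal sketch of the difference, with a toy Linear on the meta device standing in for the converted module:

    import torch
    import torch.nn as nn

    # A module built on the meta device has no real storage, so copying
    # into its parameters with a plain load_state_dict would fail.
    with torch.device("meta"):
        mod = nn.Linear(8, 8, bias=False)

    state_dict = {"weight": torch.randn(8, 8, dtype=torch.float16)}

    # assign=True replaces mod.weight with the state dict tensor itself,
    # keeping its dtype (float16) and real CPU storage instead of trying
    # to copy into the meta placeholder.
    mod.load_state_dict(state_dict, assign=True)
    print(mod.weight.dtype, mod.weight.device)  # torch.float16 cpu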

