
Commit 04ab9d6

mcr229 authored and facebook-github-bot committed
Fix Long Term Quant Testing (#78)
Summary:
Pull Request resolved: #78

The long-term quant flow seems to use its own custom graph capture before running prepare and convert. Let us mirror this so we are testing with the proper quant flow: https://fb.workplace.com/groups/257735836456307/permalink/545316467698241/

The change here is that Quantize2 now runs in the stage before Export. Testing-wise, the stages are as follows:

```
export.capture_pre_autograd_graph --> prepare --> convert --> exir.capture()
|--------------------------------------------------------|    |-------------|
                     Quantize2(stage)                           Export(stage)
```

Reviewed By: digantdesai

Differential Revision: D48488929

fbshipit-source-id: 3165a5fb2a11a262d4d4f2a94a349becd451eecb
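To make the diagram concrete, here is a minimal sketch of the long-term (PT2E) quantization flow being mirrored, written against the APIs as they existed around this commit. The import paths, the XNNPACKQuantizer/get_symmetric_quantization_config setup, and the `add_module`/`model_inputs` placeholders are assumptions for illustration, not part of the patch.

```python
import torch
import torch._export as export

from executorch import exir
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from torch.ao.quantization.quantizer.xnnpack_quantizer import (  # path may differ by release
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)

# Hypothetical eager module and example inputs, standing in for the test's add_module.
add_module = torch.nn.Linear(4, 4)
model_inputs = (torch.randn(1, 4),)

# Quantize2 stage: pre-autograd capture, then prepare -> calibrate -> convert.
captured = export.capture_pre_autograd_graph(add_module, model_inputs)
quantizer = XNNPACKQuantizer()
quantizer.set_global(get_symmetric_quantization_config())
prepared = prepare_pt2e(captured, quantizer)
prepared(*model_inputs)  # calibration pass
converted = convert_pt2e(prepared)

# Export stage: exir.capture() runs on the already-converted graph.
edge_program = exir.capture(converted, model_inputs).to_edge()
```

The ordering is the whole point of the fix: capture_pre_autograd_graph, prepare, and convert all happen before exir.capture(), which is why the Quantize2 stage now precedes the Export stage in the tester.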
1 parent 707a7c2 commit 04ab9d6

File tree

2 files changed: +11, -11 lines changed


backends/xnnpack/test/ops/add.py (+1, -1)

```diff
@@ -75,9 +75,9 @@ def test_add_quantized_pt2e(self):
 
         (
             Tester(add_module, model_inputs)
+            .quantize2()
             .export()
             .check_count({"torch.ops.aten.add.Tensor": 4})
-            .quantize2()
             .check(["torch.ops.quantized_decomposed"])
             .to_edge()
             .check_count({"executorch_exir_dialects_edge__ops_aten_add_Tensor": 4})
```

backends/xnnpack/test/tester/tester.py (+10, -10)

```diff
@@ -10,6 +10,7 @@
 from typing import Any, Dict, List, Optional, Tuple
 
 import torch
+import torch._export as export
 from executorch import exir
 from executorch.backends.xnnpack.partition.xnnpack_partitioner import (
     XnnpackFloatingPointPartitioner,
@@ -145,23 +146,23 @@ def __init__(
 
         self.quantizer.set_global(self.quantization_config)
 
-        self.converted_program = None
+        self.converted_graph = None
 
     def run(
-        self, artifact: ExirExportedProgram, inputs: Optional[Tuple[torch.Tensor]]
+        self, artifact: torch.nn.Module, inputs: Optional[Tuple[torch.Tensor]]
     ) -> None:
-        prepared = prepare_pt2e(artifact.exported_program.graph_module, self.quantizer)
+        captured_graph = export.capture_pre_autograd_graph(artifact, inputs)
+        prepared = prepare_pt2e(captured_graph, self.quantizer)
         converted = convert_pt2e(prepared)
-        artifact.exported_program._graph_module = converted
-        self.converted_program = artifact
+        self.converted_graph = converted
 
     @property
-    def artifact(self) -> ExirExportedProgram:
-        return self.converted_program
+    def artifact(self) -> torch.fx.GraphModule:
+        return self.converted_graph
 
     @property
     def graph_module(self) -> str:
-        return self.converted_program.exported_program.graph_module
+        return self.converted_graph
 
 
 @register_stage
@@ -274,12 +275,11 @@ def __init__(
         self.inputs = inputs
         self.stages: Dict[str, Stage] = OrderedDict.fromkeys(list(_stages_.keys()))
         self.pipeline = {
+            self._stage_name(Quantize2): [self._stage_name(Export)],
             self._stage_name(Quantize): [self._stage_name(Export)],
             self._stage_name(Export): [
-                self._stage_name(Quantize2),
                 self._stage_name(ToEdge),
             ],
-            self._stage_name(Quantize2): [self._stage_name(ToEdge)],
             self._stage_name(ToEdge): [self._stage_name(Partition)],
             # TODO Make this Stage optional
             self._stage_name(Partition): [self._stage_name(ToExecutorch)],
```
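Putting the tester.py hunks together, the Quantize2 stage after this change looks roughly like the sketch below. The class scaffolding (the register_stage decorator, base Stage class, and quantizer construction in __init__) is paraphrased from context rather than copied from the diff, so treat it as an approximation, not the exact source.

```python
from typing import Optional, Tuple

import torch
import torch._export as export
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e


class Quantize2:
    """Approximate post-patch shape of the tester's Quantize2 stage."""

    def __init__(self, quantizer, quantization_config):
        # Scaffolding paraphrased; the real stage lives behind @register_stage.
        self.quantizer = quantizer
        self.quantization_config = quantization_config
        self.quantizer.set_global(self.quantization_config)
        self.converted_graph = None

    def run(
        self, artifact: torch.nn.Module, inputs: Optional[Tuple[torch.Tensor]]
    ) -> None:
        # The stage now receives the eager module and performs its own
        # pre-autograd capture before prepare/convert, mirroring the real flow.
        captured_graph = export.capture_pre_autograd_graph(artifact, inputs)
        prepared = prepare_pt2e(captured_graph, self.quantizer)
        converted = convert_pt2e(prepared)
        self.converted_graph = converted

    @property
    def artifact(self) -> torch.fx.GraphModule:
        return self.converted_graph

    @property
    def graph_module(self) -> str:  # return annotation kept as in the original source
        return self.converted_graph
```

Because the stage now consumes the eager nn.Module rather than an ExirExportedProgram, the pipeline map moves Quantize2 ahead of Export (Quantize2 -> Export -> ToEdge -> ...), which is exactly the reordering the add.py test exercises.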
