Skip to content

Commit 33cb60d

Browse files
jerryzh168 authored and facebook-github-bot committed
[quant][pt2e] Rename _pt2e to pt2e (pytorch#104668)
Summary: Pull Request resolved: pytorch#104668 X-link: pytorch/executorch#3 att Test Plan: Imported from OSS Reviewed By: andrewor14 Differential Revision: D47202807 fbshipit-source-id: d6cdda700e7fecbdf2cc8b7940ef9245bd0da891
1 parent 63d1fb2 commit 33cb60d

24 files changed: +98 −65 lines changed

docs/source/quantization-support.rst

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -120,6 +120,13 @@ This module contains a few CustomConfig classes that's used in both eager mode a
120120
ConvertCustomConfig
121121
StandaloneModuleConfigEntry
122122

123+
torch.ao.quantization.pt2e (quantization in pytorch 2.0 export)
124+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
125+
126+
.. automodule:: torch.ao.quantization.pt2e
127+
.. automodule:: torch.ao.quantization.pt2e.quantizer
128+
.. automodule:: torch.ao.quantization.pt2e.representation
129+
123130
torch (quantization related functions)
124131
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
125132

test/inductor/test_inductor_freezing.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,13 +12,13 @@
1212
import torch
1313

1414
import torch._dynamo as torchdynamo
15-
import torch.ao.quantization._pt2e.quantizer.x86_inductor_quantizer as xiq
15+
import torch.ao.quantization.pt2e.quantizer.x86_inductor_quantizer as xiq
1616
from torch import nn
1717
from torch._inductor import config
1818
from torch._inductor.compile_fx import compile_fx
1919
from torch._inductor.utils import override_lowering, run_and_get_code
20-
from torch.ao.quantization._pt2e.quantizer import X86InductorQuantizer
2120
from torch.ao.quantization._quantize_pt2e import convert_pt2e, prepare_pt2e_quantizer
21+
from torch.ao.quantization.pt2e.quantizer import X86InductorQuantizer
2222
from torch.testing import FileCheck
2323
from torch.testing._internal.common_quantization import (
2424
skipIfNoDynamoSupport,

test/quantization/pt2e/test_graph_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
import torch
66
import torch._dynamo as torchdynamo
77

8-
from torch.ao.quantization._pt2e.graph_utils import (
8+
from torch.ao.quantization.pt2e.graph_utils import (
99
find_sequential_partitions,
1010
get_equivalent_types,
1111
update_equivalent_types_dict,

test/quantization/pt2e/test_quantize_pt2e.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
ObserverOrFakeQuantize,
1616
QConfigMapping,
1717
)
18-
from torch.ao.quantization._pt2e.quantizer import (
18+
from torch.ao.quantization.pt2e.quantizer import (
1919
ComposableQuantizer,
2020
DerivedQuantizationSpec,
2121
EmbeddingQuantizer,
@@ -27,10 +27,10 @@
2727
Quantizer,
2828
SharedQuantizationSpec,
2929
)
30-
from torch.ao.quantization._pt2e.quantizer.composable_quantizer import ( # noqa: F811
30+
from torch.ao.quantization.pt2e.quantizer.composable_quantizer import ( # noqa: F811
3131
ComposableQuantizer,
3232
)
33-
from torch.ao.quantization._pt2e.quantizer.qnnpack_quantizer import (
33+
from torch.ao.quantization.pt2e.quantizer.qnnpack_quantizer import (
3434
get_symmetric_quantization_config,
3535
)
3636
from torch.ao.quantization._quantize_pt2e import (
@@ -1774,7 +1774,7 @@ def __init__(self):
17741774
def forward(self, x, y):
17751775
return x + y
17761776

1777-
import torch.ao.quantization._pt2e.quantizer.qnnpack_quantizer as qq
1777+
import torch.ao.quantization.pt2e.quantizer.qnnpack_quantizer as qq
17781778

17791779
quantizer = QNNPackQuantizer()
17801780
operator_config = qq.get_symmetric_quantization_config(is_per_channel=True)
@@ -1799,7 +1799,7 @@ def __init__(self):
17991799
def forward(self, x, y):
18001800
return x + y
18011801

1802-
import torch.ao.quantization._pt2e.quantizer.qnnpack_quantizer as qq
1802+
import torch.ao.quantization.pt2e.quantizer.qnnpack_quantizer as qq
18031803

18041804
quantizer = QNNPackQuantizer()
18051805
operator_config = qq.get_symmetric_quantization_config(is_per_channel=True)

test/quantization/pt2e/test_x86inductor_quantizer.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
import torch
44
import torch._dynamo as torchdynamo
55
import torch.nn as nn
6-
from torch.ao.quantization._pt2e.quantizer import (
6+
from torch.ao.quantization.pt2e.quantizer import (
77
X86InductorQuantizer,
88
)
99
from torch.ao.quantization._quantize_pt2e import (
@@ -19,7 +19,7 @@
1919
from torch.testing._internal.common_quantized import override_quantized_engine
2020
from enum import Enum
2121
import itertools
22-
import torch.ao.quantization._pt2e.quantizer.x86_inductor_quantizer as xiq
22+
import torch.ao.quantization.pt2e.quantizer.x86_inductor_quantizer as xiq
2323
from torch.testing._internal.common_utils import skip_but_pass_in_sandcastle
2424

2525

torch/_dynamo/skipfiles.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -139,9 +139,9 @@ def _module_dir(m: types.ModuleType):
139139
# TODO: find a better way to express this path without having to import
140140
# `torch.ao.quantization._pt2e`, which interferes with memory profiling
141141
FILENAME_ALLOWLIST |= {
142-
_module_dir(torch) + "ao/quantization/_pt2e/qat_utils.py",
143-
_module_dir(torch) + "ao/quantization/_pt2e/quantizer/qnnpack_quantizer.py",
144-
_module_dir(torch) + "ao/quantization/_pt2e/representation/rewrite.py",
142+
_module_dir(torch) + "ao/quantization/pt2e/qat_utils.py",
143+
_module_dir(torch) + "ao/quantization/pt2e/quantizer/qnnpack_quantizer.py",
144+
_module_dir(torch) + "ao/quantization/pt2e/representation/rewrite.py",
145145
}
146146

147147
# TODO (zhxchen17) Make exportdb importable here.

torch/_inductor/freezing.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
from torch._inductor.compile_fx import fake_tensor_prop
1414
from torch._inductor.fx_passes.freezing_patterns import freezing_passes
1515
from torch._inductor.fx_passes.post_grad import view_to_reshape
16-
from torch.ao.quantization._pt2e.utils import _fuse_conv_bn_
16+
from torch.ao.quantization.pt2e.utils import _fuse_conv_bn_
1717
from torch.fx.experimental.proxy_tensor import make_fx
1818
from . import config
1919
from .decomposition import select_decomp_table

torch/ao/quantization/_quantize_pt2e.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,21 +1,21 @@
11
from torch.fx import GraphModule
22

3-
from ._pt2e.prepare import prepare
4-
from ._pt2e._propagate_annotation import propagate_annotation
5-
from ._pt2e.qat_utils import (
3+
from .pt2e.prepare import prepare
4+
from .pt2e._propagate_annotation import propagate_annotation
5+
from .pt2e.qat_utils import (
66
_fuse_conv_bn_qat,
77
_fold_conv_bn_qat,
88
)
9-
from ._pt2e.utils import (
9+
from .pt2e.utils import (
1010
_get_node_name_to_scope,
1111
_fuse_conv_bn_,
1212
_rearrange_weight_observer_for_decomposed_linear,
1313
)
14-
from ._pt2e.representation import reference_representation_rewrite
14+
from .pt2e.representation import reference_representation_rewrite
1515
from .fx.prepare import prepare as fx_prepare
1616
from .quantize_fx import _convert_to_reference_decomposed_fx
1717
from torch.ao.quantization import QConfigMapping
18-
from torch.ao.quantization._pt2e.quantizer import Quantizer
18+
from torch.ao.quantization.pt2e.quantizer import Quantizer
1919
from torch.ao.quantization.backend_config import BackendConfig
2020

2121
from typing import Any, Tuple

torch/ao/quantization/fx/prepare.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,7 @@
106106
PrepareCustomConfig,
107107
StandaloneModuleConfigEntry,
108108
)
109-
from torch.ao.quantization._pt2e.quantizer import (
109+
from torch.ao.quantization.pt2e.quantizer import (
110110
EdgeOrNode,
111111
QuantizationSpec,
112112
FixedQParamsQuantizationSpec,
File renamed without changes.

0 commit comments

Comments (0)