
Commit a37193c (2 parents: f78320d + ab3b3f0)

Commit message: rebase

[ghstack-poisoned]

97 files changed, +2584 -1509 lines


.github/scripts/run_nm.py

Lines changed: 171 additions & 0 deletions
@@ -0,0 +1,171 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import re
+import subprocess
+import sys
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Union
+
+
+@dataclass
+class Symbol:
+    name: str
+    addr: int
+    size: int
+    symbol_type: str
+
+
+class Parser:
+    def __init__(self, elf: str, toolchain_prefix: str = "", filter=None):
+        self.elf = elf
+        self.toolchain_prefix = toolchain_prefix
+        self.symbols: Dict[str, Symbol] = self._get_nm_output()
+        self.filter = filter
+
+    @staticmethod
+    def run_nm(
+        elf_file_path: str, args: Optional[List[str]] = None, nm: str = "nm"
+    ) -> str:
+        """
+        Run the nm command on the specified ELF file.
+        """
+        args = [] if args is None else args
+        cmd = [nm] + args + [elf_file_path]
+        try:
+            result = subprocess.run(cmd, check=True, capture_output=True, text=True)
+            return result.stdout
+        except FileNotFoundError:
+            print("Error: 'nm' command not found. Please ensure it's installed.")
+            sys.exit(1)
+        except subprocess.CalledProcessError as e:
+            print(f"Error running nm on {elf_file_path}: {e}")
+            print(f"stderr: {e.stderr}")
+            sys.exit(1)
+
+    def _get_nm_output(self) -> Dict[str, Symbol]:
+        args = [
+            "--print-size",
+            "--size-sort",
+            "--reverse-sort",
+            "--demangle",
+            "--format=bsd",
+        ]
+        output = Parser.run_nm(
+            self.elf,
+            args,
+            nm=self.toolchain_prefix + "nm" if self.toolchain_prefix else "nm",
+        )
+        lines = output.splitlines()
+        symbols = []
+        symbol_pattern = re.compile(
+            r"(?P<addr>[0-9a-fA-F]+)\s+(?P<size>[0-9a-fA-F]+)\s+(?P<type>\w)\s+(?P<name>.+)"
+        )
+
+        def parse_line(line: str) -> Optional[Symbol]:
+
+            match = symbol_pattern.match(line)
+            if match:
+                addr = int(match.group("addr"), 16)
+                size = int(match.group("size"), 16)
+                type_ = match.group("type").strip().strip("\n")
+                name = match.group("name").strip().strip("\n")
+                return Symbol(name=name, addr=addr, size=size, symbol_type=type_)
+            return None
+
+        for line in lines:
+            symbol = parse_line(line)
+            if symbol:
+                symbols.append(symbol)
+
+        assert len(symbols) > 0, "No symbols found in nm output"
+        if len(symbols) != len(lines):
+            print(
+                f"** Warning: Not all lines were parsed, check the output of nm. Parsed {len(symbols)} lines, given {len(lines)}"
+            )
+        if any(symbol.size == 0 for symbol in symbols):
+            print("** Warning: Some symbols have zero size, check the output of nm.")
+
+        # TODO: Populate the section and module fields from the linker map if available (-Wl,-Map=linker.map)
+        return {symbol.name: symbol for symbol in symbols}
+
+    def print(self):
+        print(f"Elf: {self.elf}")
+
+        def print_table(filter=None, filter_name=None):
+            print("\nAddress\t\tSize\tType\tName")
+            # Apply filter and sort symbols
+            symbols_to_print = {
+                name: sym
+                for name, sym in self.symbols.items()
+                if not filter or filter(sym)
+            }
+            sorted_symbols = sorted(
+                symbols_to_print.items(), key=lambda x: x[1].size, reverse=True
+            )
+
+            # Print symbols and calculate total size
+            size_total = 0
+            for name, sym in sorted_symbols:
+                print(f"{hex(sym.addr)}\t\t{sym.size}\t{sym.symbol_type}\t{sym.name}")
+                size_total += sym.size
+
+            # Print summary
+            symbol_percent = len(symbols_to_print) / len(self.symbols) * 100
+            print("-----")
+            print(f"> Total bytes: {size_total}")
+            print(
+                f"Counted: {len(symbols_to_print)}/{len(self.symbols)}, {symbol_percent:0.2f}% (filter: '{filter_name}')"
+            )
+            print("=====\n")
+
+        # Print tables with different filters
+        def is_executorch_symbol(s):
+            return "executorch" in s.name or s.name.startswith("et")
+
+        FILTER_NAME_TO_FILTER_AND_LABEL = {
+            "all": (None, "All"),
+            "executorch": (is_executorch_symbol, "ExecuTorch"),
+            "executorch_text": (
+                lambda s: is_executorch_symbol(s) and s.symbol_type.lower() == "t",
+                "ExecuTorch .text",
+            ),
+        }

+        filter_func, label = FILTER_NAME_TO_FILTER_AND_LABEL.get(
+            self.filter, FILTER_NAME_TO_FILTER_AND_LABEL["all"]
+        )
+        print_table(filter_func, label)
+
+
+if __name__ == "__main__":
+    import argparse
+
+    parser = argparse.ArgumentParser(
+        description="Process ELF file and linker map file."
+    )
+    parser.add_argument(
+        "-e", "--elf-file-path", required=True, help="Path to the ELF file"
+    )
+    parser.add_argument(
+        "-f",
+        "--filter",
+        required=False,
+        default="all",
+        help="Filter symbols by pre-defined filters",
+        choices=["all", "executorch", "executorch_text"],
+    )
+    parser.add_argument(
+        "-p",
+        "--toolchain-prefix",
+        required=False,
+        default="",
+        help="Optional toolchain prefix for nm",
+    )
+
+    args = parser.parse_args()
+    p = Parser(args.elf_file_path, args.toolchain_prefix, filter=args.filter)
+    p.print()
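
Note: a minimal usage sketch for the script above (not part of the commit). It assumes the repo root as the working directory, `.github/scripts` added to the Python path, and an ELF already built at the path used by the CI job below.

import sys

sys.path.insert(0, ".github/scripts")  # assumption: running from the repo root
from run_nm import Parser  # Parser class from the file above

# Mirrors the CLI flags -e/-f/-p used in the CI job below.
p = Parser(
    "cmake-out/test/size_test",  # assumed ELF path, produced by test/build_size_test.sh
    toolchain_prefix="arm-none-eabi-",  # so arm-none-eabi-nm is invoked instead of plain nm
    filter="executorch_text",  # one of: all, executorch, executorch_text
)
p.print()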

.github/workflows/trunk.yml

Lines changed: 54 additions & 0 deletions
@@ -231,6 +231,60 @@ jobs:
         # Run arm unit tests using the simulator
         backends/arm/test/test_arm_baremetal.sh test_pytest_ethosu_fvp

+  test-arm-cortex-m-size-test:
+    name: test-arm-cortex-m-size-test
+    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
+    permissions:
+      id-token: write
+      contents: read
+    with:
+      runner: linux.2xlarge
+      docker-image: executorch-ubuntu-22.04-arm-sdk
+      submodules: 'true'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 90
+      script: |
+        # The generic Linux job chooses to use base env, not the one setup by the image
+        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+        conda activate "${CONDA_ENV}"
+
+        source .ci/scripts/utils.sh
+        install_executorch "--use-pt-pinned-commit"
+        .ci/scripts/setup-arm-baremetal-tools.sh
+        source examples/arm/ethos-u-scratch/setup_path.sh
+
+        # Use baremetal toolchain
+        arm-none-eabi-c++ --version
+        toolchain_cmake=examples/arm/ethos-u-setup/arm-none-eabi-gcc.cmake
+        toolchain_cmake=$(realpath ${toolchain_cmake})
+
+        # Build and test size test
+        bash test/build_size_test.sh "-DCMAKE_TOOLCHAIN_FILE=${toolchain_cmake} -DEXECUTORCH_BUILD_ARM_BAREMETAL=ON"
+        elf="cmake-out/test/size_test"
+
+        # Dump basic info
+        ls -al ${elf}
+        arm-none-eabi-size ${elf}
+
+        # Dump symbols
+        python .github/scripts/run_nm.py -e ${elf}
+        python .github/scripts/run_nm.py -e ${elf} -f "executorch" -p "arm-none-eabi-"
+        python .github/scripts/run_nm.py -e ${elf} -f "executorch_text" -p "arm-none-eabi-"
+
+        # Add basic guard - TODO: refine this!
+        arm-none-eabi-strip ${elf}
+        output=$(ls -la ${elf})
+        arr=($output)
+        size=${arr[4]}
+        threshold="102400" # 100KiB
+        echo "size: $size, threshold: $threshold"
+        if [[ "$size" -le "$threshold" ]]; then
+          echo "Success $size <= $threshold"
+        else
+          echo "Fail $size > $threshold"
+          exit 1
+        fi
+
   test-coreml-delegate:
     name: test-coreml-delegate
     uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
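
Note: the guard at the end of the new job derives the stripped binary size from `ls -la` and fails the build above 100 KiB. A rough Python sketch of the same check (not part of the commit; os.path.getsize stands in for the ls parsing, and the path/threshold mirror the job):

import os
import sys

elf = "cmake-out/test/size_test"  # same ELF the job builds and strips
threshold = 100 * 1024  # 100 KiB, matching threshold="102400" in the job

size = os.path.getsize(elf)  # size in bytes of the stripped ELF
print(f"size: {size}, threshold: {threshold}")
if size > threshold:
    print(f"Fail {size} > {threshold}")
    sys.exit(1)
print(f"Success {size} <= {threshold}")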

CMakeLists.txt

Lines changed: 7 additions & 2 deletions
@@ -645,13 +645,18 @@ target_link_options_shared_lib(executorch)
 # Real integrations should supply their own YAML file that only lists the
 # operators necessary for the models that will run.
 #
+if(EXECUTORCH_BUILD_KERNELS_OPTIMIZED)
+  # find pytorch lib here to make it available to all
+  # sub-directories. Find it before including portable so that
+  # optimized_portable_kernels can use it.
+  find_package_torch_headers()
+endif()
+
 if(BUILD_EXECUTORCH_PORTABLE_OPS)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/kernels/portable)
 endif()

 if(EXECUTORCH_BUILD_KERNELS_OPTIMIZED)
-  # find pytorch lib here to make it available to all sub-directories
-  find_package_torch_headers()
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/kernels/optimized)
 endif()

backends/arm/_passes/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -39,6 +39,7 @@
 from .insert_table_ops import InsertTableOpsPass  # noqa
 from .keep_dims_false_to_squeeze_pass import KeepDimsFalseToSqueezePass  # noqa
 from .match_arg_ranks_pass import MatchArgRanksPass  # noqa
+from .match_where_self_arg_dtype_pass import MatchWhereSelfDtypePass  # noqa
 from .meandim_to_averagepool_pass import ConvertMeanDimToAveragePoolPass  # noqa
 from .mm_to_bmm_pass import ConvertMmToBmmPass  # noqa
 from .remove_clone_pass import RemoveClonePass  # noqa

backends/arm/_passes/arm_pass_manager.py

Lines changed: 3 additions & 0 deletions
@@ -40,6 +40,7 @@
     InsertTableOpsPass,
     KeepDimsFalseToSqueezePass,
     MatchArgRanksPass,
+    MatchWhereSelfDtypePass,
     QuantizeOperatorArguments,
     RemoveClonePass,
     ReplaceScalarWithTensorArgPassTOSABI,
@@ -80,6 +81,7 @@ def _tosa_080_BI_pipeline(self, exported_program: ExportedProgram) -> GraphModul
         self.add_pass(ConvertToClampPass())
         self.add_pass(ConvertMinMaxPass())
         self.add_pass(ConvertAnyDefaultDimDimsPass())
+        self.add_pass(MatchWhereSelfDtypePass())
         if isinstance(self.tosa_spec, Tosa_0_80) and self.tosa_spec.is_U55_subset:
             self.add_pass(CastToInt32Pass())

@@ -130,6 +132,7 @@ def _tosa_080_MI_pipeline(self, exported_program: ExportedProgram) -> GraphModul
         self.add_pass(ConvertToClampPass())
         self.add_pass(ConvertMinMaxPass())
         self.add_pass(ConvertAnyDefaultDimDimsPass())
+        self.add_pass(MatchWhereSelfDtypePass())

         self.add_pass(AnnotateDecomposedMatmulPass())
         self.add_pass(QuantizeOperatorArguments())

backends/arm/_passes/arm_pass_utils.py

Lines changed: 8 additions & 3 deletions
@@ -13,6 +13,7 @@

 import torch
 import torch.fx
+from executorch.backends.arm.tosa_utils import get_node_debug_info
 from executorch.exir import ExportedProgram
 from executorch.exir.dialects._ops import ops as exir_ops

@@ -169,9 +170,13 @@ def get_first_fake_tensor(node: torch.fx.Node) -> FakeTensor:
     else:
         fake_tensor = node.meta["val"]

-    assert isinstance(
-        fake_tensor, FakeTensor
-    ), f'Found {fake_tensor} in meta["val"] of {node}, expected to find FakeTensor.'
+    if not isinstance(fake_tensor, FakeTensor):
+        raise TypeError(
+            f'Expected a FakeTensor in meta["val"] of node {node}, but got '
+            f"{type(fake_tensor).__name__}\n"
+            f"{get_node_debug_info(node)}"
+        )
+
     return fake_tensor

backends/arm/_passes/match_arg_ranks_pass.py

Lines changed: 1 addition & 0 deletions
@@ -49,6 +49,7 @@ def __init__(self, exported_program):
             exir_ops.edge.aten.bitwise_left_shift.Tensor,
             exir_ops.edge.aten.eq.Tensor,
             exir_ops.edge.aten.pow.Tensor_Tensor,
+            exir_ops.edge.aten.where.self,
         ]

     def _match_op_rank(self, graph_module, node, arg, max_rank):
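
Note: a sketch (not part of the commit) of the kind of case this change targets: `aten.where.self` with operands of different ranks. MatchArgRanksPass reshapes the lower-rank inputs of its targeted ops so that all operands share the same rank before TOSA lowering; the shapes below are hypothetical.

import torch

cond = torch.tensor([True, False, True, False])  # rank-1 condition
x = torch.randn(2, 3, 4)  # rank-3 input
y = torch.zeros(2, 3, 4)  # rank-3 input
out = torch.where(cond, x, y)  # eager PyTorch broadcasts cond to (2, 3, 4)
print(out.shape)  # torch.Size([2, 3, 4])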
