Skip to content

Commit a2ac2cc

Browse files
YIWENX14 and facebook-github-bot
authored and committed
Add buck rules in coreml llama transformer (#9017)
Summary: Add buck rules in coreml llama transformer for importing the modules to internal repo. Differential Revision: D70415647
1 parent 73acde9 commit a2ac2cc

File tree

3 files changed

+69
-3
lines changed

3 files changed

+69
-3
lines changed

examples/apple/coreml/llama/TARGETS

+66
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
# Any targets that should be shared between fbcode and xplat must be defined in
# targets.bzl. This file can contain fbcode-only targets.

load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")

# Library target for the CoreML llama transformer module so it can be
# imported by absolute module path (executorch.examples.apple.coreml.llama)
# from both OSS and the internal repo.
runtime.python_library(
    name = "llama_transformer",
    srcs = ["llama_transformer.py"],
    _is_external_target = True,
    base_module = "executorch.examples.apple.coreml.llama",
    visibility = [
        "//executorch/...",
        "@EXECUTORCH_CLIENTS",
    ],
    deps = [
        "//caffe2:torch",
        "//executorch/examples/models/llama:llama_transformer",
    ],
)
22+
23+
# Helper utilities (e.g. linear-layer splitting) for the CoreML llama example,
# exported under the executorch.examples.apple.coreml.llama base module.
runtime.python_library(
    name = "utils",
    srcs = ["utils.py"],
    _is_external_target = True,
    base_module = "executorch.examples.apple.coreml.llama",
    visibility = [
        "//executorch/...",
        "@EXECUTORCH_CLIENTS",
    ],
    deps = [
        "//caffe2:torch",
    ],
)
38+
39+
# Executable entry point that exports the CoreML llama model to a .pte
# program. Runs executorch.examples.apple.coreml.llama.export.main.
runtime.python_binary(
    name = "export",
    srcs = ["export.py"],
    main_function = "executorch.examples.apple.coreml.llama.export.main",
    visibility = [
        "//executorch/...",
        "@EXECUTORCH_CLIENTS",
    ],
    deps = [
        "fbsource//third-party/pypi/coremltools:coremltools",
        ":llama_transformer",
        ":utils",
        "//caffe2:torch",
        "//executorch/backends/apple/coreml:backend",
        "//executorch/backends/apple/coreml:partitioner",
        "//executorch/examples/models/llama:source_transformation",
        "//executorch/exir/backend:utils",
        "//executorch/exir/capture:config",
        "//executorch/exir/passes:lib",
        "//executorch/exir/passes:quant_fusion_pass",
        "//executorch/exir/passes:sym_shape_eval_pass",
        "//executorch/exir/program:program",
        "//executorch/extension/export_util:export_util",
        "//executorch/extension/llm/export:export_lib",
    ],
)

examples/apple/coreml/llama/export.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -24,9 +24,8 @@
2424
from executorch.exir.program._program import to_edge_with_preserved_ops
2525
from executorch.extension.export_util.utils import save_pte_program
2626

27-
sys.path.insert(0, ".")
28-
from llama_transformer import InputManager, load_model
29-
from utils import replace_linear_with_split_linear
27+
from executorch.examples.apple.coreml.llama.llama_transformer import InputManager, load_model
28+
from executorch.examples.apple.coreml.llama.utils import replace_linear_with_split_linear
3029

3130

3231
def main() -> None:

pyproject.toml

+1
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,7 @@ flatc = "executorch.data.bin:flatc"
9292
# TODO(mnachin T180504136): Do not put examples/models
9393
# into core pip packages. Refactor out the necessary utils
9494
# or core models files into a separate package.
95+
"executorch.examples.apple" = "examples/apple"
9596
"executorch.examples.models" = "examples/models"
9697
"executorch.exir" = "exir"
9798
"executorch.extension" = "extension"

0 commit comments

Comments
 (0)