Skip to content

Commit 3937c6d

Browse files
committed
Update on "[executorch][serialization] Data serialization interface"
Introduce data serialization interface. Differential Revision: [D65947145](https://our.internmc.facebook.com/intern/diff/D65947145/) [ghstack-poisoned]
2 parents 29e90e0 + 54f5618 commit 3937c6d

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

41 files changed

+2415
-117
lines changed

.github/scripts/extract_benchmark_results.py

Lines changed: 76 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
# This source code is licensed under the BSD-style license found in the
66
# LICENSE file in the root directory of this source tree.
77

8+
import glob
89
import json
910
import logging
1011
import os
@@ -22,6 +23,7 @@
2223

2324
BENCHMARK_RESULTS_FILENAME = "benchmark_results.json"
2425
ARTIFACTS_FILENAME_REGEX = re.compile(r"(android|ios)-artifacts-(?P<job_id>\d+).json")
26+
BENCHMARK_CONFIG_REGEX = re.compile(r"The benchmark config is (?P<benchmark_config>.+)")
2527

2628
# iOS-related regexes and variables
2729
IOS_TEST_SPEC_REGEX = re.compile(
@@ -51,7 +53,7 @@ def __call__(
5153
parser.error(f"{values} is not a valid JSON file (*.json)")
5254

5355

54-
class ValidateOutputDir(Action):
56+
class ValidateDir(Action):
5557
def __call__(
5658
self,
5759
parser: ArgumentParser,
@@ -81,7 +83,7 @@ def parse_args() -> Any:
8183
"--output-dir",
8284
type=str,
8385
required=True,
84-
action=ValidateOutputDir,
86+
action=ValidateDir,
8587
help="the directory to keep the benchmark results",
8688
)
8789
parser.add_argument(
@@ -114,6 +116,13 @@ def parse_args() -> Any:
114116
required=True,
115117
help="which retry of the workflow this is",
116118
)
119+
parser.add_argument(
120+
"--benchmark-configs",
121+
type=str,
122+
required=True,
123+
action=ValidateDir,
124+
help="the directory to keep the benchmark configs",
125+
)
117126

118127
return parser.parse_args()
119128

@@ -300,9 +309,60 @@ def extract_job_id(artifacts_filename: str) -> int:
300309
return int(m.group("job_id"))
301310

302311

312+
def read_all_benchmark_configs(
    benchmark_configs_dir: str = ".",
) -> Dict[str, Dict[str, str]]:
    """
    Read all the benchmark configs found in the given directory.

    Args:
        benchmark_configs_dir: the directory holding the per-run ``*.json``
            benchmark config files (defaults to the current directory).

    Returns:
        A mapping from config filename (basename) to its parsed JSON content.
        Files that fail to parse are skipped with a warning.
    """
    benchmark_configs: Dict[str, Dict[str, str]] = {}

    # NB: fix for a bug where the glob pattern interpolated the freshly
    # created empty dict (f"{benchmark_configs}/*.json" -> "{}/*.json"),
    # which could never match any file. Glob over the directory instead.
    for file in glob.glob(f"{benchmark_configs_dir}/*.json"):
        filename = os.path.basename(file)
        with open(file) as f:
            try:
                benchmark_configs[filename] = json.load(f)
            except json.JSONDecodeError as e:
                warning(f"Fail to load benchmark config {file}: {e}")

    return benchmark_configs
327+
328+
329+
def read_benchmark_config(
    artifact_s3_url: str, benchmark_configs_dir: str
) -> Dict[str, str]:
    """
    Get the correct benchmark config for this benchmark run.

    The test-spec output at ``artifact_s3_url`` is scanned for a line matching
    ``BENCHMARK_CONFIG_REGEX``, which names the config id; the corresponding
    ``<id>.json`` file is then loaded from ``benchmark_configs_dir``.

    Args:
        artifact_s3_url: URL of the test-spec output artifact to scan.
        benchmark_configs_dir: directory holding the saved benchmark configs.

    Returns:
        The parsed benchmark config, or an empty dict if the spec output
        cannot be read, no config line is found, or the config file is
        missing or malformed.
    """
    try:
        with request.urlopen(artifact_s3_url) as data:
            for line in data.read().decode("utf8").splitlines():
                m = BENCHMARK_CONFIG_REGEX.match(line)
                if not m:
                    continue

                benchmark_config = m.group("benchmark_config")
                filename = os.path.join(
                    benchmark_configs_dir, f"{benchmark_config}.json"
                )

                if not os.path.exists(filename):
                    # Fixed: the original message had no placeholder, so it
                    # never said which config file was missing
                    warning(f"There is no benchmark config at {filename}")
                    continue

                with open(filename) as f:
                    try:
                        return json.load(f)
                    except json.JSONDecodeError as e:
                        warning(f"Fail to load benchmark config {filename}: {e}")
    except error.HTTPError:
        warning(f"Fail to read the test spec output at {artifact_s3_url}")

    return {}
360+
361+
303362
def transform(
304363
app_type: str,
305364
benchmark_results: List,
365+
benchmark_config: Dict[str, str],
306366
repo: str,
307367
head_branch: str,
308368
workflow_name: str,
@@ -352,29 +412,25 @@ def transform(
352412
for r in benchmark_results
353413
]
354414
elif schema_version == "v3":
355-
quantization = (
356-
r["benchmarkModel"]["quantization"]
357-
if r["benchmarkModel"]["quantization"]
358-
else "unknown"
359-
)
415+
v3_benchmark_results = []
360416
# From https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
361417
return [
362418
{
363419
"benchmark": {
364420
"name": "ExecuTorch",
365421
"mode": "inference",
366-
"dtype": quantization,
367422
"extra_info": {
368423
"app_type": app_type,
424+
# Just keep a copy of the benchmark config here
425+
"benchmark_config": json.dumps(benchmark_config),
369426
},
370427
},
371428
"model": {
372-
"name": r["benchmarkModel"]["name"],
429+
"name": benchmark_config.get("model", r["benchmarkModel"]["name"]),
373430
"type": "OSS model",
374-
"backend": r["benchmarkModel"].get("backend", ""),
375-
"extra_info": {
376-
"quantization": quantization,
377-
},
431+
"backend": benchmark_config.get(
432+
"config", r["benchmarkModel"].get("backend", "")
433+
),
378434
},
379435
"metric": {
380436
"name": r["metric"],
@@ -405,6 +461,7 @@ def main() -> None:
405461
"v2": [],
406462
"v3": [],
407463
}
464+
benchmark_config = {}
408465

409466
with open(args.artifacts) as f:
410467
for artifact in json.load(f):
@@ -420,6 +477,11 @@ def main() -> None:
420477
artifact_type = artifact["type"]
421478
artifact_s3_url = artifact["s3_url"]
422479

480+
if artifact_type == "TESTSPEC_OUTPUT":
481+
benchmark_config = read_benchmark_config(
482+
artifact_s3_url, args.benchmark_configs
483+
)
484+
423485
if app_type == "ANDROID_APP":
424486
benchmark_results = extract_android_benchmark_results(
425487
job_name, artifact_type, artifact_s3_url
@@ -435,6 +497,7 @@ def main() -> None:
435497
results = transform(
436498
app_type,
437499
benchmark_results,
500+
benchmark_config,
438501
args.repo,
439502
args.head_branch,
440503
args.workflow_name,

.github/workflows/android-perf.yml

Lines changed: 37 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -99,6 +99,8 @@ jobs:
9999

100100
- name: Prepare the spec
101101
shell: bash
102+
env:
103+
BENCHMARK_CONFIG: ${{ toJSON(matrix) }}
102104
working-directory: extension/benchmark/android/benchmark
103105
run: |
104106
set -eux
@@ -108,11 +110,19 @@ jobs:
108110
# We could write a script to properly use jinja here, but there is only one variable,
109111
# so let's just sed it
110112
sed -i -e 's,{{ model_path }},'"${MODEL_PATH}"',g' android-llm-device-farm-test-spec.yml.j2
111-
cp android-llm-device-farm-test-spec.yml.j2 android-llm-device-farm-test-spec.yml
112113
114+
BENCHMARK_CONFIG_ID="${{ matrix.model }}_${{ matrix.config }}"
115+
# The config for this benchmark run; we save it in the test spec so that it can be fetched
116+
# later by the upload script
117+
sed -i -e 's,{{ benchmark_config_id }},'"${BENCHMARK_CONFIG_ID}"',g' android-llm-device-farm-test-spec.yml.j2
118+
119+
cp android-llm-device-farm-test-spec.yml.j2 android-llm-device-farm-test-spec.yml
113120
# Just print the test spec for debugging
114121
cat android-llm-device-farm-test-spec.yml
115122
123+
# Save the benchmark configs so that we can use them later in the dashboard
124+
echo "${BENCHMARK_CONFIG}" > "${BENCHMARK_CONFIG_ID}.json"
125+
116126
- name: Upload the spec
117127
uses: seemethere/upload-artifact-s3@v5
118128
with:
@@ -123,6 +133,16 @@ jobs:
123133
if-no-files-found: error
124134
path: extension/benchmark/android/benchmark/android-llm-device-farm-test-spec.yml
125135

136+
- name: Update the benchmark configs
137+
uses: seemethere/upload-artifact-s3@v5
138+
with:
139+
s3-bucket: gha-artifacts
140+
s3-prefix: |
141+
${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
142+
retention-days: 1
143+
if-no-files-found: error
144+
path: extension/benchmark/android/benchmark/${{ matrix.model }}_${{ matrix.config }}.json
145+
126146
export-models:
127147
name: export-models
128148
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
@@ -397,6 +417,20 @@ jobs:
397417
398418
ls -lah artifacts
399419
420+
- name: Download the list of benchmark configs from S3
421+
env:
422+
BENCHMARK_CONFIGS_DIR: s3://gha-artifacts/${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
423+
shell: bash
424+
run: |
425+
set -eux
426+
427+
mkdir -p benchmark-configs
428+
pushd benchmark-configs
429+
${CONDA_RUN} aws s3 sync "${BENCHMARK_CONFIGS_DIR}" .
430+
popd
431+
432+
ls -lah benchmark-configs
433+
400434
- name: Extract the benchmark results JSON
401435
shell: bash
402436
run: |
@@ -414,7 +448,8 @@ jobs:
414448
--head-branch ${{ github.head_ref || github.ref_name }} \
415449
--workflow-name "${{ github.workflow }}" \
416450
--workflow-run-id ${{ github.run_id }} \
417-
--workflow-run-attempt ${{ github.run_attempt }}
451+
--workflow-run-attempt ${{ github.run_attempt }} \
452+
--benchmark-configs benchmark-configs
418453
done
419454
420455
for SCHEMA in v2 v3; do

.github/workflows/apple-perf.yml

Lines changed: 35 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -101,20 +101,30 @@ jobs:
101101

102102
- name: Prepare the spec
103103
shell: bash
104+
env:
105+
BENCHMARK_CONFIG: ${{ toJSON(matrix) }}
104106
working-directory: extension/benchmark/apple/Benchmark
105107
run: |
106108
set -eux
107109
108-
echo "DEBUG: ${{ matrix.model }}"
109110
# The model will be exported in the next step to this S3 path
110111
MODEL_PATH="https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/${{ matrix.model }}_${{ matrix.config }}/model.zip"
111112
# We could write a script to properly use jinja here, but there is only one variable,
112113
# so let's just sed it
113114
sed -i -e 's,{{ model_path }},'"${MODEL_PATH}"',g' default-ios-device-farm-appium-test-spec.yml.j2
115+
116+
BENCHMARK_CONFIG_ID="${{ matrix.model }}_${{ matrix.config }}"
117+
# The config for this benchmark run; we save it in the test spec so that it can be fetched
118+
# later by the upload script
119+
sed -i -e 's,{{ benchmark_config_id }},'"${BENCHMARK_CONFIG_ID}"',g' default-ios-device-farm-appium-test-spec.yml.j2
120+
114121
cp default-ios-device-farm-appium-test-spec.yml.j2 default-ios-device-farm-appium-test-spec.yml
115122
# Just print the test spec for debugging
116123
cat default-ios-device-farm-appium-test-spec.yml
117124
125+
# Save the benchmark configs so that we can use them later in the dashboard
126+
echo "${BENCHMARK_CONFIG}" > "${BENCHMARK_CONFIG_ID}.json"
127+
118128
- name: Upload the spec
119129
uses: seemethere/upload-artifact-s3@v5
120130
with:
@@ -125,6 +135,16 @@ jobs:
125135
if-no-files-found: error
126136
path: extension/benchmark/apple/Benchmark/default-ios-device-farm-appium-test-spec.yml
127137

138+
- name: Update the benchmark configs
139+
uses: seemethere/upload-artifact-s3@v5
140+
with:
141+
s3-bucket: gha-artifacts
142+
s3-prefix: |
143+
${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
144+
retention-days: 1
145+
if-no-files-found: error
146+
path: extension/benchmark/apple/Benchmark/${{ matrix.model }}_${{ matrix.config }}.json
147+
128148
export-models:
129149
name: export-models
130150
uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
@@ -481,6 +501,18 @@ jobs:
481501
482502
ls -lah artifacts
483503
504+
- name: Download the list of benchmark configs from S3
505+
env:
506+
BENCHMARK_CONFIGS_DIR: s3://gha-artifacts/${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
507+
shell: bash
508+
run: |
509+
set -eux
510+
mkdir -p benchmark-configs
511+
pushd benchmark-configs
512+
${CONDA_RUN} aws s3 sync "${BENCHMARK_CONFIGS_DIR}" .
513+
popd
514+
ls -lah benchmark-configs
515+
484516
- name: Extract the benchmark results JSON
485517
shell: bash
486518
run: |
@@ -498,7 +530,8 @@ jobs:
498530
--head-branch ${{ github.head_ref || github.ref_name }} \
499531
--workflow-name "${{ github.workflow }}" \
500532
--workflow-run-id ${{ github.run_id }} \
501-
--workflow-run-attempt ${{ github.run_attempt }}
533+
--workflow-run-attempt ${{ github.run_attempt }} \
534+
--benchmark-configs benchmark-configs
502535
done
503536
504537
for SCHEMA in v2 v3; do

CONTRIBUTING.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -80,8 +80,8 @@ We use [`lintrunner`](https://pypi.org/project/lintrunner/) to help make sure th
8080
code follows our standards. Set it up with:
8181

8282
```
83-
pip install lintrunner==0.11.0
84-
pip install lintrunner-adapters==0.11.0
83+
pip install lintrunner==0.12.7
84+
pip install lintrunner-adapters==0.12.4
8585
lintrunner init
8686
```
8787

backends/cadence/aot/TARGETS

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@ python_library(
4040
":utils",
4141
":ops_registrations",
4242
":replace_ops",
43+
":memory_planning",
4344
"//caffe2:torch",
4445
"//executorch/backends/cadence/aot/quantizer:fusion_pass",
4546
"//executorch/backends/cadence/aot/quantizer:quantizer",
@@ -408,3 +409,22 @@ python_library(
408409
"//executorch/exir:tensor",
409410
],
410411
)
412+
413+
414+
python_unittest(
415+
name = "test_memory_passes",
416+
srcs = [
417+
"tests/test_memory_passes.py",
418+
],
419+
typing = True,
420+
deps = [
421+
":compiler",
422+
":memory_planning",
423+
":ops_registrations",
424+
":pass_utils",
425+
"//caffe2:torch",
426+
"//executorch/exir:memory",
427+
"//executorch/exir/dialects:lib",
428+
"//executorch/exir/tests:models",
429+
],
430+
)

0 commit comments

Comments
 (0)