Move print_delegate_info to devtools #8207

Merged: 1 commit, Feb 6, 2025
3 changes: 2 additions & 1 deletion devtools/backend_debug/__init__.py
@@ -7,6 +7,7 @@
 from executorch.devtools.backend_debug.delegation_info import (
     DelegationBreakdown,
     get_delegation_info,
+    print_delegation_info,
 )
 
-__all__ = ["DelegationBreakdown", "get_delegation_info"]
+__all__ = ["DelegationBreakdown", "get_delegation_info", "print_delegation_info"]
9 changes: 8 additions & 1 deletion devtools/backend_debug/delegation_info.py
@@ -11,7 +11,7 @@
 
 import pandas as pd
 import torch
-
+from tabulate import tabulate
 
 # Column names of the DataFrame returned by DelegationInfo.get_operator_delegation_dataframe()
 # which describes the summarized delegation information grouped by each operator type
@@ -174,3 +174,10 @@ def _insert_op_occurrences_dict(node_name: str, delegated: bool) -> None:
         num_delegated_subgraphs=delegated_subgraph_counter,
         delegation_by_operator=op_occurrences_dict,
     )
+
+
+def print_delegation_info(graph_module: torch.fx.GraphModule):
+    delegation_info = get_delegation_info(graph_module)
+    print(delegation_info.get_summary())
+    df = delegation_info.get_operator_delegation_dataframe()
+    print(tabulate(df, headers="keys", tablefmt="fancy_grid"))
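
For context, a small self-contained sketch of how the relocated helper can be exercised. `TinyModel` and the `torch.export.export` call are illustrative only (not part of this PR); in a real flow the graph module would come out of an edge/backend lowering pipeline, and without lowering the breakdown will simply report no delegated operators.

```python
import torch

from executorch.devtools.backend_debug import print_delegation_info


# Hypothetical module, used only to produce a GraphModule to inspect.
class TinyModel(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x)


# Export to obtain a torch.fx.GraphModule; a real pipeline would partition and
# lower this graph to a backend before inspecting delegation.
exported = torch.export.export(TinyModel(), (torch.randn(2, 4),))

# Prints the delegation summary followed by a per-operator fancy_grid table.
print_delegation_info(exported.graph_module)
```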
9 changes: 1 addition & 8 deletions examples/models/llama/export_llama_lib.py
@@ -23,7 +23,7 @@
 import torch
 
 from executorch.backends.vulkan._passes.remove_asserts import remove_asserts
-from executorch.devtools.backend_debug import get_delegation_info
+from executorch.devtools.backend_debug import print_delegation_info
 
 from executorch.devtools.etrecord import generate_etrecord
 from executorch.exir.passes.init_mutable_pass import InitializedMutableBufferPass
@@ -46,7 +46,6 @@
     get_vulkan_quantizer,
 )
 from executorch.util.activation_memory_profiler import generate_memory_trace
-from tabulate import tabulate
 
 from ..model_factory import EagerModelFactory
 from .source_transformation.apply_spin_quant_r1_r2 import (
@@ -801,12 +800,6 @@ def _export_llama(args) -> LLMEdgeManager: # noqa: C901
     for partitioner in partitioners:
         logging.info(f"--> {partitioner.__class__.__name__}")
 
-    def print_delegation_info(graph_module: torch.fx.GraphModule):
-        delegation_info = get_delegation_info(graph_module)
-        print(delegation_info.get_summary())
-        df = delegation_info.get_operator_delegation_dataframe()
-        print(tabulate(df, headers="keys", tablefmt="fancy_grid"))
-
     additional_passes = []
     if args.model in TORCHTUNE_DEFINED_MODELS:
         additional_passes = [InitializedMutableBufferPass(["kv_cache_pos"])]
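
Call sites inside `_export_llama` that previously bound to the nested helper now resolve to the shared devtools implementation through the updated import at the top of the file. A hedged sketch of such a call site (`edge_manager` is an assumed name standing in for the edge program built earlier in the function; only the call shape comes from this PR):

```python
# Hypothetical call site: `edge_manager` stands in for the EdgeProgramManager
# produced earlier in _export_llama; this hunk does not show the real variable.
print_delegation_info(edge_manager.exported_program().graph_module)
```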