tools/deep_gemm_pre-compile/generate_config.py (81 changes: 26 additions & 55 deletions)
@@ -41,70 +41,41 @@ def generate_kn_pairs(args, model_cfg: dict) -> Tuple[List, List, List]:
     gemm_kn_pairs = []
     grouped_gemm_contiguous_kn_pairs = []
     grouped_gemm_masked_kn_pairs = []
-    if tp_size > 1 and ep_size == 1:
-        logger.debug("Generating kn pairs for tensor parallel.")
-        # Dense normal gemm
-        gemm_kn_pairs.extend(
-            [
-                [int(intermediate_size / tp_size), hidden_size],
-                [hidden_size, int(head_dim * (num_attention_heads + num_key_value_heads * 2) / tp_size)],
-                [hidden_size, int(intermediate_size * 2 / tp_size)],
-                [int(hidden_size / tp_size), hidden_size],
-            ]
-        )
-
-        # Moe grouped gemm contiguous
-        grouped_gemm_contiguous_kn_pairs.extend(
-            [
-                [int(moe_intermediate_size / tp_size), hidden_size],
-                [hidden_size, int(moe_intermediate_size * 2 / tp_size)],
-            ]
-        )
-        if has_shared_experts:
-            logger.debug("Generating kn pairs for models with shared experts.")
-            gemm_kn_pairs.extend(
-                [
-                    [hidden_size, int(moe_intermediate_size * 4 / tp_size)],
-                    [int(moe_intermediate_size * 2 / tp_size), hidden_size],
-                ]
-            )
-    elif tp_size == 1 and ep_size > 1:
-        logger.debug("Generating kn pairs for expert parallel.")
-        # Dense normal gemm
-        gemm_kn_pairs.extend(
-            [
-                [intermediate_size, hidden_size],
-                [hidden_size, int(head_dim * (num_attention_heads + num_key_value_heads * 2))],
-                [hidden_size, int(intermediate_size * 2)],
-                [hidden_size, hidden_size],
-            ]
-        )
-        # Moe grouped gemm contiguous
-        grouped_gemm_contiguous_kn_pairs.extend(
-            [
-                [moe_intermediate_size, hidden_size],
-                [hidden_size, int(moe_intermediate_size * 2)],
-            ]
-        )
-        # Moe grouped gemm masked
-        grouped_gemm_masked_kn_pairs.extend(
-            [
-                [moe_intermediate_size, hidden_size],
-                [hidden_size, int(moe_intermediate_size * 2)],
-            ]
-        )
-        if has_shared_experts:
-            logger.debug("Generating kn pairs for models with shared experts.")
-            gemm_kn_pairs.extend(
-                [
-                    [hidden_size, int(moe_intermediate_size * 4)],
-                    [int(moe_intermediate_size * 2), hidden_size],
-                ]
-            )
-    elif tp_size > 1 and ep_size > 1:
-        raise ValueError("Not supported to enable EP and TP at the same time for now.")
-    else:
-        raise ValueError("Please check the tensor parallel size and expert parallel size.")
+    logger.debug("Generating kn pairs for tensor parallel.")
+    # Dense normal gemm
+    gemm_kn_pairs.extend(
+        [
+            [int(intermediate_size / tp_size), hidden_size],
+            [hidden_size, int(head_dim * (num_attention_heads + num_key_value_heads * 2) / tp_size)],
+            [hidden_size, int(intermediate_size * 2 / tp_size)],
+            [int(hidden_size / tp_size), hidden_size],
+        ]
+    )
+
+    # Moe grouped gemm contiguous
+    grouped_gemm_contiguous_kn_pairs.extend(
+        [
+            [int(moe_intermediate_size / tp_size), hidden_size],
+            [hidden_size, int(moe_intermediate_size * 2 / tp_size)],
+        ]
+    )
+
+    if ep_size > 1:
+        # Moe grouped gemm masked
+        grouped_gemm_masked_kn_pairs.extend(
+            [
+                [moe_intermediate_size, hidden_size],
+                [hidden_size, int(moe_intermediate_size * 2)],
+            ]
+        )
+    if has_shared_experts:
+        logger.debug("Generating kn pairs for models with shared experts.")
+        gemm_kn_pairs.extend(
+            [
+                [hidden_size, int(moe_intermediate_size * 4 / tp_size)],
+                [int(moe_intermediate_size * 2 / tp_size), hidden_size],
+            ]
+        )

     return (
         gemm_kn_pairs,
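Note on the unified logic (a reviewer-style sketch, not part of the diff): the old TP-only and EP-only branches differed only in the "/ tp_size" factor, and the EP-only branch was taken exactly when tp_size == 1, so dividing by tp_size unconditionally reproduces the old EP values; the masked grouped-GEMM pairs are the only EP-specific shapes and stay behind "if ep_size > 1". A minimal standalone Python sketch of that shape generation follows; the function name and the dimension numbers are hypothetical illustrations, not values from the repository.

def generate_kn_pairs_sketch(tp_size, ep_size, has_shared_experts,
                             hidden_size, intermediate_size,
                             moe_intermediate_size, head_dim,
                             num_attention_heads, num_key_value_heads):
    gemm, contiguous, masked = [], [], []
    # Dense GEMM shapes; the "/ tp_size" terms are no-ops when tp_size == 1
    gemm += [
        [int(intermediate_size / tp_size), hidden_size],
        [hidden_size, int(head_dim * (num_attention_heads + num_key_value_heads * 2) / tp_size)],
        [hidden_size, int(intermediate_size * 2 / tp_size)],
        [int(hidden_size / tp_size), hidden_size],
    ]
    # MoE grouped GEMM, contiguous layout
    contiguous += [
        [int(moe_intermediate_size / tp_size), hidden_size],
        [hidden_size, int(moe_intermediate_size * 2 / tp_size)],
    ]
    # MoE grouped GEMM, masked layout: only needed under expert parallelism
    if ep_size > 1:
        masked += [
            [moe_intermediate_size, hidden_size],
            [hidden_size, int(moe_intermediate_size * 2)],
        ]
    # Shared-expert projections, sharded over TP ranks when tp_size > 1
    if has_shared_experts:
        gemm += [
            [hidden_size, int(moe_intermediate_size * 4 / tp_size)],
            [int(moe_intermediate_size * 2 / tp_size), hidden_size],
        ]
    return gemm, contiguous, masked

# Hypothetical example dimensions; compare a TP-only and an EP-only config
for tp, ep in [(2, 1), (1, 8)]:
    pairs = generate_kn_pairs_sketch(
        tp_size=tp, ep_size=ep, has_shared_experts=True,
        hidden_size=4096, intermediate_size=12288,
        moe_intermediate_size=1536, head_dim=128,
        num_attention_heads=32, num_key_value_heads=4,
    )
    print(f"tp={tp} ep={ep}:", pairs)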