
Commit d9dd529

enable DeepSeek V3 shared_experts_fusion in sm90 (#5571)
1 parent 0a0dd34 commit d9dd529

File tree

1 file changed: +12 -0 lines changed


python/sglang/srt/models/deepseek_v2.py

Lines changed: 12 additions & 0 deletions
@@ -1427,6 +1427,18 @@ def __init__(
             assert (
                 self.n_share_experts_fusion == self.tp_size
             ), f"Shared experts fusion optimization is enabled in DeepSeek V3/R1; set it to {self.tp_size} to get the best optimized performance."
+        elif self.n_share_experts_fusion == 0:
+            if (
+                torch.cuda.get_device_capability("cuda") >= (9, 0)
+                and self.config.architectures[0] == "DeepseekV3ForCausalLM"
+                and self.config.n_routed_experts == 256
+                and (not global_server_args_dict["enable_deepep_moe"])
+            ):
+                self.n_share_experts_fusion = self.tp_size
+                global_server_args_dict["n_share_experts_fusion"] = self.tp_size
+                logger.info(
+                    "DeepSeek V3/R1 with fp8 can use the shared experts fusion optimization when SM version >= 90. Shared experts fusion optimization is enabled."
+                )
 
         self.model = DeepseekV2Model(
             config, quant_config, prefix=add_prefix("model", prefix)
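
For context, the added branch gates the optimization on the GPU's compute capability via torch.cuda.get_device_capability, which returns a (major, minor) tuple, so the tuple comparison >= (9, 0) selects Hopper-class (SM90) and newer devices. Below is a minimal standalone sketch of that gating pattern; maybe_enable_fusion and its tp_size argument are hypothetical stand-ins for illustration, not sglang APIs.

import torch

def maybe_enable_fusion(tp_size: int) -> int:
    # Sketch of the commit's gate: auto-enable the fusion only on SM >= 9.0.
    # torch.cuda.get_device_capability returns (major, minor), e.g. (9, 0) on H100.
    if torch.cuda.is_available() and torch.cuda.get_device_capability("cuda") >= (9, 0):
        # Mirror the diff: fuse one shared-expert replica per tensor-parallel rank.
        return tp_size
    return 0  # leave fusion disabled on pre-Hopper GPUs

print("n_share_experts_fusion =", maybe_enable_fusion(tp_size=8))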
