1 parent edff0f9 commit 949f97b
src/transformers/modeling_utils.py
@@ -2499,6 +2499,7 @@ def _check_and_adjust_attn_implementation(
             and self._supports_flash_attn
             and not (is_flash_attn_2_available() or is_flash_attn_3_available())
             and is_kernels_available()
+            and not is_torch_npu_available()
         ):
             if attn_implementation.endswith("2"):
                 applicable_attn_implementation = "kernels-community/flash-attn"
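
For context, this change stops `_check_and_adjust_attn_implementation` from auto-substituting hub-downloaded flash-attention kernels when running on an Ascend NPU, presumably because the kernels-community builds target CUDA GPUs. Below is a minimal standalone sketch of the adjusted guard; the function name, its signature, and the FA3 branch are hypothetical (the hunk only shows the FA2 case), the availability helpers are the ones imported in modeling_utils.py, and the real condition contains additional clauses not visible in this hunk.

```python
# Hypothetical standalone sketch of the guard after this patch; the real
# logic lives inside PreTrainedModel._check_and_adjust_attn_implementation.
from typing import Optional

from transformers.utils import (
    is_flash_attn_2_available,
    is_flash_attn_3_available,
    is_kernels_available,
    is_torch_npu_available,
)


def pick_kernels_fallback(attn_implementation: str, supports_flash_attn: bool) -> Optional[str]:
    """Return a kernels-community repo id when flash attention was requested
    but is not installed locally, or None when the requested implementation
    should be left untouched (including on Ascend NPU, where the downloaded
    kernels are skipped)."""
    if (
        supports_flash_attn
        and not (is_flash_attn_2_available() or is_flash_attn_3_available())
        and is_kernels_available()
        and not is_torch_npu_available()  # the clause added by this commit
    ):
        if attn_implementation.endswith("2"):
            return "kernels-community/flash-attn"
        return "kernels-community/vllm-flash-attn3"  # assumed FA3 counterpart
    return None
```

In the actual method the chosen repo id is assigned to `applicable_attn_implementation`, as the diff above shows for the FA2 case.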