import unittest

import torch
from executorch.examples.models.llama.attention import (
    AttentionMHA,
    KVCache,
    ModelArgs,
    Rope,
    SDPA,
)


class TestAttentionMHA(unittest.TestCase):
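    """Unit tests for the llama example AttentionMHA module."""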

    def create_mock_args(self):
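        """Return a small ModelArgs configuration with the KV cache enabled."""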
        return ModelArgs(
            use_kv_cache=True,
            n_heads=8,
            n_kv_heads=4,
            head_dim=64,
            max_batch_size=2,
            max_context_len=16,
            dim=512,
            attention_qkv_bias=False,
            enable_dynamic_shape=False,
        )

    def test_attentionmha_init(self):
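        """Constructor should expose the configured head counts, dims, causal mask, and KV cache."""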
        args = self.create_mock_args()
        rope = Rope(args)
        attn = AttentionMHA(args, layer_id=0, rope=rope)

        self.assertEqual(attn.n_heads, 8)
        self.assertEqual(attn.n_kv_heads, 4)
        self.assertEqual(attn.n_local_heads, 8)
        self.assertEqual(attn.n_local_kv_heads, 4)
        self.assertEqual(attn.head_dim, 64)
        self.assertEqual(attn.dim, 512)
        self.assertEqual(attn.mask.shape, (16, 16))  # Causal mask sized to max_context_len
        self.assertTrue(attn.use_kv_cache)

        if attn.use_kv_cache:
            self.assertIsInstance(attn.kv_cache, KVCache)
            self.assertIsInstance(attn.SDPA, SDPA)

    def test_attentionmha_forward(self):
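        """Forward with the KV cache and explicit input positions preserves the (bsz, seqlen, dim) shape."""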
        args = self.create_mock_args()
        rope = Rope(args)
        attn = AttentionMHA(args, layer_id=0, rope=rope)

        bsz, seqlen, dim = 2, 4, args.dim
        x = torch.randn(bsz, seqlen, dim)
        freqs_cos = torch.randn(seqlen, args.head_dim // 2)
        freqs_sin = torch.randn(seqlen, args.head_dim // 2)
        input_pos = torch.tensor([0, 1, 2, 3])

        output, _ = attn.forward(x, freqs_cos, freqs_sin, input_pos=input_pos)

        self.assertEqual(output.shape, (bsz, seqlen, dim))

    def test_attentionmha_forward_no_kv_cache(self):
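        """With use_kv_cache disabled, forward runs without input_pos and preserves the input shape."""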
        args = self.create_mock_args()
        args.use_kv_cache = False  # Disable KV cache for this test
        rope = Rope(args)
        attn = AttentionMHA(args, layer_id=0, rope=rope)

        bsz, seqlen, dim = 2, 4, args.dim
        x = torch.randn(bsz, seqlen, dim)
        freqs_cos = torch.randn(seqlen, args.head_dim // 2)
        freqs_sin = torch.randn(seqlen, args.head_dim // 2)

        output, _ = attn.forward(x, freqs_cos, freqs_sin)

        self.assertEqual(output.shape, (bsz, seqlen, dim))

    def test_attentionmha_invalid_kv_cache(self):
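        """With the KV cache enabled, forward without input_pos should raise an AssertionError."""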
        args = self.create_mock_args()
        rope = Rope(args)
        attn = AttentionMHA(args, layer_id=0, rope=rope)

        bsz, seqlen, dim = 2, 4, args.dim
        x = torch.randn(bsz, seqlen, dim)
        freqs_cos = torch.randn(seqlen, args.head_dim // 2)
        freqs_sin = torch.randn(seqlen, args.head_dim // 2)

        # No input_pos provided while the KV cache is enabled; should raise an AssertionError
        with self.assertRaises(AssertionError):
            attn.forward(x, freqs_cos, freqs_sin)
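

if __name__ == "__main__":
    # Allow running this test file directly
    unittest.main()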