@@ -287,6 +287,7 @@ enum llm_kv {
     LLM_KV_VOCAB_SIZE,
     LLM_KV_CONTEXT_LENGTH,
+    LLM_KV_CONTEXT_LENGTH_SWA,
     LLM_KV_EMBEDDING_LENGTH,
     LLM_KV_BLOCK_COUNT,
     LLM_KV_LEADING_DENSE_BLOCK_COUNT,
@@ -379,6 +380,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_VOCAB_SIZE,                    "%s.vocab_size"                },
     { LLM_KV_CONTEXT_LENGTH,                "%s.context_length"            },
+    { LLM_KV_CONTEXT_LENGTH_SWA,            "%s.context_length_swa"        },
     { LLM_KV_EMBEDDING_LENGTH,              "%s.embedding_length"          },
     { LLM_KV_BLOCK_COUNT,                   "%s.block_count"               },
     { LLM_KV_LEADING_DENSE_BLOCK_COUNT,     "%s.leading_dense_block_count" },
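For context, the "%s" in these name templates is substituted with the model architecture string when the key is looked up in the GGUF metadata, so for Gemma 2 the new key would resolve to something like "gemma2.context_length_swa". A minimal standalone sketch of that substitution, not taken from the patch (the architecture string "gemma2" is an assumption here):

#include <cstdio>

int main() {
    // illustrative only: expand the "%s.context_length_swa" template with an
    // architecture name, the way the key lookup is expected to behave
    const char * tmpl = "%s.context_length_swa";
    const char * arch = "gemma2"; // assumed architecture string

    char key[256];
    snprintf(key, sizeof(key), tmpl, arch);
    printf("%s\n", key); // prints: gemma2.context_length_swa
}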
@@ -2079,7 +2081,8 @@ struct llama_hparams {
     bool use_par_res;

     uint32_t n_vocab;
-    uint32_t n_ctx_train; // context size the model was trained on
+    uint32_t n_ctx_train;    // context size the model was trained on
+    int32_t  n_ctx_swa = -1; // context size for sliding window attention (SWA)
     uint32_t n_embd;
     uint32_t n_head;
     uint32_t n_head_kv;
@@ -2661,6 +2664,9 @@ struct llama_context {
     struct ggml_tensor * inp_s_mask; // F32 [1, n_kv]
     struct ggml_tensor * inp_s_seq;  // I32 [n_kv, n_batch]

+    // KQ mask per layer, used by sliding window attention (gemma 2)
+    std::vector<struct ggml_tensor *> inp_KQ_mask_l;
+
     // control vectors
     struct llama_control_vector cvec;
 };
@@ -4709,6 +4715,8 @@ static void llm_load_hparams(
             } break;
         case LLM_ARCH_GEMMA2:
             {
+                hparams.n_ctx_swa = 4096; // default value
+                ml.get_key(LLM_KV_CONTEXT_LENGTH_SWA, hparams.n_ctx_swa, false);
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                 ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING,      hparams.f_attn_logit_softcapping, false);
                 ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING,     hparams.f_final_logit_softcapping, false);
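The two added lines above appear to use the optional-key pattern: n_ctx_swa is first set to the 4096 default and only overwritten if the GGUF file actually carries the SWA key (the final `false` marks the key as not required). A minimal standalone sketch of that pattern, with a hypothetical get_key helper and a plain map standing in for the model loader's metadata:

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

// hypothetical stand-in for reading GGUF metadata; not the llama.cpp loader API
static bool get_key(const std::map<std::string, int32_t> & kv,
                    const std::string & name, int32_t & out, bool required) {
    auto it = kv.find(name);
    if (it == kv.end()) {
        if (required) { fprintf(stderr, "missing key: %s\n", name.c_str()); }
        return false; // key absent: leave `out` untouched, i.e. keep the default
    }
    out = it->second;
    return true;
}

int main() {
    std::map<std::string, int32_t> meta; // a file without the SWA key

    int32_t n_ctx_swa = 4096; // default value
    get_key(meta, "gemma2.context_length_swa", n_ctx_swa, /*required =*/ false);
    printf("n_ctx_swa = %d\n", n_ctx_swa); // stays 4096
}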
@@ -11029,9 +11037,16 @@ struct llm_build_context {
         struct ggml_tensor * inp_pos = build_inp_pos();

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
+        // gemma 2 requires a different mask for the layers that use sliding window attention (SWA)
+        struct ggml_tensor * KQ_mask_full = build_inp_KQ_mask();
+        struct ggml_tensor * KQ_mask_SWA  = build_inp_KQ_mask();
+        lctx.inp_KQ_mask_l.clear();

         for (int il = 0; il < n_layer; ++il) {
+            // layers with even il (il % 2 == 0) use SWA
+            struct ggml_tensor * KQ_mask = (il % 2 == 0) ? KQ_mask_SWA : KQ_mask_full;
+            lctx.inp_KQ_mask_l.push_back(KQ_mask);
+
             // norm
             cur = llm_build_norm(ctx0, inpL, hparams,
                     model.layers[il].attn_norm, NULL,
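In other words, the mask alternates per layer: even-indexed layers attend through the sliding window, odd-indexed layers see the full context. A small standalone sketch of the same selection rule (the layer count of 6 is arbitrary, chosen only for illustration):

#include <cstdio>

int main() {
    const int n_layer = 6; // arbitrary, for illustration only

    for (int il = 0; il < n_layer; ++il) {
        // same rule as in the graph builder above:
        // even layers -> SWA mask, odd layers -> full-context mask
        const bool use_swa = (il % 2 == 0);
        printf("layer %d uses the %s mask\n", il, use_swa ? "SWA" : "full");
    }
}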
@@ -12671,6 +12686,14 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
             GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));

             float * data = (float *) lctx.inp_KQ_mask->data;
+            float * data_swa = nullptr;
+
+            if (lctx.model.arch == LLM_ARCH_GEMMA2) {
+                GGML_ASSERT(!lctx.inp_KQ_mask_l.empty() && "gemma 2 requires different KQ mask per layer");
+                GGML_ASSERT(hparams.n_ctx_swa > 0);
+                data_swa = (float *) lctx.inp_KQ_mask_l[0]->data; // mask used by the SWA layers (even il)
+                data     = (float *) lctx.inp_KQ_mask_l[1]->data; // mask used by the full-context layers (odd il)
+            }

             // For causal attention, use only the previous KV cells
             // of the correct sequence for each token of the batch.
@@ -12692,6 +12715,15 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
                             }
                         }
                         data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
+
+                        // may need to cut off old tokens for sliding window
+                        if (data_swa && f != -INFINITY) {
+                            const llama_pos n_keep = hparams.n_ctx_swa - batch.n_tokens;
+                            if (pos - lctx.kv_self.cells[i].pos > n_keep) {
+                                f = -INFINITY;
+                            }
+                            data_swa[h*(n_kv*n_tokens) + j*n_kv + i] = f;
+                        }
                     }
                 }

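The cutoff above keeps, for each query token, only the KV cells whose position lies within n_keep = n_ctx_swa - batch.n_tokens of the query position; anything older is set to -INFINITY in the SWA copy of the mask, while the full-context mask is left untouched. A self-contained sketch of just that rule (swa_masked is a hypothetical helper, types simplified):

#include <cstdint>
#include <cstdio>

// returns true if the KV cell at `cell_pos` should be masked out for a query
// token at `query_pos` under a sliding window of `n_ctx_swa`, using the same
// n_keep heuristic as the patch (window reduced by the batch size)
static bool swa_masked(int32_t query_pos, int32_t cell_pos,
                       int32_t n_ctx_swa, int32_t n_batch_tokens) {
    const int32_t n_keep = n_ctx_swa - n_batch_tokens;
    return (query_pos - cell_pos) > n_keep;
}

int main() {
    const int32_t n_ctx_swa = 4096;
    const int32_t n_batch   = 1;   // n_keep = 4095

    printf("%d\n", swa_masked(5000,  900, n_ctx_swa, n_batch)); // 1: distance 4100 > 4095, outside the window
    printf("%d\n", swa_masked(5000,  905, n_ctx_swa, n_batch)); // 0: distance 4095, exactly at the edge, kept
    printf("%d\n", swa_masked(5000, 5000, n_ctx_swa, n_batch)); // 0: the query token itself, kept
}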