Commit 93b8765

llama : expose model's rope_freq_scale in the API
so it can be scaled further before creating a context.
1 parent c97f01c

File tree

2 files changed: +7 -0 lines changed

Diff for: llama.cpp (+4)

@@ -6894,6 +6894,10 @@ int llama_n_embd(const struct llama_model * model) {
     return model->hparams.n_embd;
 }
 
+float llama_rope_freq_scale_train(const struct llama_model * model) {
+    return model->hparams.rope_freq_scale_train;
+}
+
 int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
     return snprintf(buf, buf_size, "%s %s %s",
             llama_model_arch_name(model->arch).c_str(),

Diff for: llama.h (+3)

@@ -282,6 +282,9 @@ extern "C" {
     LLAMA_API int llama_n_ctx_train(const struct llama_model * model);
     LLAMA_API int llama_n_embd     (const struct llama_model * model);
 
+    // Get the model's RoPE frequency scaling factor
+    LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);
+
     // Get a string describing the model type
     LLAMA_API int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);

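The commit message notes that the exposed value can be scaled further before creating a context. Below is a minimal usage sketch in C, assuming the llama.cpp C API of this period (llama_backend_init, llama_model_default_params, llama_load_model_from_file, llama_context_default_params with its rope_freq_scale field, llama_new_context_with_model); the model path and the extra 0.5f scaling factor are placeholders, not part of this commit.

#include <stdio.h>
#include "llama.h"

int main(void) {
    llama_backend_init(false /* numa */);

    // Load the model with default parameters ("model.gguf" is a placeholder path).
    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == NULL) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // Read the RoPE frequency scale the model was trained with (newly exposed by this commit).
    const float freq_scale_train = llama_rope_freq_scale_train(model);

    // Scale it further before creating the context; 0.5f is an arbitrary illustration value.
    struct llama_context_params cparams = llama_context_default_params();
    cparams.rope_freq_scale = freq_scale_train * 0.5f;

    struct llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == NULL) {
        fprintf(stderr, "failed to create context\n");
        llama_free_model(model);
        return 1;
    }

    printf("trained rope_freq_scale = %.3f, using %.3f\n",
           freq_scale_train, cparams.rope_freq_scale);

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}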