Commit 29cfecc

baby-llama : fix -Wmaybe-uninitialized warning from gcc
1 parent 2657f7b commit 29cfecc

1 file changed (+6 −6)

examples/baby-llama/baby-llama.cpp

Lines changed: 6 additions & 6 deletions
@@ -1,8 +1,10 @@
 #include "ggml.h"
-#include <vector>
+
 #include <cassert>
-#include <random>
+#include <cstdlib>
 #include <cstring>
+#include <random>
+#include <vector>
 
 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
@@ -457,7 +459,7 @@ static void randomize_model_lora(
     }
 }
 
-static bool init_kv_cache(struct llama_kv_cache* cache, struct llama_model * model, int n_batch) {
+static void init_kv_cache(struct llama_kv_cache* cache, struct llama_model * model, int n_batch) {
     const auto & hparams = model->hparams;
 
     const uint32_t n_ctx = hparams.n_ctx;
@@ -483,14 +485,12 @@ static bool init_kv_cache(struct llama_kv_cache* cache, struct llama_model * mod
 
         if (!cache->ctx) {
            fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
-            return false;
+            exit(1);
         }
     }
 
     cache->k = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
     cache->v = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
-
-    return true;
 }
 
 static bool init_kv_cache_lora(struct llama_kv_cache* cache, struct llama_model_lora * model, int n_batch) {
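
For context, a minimal sketch of the pattern that typically provokes gcc's -Wmaybe-uninitialized here, assuming the old bool return of init_kv_cache went unchecked at the call site; the struct and function names below are hypothetical reductions, not repository code. When an initializer can return early without writing every member, and the caller ignores that result, gcc sees a path on which a member is read before it is ever assigned.

// Hypothetical reduction (not repository code) of the warning pattern.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct kv_cache {
    float * k;
    float * v;
};

// Old shape: failure is reported through a bool the caller may ignore.
static bool init_cache(struct kv_cache * cache, size_t n) {
    cache->k = (float *) calloc(n, sizeof(float));
    if (!cache->k) {
        return false;  // early exit: cache->v is never written on this path
    }
    cache->v = (float *) calloc(n, sizeof(float));
    return cache->v != nullptr;
}

int main() {
    struct kv_cache cache;   // members start out indeterminate
    init_cache(&cache, 16);  // result unchecked: gcc (with optimization) can
                             // flag that cache.v "may be used uninitialized"
    printf("%p %p\n", (void *) cache.k, (void *) cache.v);
    return 0;
}

Switching the initializer to return void and calling exit(1) on failure, as this commit does, deletes the early-return path entirely, so every path that reaches the reads has initialized both members and the warning goes away.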
