Skip to content

Commit c10704d

Browse files
committed
llama : fix MPI threads (close #2827)
1 parent 230d46c commit c10704d

File tree

1 file changed

+2
-1
lines changed

1 file changed

+2
-1
lines changed

llama.cpp

Lines changed: 2 additions & 1 deletion
```diff
@@ -2845,7 +2845,6 @@ static bool llama_eval_internal(

     GGML_ASSERT(n_tokens > 0);
     GGML_ASSERT(n_past >= 0);
-    GGML_ASSERT(n_threads > 0);
     // TODO: keep the values of n_batch and n_ctx
     // GGML_ASSERT(n_tokens <= n_batch);
     // GGML_ASSERT(n_past + n_tokens <= n_ctx);
@@ -2856,6 +2855,8 @@ static bool llama_eval_internal(
     ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
 #endif

+    GGML_ASSERT(n_threads > 0);
+
     const int N = n_tokens;

     const auto & model = lctx.model;
```

0 commit comments

Comments
 (0)