We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 230d46c · commit c10704d — Copy full SHA for c10704d
llama.cpp
@@ -2845,7 +2845,6 @@ static bool llama_eval_internal(
2845
2846
GGML_ASSERT(n_tokens > 0);
2847
GGML_ASSERT(n_past >= 0);
2848
- GGML_ASSERT(n_threads > 0);
2849
// TODO: keep the values of n_batch and n_ctx
2850
// GGML_ASSERT(n_tokens <= n_batch);
2851
// GGML_ASSERT(n_past + n_tokens <= n_ctx);
@@ -2856,6 +2855,8 @@ static bool llama_eval_internal(
2856
2855
ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
2857
#endif
2858
+ GGML_ASSERT(n_threads > 0);
2859
+
2860
const int N = n_tokens;
2861
2862
const auto & model = lctx.model;
0 commit comments