Skip to content

Commit 6f121b1

Browse files
Documented CUDA reproducibility, added warning
1 parent 173d0e6 commit 6f121b1

File tree

3 files changed

+8
-1
lines changed

3 files changed

+8
-1
lines changed

README.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -257,6 +257,8 @@ Building the program with BLAS support may lead to some performance improvements
257257
cmake --build . --config Release
258258
```
259259
260+
Note: Because llama.cpp uses multiple CUDA streams for matrix multiplication, results [are not guaranteed to be reproducible](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility). If you need reproducibility, set `GGML_CUDA_MAX_STREAMS` in the file `ggml-cuda.cu` to 1.
261+
260262
### Prepare Data & Run
261263
262264
```bash

examples/common.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,9 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
100100
arg = argv[i];
101101

102102
if (arg == "-s" || arg == "--seed") {
103+
#if defined(GGML_USE_CUBLAS)
104+
fprintf(stderr, "WARNING: when using cuBLAS generation results are NOT guaranteed to be reproducible.\n");
105+
#endif
103106
if (++i >= argc) {
104107
invalid_param = true;
105108
break;

ggml-cuda.cu

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -348,7 +348,7 @@ static void ggml_cuda_pool_free(void * ptr, size_t size) {
348348
CUDA_CHECK(cudaFree(ptr));
349349
}
350350

351-
#define GGML_CUDA_MAX_STREAMS 8
351+
#define GGML_CUDA_MAX_STREAMS 8 // Set this to 1 for reproducible matrix multiplication.
352352
#define GGML_CUDA_MAX_EVENTS 64
353353
static cublasHandle_t g_cublasH = nullptr;
354354
static cudaStream_t g_cudaStreams[GGML_CUDA_MAX_STREAMS] = { nullptr };
@@ -469,6 +469,7 @@ static void ggml_cuda_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor *
469469
&alpha, c_X, ne00,
470470
c_Y, ne10,
471471
&beta, c_D, ne01));
472+
// Due to multiple CUDA streams the mat. mul. results are NOT guaranteed to be reproducible.
472473

473474
// copy dst to host
474475
float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
@@ -637,6 +638,7 @@ static void ggml_cuda_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor
637638
&alpha, c_X, ne00,
638639
c_Y, ne10,
639640
&beta, c_D, ne01));
641+
// Due to multiple CUDA streams the mat. mul. results are NOT guaranteed to be reproducible.
640642

641643
// copy dst to host
642644
float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);

0 commit comments

Comments
 (0)