@@ -104,7 +104,7 @@ option(LLAMA_BUILD_SERVER "llama: build server example"
 # Compile flags
 #
 
-set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD 20)
 set(CMAKE_CXX_STANDARD_REQUIRED true)
 set(CMAKE_C_STANDARD 11)
 set(CMAKE_C_STANDARD_REQUIRED true)
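Note: this raises the default C++ standard for every target defined after this point from C++11 to C++20, so the toolchain must be new enough (roughly GCC 10+, Clang 10+, or MSVC 19.29+). A minimal sketch of a guard one could add near this line, assuming a GCC/Clang-style driver; the COMPILER_SUPPORTS_CXX20 variable name is illustrative, not part of the patch:

    # Fail early if the compiler cannot be driven in C++20 mode.
    include(CheckCXXCompilerFlag)
    check_cxx_compiler_flag("-std=c++20" COMPILER_SUPPORTS_CXX20)
    if (NOT COMPILER_SUPPORTS_CXX20)
        message(FATAL_ERROR "this branch requires a C++20-capable compiler")
    endif()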
@@ -230,7 +230,12 @@ if (LLAMA_BLAS)
 
         message(STATUS "BLAS found, Includes: ${BLAS_INCLUDE_DIRS}")
         add_compile_options(${BLAS_LINKER_FLAGS})
-        add_compile_definitions(GGML_USE_OPENBLAS)
+
+        # from https://github.com/NVIDIA/cutlass
+        make_directory("${PROJECT_BINARY_DIR}/nvcc_tmp")
+        set(cuda_flags --keep "SHELL:--keep-dir ${PROJECT_BINARY_DIR}/nvcc_tmp" ${cuda_flags})
+
+        # add_compile_definitions(GGML_USE_OPENBLAS)
         if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${LLAMA_BLAS_VENDOR} MATCHES "Generic" OR ${LLAMA_BLAS_VENDOR} MATCHES "Intel"))
            add_compile_definitions(GGML_BLAS_USE_MKL)
         endif()
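Note: this hunk disables the GGML_USE_OPENBLAS define (now commented out) and instead adds nvcc bookkeeping flags: --keep retains nvcc's intermediate files (.ii, .ptx, .cubin) and --keep-dir routes them into nvcc_tmp under the build tree. The SHELL: prefix keeps the two-token option "--keep-dir <dir>" together so CMake's option de-duplication cannot split or collapse it. A sketch of the same idea written with list(APPEND), equivalent to the patched set() line:

    # Sketch: keep nvcc intermediates for inspection; SHELL: preserves the
    # two-token "--keep-dir <dir>" option as one shell-level argument.
    make_directory("${PROJECT_BINARY_DIR}/nvcc_tmp")
    list(APPEND cuda_flags --keep "SHELL:--keep-dir ${PROJECT_BINARY_DIR}/nvcc_tmp")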
@@ -312,7 +317,7 @@ if (LLAMA_MPI)
     if (MPI_C_FOUND)
         message(STATUS "MPI found")
         set(GGML_HEADERS_MPI ggml-mpi.h)
-        set(GGML_SOURCES_MPI ggml-mpi.c ggml-mpi.h)
+        set(GGML_SOURCES_MPI ggml-mpi.cpp ggml-mpi.h)
         add_compile_definitions(GGML_USE_MPI)
         add_compile_definitions(${MPI_C_COMPILE_DEFINITIONS})
         if (NOT MSVC)
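Note: renaming ggml-mpi.c to ggml-mpi.cpp hands the file to the C++ compiler, matching the ggml.c → ggml.cpp renames further down. A hedged alternative that would keep the original file name (illustrative only, not what the patch does):

    # Sketch: force the C++ compiler onto an existing .c file instead of renaming it.
    set_source_files_properties(ggml-mpi.c PROPERTIES LANGUAGE CXX)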
@@ -438,6 +443,9 @@ if (NOT cuda_host_flags STREQUAL "")
         set(cuda_flags ${cuda_flags} -Xcompiler ${cuda_host_flags})
     endif()
 
+    #
+    set(cuda_flags --verbose -G ${cuda_flags})
+
     add_compile_options("$<$<COMPILE_LANGUAGE:CUDA>:${cuda_flags}>")
 
     if (WIN32)
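Note: -G disables device-code optimization and embeds device debug symbols, and --verbose makes nvcc print its sub-commands; both slow builds and kernels considerably. Since the patch adds them unconditionally, a sketch of gating them on the build type (my suggestion, not part of the patch):

    # Sketch: only pass the nvcc debugging flags for Debug builds.
    if (CMAKE_BUILD_TYPE STREQUAL "Debug")
        set(cuda_flags --verbose -G ${cuda_flags})
    endif()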
@@ -485,8 +493,10 @@ if (NOT MSVC)
         add_link_options(-static-libgcc -static-libstdc++)
     endif()
 endif()
+add_link_options("-Wl,-Map=${TARGET}.map")
+
 if (LLAMA_GPROF)
-    add_compile_options(-pg)
+    add_compile_options(-pg)
 endif()
 
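Note: -Wl,-Map asks the GNU linker to write a map file listing the emitted sections and symbols. At this point in the top-level CMakeLists, ${TARGET} is likely undefined (it is set per example subdirectory), so the map would land in a file literally named ".map". A per-target sketch that avoids this, assuming CMake >= 3.15 for TARGET_FILE_BASE_NAME (illustrative, not part of the patch):

    # Sketch: one map file per target, named after the real output file.
    target_link_options(llama PRIVATE
        "-Wl,-Map=$<TARGET_FILE_BASE_NAME:llama>.map")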
@@ -645,13 +655,16 @@ if (GGML_USE_CPU_HBM)
 endif()
 
 add_library(ggml OBJECT
-            ggml.c
+            ggml.cpp
             ggml.h
-            ggml-alloc.c
+            print.hpp
+            ggml-internal.hpp
+            llama-internal.hpp
+            ggml-alloc.cpp
             ggml-alloc.h
-            ggml-backend.c
+            ggml-backend.cpp
             ggml-backend.h
-            ggml-quants.c
+            ggml-quants.cpp
             ggml-quants.h
             ${GGML_SOURCES_CUDA} ${GGML_HEADERS_CUDA}
             ${GGML_SOURCES_OPENCL} ${GGML_HEADERS_OPENCL}
@@ -683,7 +696,7 @@ add_library(llama
             )
 
 target_include_directories(llama PUBLIC .)
-target_compile_features(llama PUBLIC cxx_std_11) # don't bump
+target_compile_features(llama PUBLIC cxx_std_20) # don't bump
 target_link_libraries(llama PRIVATE
     ggml
     ${LLAMA_EXTRA_LIBS}
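Note: because the compile feature is PUBLIC, every target that links llama now inherits the C++20 requirement, consistent with the CMAKE_CXX_STANDARD bump in the first hunk (the "# don't bump" comment is carried over from the C++11 line). A sketch of the transitive effect; the my_app target name is hypothetical:

    # Sketch: a consumer picks up cxx_std_20 transitively via the PUBLIC feature.
    add_executable(my_app main.cpp)
    target_link_libraries(my_app PRIVATE llama)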