# Apply the project's common compile flags to everything defined in this directory.
llama_add_compile_flags()
# llama_build(<source>)
#
# Build an executable target from a single test source file WITHOUT registering
# a ctest entry (use llama_test / llama_build_and_test for that).
# The target name is ${LLAMA_TEST_NAME} when the caller has defined it,
# otherwise the source file name stripped of its extension (NAME_WE).
function(llama_build source)
    if (DEFINED LLAMA_TEST_NAME)
        set(TEST_TARGET ${LLAMA_TEST_NAME})
    else()
        get_filename_component(TEST_TARGET ${source} NAME_WE)
    endif()

    add_executable(${TEST_TARGET} ${source})
    # common transitively links the llama library and ggml
    target_link_libraries(${TEST_TARGET} PRIVATE common)
    install(TARGETS ${TEST_TARGET} RUNTIME)
endfunction()
3
15
function (llama_test target )
4
16
include (CMakeParseArguments)
5
17
set (options )
@@ -36,7 +48,7 @@ endfunction()
# - LABEL: label for the test (defaults to main)
# - ARGS: arguments to pass to the test executable
# - WORKING_DIRECTORY
39
- function (llama_target_and_test source )
51
+ function (llama_build_and_test source )
40
52
include (CMakeParseArguments)
41
53
set (options )
42
54
set (oneValueArgs NAME LABEL WORKING_DIRECTORY )
@@ -58,6 +70,7 @@ function(llama_target_and_test source)
58
70
add_executable (${TEST_TARGET} ${source} get -model.cpp)
59
71
install (TARGETS ${TEST_TARGET} RUNTIME)
60
72
target_link_libraries (${TEST_TARGET} PRIVATE common)
73
+
61
74
add_test (
62
75
NAME ${TEST_TARGET}
63
76
WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
@@ -68,9 +81,7 @@ function(llama_target_and_test source)
68
81
endfunction ()
# build the test-tokenizer-0 target once and register many per-vocab tests against it
llama_build(test-tokenizer-0.cpp)

llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge  ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bert-bge.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-command-r.gguf)
@@ -87,27 +98,27 @@ llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE
llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)

# grammar tests backed by the optional llguidance library
if (LLAMA_LLGUIDANCE)
    llama_build_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
endif()
if (NOT WIN32)
    # these tests are disabled on Windows because they use internal functions not exported with LLAMA_API
    llama_build_and_test(test-sampling.cpp)
    llama_build_and_test(test-grammar-parser.cpp)
    llama_build_and_test(test-grammar-integration.cpp)
    llama_build_and_test(test-llama-grammar.cpp)
    llama_build_and_test(test-chat.cpp)

    # TODO: disabled on loongarch64 because the ggml-ci node lacks Python 3.8
    if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64")
        llama_build_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..)
        # needs server headers for the JSON schema converter
        target_include_directories(test-json-schema-to-grammar PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../examples/server)
    endif()

    # built but not registered as ctest entries
    llama_build(test-quantize-stats.cpp)
    llama_build(test-gbnf-validator.cpp)

    # build the test-tokenizer-1-bpe target once and register many tests against it
    llama_build(test-tokenizer-1-bpe.cpp)

    # TODO: disabled due to slowness
    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
@@ -120,37 +131,35 @@ if (NOT WIN32)
120
131
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
121
132
122
133
# build test-tokenizer-1-spm target once and add many tests
123
- add_executable (test -tokenizer-1-spm test -tokenizer-1-spm.cpp)
124
- target_link_libraries (test -tokenizer-1-spm PRIVATE common)
125
- install (TARGETS test -tokenizer-1-spm RUNTIME)
134
+ llama_build(test -tokenizer-1-spm.cpp)
126
135
127
136
llama_test(test -tokenizer-1-spm NAME test -tokenizer-1-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR} /../models/ggml-vocab-llama-spm.gguf)
128
137
#llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)
129
138
130
- # llama_target_and_test (test-double-float.cpp) # SLOW
139
+ # llama_build_and_test (test-double-float.cpp) # SLOW
131
140
endif ()
132
llama_build_and_test(test-log.cpp)
llama_build_and_test(test-chat-template.cpp)

# this fails on windows (github hosted runner) due to curl DLL not found (exit code 0xc0000135)
if (NOT WIN32)
    llama_build_and_test(test-arg-parser.cpp)
endif()
# llama_build_and_test(test-opt.cpp) # SLOW
llama_build_and_test(test-gguf.cpp)
llama_build_and_test(test-backend-ops.cpp)

llama_build_and_test(test-model-load-cancel.cpp LABEL "model")
llama_build_and_test(test-autorelease.cpp       LABEL "model")

if (NOT GGML_BACKEND_DL)
    # these tests use the backends directly and cannot be built with dynamic loading
    llama_build_and_test(test-barrier.cpp)
    llama_build_and_test(test-quantize-fns.cpp)
    llama_build_and_test(test-quantize-perf.cpp)
    llama_build_and_test(test-rope.cpp)
endif()