Skip to content

Commit 13b4548

Browse files
authored
cmake : do not include ./src as public for libllama (#13062)
* cmake : do not include ./src as public for libllama ggml-ci * cmake : rework tests ggml-ci * llguidance : remove unicode include ggml-ci * cmake : make c++17 private ggml-ci
1 parent 572b314 commit 13b4548

17 files changed

+64
-69
lines changed

common/arg.cpp

-2
Original file line numberDiff line numberDiff line change
@@ -994,7 +994,6 @@ static void common_params_print_completion(common_params_context & ctx_arg) {
994994
"llama-embedding",
995995
"llama-eval-callback",
996996
"llama-export-lora",
997-
"llama-gbnf-validator",
998997
"llama-gen-docs",
999998
"llama-gguf",
1000999
"llama-gguf-hash",
@@ -1014,7 +1013,6 @@ static void common_params_print_completion(common_params_context & ctx_arg) {
10141013
"llama-perplexity",
10151014
"llama-q8dot",
10161015
"llama-quantize",
1017-
"llama-quantize-stats",
10181016
"llama-qwen2vl-cli",
10191017
"llama-retrieval",
10201018
"llama-run",

examples/CMakeLists.txt

-9
Original file line numberDiff line numberDiff line change
@@ -21,11 +21,6 @@ else()
2121
add_subdirectory(embedding)
2222
add_subdirectory(eval-callback)
2323

24-
if (NOT WIN32)
25-
# disabled on Windows because it uses internal functions not exported with LLAMA_API
26-
add_subdirectory(gbnf-validator)
27-
endif()
28-
2924
add_subdirectory(gguf-hash)
3025
add_subdirectory(gguf-split)
3126
add_subdirectory(gguf)
@@ -58,10 +53,6 @@ else()
5853
add_subdirectory(convert-llama2c-to-ggml)
5954
add_subdirectory(cvector-generator)
6055
add_subdirectory(export-lora)
61-
if (NOT WIN32)
62-
# disabled on Windows because it uses internal functions not exported with LLAMA_API
63-
add_subdirectory(quantize-stats)
64-
endif()
6556
add_subdirectory(llava)
6657
if (GGML_RPC)
6758
add_subdirectory(rpc)

examples/gbnf-validator/CMakeLists.txt

-5
This file was deleted.

examples/quantize-stats/CMakeLists.txt

-6
This file was deleted.

grammars/README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@ You can use GBNF grammars:
112112

113113
- In [llama-server](../examples/server)'s completion endpoints, passed as the `grammar` body field
114114
- In [llama-cli](../examples/main), passed as the `--grammar` & `--grammar-file` flags
115-
- With [llama-gbnf-validator](../examples/gbnf-validator) tool, to test them against strings.
115+
- With [test-gbnf-validator](../tests/test-gbnf-validator.cpp), to test them against strings.
116116

117117
## JSON Schemas → GBNF
118118

src/CMakeLists.txt

+3-2
Original file line numberDiff line numberDiff line change
@@ -32,8 +32,9 @@ add_library(llama
3232
unicode.h
3333
)
3434

35-
target_include_directories(llama PUBLIC . ../include)
36-
target_compile_features (llama PUBLIC cxx_std_17) # don't bump
35+
target_include_directories(llama PRIVATE .)
36+
target_include_directories(llama PUBLIC ../include)
37+
target_compile_features (llama PRIVATE cxx_std_17) # don't bump
3738

3839
target_link_libraries(llama PUBLIC ggml)
3940

tests/CMakeLists.txt

+39-30
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,17 @@
11
llama_add_compile_flags()
22

3+
function(llama_build source)
4+
if (DEFINED LLAMA_TEST_NAME)
5+
set(TEST_TARGET ${LLAMA_TEST_NAME})
6+
else()
7+
get_filename_component(TEST_TARGET ${source} NAME_WE)
8+
endif()
9+
10+
add_executable(${TEST_TARGET} ${source})
11+
target_link_libraries(${TEST_TARGET} PRIVATE common)
12+
install(TARGETS ${TEST_TARGET} RUNTIME)
13+
endfunction()
14+
315
function(llama_test target)
416
include(CMakeParseArguments)
517
set(options)
@@ -36,7 +48,7 @@ endfunction()
3648
# - LABEL: label for the test (defaults to main)
3749
# - ARGS: arguments to pass to the test executable
3850
# - WORKING_DIRECTORY
39-
function(llama_target_and_test source)
51+
function(llama_build_and_test source)
4052
include(CMakeParseArguments)
4153
set(options)
4254
set(oneValueArgs NAME LABEL WORKING_DIRECTORY)
@@ -58,6 +70,7 @@ function(llama_target_and_test source)
5870
add_executable(${TEST_TARGET} ${source} get-model.cpp)
5971
install(TARGETS ${TEST_TARGET} RUNTIME)
6072
target_link_libraries(${TEST_TARGET} PRIVATE common)
73+
6174
add_test(
6275
NAME ${TEST_TARGET}
6376
WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
@@ -68,9 +81,7 @@ function(llama_target_and_test source)
6881
endfunction()
6982

7083
# build test-tokenizer-0 target once and add many tests
71-
add_executable(test-tokenizer-0 test-tokenizer-0.cpp)
72-
target_link_libraries(test-tokenizer-0 PRIVATE common)
73-
install(TARGETS test-tokenizer-0 RUNTIME)
84+
llama_build(test-tokenizer-0.cpp)
7485

7586
llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bert-bge.gguf)
7687
llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-command-r.gguf)
@@ -87,27 +98,27 @@ llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE
8798
llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
8899

89100
if (LLAMA_LLGUIDANCE)
90-
llama_target_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
101+
llama_build_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
91102
endif ()
92103

93104
if (NOT WIN32)
94105
# these tests are disabled on Windows because they use internal functions not exported with LLAMA_API
95-
llama_target_and_test(test-sampling.cpp)
96-
llama_target_and_test(test-grammar-parser.cpp)
97-
llama_target_and_test(test-grammar-integration.cpp)
98-
llama_target_and_test(test-llama-grammar.cpp)
99-
llama_target_and_test(test-chat.cpp)
106+
llama_build_and_test(test-sampling.cpp)
107+
llama_build_and_test(test-grammar-parser.cpp)
108+
llama_build_and_test(test-grammar-integration.cpp)
109+
llama_build_and_test(test-llama-grammar.cpp)
110+
llama_build_and_test(test-chat.cpp)
100111
# TODO: disabled on loongarch64 because the ggml-ci node lacks Python 3.8
101112
if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64")
102-
llama_target_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..)
113+
llama_build_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..)
103114
target_include_directories(test-json-schema-to-grammar PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../examples/server)
104115
endif()
105116

117+
llama_build(test-quantize-stats.cpp)
118+
llama_build(test-gbnf-validator.cpp)
106119

107120
# build test-tokenizer-1-bpe target once and add many tests
108-
add_executable(test-tokenizer-1-bpe test-tokenizer-1-bpe.cpp)
109-
target_link_libraries(test-tokenizer-1-bpe PRIVATE common)
110-
install(TARGETS test-tokenizer-1-bpe RUNTIME)
121+
llama_build(test-tokenizer-1-bpe.cpp)
111122

112123
# TODO: disabled due to slowness
113124
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
@@ -120,37 +131,35 @@ if (NOT WIN32)
120131
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
121132

122133
# build test-tokenizer-1-spm target once and add many tests
123-
add_executable(test-tokenizer-1-spm test-tokenizer-1-spm.cpp)
124-
target_link_libraries(test-tokenizer-1-spm PRIVATE common)
125-
install(TARGETS test-tokenizer-1-spm RUNTIME)
134+
llama_build(test-tokenizer-1-spm.cpp)
126135

127136
llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf)
128137
#llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)
129138

130-
# llama_target_and_test(test-double-float.cpp) # SLOW
139+
# llama_build_and_test(test-double-float.cpp) # SLOW
131140
endif()
132141

133-
llama_target_and_test(test-log.cpp)
134-
llama_target_and_test(test-chat-template.cpp)
142+
llama_build_and_test(test-log.cpp)
143+
llama_build_and_test(test-chat-template.cpp)
135144

136145
# this fails on windows (github hosted runner) due to curl DLL not found (exit code 0xc0000135)
137146
if (NOT WIN32)
138-
llama_target_and_test(test-arg-parser.cpp)
147+
llama_build_and_test(test-arg-parser.cpp)
139148
endif()
140149

141-
# llama_target_and_test(test-opt.cpp) # SLOW
142-
llama_target_and_test(test-gguf.cpp)
143-
llama_target_and_test(test-backend-ops.cpp)
150+
# llama_build_and_test(test-opt.cpp) # SLOW
151+
llama_build_and_test(test-gguf.cpp)
152+
llama_build_and_test(test-backend-ops.cpp)
144153

145-
llama_target_and_test(test-model-load-cancel.cpp LABEL "model")
146-
llama_target_and_test(test-autorelease.cpp LABEL "model")
154+
llama_build_and_test(test-model-load-cancel.cpp LABEL "model")
155+
llama_build_and_test(test-autorelease.cpp LABEL "model")
147156

148157
if (NOT GGML_BACKEND_DL)
149158
# these tests use the backends directly and cannot be built with dynamic loading
150-
llama_target_and_test(test-barrier.cpp)
151-
llama_target_and_test(test-quantize-fns.cpp)
152-
llama_target_and_test(test-quantize-perf.cpp)
153-
llama_target_and_test(test-rope.cpp)
159+
llama_build_and_test(test-barrier.cpp)
160+
llama_build_and_test(test-quantize-fns.cpp)
161+
llama_build_and_test(test-quantize-perf.cpp)
162+
llama_build_and_test(test-rope.cpp)
154163
endif()
155164

156165

tests/test-chat.cpp

+3-2
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,9 @@
1111
#include <string>
1212

1313
#include "chat.h"
14-
#include "llama-grammar.h"
15-
#include "unicode.h"
14+
15+
#include "../src/unicode.h"
16+
#include "../src/llama-grammar.h"
1617

1718
using json = nlohmann::ordered_json;
1819

examples/gbnf-validator/gbnf-validator.cpp renamed to tests/test-gbnf-validator.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
#include "unicode.h"
2-
#include "llama-grammar.h"
1+
#include "../src/unicode.h"
2+
#include "../src/llama-grammar.h"
33

44
#include <cstdio>
55
#include <cstdlib>

tests/test-grammar-integration.cpp

+3-2
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,11 @@
22
#undef NDEBUG
33
#endif
44

5-
#include "unicode.h"
6-
#include "llama-grammar.h"
75
#include "json-schema-to-grammar.h"
86

7+
#include "../src/unicode.h"
8+
#include "../src/llama-grammar.h"
9+
910
#include <cassert>
1011
#include <string>
1112
#include <vector>

tests/test-grammar-llguidance.cpp

+1-2
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22
# undef NDEBUG
33
#endif
44

5-
#include "unicode.h"
65
#include "sampling.h"
76

87
#include <cassert>
@@ -84,7 +83,7 @@ static void test(const std::string & test_desc, const std::string & grammar_str,
8483

8584
fprintf(stderr,
8685
"\n NOTE: Debug grammar file generated. To analyze this failure in detail, run the following "
87-
"command: ./llama-gbnf-validator test-grammar-integration.grammar.gbnf "
86+
"command: ./test-gbnf-validator test-grammar-integration.grammar.gbnf "
8887
"test-grammar-integration.string.txt\n\n");
8988
} else {
9089
fprintf(stdout, "✅︎\n");

tests/test-grammar-parser.cpp

+3-1
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,9 @@
33
#endif
44

55
#include "llama.h"
6-
#include "llama-grammar.h"
6+
7+
// TODO: should not include libllama sources
8+
#include "../src/llama-grammar.h"
79

810
#include <cassert>
911

tests/test-json-schema-to-grammar.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44

55
#include "json-schema-to-grammar.h"
66

7-
#include "llama-grammar.h"
7+
#include "../src/llama-grammar.h"
88

99
#include <cassert>
1010
#include <fstream>

tests/test-llama-grammar.cpp

+2-1
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,8 @@
33
#endif
44

55
#include "llama.h"
6-
#include "llama-grammar.h"
6+
7+
#include "../src/llama-grammar.h"
78

89
#include <cassert>
910
#include <stdexcept>

examples/quantize-stats/quantize-stats.cpp renamed to tests/test-quantize-stats.cpp

+2-1
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
11
#include "ggml.h"
22
#include "llama.h"
3-
#include "llama-model.h"
43
#include "common.h"
54

5+
#include "../src/llama-model.h"
6+
67
#include <algorithm>
78
#include <cassert>
89
#include <cinttypes>

tests/test-tokenizer-1-bpe.cpp

+2-1
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
11
#include "llama.h"
22
#include "common.h"
3-
#include "unicode.h"
43
#include "console.h"
54

5+
#include "../src/unicode.h"
6+
67
#include <cassert>
78
#include <codecvt>
89
#include <cstdio>

tests/test-tokenizer-1-spm.cpp

+2-1
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
11
#include "llama.h"
22
#include "common.h"
3-
#include "unicode.h"
43
#include "console.h"
54

5+
#include "../src/unicode.h"
6+
67
#include <cassert>
78
#include <codecvt>
89
#include <cstdio>

0 commit comments

Comments
 (0)