fix some warnings from gcc and clang-tidy #3038

Merged: 14 commits, Sep 7, 2023

5 changes: 5 additions & 0 deletions .clang-tidy
@@ -3,6 +3,7 @@ Checks: >
   bugprone-*,
   -bugprone-easily-swappable-parameters,
   -bugprone-implicit-widening-of-multiplication-result,
+  -bugprone-misplaced-widening-cast,
   -bugprone-narrowing-conversions,
   readability-*,
   -readability-avoid-unconditional-preprocessor-if,
@@ -15,4 +16,8 @@ Checks: >
   -clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling,
   performance-*,
   portability-*,
+  misc-*,
+  -misc-const-correctness,
+  -misc-non-private-member-variables-in-classes,
+  -misc-no-recursion,
 FormatStyle: none
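
For context, the newly disabled bugprone-misplaced-widening-cast check flags casts that widen the result of a computation instead of its operands, as in this minimal sketch (illustrative, not code from this repository):

    #include <cstdint>

    int64_t scaled(int32_t a, int32_t b) {
        // the 32-bit multiply can overflow before the widening cast is applied;
        // the check would suggest static_cast<int64_t>(a) * b instead
        return static_cast<int64_t>(a * b);
    }
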
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -423,7 +423,7 @@ if (LLAMA_ALL_WARNINGS)
     )
     if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
         # g++ only
-        set(cxx_flags ${cxx_flags} -Wno-format-truncation)
+        set(cxx_flags ${cxx_flags} -Wno-format-truncation -Wno-array-bounds)
     endif()
 else()
     # todo : msvc
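
Both the CMake and Makefile builds gain -Wno-array-bounds for g++ only, mirroring the existing -Wno-format-truncation suppression; recent GCC releases are known to emit -Warray-bounds false positives on code they cannot prove in-bounds, while clang++ does not need the flag.
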
2 changes: 1 addition & 1 deletion Makefile
@@ -123,7 +123,7 @@ MK_CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-m
 
 ifeq '' '$(findstring clang++,$(CXX))'
     # g++ only
-    MK_CXXFLAGS += -Wno-format-truncation
+    MK_CXXFLAGS += -Wno-format-truncation -Wno-array-bounds
 endif
 
 # OS specific
2 changes: 1 addition & 1 deletion common/common.cpp
@@ -57,7 +57,7 @@ int32_t get_num_physical_cores() {
             siblings.insert(line);
         }
     }
-    if (siblings.size() > 0) {
+    if (!siblings.empty()) {
         return static_cast<int32_t>(siblings.size());
     }
 #elif defined(__APPLE__) && defined(__MACH__)
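
This matches clang-tidy's readability-container-size-empty check, which recurs in main.cpp below: empty() states the intent directly and is guaranteed O(1) for all standard containers. A minimal sketch of the pattern:

    #include <set>
    #include <string>

    bool has_siblings(const std::set<std::string> & siblings) {
        return !siblings.empty(); // preferred over siblings.size() > 0
    }
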
3 changes: 3 additions & 0 deletions common/common.h
@@ -20,6 +20,9 @@
 #define DIRECTORY_SEPARATOR '/'
 #endif // _WIN32
 
+#define die(msg) do { fputs("error: " msg "\n", stderr); exit(1); } while (0)
+#define die_fmt(fmt, ...) do { fprintf(stderr, "error: " fmt "\n", ##__VA_ARGS__); exit(1); } while (0)
+
 //
 // CLI argument parsing
 //
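
The new macros centralize the print-error-and-exit pattern used at the call sites below. A minimal usage sketch (assumes common.h is included; the file handling is illustrative):

    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include "common.h"

    static FILE * open_or_die(const char * path) {
        FILE * fp = std::fopen(path, "rb");
        if (fp == nullptr) {
            die_fmt("%s: %s", strerror(errno), path); // prints "error: ..." to stderr, then exit(1)
        }
        return fp;
    }
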
1 change: 1 addition & 0 deletions common/grammar-parser.cpp
@@ -415,6 +415,7 @@ namespace grammar_parser {
 
     std::vector<const llama_grammar_element *> parse_state::c_rules() {
         std::vector<const llama_grammar_element *> ret;
+        ret.reserve(rules.size());
         for (const auto & rule : rules) {
             ret.push_back(rule.data());
         }
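
Reserving capacity up-front addresses a clang-tidy performance hint (likely performance-inefficient-vector-operation): a single allocation replaces repeated regrowth during the push_back loop. A generic sketch:

    #include <vector>

    std::vector<int> squares(const std::vector<int> & xs) {
        std::vector<int> out;
        out.reserve(xs.size()); // one allocation; no reallocation inside the loop
        for (const int x : xs) {
            out.push_back(x * x);
        }
        return out;
    }
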
8 changes: 4 additions & 4 deletions examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
@@ -1,5 +1,6 @@
 #include "ggml.h"
 #include "llama.h"
+#include "common.h"
 
 #include <unordered_map>
 #include <vector>
@@ -499,10 +500,10 @@ struct llama_file {
         errno = 0;
         std::size_t ret = std::fread(ptr, size, 1, fp);
         if (ferror(fp)) {
-            throw std::runtime_error(format("read error: %s", strerror(errno)));
+            die_fmt("fread failed: %s", strerror(errno));
         }
         if (ret != 1) {
-            throw std::runtime_error(std::string("unexpectedly reached end of file"));
+            die("unexpectedly reached end of file");
         }
     }
 
@@ -597,8 +598,7 @@ void load_vocab(const char *filename, Config *config, struct llama_vocab *vocab)
         printf("Assuming llama2.c vocabulary since %s is not a gguf file\n", filename);
         llama_file file(filename, "rb");
         if (!file.fp) {
-            fprintf(stderr, "error: %s: %s\n", strerror(errno), filename);
-            exit(1);
+            die_fmt("%s: %s", strerror(errno), filename);
         }
         const int n_vocab = config->vocab_size;
         /* uint32_t max_token_length = */ file.read_u32(); // unused
2 changes: 1 addition & 1 deletion examples/embd-input/embd-input-lib.cpp
@@ -23,7 +23,7 @@ extern "C" {
 struct MyModel* create_mymodel(int argc, char ** argv) {
     gpt_params params;
 
-    if (gpt_params_parse(argc, argv, params) == false) {
+    if (!gpt_params_parse(argc, argv, params)) {
         return nullptr;
     }
 
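
This matches clang-tidy's readability-simplify-boolean-expr fix: comparing a bool against false is redundant. The same one-line change is applied to every gpt_params_parse call site in the files that follow:

    // before
    if (gpt_params_parse(argc, argv, params) == false) { return 1; }
    // after (identical behavior)
    if (!gpt_params_parse(argc, argv, params)) { return 1; }
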
2 changes: 1 addition & 1 deletion examples/embedding/embedding.cpp
@@ -11,7 +11,7 @@
 int main(int argc, char ** argv) {
     gpt_params params;
 
-    if (gpt_params_parse(argc, argv, params) == false) {
+    if (!gpt_params_parse(argc, argv, params)) {
         return 1;
     }
 
2 changes: 1 addition & 1 deletion examples/gptneox-wip/falcon-main.cpp
@@ -953,7 +953,7 @@ int main(int argc, char ** argv) {
 
     gpt_params params;
 
-    if (gpt_params_parse(argc, argv, params) == false) {
+    if (!gpt_params_parse(argc, argv, params)) {
         return 1;
     }
 
2 changes: 1 addition & 1 deletion examples/gptneox-wip/gptneox-main.cpp
@@ -925,7 +925,7 @@ int main(int argc, char ** argv) {
 
     gpt_params params;
 
-    if (gpt_params_parse(argc, argv, params) == false) {
+    if (!gpt_params_parse(argc, argv, params)) {
         return 1;
     }
 
19 changes: 10 additions & 9 deletions examples/main/main.cpp
@@ -48,8 +48,9 @@ static bool is_interacting = false;
 
 void write_logfile(
     const llama_context * ctx, const gpt_params & params, const llama_model * model,
-    const std::vector<llama_token> input_tokens, const std::string output, const std::vector<llama_token> output_tokens) {
-
+    const std::vector<llama_token> & input_tokens, const std::string & output,
+    const std::vector<llama_token> & output_tokens
+) {
     if (params.logdir.empty()) {
         return;
     }
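
Taking the containers by const reference matches clang-tidy's performance-unnecessary-value-param fix: a by-value std::vector or std::string parameter copies its entire contents on every call. A generic sketch of the rule (not code from this PR):

    #include <cstddef>
    #include <string>
    #include <vector>

    // by value, this would copy every string on each call;
    // the const reference gives read-only, copy-free access
    size_t total_len(const std::vector<std::string> & items) {
        size_t n = 0;
        for (const std::string & s : items) {
            n += s.size();
        }
        return n;
    }
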
@@ -109,7 +110,7 @@ int main(int argc, char ** argv) {
     gpt_params params;
     g_params = &params;
 
-    if (gpt_params_parse(argc, argv, params) == false) {
+    if (!gpt_params_parse(argc, argv, params)) {
         return 1;
     }
 
@@ -303,7 +304,7 @@ int main(int argc, char ** argv) {
 
     // debug message about similarity of saved session, if applicable
     size_t n_matching_session_tokens = 0;
-    if (session_tokens.size() > 0) {
+    if (!session_tokens.empty()) {
         for (llama_token id : session_tokens) {
             if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
                 break;
@@ -401,7 +402,7 @@ int main(int argc, char ** argv) {
 
     LOG_TEE("%s: interactive mode on.\n", __func__);
 
-    if (params.antiprompt.size()) {
+    if (!params.antiprompt.empty()) {
         for (const auto & antiprompt : params.antiprompt) {
             LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
         }
@@ -499,7 +500,7 @@ int main(int argc, char ** argv) {
 
     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
         // predict
-        if (embd.size() > 0) {
+        if (!embd.empty()) {
             // Note: n_ctx - 4 here is to match the logic for commandline prompt handling via
             // --prompt or --file which uses the same value.
             int max_embd_size = n_ctx - 4;
@@ -624,7 +625,7 @@ int main(int argc, char ** argv) {
             LOG("n_past = %d\n", n_past);
         }
 
-        if (embd.size() > 0 && !path_session.empty()) {
+        if (!embd.empty() && !path_session.empty()) {
            session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
            n_session_consumed = session_tokens.size();
        }
@@ -695,7 +696,7 @@ int main(int argc, char ** argv) {
         // if not currently processing queued inputs;
         if ((int) embd_inp.size() <= n_consumed) {
             // check for reverse prompt
-            if (params.antiprompt.size()) {
+            if (!params.antiprompt.empty()) {
                 std::string last_output;
                 for (auto id : last_tokens) {
                     last_output += llama_token_to_piece(ctx, id);
@@ -732,7 +733,7 @@ int main(int argc, char ** argv) {
             LOG("found EOS token\n");
 
             if (params.interactive) {
-                if (params.antiprompt.size() != 0) {
+                if (!params.antiprompt.empty()) {
                     // tokenize and inject first reverse prompt
                     const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false);
                     embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
2 changes: 1 addition & 1 deletion examples/perplexity/perplexity.cpp
@@ -655,7 +655,7 @@ int main(int argc, char ** argv) {
     gpt_params params;
 
     params.n_batch = 512;
-    if (gpt_params_parse(argc, argv, params) == false) {
+    if (!gpt_params_parse(argc, argv, params)) {
         return 1;
     }
 
2 changes: 1 addition & 1 deletion examples/quantize-stats/quantize-stats.cpp
@@ -71,7 +71,7 @@ void quantize_stats_print_usage(int /*argc*/, char ** argv) {
 }
 
 // Check if a layer is included/excluded by command line
-bool layer_included(const quantize_stats_params params, const std::string & layer) {
+bool layer_included(const quantize_stats_params & params, const std::string & layer) {
     for (const auto& excluded : params.exclude_layers) {
         if (std::regex_search(layer, std::regex(excluded))) {
             return false;
7 changes: 3 additions & 4 deletions examples/quantize/quantize.cpp
@@ -143,10 +143,9 @@ int main(int argc, char ** argv) {
         if (!try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) {
             fprintf(stderr, "%s: invalid ftype '%s'\n", __func__, argv[3]);
             return 1;
-        } else {
-            if (ftype_str == "COPY") {
-                params.only_copy = true;
-            }
+        }
+        if (ftype_str == "COPY") {
+            params.only_copy = true;
         }
         arg_idx++;
     }
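
Dropping the else matches clang-tidy's readability-else-after-return fix: once the failure branch returns, the else only adds nesting. A generic sketch:

    int run(bool ok) {
        if (!ok) {
            return 1; // early exit makes the else redundant
        }
        // success path continues at the original indent level
        return 0;
    }
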
4 changes: 2 additions & 2 deletions examples/save-load-state/save-load-state.cpp
@@ -13,7 +13,7 @@ int main(int argc, char ** argv) {
     params.repeat_last_n = 64;
     params.prompt = "The quick brown fox";
 
-    if (gpt_params_parse(argc, argv, params) == false) {
+    if (!gpt_params_parse(argc, argv, params)) {
         return 1;
     }
 
@@ -44,7 +44,7 @@ int main(int argc, char ** argv) {
         llama_free_model(model);
         return 1;
     }
-    auto tokens = llama_tokenize(ctx, params.prompt.c_str(), true);
+    auto tokens = llama_tokenize(ctx, params.prompt, true);
     auto n_prompt_tokens = tokens.size();
     if (n_prompt_tokens < 1) {
         fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
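
The common llama_tokenize helper accepts the std::string directly, so the c_str() round-trip was unnecessary (the pattern clang-tidy reports as readability-redundant-string-cstr).
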
8 changes: 4 additions & 4 deletions examples/server/server.cpp
@@ -139,7 +139,7 @@ static std::string tokens_to_output_formatted_string(const llama_context *ctx, c
 }
 
 // convert a vector of completion_token_output to json
-static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> probs)
+static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> & probs)
 {
     json out = json::array();
     for (const auto &prob : probs)
@@ -271,7 +271,7 @@ struct llama_server_context
         return true;
     }
 
-    std::vector<llama_token> tokenize(json json_prompt, bool add_bos)
+    std::vector<llama_token> tokenize(const json & json_prompt, bool add_bos) const
     {
         // If `add_bos` is true, we only add BOS, when json_prompt is a string,
         // or the first element of the json_prompt array is a string.
@@ -611,7 +611,7 @@ struct llama_server_context
 
     completion_token_output doCompletion()
     {
-        const completion_token_output token_with_probs = nextToken();
+        auto token_with_probs = nextToken();
 
         const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_piece(ctx, token_with_probs.tok);
         generated_text += token_text;
@@ -1255,7 +1255,7 @@ void beam_search_callback(void * callback_data, llama_beams_state beams_state) {
 struct token_translator {
     llama_context * ctx;
     std::string operator()(llama_token tok) const { return llama_token_to_piece(ctx, tok); }
-    std::string operator()(completion_token_output cto) const { return (*this)(cto.tok); }
+    std::string operator()(const completion_token_output & cto) const { return (*this)(cto.tok); }
 };
 
 void append_to_generated_text_from_generated_token_probs(llama_server_context & llama) {