Commit b2a92d0

configchecker / flake8 errors
1 parent eec384f commit b2a92d0

File tree

7 files changed: +9 −9 lines changed
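
Note on reading the hunks below: the removed lines appear to differ from their replacements only in trailing whitespace, except for the stray semicolon dropped in tools/tts/convert_pt_to_hf.py. For the Python files these are standard flake8 findings; a minimal illustration follows (the exact error codes are assumptions, not taken from the CI log, and the C++/CUDA hunks would come from the config checker rather than flake8):

    # Hypothetical Python snippet reproducing the two kinds of flake8 complaint
    # fixed in this commit. Assumed codes: W293 "whitespace on blank line" for
    # the invisible-whitespace lines, E703 "statement ends with a semicolon".
    model_path = './model.pt';   # flake8 would flag E703 on this line
    model_path = './model.pt'    # the committed fix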

common/chat-parser.cpp

Lines changed: 2 additions & 2 deletions

@@ -84,15 +84,15 @@ bool common_chat_msg_parser::add_tool_call_short_form(const json & tool_call) {
     // Get the tool name (the single key in the object)
     auto it = tool_call.begin();
     std::string name = it.key();
-
+
     if (name.empty()) {
         return false;
     }

     // Get the arguments (the nested object)
     const json & args_json = it.value();
     std::string arguments = "";
-
+
     if (args_json.is_object()) {
         arguments = args_json.dump();
     } else if (args_json.is_string()) {
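
For context on the function being touched here: add_tool_call_short_form expects a JSON object whose single key is the tool name and whose value holds the arguments. A rough Python sketch of the visible logic (the is_string branch is cut off by the diff context, so its body below is an assumption):

    import json

    def add_tool_call_short_form(tool_call: dict):
        # the tool name is the single key in the object
        name = next(iter(tool_call), "")
        if not name:
            return None

        # the arguments are the nested value
        args = tool_call[name]
        if isinstance(args, dict):
            arguments = json.dumps(args)   # mirrors args_json.dump()
        elif isinstance(args, str):
            arguments = args               # assumption: the diff cuts off inside this branch
        else:
            return None                    # assumption: unhandled argument types are rejected
        return {"name": name, "arguments": arguments}

    # e.g. add_tool_call_short_form({"get_weather": {"city": "Paris"}})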

common/chat.cpp

Lines changed: 1 addition & 1 deletion

@@ -2375,7 +2375,7 @@ static void common_chat_parse_apertus(common_chat_msg_parser & builder) {
     static const common_regex tool_call_regex(regex_escape("<|tools_prefix|>"));
     if (auto res = builder.try_find_regex(tool_call_regex)) {
         builder.move_to(res->groups[0].end);
-
+
         auto tool_calls_data = builder.consume_json();
         if (tool_calls_data.json.is_array()) {
             builder.consume_spaces();

convert_hf_to_gguf.py

Lines changed: 1 addition & 1 deletion

@@ -8613,7 +8613,7 @@ def modify_tensors(self, data_torch, name, bid):
         if (len(self._eps) == n_layers):
             self.gguf_writer.add_xielu_eps([self._eps[k] for k in sorted(self._eps)])
             return []
-
+
         return super().modify_tensors(data_torch, name, bid)


ggml/src/ggml-cuda/unary.cu

Lines changed: 2 additions & 2 deletions

@@ -325,10 +325,10 @@ void ggml_cuda_op_geglu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst
 // Functor for XIELU operation with parameters
 struct op_xielu_functor {
     float alpha_n, alpha_p, beta, eps;
-
+
     __host__ __device__ __forceinline__ op_xielu_functor(float a_n, float a_p, float b, float e)
         : alpha_n(a_n), alpha_p(a_p), beta(b), eps(e) {}
-
+
     __device__ __forceinline__ float operator()(float x) const {
         float gate_pos = (x > 0.0f); // positive branch gate

gguf-py/gguf/gguf_writer.py

Lines changed: 1 addition & 1 deletion

@@ -1059,7 +1059,7 @@ def add_xielu_alpha_n(self, value: Sequence[float]):

     def add_xielu_beta(self, value: Sequence[float]):
         self.add_array(Keys.xIELU.XIELU_BETA, value)
-
+
     def add_xielu_eps(self, value: Sequence[float]):
         self.add_array(Keys.xIELU.XIELU_EPS, value)
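
Each of these helpers stores a per-layer float array under an xIELU metadata key; the convert_hf_to_gguf.py hunk above is the real call site. A minimal usage sketch, with the writer setup and values assumed rather than taken from the repo:

    from gguf import GGUFWriter   # assumed import path for the gguf-py package

    writer = GGUFWriter('model.gguf', 'apertus')   # hypothetical output file and arch name
    eps = {0: 1e-6, 1: 1e-6}                       # hypothetical per-layer values, keyed by layer index
    writer.add_xielu_eps([eps[k] for k in sorted(eps)])   # one entry per layer, in layer order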

src/llama-model.cpp

Lines changed: 1 addition & 1 deletion

@@ -18686,7 +18686,7 @@ struct llm_build_apertus : public llm_graph_context {
                 Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
                 cb(Kcur, "Kcur_normed", il);

-                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
+                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

                 Qcur = ggml_rope_ext(
                     ctx0, Qcur, inp_pos, rope_factors,

tools/tts/convert_pt_to_hf.py

Lines changed: 1 addition & 1 deletion

@@ -12,7 +12,7 @@
 from safetensors.torch import save_file

 # default
-model_path = './model.pt';
+model_path = './model.pt'

 # read from CLI
 if len(sys.argv) > 1:
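
The hunk ends mid-statement, but the comments make the pattern clear: a hard-coded default that an optional CLI argument overrides. A self-contained sketch (the body of the if is an assumption, since the diff cuts off after it):

    import sys

    # default
    model_path = './model.pt'

    # read from CLI (assumed continuation of the truncated hunk)
    if len(sys.argv) > 1:
        model_path = sys.argv[1]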
