
Commit 034b055

cont : clean-up debug logs
1 parent cc7952b commit 034b055

2 files changed: +2 -8 lines

src/llama-batch.cpp

Lines changed: 2 additions & 2 deletions
@@ -638,9 +638,9 @@ llama_ubatch llama_batch_allocr::ubatch_add(const std::vector<int32_t> & idxs, u
         /*.output =*/ ubatch.output.data(),
     };
 
-    LLAMA_LOG_DEBUG("%s: added ubatch %d in split\n", __func__, (int) ubatches.size() - 1);
-
     if (debug > 0) {
+        LLAMA_LOG_DEBUG("%s: added ubatch %d to split:\n", __func__, (int) ubatches.size() - 1);
+
         ubatch_print(res, debug);
     }
 
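The hunk above moves the "added ubatch" log line inside the existing if (debug > 0) guard, so the message is only emitted when debug output is requested. For context, here is a minimal, self-contained sketch of that guarded-logging pattern; the LOG_DEBUG macro, the batch_allocr struct, and the int stand-in for a ubatch are illustrative assumptions for the example, not the actual llama.cpp types or macros.

// guarded_log_sketch.cpp -- minimal sketch of guarded debug logging (assumed names)
#include <cstdio>
#include <vector>

// stand-in for LLAMA_LOG_DEBUG; the real macro routes through llama.cpp's logging
#define LOG_DEBUG(...) std::fprintf(stderr, __VA_ARGS__)

struct batch_allocr {
    int debug = 0;                // verbosity level, 0 = silent
    std::vector<int> ubatches;    // stand-in for the real ubatch list

    void ubatch_add(int ub) {
        ubatches.push_back(ub);

        // before the change: the "added ubatch" line was logged unconditionally;
        // after the change: it is emitted only when debug output is enabled
        if (debug > 0) {
            LOG_DEBUG("%s: added ubatch %d to split:\n", __func__, (int) ubatches.size() - 1);
            // ubatch_print(...) would follow here in the real code
        }
    }
};

int main() {
    batch_allocr allocr;
    allocr.debug = 1;        // enable debug output for the example
    allocr.ubatch_add(42);   // prints the debug line; with debug = 0 it stays silent
    return 0;
}

Compiled standalone, the sketch prints the debug line only when debug is non-zero, mirroring the effect of the change above.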
src/llama-kv-cache-unified.cpp

Lines changed: 0 additions & 6 deletions
@@ -650,12 +650,6 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const {
 }
 
 void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch & ubatch) {
-    if (debug > 0) {
-        LLAMA_LOG_DEBUG("%s: ubatch info:\n", __func__);
-        LLAMA_LOG_DEBUG("%s: n_tokens = %d, equal_seqs = %d\n", __func__, ubatch.n_tokens, ubatch.equal_seqs);
-        LLAMA_LOG_DEBUG("%s: n_seq_tokens = %d, n_seqs = %d\n", __func__, ubatch.n_seq_tokens, ubatch.n_seqs);
-    }
-
     // keep track of the max sequence position that we would overwrite with this ubatch
     // for non-SWA cache, this would be always empty
     llama_seq_id seq_pos_max_rm[LLAMA_MAX_SEQ];
