From a057d827ca32acb242ee64ae5d961c26b03188f9 Mon Sep 17 00:00:00 2001 From: Ed Addario Date: Sat, 21 Feb 2026 10:10:32 +0000 Subject: [PATCH] Minor refactoring --- src/llama-quant.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 3de48971b1..9fbc908c16 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -1696,7 +1696,6 @@ static std::unordered_map<std::string, ggml_type> target_bpw_type( float cutoff = std::numeric_limits<float>::quiet_NaN(); if (statistics_data && !statistics_data->empty()) { cutoff = threshold_score(* statistics_data, params->importance_pct); } - LLAMA_LOG_INFO("%s: - importance score cutoff: %1.4f\n", func, cutoff); // Certain tensors have a higher impact on model quality, so we apply a lower penalty to them auto is_important = [&](const std::string & tensor_name) -> bool { @@ -2451,9 +2450,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: LLAMA_LOG_INFO("%s: quant size = %8.2f MiB (%7.4f BPW)\n", __func__, total_size_new/1024.0/1024.0, total_size_new*8.0/ml.n_elements); if (!params->imatrix && params->dry_run && will_require_imatrix) { - LLAMA_LOG_WARN("%s: WARNING: dry run completed successfully, but actually completing this quantization will require an imatrix!\n", - __func__ - ); + LLAMA_LOG_WARN("%s: WARNING: dry run completed successfully, but actually completing this quantization will require an imatrix!\n", __func__); } if (qs.n_fallback > 0) {