From 844ad3e3268259b85456ebfd4d3417f9b3825c29 Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 12:47:13 -0600 Subject: [PATCH 01/20] clean slate for branch --- include/llama.h | 1 + src/llama-quant.cpp | 3 ++- tools/quantize/quantize.cpp | 8 ++++++-- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/include/llama.h b/include/llama.h index 46c3672e98..8bcefda896 100644 --- a/include/llama.h +++ b/include/llama.h @@ -393,6 +393,7 @@ extern "C" { void * kv_overrides; // pointer to vector containing overrides void * tensor_types; // pointer to vector containing tensor types void * prune_layers; // pointer to vector containing layer indices to prune + bool dry_run; // calculate and show the final quantization size without performing quantization } llama_model_quantize_params; typedef struct llama_logit_bias { diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index a7891647c3..730f13e29e 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -1048,7 +1048,8 @@ llama_model_quantize_params llama_model_quantize_default_params() { /*.imatrix =*/ nullptr, /*.kv_overrides =*/ nullptr, /*.tensor_type =*/ nullptr, - /*.prune_layers =*/ nullptr + /*.prune_layers =*/ nullptr, + /*.dry_run =*/ false }; return result; diff --git a/tools/quantize/quantize.cpp b/tools/quantize/quantize.cpp index c0f49279ee..3f99d9e6a7 100644 --- a/tools/quantize/quantize.cpp +++ b/tools/quantize/quantize.cpp @@ -120,7 +120,7 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp static void usage(const char * executable) { printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights]\n", executable); printf(" [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--tensor-type-file]\n"); - printf(" [--prune-layers] [--keep-split] [--override-kv]\n"); + printf(" [--prune-layers] [--keep-split] [--override-kv] [--dry-run]\n"); printf(" model-f32.gguf [model-quant.gguf] type [nthreads]\n\n"); printf(" --allow-requantize\n"); printf(" allow requantizing tensors that have already been quantized\n"); @@ -156,7 +156,9 @@ static void usage(const char * executable) { printf(" generate quantized model in the same shards as input\n"); printf(" --override-kv KEY=TYPE:VALUE\n"); printf(" override model metadata by key in the quantized model. 
may be specified multiple times.\n"); - printf(" WARNING: this is an advanced option, use with care.\n\n"); + printf(" WARNING: this is an advanced option, use with care.\n"); + printf(" --dry-run\n"); + printf(" calculate and show the final quantization size without performing quantization\n\n"); printf("note: --include-weights and --exclude-weights cannot be used together\n\n"); printf("-----------------------------------------------------------------------------\n"); printf(" allowed quantization types\n"); @@ -532,6 +534,8 @@ int main(int argc, char ** argv) { if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) { usage(argv[0]); } + } else if (strcmp(argv[arg_idx], "--dry-run") == 0) { + params.dry_run = true; } else if (strcmp(argv[arg_idx], "--allow-requantize") == 0) { params.allow_requantize = true; } else if (strcmp(argv[arg_idx], "--pure") == 0) { From 0d22288f001163d5312a33a99ebf9db26c37e344 Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 14:08:01 -0600 Subject: [PATCH 02/20] use 6 characters for tensor dims --- src/llama-impl.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llama-impl.cpp b/src/llama-impl.cpp index 8e3e7b223a..60c7fcd050 100644 --- a/src/llama-impl.cpp +++ b/src/llama-impl.cpp @@ -111,7 +111,7 @@ std::string llama_format_tensor_shape(const struct ggml_tensor * t) { char buf[256]; snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]); for (int i = 1; i < GGML_MAX_DIMS; i++) { - snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]); + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %6" PRId64, t->ne[i]); } return buf; } From 56c27b13ad0ea970111b68c90056ed8c830d2dc2 Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 14:08:17 -0600 Subject: [PATCH 03/20] add --dry-run to llama-quantize --- src/llama-quant.cpp | 259 ++++++++++++++++++++---------------- tools/quantize/quantize.cpp | 39 ++++-- 2 files changed, 169 insertions(+), 129 deletions(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 730f13e29e..2836caaf3a 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -735,24 +735,31 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: }; const auto tn = LLM_TN(model.arch); - new_ofstream(0); + + // no output file for --dry-run + if (!params->dry_run) { + new_ofstream(0); + } + for (const auto * it : tensors) { const auto & weight = *it; ggml_tensor * tensor = weight.tensor; - if (weight.idx != cur_split && params->keep_split) { + if (!params->dry_run && (weight.idx != cur_split && params->keep_split)) { close_ofstream(); new_ofstream(weight.idx); } const std::string name = ggml_get_name(tensor); - if (!ml.use_mmap) { - if (read_data.size() < ggml_nbytes(tensor)) { - read_data.resize(ggml_nbytes(tensor)); + if (!params->dry_run) { + if (!ml.use_mmap) { + if (read_data.size() < ggml_nbytes(tensor)) { + read_data.resize(ggml_nbytes(tensor)); + } + tensor->data = read_data.data(); } - tensor->data = read_data.data(); + ml.load_data_for(tensor); } - ml.load_data_for(tensor); LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ", ++idx, ml.n_tensors, @@ -900,126 +907,148 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: quantize = tensor->type != new_type; } - if (!quantize) { - new_type = tensor->type; - new_data = tensor->data; - new_size = ggml_nbytes(tensor); - LLAMA_LOG_INFO("size = %8.3f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0); - } else { - const int64_t nelements = 
ggml_nelements(tensor);
-
-            const float * imatrix = nullptr;
-            if (imatrix_data) {
-                auto it = imatrix_data->find(remap_imatrix(tensor->name, mapped));
-                if (it == imatrix_data->end()) {
-                    LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
-                } else {
-                    if (it->second.size() == (size_t)tensor->ne[0]*tensor->ne[2]) {
-                        imatrix = it->second.data();
-                    } else {
-                        LLAMA_LOG_INFO("\n====== %s: imatrix size %d is different from tensor size %d for %s\n", __func__,
-                                int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name);
-
-                        // this can happen when quantizing an old mixtral model with split tensors with a new incompatible imatrix
-                        // this is a significant error and it may be good idea to abort the process if this happens,
-                        // since many people will miss the error and not realize that most of the model is being quantized without an imatrix
-                        // tok_embd should be ignored in this case, since it always causes this warning
-                        if (name != tn(LLM_TENSOR_TOKEN_EMBD, "weight")) {
-                            throw std::runtime_error(format("imatrix size %d is different from tensor size %d for %s",
-                                    int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name));
-                        }
-                    }
-                }
-            }
-            if ((new_type == GGML_TYPE_IQ2_XXS ||
-                new_type == GGML_TYPE_IQ2_XS ||
-                new_type == GGML_TYPE_IQ2_S ||
-                new_type == GGML_TYPE_IQ1_S ||
-                (new_type == GGML_TYPE_IQ1_M && strcmp(tensor->name, "token_embd.weight") && strcmp(tensor->name, "output.weight")) ||
-                (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) {
-                LLAMA_LOG_ERROR("\n\n============================================================\n");
-                LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
-                LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");
-                LLAMA_LOG_ERROR("============================================================\n\n");
-                throw std::runtime_error(format("Missing importance matrix for tensor %s in a very low-bit quantization", tensor->name));
-            }
-
-            float * f32_data;
-
-            if (tensor->type == GGML_TYPE_F32) {
-                f32_data = (float *) tensor->data;
-            } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
-                throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
-            } else {
-                llama_tensor_dequantize_impl(tensor, f32_conv_buf, workers, nelements, nthread);
-                f32_data = (float *) f32_conv_buf.data();
-            }
-
-            LLAMA_LOG_INFO("converting to %s .. ", ggml_type_name(new_type));
-            fflush(stdout);
-
-            if (work.size() < (size_t)nelements * 4) {
-                work.resize(nelements * 4); // upper bound on size
-            }
-            new_data = work.data();
-
-            const int64_t n_per_row = tensor->ne[0];
-            const int64_t nrows = tensor->ne[1];
-
-            static const int64_t min_chunk_size = 32 * 512;
-            const int64_t chunk_size = (n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row));
-
-            const int64_t nelements_matrix = tensor->ne[0] * tensor->ne[1];
-            const int64_t nchunk = (nelements_matrix + chunk_size - 1)/chunk_size;
-            const int64_t nthread_use = nthread > 1 ? std::max((int64_t)1, std::min((int64_t)nthread, nchunk)) : 1;
-
-            // quantize each expert separately since they have different importance matrices
-            new_size = 0;
-            for (int64_t i03 = 0; i03 < tensor->ne[2]; ++i03) {
-                const float * f32_data_03 = f32_data + i03 * nelements_matrix;
-                void * new_data_03 = (char *)new_data + ggml_row_size(new_type, n_per_row) * i03 * nrows;
-                const float * imatrix_03 = imatrix ? imatrix + i03 * n_per_row : nullptr;
-
-                new_size += llama_tensor_quantize_impl(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
-
-                // TODO: temporary sanity check that the F16 -> MXFP4 is lossless
-#if 0
-                if (new_type == GGML_TYPE_MXFP4) {
-                    auto * x = f32_data_03;
-
-                    //LLAMA_LOG_INFO("nrows = %d, n_per_row = %d\n", nrows, n_per_row);
-                    std::vector<float> deq(nrows*n_per_row);
-                    const ggml_type_traits * qtype = ggml_get_type_traits(new_type);
-                    qtype->to_float(new_data_03, deq.data(), deq.size());
-
-                    double err = 0.0f;
-                    for (int i = 0; i < (int) deq.size(); ++i) {
-                        err += fabsf(deq[i] - x[i]);
-                        //if (fabsf(deq[i] - x[i]) > 0.00001 && i < 256) {
-                        if (deq[i] != x[i]) {
-                            LLAMA_LOG_INFO("deq[%d] = %f, x[%d] = %f\n", i, deq[i], i, x[i]);
-                        }
-                    }
-                    //LLAMA_LOG_INFO("err = %f\n", err);
-                    GGML_ASSERT(err == 0.00000);
-                }
-#endif
-            }
-            LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
-        }
-        total_size_org += ggml_nbytes(tensor);
-        total_size_new += new_size;
-
-        // update the gguf meta data as we go
-        gguf_set_tensor_type(ctx_outs[cur_split].get(), name.c_str(), new_type);
-        GGML_ASSERT(gguf_get_tensor_size(ctx_outs[cur_split].get(), gguf_find_tensor(ctx_outs[cur_split].get(), name.c_str())) == new_size);
-        gguf_set_tensor_data(ctx_outs[cur_split].get(), name.c_str(), new_data);
-
-        // write tensor data + padding
-        fout.write((const char *) new_data, new_size);
-        zeros(fout, GGML_PAD(new_size, align) - new_size);
+        // we have now decided on the target type for this tensor
+        // the --dry-run option calculates the final quantization size without quantizing
+        if (params->dry_run) {
+            if (quantize) {
+                new_size = ggml_nrows(tensor) * ggml_row_size(new_type, tensor->ne[0]);
+                LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB (%s)\n",
+                               ggml_nbytes(tensor)/1024.0/1024.0,
+                               new_size/1024.0/1024.0,
+                               ggml_type_name(new_type));
+            } else {
+                new_size = ggml_nbytes(tensor);
+                LLAMA_LOG_INFO("size = %8.3f MiB\n", new_size/1024.0/1024.0);
+            }
+            total_size_org += ggml_nbytes(tensor);
+            total_size_new += new_size;
+            continue;
+        } else {
+            // no --dry-run, perform quantization
+            if (!quantize) {
+                new_type = tensor->type;
+                new_data = tensor->data;
+                new_size = ggml_nbytes(tensor);
+                LLAMA_LOG_INFO("size = %8.3f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0);
+            } else {
+                const int64_t nelements = ggml_nelements(tensor);
+
+                const float * imatrix = nullptr;
+                if (imatrix_data) {
+                    auto it = imatrix_data->find(remap_imatrix(tensor->name, mapped));
+                    if (it == imatrix_data->end()) {
+                        LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
+                    } else {
+                        if (it->second.size() == (size_t)tensor->ne[0]*tensor->ne[2]) {
+                            imatrix = it->second.data();
+                        } else {
+                            LLAMA_LOG_INFO("\n====== %s: imatrix size %d is different from tensor size %d for %s\n", __func__,
+                                    int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name);
+
+                            // this can happen when quantizing an old mixtral model with split tensors with a new incompatible imatrix
+                            // this is a significant error and it may be a good idea to abort the process if this happens,
+                            // since many people will miss the error and not realize that most of the model is being quantized without an imatrix
+                            // tok_embd should be ignored in this case, since it always causes this warning
+                            if (name != tn(LLM_TENSOR_TOKEN_EMBD, "weight")) {
+                                throw std::runtime_error(format("imatrix size %d is different from tensor size %d for %s",
+                                        int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name));
+                            }
+                        }
+                    }
+                }
+                if ((new_type == GGML_TYPE_IQ2_XXS ||
+                    new_type == GGML_TYPE_IQ2_XS ||
+                    new_type == GGML_TYPE_IQ2_S ||
+                    new_type == GGML_TYPE_IQ1_S ||
+                    (new_type == GGML_TYPE_IQ1_M && strcmp(tensor->name, "token_embd.weight") && strcmp(tensor->name, "output.weight")) ||
+                    (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) {
+                    
LLAMA_LOG_ERROR("\n\n============================================================\n"); + LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name); + LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n"); + LLAMA_LOG_ERROR("============================================================\n\n"); + throw std::runtime_error(format("Missing importance matrix for tensor %s in a very low-bit quantization", tensor->name)); + } + + float * f32_data; + + if (tensor->type == GGML_TYPE_F32) { + f32_data = (float *) tensor->data; + } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) { + throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type))); + } else { + llama_tensor_dequantize_impl(tensor, f32_conv_buf, workers, nelements, nthread); + f32_data = (float *) f32_conv_buf.data(); + } + + LLAMA_LOG_INFO("converting to %s .. ", ggml_type_name(new_type)); + fflush(stdout); + + if (work.size() < (size_t)nelements * 4) { + work.resize(nelements * 4); // upper bound on size + } + new_data = work.data(); + + const int64_t n_per_row = tensor->ne[0]; + const int64_t nrows = tensor->ne[1]; + + static const int64_t min_chunk_size = 32 * 512; + const int64_t chunk_size = (n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row)); + + const int64_t nelements_matrix = tensor->ne[0] * tensor->ne[1]; + const int64_t nchunk = (nelements_matrix + chunk_size - 1)/chunk_size; + const int64_t nthread_use = nthread > 1 ? std::max((int64_t)1, std::min((int64_t)nthread, nchunk)) : 1; + + // quantize each expert separately since they have different importance matrices + new_size = 0; + for (int64_t i03 = 0; i03 < tensor->ne[2]; ++i03) { + const float * f32_data_03 = f32_data + i03 * nelements_matrix; + void * new_data_03 = (char *)new_data + ggml_row_size(new_type, n_per_row) * i03 * nrows; + const float * imatrix_03 = imatrix ? 
imatrix + i03 * n_per_row : nullptr;
+
+                    new_size += llama_tensor_quantize_impl(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
+
+                    // TODO: temporary sanity check that the F16 -> MXFP4 is lossless
+#if 0
+                    if (new_type == GGML_TYPE_MXFP4) {
+                        auto * x = f32_data_03;
+
+                        //LLAMA_LOG_INFO("nrows = %d, n_per_row = %d\n", nrows, n_per_row);
+                        std::vector<float> deq(nrows*n_per_row);
+                        const ggml_type_traits * qtype = ggml_get_type_traits(new_type);
+                        qtype->to_float(new_data_03, deq.data(), deq.size());
+
+                        double err = 0.0f;
+                        for (int i = 0; i < (int) deq.size(); ++i) {
+                            err += fabsf(deq[i] - x[i]);
+                            //if (fabsf(deq[i] - x[i]) > 0.00001 && i < 256) {
+                            if (deq[i] != x[i]) {
+                                LLAMA_LOG_INFO("deq[%d] = %f, x[%d] = %f\n", i, deq[i], i, x[i]);
+                            }
+                        }
+                        //LLAMA_LOG_INFO("err = %f\n", err);
+                        GGML_ASSERT(err == 0.00000);
+                    }
+#endif
+                }
+                LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
+            }
+            total_size_org += ggml_nbytes(tensor);
+            total_size_new += new_size;
+
+            // update the gguf meta data as we go
+            gguf_set_tensor_type(ctx_outs[cur_split].get(), name.c_str(), new_type);
+            GGML_ASSERT(gguf_get_tensor_size(ctx_outs[cur_split].get(), gguf_find_tensor(ctx_outs[cur_split].get(), name.c_str())) == new_size);
+            gguf_set_tensor_data(ctx_outs[cur_split].get(), name.c_str(), new_data);
+
+            // write tensor data + padding
+            fout.write((const char *) new_data, new_size);
+            zeros(fout, GGML_PAD(new_size, align) - new_size);
+        } // no --dry-run
+    } // iterate over tensors
+
+    if (!params->dry_run) {
+        close_ofstream();
+    }
-    close_ofstream();
 
     LLAMA_LOG_INFO("%s: model size = %8.2f MiB\n", __func__, total_size_org/1024.0/1024.0);
     LLAMA_LOG_INFO("%s: quant size = %8.2f MiB\n", __func__, total_size_new/1024.0/1024.0);
 
diff --git a/tools/quantize/quantize.cpp b/tools/quantize/quantize.cpp
index c0f49279ee..3f99d9e6a7 100644
--- a/tools/quantize/quantize.cpp
+++ b/tools/quantize/quantize.cpp
@@ -626,7 +626,7 @@ int main(int argc, char ** argv) {
 
     llama_backend_init();
 
-    // parse command line arguments
+// parse command line arguments
     const std::string fname_inp = argv[arg_idx];
     arg_idx++;
     std::string fname_out;
@@ -634,22 +634,26 @@ int main(int argc, char ** argv) {
     std::string ftype_str;
     std::string suffix = ".gguf";
     if (try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) {
-        std::string fpath;
-        const size_t pos = fname_inp.find_last_of("/\\");
-        if (pos != std::string::npos) {
-            fpath = fname_inp.substr(0, pos + 1);
-        }
+        // argv[arg_idx] is the ftype directly:
+        if (!params.dry_run) {
+            std::string fpath;
+            const size_t pos = fname_inp.find_last_of("/\\");
+            if (pos != std::string::npos) {
+                fpath = fname_inp.substr(0, pos + 1);
+            }
 
-        // export as [inp path]/ggml-model-[ftype]. 
Only add extension if there is no splitting - fname_out = fpath + "ggml-model-" + ftype_str; - if (!params.keep_split) { - fname_out += suffix; + // export as [inp path]/ggml-model-[ftype]. Only add extension if there is no splitting + fname_out = fpath + "ggml-model-" + ftype_str; + if (!params.keep_split) { + fname_out += suffix; + } } arg_idx++; if (ftype_str == "COPY") { params.only_copy = true; } } else { + // argv[arg_idx] is not a valid ftype, so treat it as output path: fname_out = argv[arg_idx]; if (params.keep_split && fname_out.find(suffix) != std::string::npos) { fname_out = fname_out.substr(0, fname_out.length() - suffix.length()); @@ -692,14 +696,21 @@ int main(int argc, char ** argv) { return 1; } - if (std::error_code ec; std::filesystem::equivalent(fname_inp, fname_out, ec)) { - fprintf(stderr, "%s: error: input and output files are the same: '%s'\n", __func__, fname_inp.c_str()); - return 1; + if (!params.dry_run) { + if (std::error_code ec; std::filesystem::equivalent(fname_inp, fname_out, ec)) { + fprintf(stderr, "%s: error: input and output files are the same: '%s'\n", __func__, fname_inp.c_str()); + return 1; + } } print_build_info(); - fprintf(stderr, "%s: quantizing '%s' to '%s' as %s", __func__, fname_inp.c_str(), fname_out.c_str(), ftype_str.c_str()); + if (params.dry_run) { + fprintf(stderr, "%s: calculating quantization size for '%s' as %s", __func__, fname_inp.c_str(), ftype_str.c_str()); + } else { + fprintf(stderr, "%s: quantizing '%s' to '%s' as %s", __func__, fname_inp.c_str(), fname_out.c_str(), ftype_str.c_str()); + } + if (params.nthread > 0) { fprintf(stderr, " using %d threads", params.nthread); } From c3f42dedd1f446b2e7733ef12c6d93e61a0e5509 Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 14:29:22 -0600 Subject: [PATCH 04/20] use 6 characters for tensor dims (cont.) 
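
Align the first dimension field with the other three, which the earlier
"use 6 characters for tensor dims" commit already widened, so all columns
of a printed tensor shape line up again.

For reference, a minimal standalone sketch of the resulting formatting;
the 4096 x 14336 shape is a hypothetical example, and GGML_MAX_DIMS is 4:

    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        const int64_t ne[4] = {4096, 14336, 1, 1}; // hypothetical tensor dims
        char buf[256];
        // same format strings as llama_format_tensor_shape after this patch
        snprintf(buf, sizeof(buf), "%6" PRId64, ne[0]);
        for (int i = 1; i < 4; i++) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %6" PRId64, ne[i]);
        }
        printf("[%s]\n", buf); // prints: [  4096,  14336,      1,      1]
        return 0;
    }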
---
 src/llama-impl.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llama-impl.cpp b/src/llama-impl.cpp
index 60c7fcd050..710a5a1e08 100644
--- a/src/llama-impl.cpp
+++ b/src/llama-impl.cpp
@@ -109,7 +109,7 @@ std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
 std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
     char buf[256];
-    snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
+    snprintf(buf, sizeof(buf), "%6" PRId64, t->ne[0]);
     for (int i = 1; i < GGML_MAX_DIMS; i++) {
         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %6" PRId64, t->ne[i]);
     }
     return buf;

From b9b32f0d2d7a8f041d97e6d6ce00f636cdd6f42b Mon Sep 17 00:00:00 2001
From: ddh0
Date: Wed, 11 Feb 2026 14:45:44 -0600
Subject: [PATCH 05/20] no need to re-calculate ggml_nbytes for tensor

---
 src/llama-quant.cpp | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 2836caaf3a..e65c28723f 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -750,11 +750,12 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         }
 
         const std::string name = ggml_get_name(tensor);
+        const size_t tensor_size = ggml_nbytes(tensor);
 
         if (!params->dry_run) {
             if (!ml.use_mmap) {
-                if (read_data.size() < ggml_nbytes(tensor)) {
-                    read_data.resize(ggml_nbytes(tensor));
+                if (read_data.size() < tensor_size) {
+                    read_data.resize(tensor_size);
                 }
                 tensor->data = read_data.data();
             }
             ml.load_data_for(tensor);
@@ -908,19 +909,19 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         }
 
         // we have now decided on the target type for this tensor
-        // the --dry-run option calculates the final quantization size without quantizing
         if (params->dry_run) {
+            // the --dry-run option calculates the final quantization size without quantizing
             if (quantize) {
                 new_size = ggml_nrows(tensor) * ggml_row_size(new_type, tensor->ne[0]);
                 LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB (%s)\n",
-                               ggml_nbytes(tensor)/1024.0/1024.0,
+                               tensor_size/1024.0/1024.0,
                                new_size/1024.0/1024.0,
                                ggml_type_name(new_type));
             } else {
-                new_size = ggml_nbytes(tensor);
+                new_size = tensor_size;
                 LLAMA_LOG_INFO("size = %8.3f MiB\n", new_size/1024.0/1024.0);
             }
-            total_size_org += ggml_nbytes(tensor);
+            total_size_org += tensor_size;
             total_size_new += new_size;
             continue;
         } else {
@@ -928,8 +929,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
             // no --dry-run, perform quantization
             if (!quantize) {
                 new_type = tensor->type;
                 new_data = tensor->data;
-                new_size = ggml_nbytes(tensor);
-                LLAMA_LOG_INFO("size = %8.3f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0);
+                new_size = tensor_size;
+                LLAMA_LOG_INFO("size = %8.3f MiB\n", tensor_size/1024.0/1024.0);
             } else {
                 const int64_t nelements = ggml_nelements(tensor);
@@ -1030,9 +1031,9 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
                     }
 #endif
                 }
-                LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
+                LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", tensor_size/1024.0/1024.0, new_size/1024.0/1024.0);
             }
-            total_size_org += ggml_nbytes(tensor);
+            total_size_org += tensor_size;
             total_size_new += new_size;
 
             // update the gguf meta data as we go

From 150e1db21d32db1eb2b19c24cd82cd23aaf52398 Mon Sep 17 00:00:00 2001
From: ddh0
Date: Wed, 11 Feb 2026 14:49:56 -0600
Subject: [PATCH 06/20] fix indent

---
 tools/quantize/quantize.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/quantize/quantize.cpp 
b/tools/quantize/quantize.cpp index 91b0367742..8497cb8039 100644 --- a/tools/quantize/quantize.cpp +++ b/tools/quantize/quantize.cpp @@ -626,7 +626,7 @@ int main(int argc, char ** argv) { llama_backend_init(); -// parse command line arguments + // parse command line arguments const std::string fname_inp = argv[arg_idx]; arg_idx++; std::string fname_out; From 966b21a981d2279358d6de76a03dc8de6b8617d4 Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 15:30:12 -0600 Subject: [PATCH 07/20] show model and quant BPW when quant completes --- src/llama-quant.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index e65c28723f..d7b90db01f 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -1051,8 +1051,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: close_ofstream(); } - LLAMA_LOG_INFO("%s: model size = %8.2f MiB\n", __func__, total_size_org/1024.0/1024.0); - LLAMA_LOG_INFO("%s: quant size = %8.2f MiB\n", __func__, total_size_new/1024.0/1024.0); + LLAMA_LOG_INFO("%s: model size = %8.2f MiB (%.2f BPW)\n", __func__, total_size_org/1024.0/1024.0, total_size_org*8.0/ml.n_elements); + LLAMA_LOG_INFO("%s: quant size = %8.2f MiB (%.2f BPW)\n", __func__, total_size_new/1024.0/1024.0, total_size_new*8.0/ml.n_elements); if (qs.n_fallback > 0) { LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) required fallback quantization\n", From 07f882bbbb8380ad5ef1b5da845322d8dcd11b7d Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 15:36:42 -0600 Subject: [PATCH 08/20] add example to --help --- tools/quantize/quantize.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/quantize/quantize.cpp b/tools/quantize/quantize.cpp index 8497cb8039..7c9a7f29cc 100644 --- a/tools/quantize/quantize.cpp +++ b/tools/quantize/quantize.cpp @@ -158,7 +158,8 @@ static void usage(const char * executable) { printf(" override model metadata by key in the quantized model. 
may be specified multiple times.\n"); printf(" WARNING: this is an advanced option, use with care.\n"); printf(" --dry-run\n"); - printf(" calculate and show the final quantization size without performing quantization\n\n"); + printf(" calculate and show the final quantization size without performing quantization\n"); + printf(" example: llama-quantize --dry-run model-f32.gguf Q4_K\n\n"); printf("note: --include-weights and --exclude-weights cannot be used together\n\n"); printf("-----------------------------------------------------------------------------\n"); printf(" allowed quantization types\n"); From 2769f352077c3692e3f4cf1ad1e1fa5f56a2af7b Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 20:49:05 -0600 Subject: [PATCH 09/20] new function `tensor_requires_imatrix`, add courtesy warning about imatrix --- src/llama-quant.cpp | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index d7b90db01f..8a668e6b23 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -479,6 +479,22 @@ static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float * return new_size; } +static bool tensor_requires_imatrix(const llama_model_quantize_params * params, const ggml_tensor * t, const ggml_type dst_type) { + if (!params->imatrix) { + if ( + dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS || + dst_type == GGML_TYPE_IQ2_S || dst_type == GGML_TYPE_IQ1_S || ( + dst_type == GGML_TYPE_IQ1_M && strcmp(t->name, "token_embd.weight") && + strcmp(t->name, "output.weight") + ) || ( + dst_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && + strcmp(t->name, "token_embd.weight") != 0 + ) + ) return true; + } + return false; +} + static void llama_model_quantize_impl(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) { ggml_type default_type; llama_ftype ftype = params->ftype; @@ -741,6 +757,10 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: new_ofstream(0); } + // flag for `--dry-run`, to let the user know if imatrix will be required for a real + // quantization, as a courtesy + bool will_require_imatrix = false; + for (const auto * it : tensors) { const auto & weight = *it; ggml_tensor * tensor = weight.tensor; @@ -921,6 +941,9 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: new_size = tensor_size; LLAMA_LOG_INFO("size = %8.3f MiB\n", new_size/1024.0/1024.0); } + if (!will_require_imatrix && tensor_requires_imatrix(params, tensor, new_type)) { + will_require_imatrix = true; + } total_size_org += tensor_size; total_size_new += new_size; continue; @@ -957,12 +980,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: } } } - if ((new_type == GGML_TYPE_IQ2_XXS || - new_type == GGML_TYPE_IQ2_XS || - new_type == GGML_TYPE_IQ2_S || - new_type == GGML_TYPE_IQ1_S || - (new_type == GGML_TYPE_IQ1_M && strcmp(tensor->name, "token_embd.weight") && strcmp(tensor->name, "output.weight")) || - (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) { + if (tensor_requires_imatrix(params, tensor, new_type)) { LLAMA_LOG_ERROR("\n\n============================================================\n"); LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name); LLAMA_LOG_ERROR("The result will be 
garbage, so bailing out\n"); @@ -1053,6 +1071,9 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: LLAMA_LOG_INFO("%s: model size = %8.2f MiB (%.2f BPW)\n", __func__, total_size_org/1024.0/1024.0, total_size_org*8.0/ml.n_elements); LLAMA_LOG_INFO("%s: quant size = %8.2f MiB (%.2f BPW)\n", __func__, total_size_new/1024.0/1024.0, total_size_new*8.0/ml.n_elements); + if (!params->imatrix && params->dry_run && will_require_imatrix) { + LLAMA_LOG_WARN("%s: WARNING: dry run completed successfully, but actually completing this quantization will require an imatrix!\n"); + } if (qs.n_fallback > 0) { LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) required fallback quantization\n", From ea8da0503c48077b0468c15345aaf49ebf8e1a37 Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 20:57:16 -0600 Subject: [PATCH 10/20] missing __func__, move imatrix flag set --- src/llama-quant.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 8a668e6b23..76581f8b4b 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -937,13 +937,13 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: tensor_size/1024.0/1024.0, new_size/1024.0/1024.0, ggml_type_name(new_type)); + if (!will_require_imatrix && tensor_requires_imatrix(params, tensor, new_type)) { + will_require_imatrix = true; + } } else { new_size = tensor_size; LLAMA_LOG_INFO("size = %8.3f MiB\n", new_size/1024.0/1024.0); } - if (!will_require_imatrix && tensor_requires_imatrix(params, tensor, new_type)) { - will_require_imatrix = true; - } total_size_org += tensor_size; total_size_new += new_size; continue; @@ -1072,7 +1072,9 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: LLAMA_LOG_INFO("%s: model size = %8.2f MiB (%.2f BPW)\n", __func__, total_size_org/1024.0/1024.0, total_size_org*8.0/ml.n_elements); LLAMA_LOG_INFO("%s: quant size = %8.2f MiB (%.2f BPW)\n", __func__, total_size_new/1024.0/1024.0, total_size_new*8.0/ml.n_elements); if (!params->imatrix && params->dry_run && will_require_imatrix) { - LLAMA_LOG_WARN("%s: WARNING: dry run completed successfully, but actually completing this quantization will require an imatrix!\n"); + LLAMA_LOG_WARN("%s: WARNING: dry run completed successfully, but actually completing this quantization will require an imatrix!\n", + __func__ + ); } if (qs.n_fallback > 0) { From 3211a847ef3c153fe499aeb259e2a6f996c6e75d Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 20:58:52 -0600 Subject: [PATCH 11/20] logic error --- src/llama-quant.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 76581f8b4b..c411d41153 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -980,7 +980,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: } } } - if (tensor_requires_imatrix(params, tensor, new_type)) { + if (tensor_requires_imatrix(params, tensor, new_type) && !imatrix) { LLAMA_LOG_ERROR("\n\n============================================================\n"); LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name); LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n"); From 55dbee2bbe1059dac78eb139869c0aa189558df2 Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 21:03:34 -0600 Subject: [PATCH 12/20] fixup tensor_requires_imatrix --- src/llama-quant.cpp | 23 
+++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index c411d41153..252fbe2085 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -480,19 +480,18 @@ static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float * } static bool tensor_requires_imatrix(const llama_model_quantize_params * params, const ggml_tensor * t, const ggml_type dst_type) { - if (!params->imatrix) { - if ( - dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS || - dst_type == GGML_TYPE_IQ2_S || dst_type == GGML_TYPE_IQ1_S || ( - dst_type == GGML_TYPE_IQ1_M && strcmp(t->name, "token_embd.weight") && - strcmp(t->name, "output.weight") - ) || ( - dst_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && - strcmp(t->name, "token_embd.weight") != 0 - ) - ) return true; + if (dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS || + dst_type == GGML_TYPE_IQ2_S || dst_type == GGML_TYPE_IQ1_S || ( + dst_type == GGML_TYPE_IQ1_M && strcmp(t->name, "token_embd.weight") && + strcmp(t->name, "output.weight") + ) || ( + dst_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && + strcmp(t->name, "token_embd.weight") != 0 + )) { + return true; + } else { + return false; } - return false; } static void llama_model_quantize_impl(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) { From 22db76409b7495835c2fac8f491423887445ad1a Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 21:14:19 -0600 Subject: [PATCH 13/20] add missing `GGML_TYPE`s --- src/llama-quant.cpp | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 252fbe2085..3cad6bc6e7 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -480,14 +480,19 @@ static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float * } static bool tensor_requires_imatrix(const llama_model_quantize_params * params, const ggml_tensor * t, const ggml_type dst_type) { - if (dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS || - dst_type == GGML_TYPE_IQ2_S || dst_type == GGML_TYPE_IQ1_S || ( + if ( + dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS || + dst_type == GGML_TYPE_IQ3_XXS || dst_type == GGML_TYPE_IQ1_S || + dst_type == GGML_TYPE_IQ2_S || dst_type == GGML_TYPE_IQ1_M || + dst_type == GGML_TYPE_TQ1_0 || dst_type == GGML_TYPE_TQ2_0 || + ( dst_type == GGML_TYPE_IQ1_M && strcmp(t->name, "token_embd.weight") && strcmp(t->name, "output.weight") ) || ( dst_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(t->name, "token_embd.weight") != 0 - )) { + ) + ) { return true; } else { return false; @@ -979,7 +984,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: } } } - if (tensor_requires_imatrix(params, tensor, new_type) && !imatrix) { + if (!imatrix && tensor_requires_imatrix(params, tensor, new_type)) { LLAMA_LOG_ERROR("\n\n============================================================\n"); LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name); LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n"); From ae786b862da889a9345a7360e1c7b57c6056510f Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 21:21:40 -0600 Subject: [PATCH 14/20] simplify and rename `tensor_type_requires_imatrix` --- src/llama-quant.cpp | 19 +++++-------------- 1 file 
changed, 5 insertions(+), 14 deletions(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 3cad6bc6e7..5b3fec3dc5 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -479,20 +479,11 @@ static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float * return new_size; } -static bool tensor_requires_imatrix(const llama_model_quantize_params * params, const ggml_tensor * t, const ggml_type dst_type) { - if ( - dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS || +static bool tensor_type_requires_imatrix(const ggml_type dst_type) { + if (dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS || dst_type == GGML_TYPE_IQ3_XXS || dst_type == GGML_TYPE_IQ1_S || dst_type == GGML_TYPE_IQ2_S || dst_type == GGML_TYPE_IQ1_M || - dst_type == GGML_TYPE_TQ1_0 || dst_type == GGML_TYPE_TQ2_0 || - ( - dst_type == GGML_TYPE_IQ1_M && strcmp(t->name, "token_embd.weight") && - strcmp(t->name, "output.weight") - ) || ( - dst_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && - strcmp(t->name, "token_embd.weight") != 0 - ) - ) { + dst_type == GGML_TYPE_TQ1_0 || dst_type == GGML_TYPE_TQ2_0) { return true; } else { return false; @@ -941,7 +932,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: tensor_size/1024.0/1024.0, new_size/1024.0/1024.0, ggml_type_name(new_type)); - if (!will_require_imatrix && tensor_requires_imatrix(params, tensor, new_type)) { + if (!will_require_imatrix && tensor_type_requires_imatrix(new_type)) { will_require_imatrix = true; } } else { @@ -984,7 +975,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: } } } - if (!imatrix && tensor_requires_imatrix(params, tensor, new_type)) { + if (!imatrix && tensor_type_requires_imatrix(new_type)) { LLAMA_LOG_ERROR("\n\n============================================================\n"); LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name); LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n"); From 1ccd7a49baeb5f4643bccc75008de47ba85d843c Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 21:41:37 -0600 Subject: [PATCH 15/20] simplify for style --- src/llama-quant.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 5b3fec3dc5..31694e2834 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -480,14 +480,12 @@ static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float * } static bool tensor_type_requires_imatrix(const ggml_type dst_type) { - if (dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS || + return ( + dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS || dst_type == GGML_TYPE_IQ3_XXS || dst_type == GGML_TYPE_IQ1_S || dst_type == GGML_TYPE_IQ2_S || dst_type == GGML_TYPE_IQ1_M || - dst_type == GGML_TYPE_TQ1_0 || dst_type == GGML_TYPE_TQ2_0) { - return true; - } else { - return false; - } + dst_type == GGML_TYPE_TQ1_0 || dst_type == GGML_TYPE_TQ2_0 + ); } static void llama_model_quantize_impl(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) { @@ -1066,6 +1064,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: LLAMA_LOG_INFO("%s: model size = %8.2f MiB (%.2f BPW)\n", __func__, total_size_org/1024.0/1024.0, total_size_org*8.0/ml.n_elements); LLAMA_LOG_INFO("%s: quant size = %8.2f MiB (%.2f BPW)\n", __func__, 
total_size_new/1024.0/1024.0, total_size_new*8.0/ml.n_elements); + if (!params->imatrix && params->dry_run && will_require_imatrix) { LLAMA_LOG_WARN("%s: WARNING: dry run completed successfully, but actually completing this quantization will require an imatrix!\n", __func__ From 1658228d6acc770c884965ff0582a7633b75f96a Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 21:53:07 -0600 Subject: [PATCH 16/20] add back Q2_K edge case for imatrix --- src/llama-quant.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 31694e2834..543b658e56 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -479,12 +479,15 @@ static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float * return new_size; } -static bool tensor_type_requires_imatrix(const ggml_type dst_type) { +static bool tensor_type_requires_imatrix(const llama_model_quantize_params * params, const ggml_tensor * t, const ggml_type dst_type) { return ( dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS || dst_type == GGML_TYPE_IQ3_XXS || dst_type == GGML_TYPE_IQ1_S || dst_type == GGML_TYPE_IQ2_S || dst_type == GGML_TYPE_IQ1_M || - dst_type == GGML_TYPE_TQ1_0 || dst_type == GGML_TYPE_TQ2_0 + dst_type == GGML_TYPE_TQ1_0 || dst_type == GGML_TYPE_TQ2_0 || + ( // Q2_K is the worst k-quant type - only allow it without imatrix for token embeddings + dst_type == GGML_TYPE_Q2_K && strcmp(t->name, "token_embd.weight") != 0 + ) ); } @@ -930,7 +933,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: tensor_size/1024.0/1024.0, new_size/1024.0/1024.0, ggml_type_name(new_type)); - if (!will_require_imatrix && tensor_type_requires_imatrix(new_type)) { + if (!will_require_imatrix && tensor_type_requires_imatrix(params, tensor, new_type)) { will_require_imatrix = true; } } else { @@ -973,7 +976,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: } } } - if (!imatrix && tensor_type_requires_imatrix(new_type)) { + if (!imatrix && tensor_type_requires_imatrix(params, tensor, new_type)) { LLAMA_LOG_ERROR("\n\n============================================================\n"); LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name); LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n"); From b15bb3404cf49d4be1a4d1e5cafbdb544d086d0d Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 21:57:55 -0600 Subject: [PATCH 17/20] guard ftype imatrix warning --- tools/quantize/quantize.cpp | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/quantize/quantize.cpp b/tools/quantize/quantize.cpp index 7c9a7f29cc..59bf9bd3fd 100644 --- a/tools/quantize/quantize.cpp +++ b/tools/quantize/quantize.cpp @@ -686,11 +686,12 @@ int main(int argc, char ** argv) { } } - if ((params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || - params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || - params.ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S || - params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || - params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) && imatrix_data.empty()) { + if (!params.dry_run && + ( + params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || + params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || params.ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S || + params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_M + ) && imatrix_data.empty()) { fprintf(stderr, 
"\n==========================================================================================================\n"); fprintf(stderr, "Please do not use IQ1_S, IQ1_M, IQ2_S, IQ2_XXS, IQ2_XS or Q2_K_S quantization without an importance matrix\n"); fprintf(stderr, "==========================================================================================================\n\n\n"); From 40528248fcbc212bcde26f8d25b4b411a023d5f3 Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 22:18:56 -0600 Subject: [PATCH 18/20] comment ref #12557 --- src/llama-quant.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 543b658e56..49a9696503 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -484,7 +484,7 @@ static bool tensor_type_requires_imatrix(const llama_model_quantize_params * par dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS || dst_type == GGML_TYPE_IQ3_XXS || dst_type == GGML_TYPE_IQ1_S || dst_type == GGML_TYPE_IQ2_S || dst_type == GGML_TYPE_IQ1_M || - dst_type == GGML_TYPE_TQ1_0 || dst_type == GGML_TYPE_TQ2_0 || + // dst_type == GGML_TYPE_TQ1_0 || dst_type == GGML_TYPE_TQ2_0 || // uncomment if #12557 is merged ( // Q2_K is the worst k-quant type - only allow it without imatrix for token embeddings dst_type == GGML_TYPE_Q2_K && strcmp(t->name, "token_embd.weight") != 0 ) From 44f9fee2488858307798bae9b576541e9e887599 Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 22:23:10 -0600 Subject: [PATCH 19/20] remove per @compilade --- src/llama-quant.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 49a9696503..652d93dbc9 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -484,7 +484,6 @@ static bool tensor_type_requires_imatrix(const llama_model_quantize_params * par dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS || dst_type == GGML_TYPE_IQ3_XXS || dst_type == GGML_TYPE_IQ1_S || dst_type == GGML_TYPE_IQ2_S || dst_type == GGML_TYPE_IQ1_M || - // dst_type == GGML_TYPE_TQ1_0 || dst_type == GGML_TYPE_TQ2_0 || // uncomment if #12557 is merged ( // Q2_K is the worst k-quant type - only allow it without imatrix for token embeddings dst_type == GGML_TYPE_Q2_K && strcmp(t->name, "token_embd.weight") != 0 ) From f58de63ec30f96b1e88eecd5ca659d9248b9eda8 Mon Sep 17 00:00:00 2001 From: ddh0 Date: Wed, 11 Feb 2026 22:30:06 -0600 Subject: [PATCH 20/20] remove unused `params` parameter --- src/llama-quant.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 652d93dbc9..9781202f90 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -479,7 +479,7 @@ static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float * return new_size; } -static bool tensor_type_requires_imatrix(const llama_model_quantize_params * params, const ggml_tensor * t, const ggml_type dst_type) { +static bool tensor_type_requires_imatrix(const ggml_tensor * t, const ggml_type dst_type) { return ( dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS || dst_type == GGML_TYPE_IQ3_XXS || dst_type == GGML_TYPE_IQ1_S || @@ -932,7 +932,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: tensor_size/1024.0/1024.0, new_size/1024.0/1024.0, ggml_type_name(new_type)); - if (!will_require_imatrix && tensor_type_requires_imatrix(params, tensor, new_type)) { + if (!will_require_imatrix && tensor_type_requires_imatrix(tensor, new_type)) { 
will_require_imatrix = true; } } else { @@ -975,7 +975,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: } } } - if (!imatrix && tensor_type_requires_imatrix(params, tensor, new_type)) { + if (!imatrix && tensor_type_requires_imatrix(tensor, new_type)) { LLAMA_LOG_ERROR("\n\n============================================================\n"); LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name); LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");
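
For reviewers, a minimal sketch of driving the option this series adds through
the C API rather than the CLI. The model path and output path are placeholders,
and on the CLI `Q4_K` is an alias for `Q4_K_M`, so this roughly mirrors the
`llama-quantize --dry-run model-f32.gguf Q4_K` example from the updated --help
text:

    #include "llama.h"

    int main() {
        llama_backend_init();

        llama_model_quantize_params params = llama_model_quantize_default_params();
        params.ftype   = LLAMA_FTYPE_MOSTLY_Q4_K_M;
        params.dry_run = true; // sizes are computed and logged; no output file is opened

        // fname_out is unused during a dry run but still required by the signature
        const uint32_t ret = llama_model_quantize("model-f32.gguf", "unused.gguf", &params);

        llama_backend_free();
        return ret == 0 ? 0 : 1;
    }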