remove unused `params` parameter

This commit is contained in:
ddh0 2026-02-11 22:30:06 -06:00
parent 44f9fee248
commit f58de63ec3
1 changed file with 3 additions and 3 deletions

View File

@ -479,7 +479,7 @@ static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float *
return new_size;
}
static bool tensor_type_requires_imatrix(const llama_model_quantize_params * params, const ggml_tensor * t, const ggml_type dst_type) {
static bool tensor_type_requires_imatrix(const ggml_tensor * t, const ggml_type dst_type) {
return (
dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS ||
dst_type == GGML_TYPE_IQ3_XXS || dst_type == GGML_TYPE_IQ1_S ||
@ -932,7 +932,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
tensor_size/1024.0/1024.0,
new_size/1024.0/1024.0,
ggml_type_name(new_type));
if (!will_require_imatrix && tensor_type_requires_imatrix(params, tensor, new_type)) {
if (!will_require_imatrix && tensor_type_requires_imatrix(tensor, new_type)) {
will_require_imatrix = true;
}
} else {
@ -975,7 +975,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
}
}
}
if (!imatrix && tensor_type_requires_imatrix(params, tensor, new_type)) {
if (!imatrix && tensor_type_requires_imatrix(tensor, new_type)) {
LLAMA_LOG_ERROR("\n\n============================================================\n");
LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");