add missing `GGML_TYPE`s

This commit is contained in:
ddh0 2026-02-11 21:14:19 -06:00
parent 55dbee2bbe
commit 22db76409b
1 changed file with 9 additions and 4 deletions

View File

@ -480,14 +480,19 @@ static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float *
}
static bool tensor_requires_imatrix(const llama_model_quantize_params * params, const ggml_tensor * t, const ggml_type dst_type) {
if (dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS ||
dst_type == GGML_TYPE_IQ2_S || dst_type == GGML_TYPE_IQ1_S || (
if (
dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS ||
dst_type == GGML_TYPE_IQ3_XXS || dst_type == GGML_TYPE_IQ1_S ||
dst_type == GGML_TYPE_IQ2_S || dst_type == GGML_TYPE_IQ1_M ||
dst_type == GGML_TYPE_TQ1_0 || dst_type == GGML_TYPE_TQ2_0 ||
(
dst_type == GGML_TYPE_IQ1_M && strcmp(t->name, "token_embd.weight") &&
strcmp(t->name, "output.weight")
) || (
dst_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S &&
strcmp(t->name, "token_embd.weight") != 0
)) {
)
) {
return true;
} else {
return false;
@ -979,7 +984,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
}
}
}
if (tensor_requires_imatrix(params, tensor, new_type) && !imatrix) {
if (!imatrix && tensor_requires_imatrix(params, tensor, new_type)) {
LLAMA_LOG_ERROR("\n\n============================================================\n");
LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");