diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 252fbe2085..3cad6bc6e7 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -480,14 +480,19 @@ static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float *
     }
 }
 static bool tensor_requires_imatrix(const llama_model_quantize_params * params, const ggml_tensor * t, const ggml_type dst_type) {
-    if (dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS ||
-        dst_type == GGML_TYPE_IQ2_S   || dst_type == GGML_TYPE_IQ1_S || (
+    if (
+        dst_type == GGML_TYPE_IQ2_XXS || dst_type == GGML_TYPE_IQ2_XS ||
+        dst_type == GGML_TYPE_IQ3_XXS || dst_type == GGML_TYPE_IQ1_S ||
+        dst_type == GGML_TYPE_IQ2_S   || dst_type == GGML_TYPE_IQ1_M ||
+        dst_type == GGML_TYPE_TQ1_0   || dst_type == GGML_TYPE_TQ2_0 ||
+        (
             dst_type == GGML_TYPE_IQ1_M &&
             strcmp(t->name, "token_embd.weight") && strcmp(t->name, "output.weight")
         ) || (
             dst_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S &&
             strcmp(t->name, "token_embd.weight") != 0
-        )) {
+        )
+    ) {
         return true;
     } else {
         return false;
@@ -979,7 +984,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
                 }
             }
         }
-        if (tensor_requires_imatrix(params, tensor, new_type) && !imatrix) {
+        if (!imatrix && tensor_requires_imatrix(params, tensor, new_type)) {
             LLAMA_LOG_ERROR("\n\n============================================================\n");
             LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
             LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");
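For context, the reordering in the second hunk is behavior-preserving: `&&` short-circuits left to right, so putting the cheap `!imatrix` null test first means the strcmp-based name checks inside tensor_requires_imatrix() never run when an importance matrix is present. A minimal standalone sketch of that pattern follows; it is not llama.cpp code, and the names fake_tensor and requires_imatrix are hypothetical stand-ins for ggml_tensor and the real predicate.

// Hypothetical sketch of the short-circuit ordering; compiles on its own.
#include <cstdio>
#include <cstring>

struct fake_tensor { const char * name; };  // stand-in for ggml_tensor

// Stand-in for tensor_requires_imatrix(): the chain of dst_type comparisons
// is reduced to one flag, plus one of the strcmp() name exceptions.
static bool requires_imatrix(const fake_tensor * t, bool very_low_bit_type) {
    return very_low_bit_type && strcmp(t->name, "token_embd.weight") != 0;
}

int main() {
    fake_tensor tensor = { "blk.0.attn_q.weight" };
    const float * imatrix = nullptr;  // no importance matrix was loaded

    // Cheap null check first; the predicate (and its strcmp calls) is only
    // evaluated when no imatrix is available and the result can matter.
    if (!imatrix && requires_imatrix(&tensor, /*very_low_bit_type=*/true)) {
        fprintf(stderr, "missing importance matrix for tensor %s\n", tensor.name);
        return 1;
    }
    return 0;
}

In the common case where an imatrix is supplied, the per-tensor name comparisons are skipped entirely; the original ordering evaluated the predicate for every tensor regardless.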