Claw back a few of the changes for less dramatic file size increase

Colin Kealty 2025-04-01 19:01:40 -04:00
parent cb496c4e8a
commit 3f8d7a2582
1 changed file with 17 additions and 2 deletions


@@ -259,12 +259,27 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
         else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
         ++qs.i_attention_wv;
     }
-    else if (qs.model.hparams.n_expert >= 8 && name.find("attn_k") != std::string::npos) {
+    else if (qs.model.hparams.n_expert >= 8 && name.find("attn_k.weight") != std::string::npos) {
         new_type = GGML_TYPE_Q4_K;
     }
-    else if (qs.model.hparams.n_expert >= 8 && name.find("attn_q") != std::string::npos) {
+    else if (qs.model.hparams.n_expert >= 8 && name.find("attn_kv_a_mqa.weight") != std::string::npos) {
         new_type = GGML_TYPE_Q4_K;
     }
+    else if (qs.model.hparams.n_expert >= 8 && name.find("attn_kv_b.weight") != std::string::npos) {
+        if (qs.i_attention_wv < qs.n_attention_wv/16) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) {
+            new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+        }
+        ++qs.i_attention_wv;
+    }
+    else if (qs.model.hparams.n_expert >= 8 && name.find("attn_q_a.weight") != std::string::npos) {
+        new_type = GGML_TYPE_Q4_K;
+    }
+    else if (qs.model.hparams.n_expert >= 8 && name.find("attn_q_b.weight") != std::string::npos) {
+        new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+    }
     else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_down") != std::string::npos) {
         if (qs.i_ffn_down < qs.n_ffn_down/16) {
             new_type = GGML_TYPE_Q4_K;
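
Note: the per-layer gating in the new attn_kv_b branch relies on llama.cpp's use_more_bits() helper, whose body is not part of this diff. For reference, a sketch of roughly how that helper is defined upstream; the exact formula and file location (src/llama-quant.cpp) are assumptions here and may differ in this tree:

    // Sketch of llama.cpp's use_more_bits() helper; not part of this commit, so treat the
    // exact formula as an assumption. It flags layers that should get a higher-bit quant:
    // the first eighth of layers, the last eighth, and every third layer in between.
    static bool use_more_bits(int i_layer, int n_layers) {
        return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%3 == 2;
    }

Called with qs.i_attention_wv / qs.n_attention_wv, only the first sixteenth of the attn_kv_b tensors is promoted to Q4_K and only the use_more_bits() layers get the IQ3_S/Q2_K bump; the remaining layers keep the default type, which is what keeps the file size increase less dramatic than promoting every layer.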