This commit is contained in:
Bartowski 2026-03-15 23:55:07 +02:00 committed by GitHub
commit 305c1c9410
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 442 additions and 47 deletions

View File

@ -389,8 +389,8 @@ typedef struct {
} block_iq3_xxs;
static_assert(sizeof(block_iq3_xxs) == sizeof(ggml_half) + 3*(QK_K/8), "wrong iq3_xxs block size/padding");
// 3.4375 bpw
// Number of scale entries per block, one per group of 64 weights — presumably for the iq3_s format; TODO confirm against block_iq3_s layout
#define IQ3S_N_SCALE QK_K/64
// 3.4375 bpw
typedef struct {
ggml_half d;
uint8_t qs[QK_K/4];

View File

@ -25,15 +25,28 @@ struct tensor_type_option {
// Coarse classification of model tensors, used to pick per-tensor quantization
// types. Categories are assigned by substring matching on the tensor name in
// tensor_get_category() (e.g. "attn_q.weight" -> ATTENTION_Q).
enum class tensor_category {
TOKEN_EMBD,          // token embedding table
ATTENTION_Q,         // "attn_q.weight" — attention query projection
ATTENTION_Q_A,       // "attn_q_a.weight" — split/low-rank query, part A
ATTENTION_Q_B,       // "attn_q_b.weight" — split/low-rank query, part B
ATTENTION_K_B,       // "attn_k_b.weight"
ATTENTION_V_B,       // "attn_v_b.weight"
ATTENTION_V,         // attention value projection
ATTENTION_K,         // attention key projection
ATTENTION_QKV,       // fused query/key/value projection
ATTENTION_KV_B,      // combined KV, part B (treated as attn-v-like below)
ATTENTION_KV_A_MQA,  // "attn_kv_a_mqa.weight"
ATTENTION_OUTPUT,    // "attn_output.weight" — attention output projection
FFN_UP,              // "ffn_up" (dense FFN)
FFN_GATE,            // dense FFN gate
FFN_DOWN,            // dense FFN down projection
OUTPUT,              // output head (also clears has_tied_embeddings when seen)
FFN_UP_EXPS,         // MoE routed-expert FFN tensors ("ffn_*_exps")
FFN_DOWN_EXPS,
FFN_GATE_EXPS,
FFN_UP_SHEXP,        // MoE shared-expert FFN tensors ("ffn_*_shexp")
FFN_DOWN_SHEXP,
FFN_GATE_SHEXP,
SSM_OUT,             // "ssm_out.weight" — state-space-model output projection
SSM_BA,              // "ssm_ba"/"ssm_alpha"/"ssm_beta" weights (kept at F32 below)
OTHER                // anything not matched above
};
@ -135,9 +148,57 @@ static tensor_category tensor_get_category(const std::string & tensor_name) {
if (tensor_name.find("attn_q.weight") != std::string::npos) {
return tensor_category::ATTENTION_Q;
}
if (tensor_name.find("attn_kv_a_mqa.weight") != std::string::npos) {
return tensor_category::ATTENTION_KV_A_MQA;
}
if (tensor_name.find("attn_q_a.weight") != std::string::npos) {
return tensor_category::ATTENTION_Q_A;
}
if (tensor_name.find("attn_q_b.weight") != std::string::npos) {
return tensor_category::ATTENTION_Q_B;
}
if (tensor_name.find("attn_k_b.weight") != std::string::npos) {
return tensor_category::ATTENTION_K_B;
}
if (tensor_name.find("attn_v_b.weight") != std::string::npos) {
return tensor_category::ATTENTION_V_B;
}
if (tensor_name.find("attn_output.weight") != std::string::npos) {
return tensor_category::ATTENTION_OUTPUT;
}
if (tensor_name.find("ffn_down_exps") != std::string::npos) {
return tensor_category::FFN_DOWN_EXPS;
}
if (tensor_name.find("ffn_gate_exps") != std::string::npos) {
return tensor_category::FFN_GATE_EXPS;
}
if (tensor_name.find("ffn_up_exps") != std::string::npos) {
return tensor_category::FFN_UP_EXPS;
}
if (tensor_name.find("ffn_down_shexp") != std::string::npos) {
return tensor_category::FFN_DOWN_SHEXP;
}
if (tensor_name.find("ffn_gate_shexp") != std::string::npos) {
return tensor_category::FFN_GATE_SHEXP;
}
if (tensor_name.find("ffn_up_shexp") != std::string::npos) {
return tensor_category::FFN_UP_SHEXP;
}
if (tensor_name.find("attn_output.weight") != std::string::npos) {
return tensor_category::ATTENTION_OUTPUT;
}
if (tensor_name.find("ssm_out.weight") != std::string::npos) {
return tensor_category::SSM_OUT;
}
if (tensor_name.find("ssm_ba.weight") != std::string::npos) {
return tensor_category::SSM_BA;
}
if (tensor_name.find("ssm_alpha.weight") != std::string::npos) {
return tensor_category::SSM_BA;
}
if (tensor_name.find("ssm_beta.weight") != std::string::npos) {
return tensor_category::SSM_BA;
}
if (tensor_name.find("ffn_up") != std::string::npos) {
return tensor_category::FFN_UP;
}
@ -153,7 +214,6 @@ static tensor_category tensor_get_category(const std::string & tensor_name) {
// check if category is for attention-v-like tensors (more sensitive to quantization)
// Used both when counting n_attention_wv and when selecting types, so the two stay in sync.
// NOTE(review): ATTENTION_QKV is also counted separately (n_attn_qkv/i_attn_qkv) elsewhere
// in this file — confirm it is intentional that QKV is still treated as attn-v-like here.
static bool category_is_attn_v(tensor_category cat) {
return cat == tensor_category::ATTENTION_V ||
cat == tensor_category::ATTENTION_QKV ||
cat == tensor_category::ATTENTION_KV_B;
}
@ -165,14 +225,32 @@ struct quantize_state_impl {
const llama_model & model;
const llama_model_quantize_params * params;
int n_attention_wv = 0;
int n_ffn_down = 0;
int n_ffn_gate = 0;
int n_ffn_up = 0;
int i_attention_wv = 0;
int i_ffn_down = 0;
int i_ffn_gate = 0;
int i_ffn_up = 0;
int n_attention_wv = 0;
int n_attn_qkv = 0;
int n_ffn_down = 0;
int n_ffn_gate = 0;
int n_ffn_up = 0;
int n_ffn_down_exps = 0;
int n_ffn_gate_exps = 0;
int n_ffn_up_exps = 0;
int n_ffn_down_shexp = 0;
int n_ffn_gate_shexp = 0;
int n_ffn_up_shexp = 0;
int n_ssm_out = 0;
int n_attn_q = 0;
int i_attn_qkv = 0;
int i_attention_wv = 0;
int i_ffn_down = 0;
int i_ffn_gate = 0;
int i_ffn_up = 0;
int i_ffn_down_exps = 0;
int i_ffn_gate_exps = 0;
int i_ffn_up_exps = 0;
int i_ffn_down_shexp = 0;
int i_ffn_gate_shexp = 0;
int i_ffn_up_shexp = 0;
int i_ssm_out = 0;
int i_attn_q = 0;
int n_fallback = 0;
@ -402,6 +480,65 @@ static ggml_type tensor_type_fallback(quantize_state_impl & qs, const ggml_tenso
return return_type;
}
// Returns the appropriate type for expert _exps tensors based on ftype.
// "Bump": for low-bpw i-quant ftypes, step the expert tensors up one quality
// tier; any other ftype keeps the caller-provided type unchanged.
static inline ggml_type get_exps_type_low_bpw_bump(llama_ftype ftype, ggml_type new_type) {
    switch (ftype) {
        case LLAMA_FTYPE_MOSTLY_IQ2_M:   return GGML_TYPE_IQ3_S;
        case LLAMA_FTYPE_MOSTLY_IQ2_S:   return GGML_TYPE_IQ3_XXS;
        case LLAMA_FTYPE_MOSTLY_IQ2_XS:  return GGML_TYPE_IQ2_S;
        case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return GGML_TYPE_IQ2_XS;
        case LLAMA_FTYPE_MOSTLY_IQ1_M:   return GGML_TYPE_IQ2_XXS;
        case LLAMA_FTYPE_MOSTLY_IQ1_S:   return GGML_TYPE_IQ1_M;
        default:                         return new_type;
    }
}
// "Squash": for low-bpw i-quant ftypes, step expert _exps tensors DOWN one
// quality tier (IQ2_S and IQ2_XS collapse to IQ2_XXS; IQ1_M and IQ1_S both
// bottom out at IQ1_S). Other ftypes keep the caller-provided type.
static inline ggml_type get_exps_type_low_bpw_squash(llama_ftype ftype, ggml_type new_type) {
    switch (ftype) {
        case LLAMA_FTYPE_MOSTLY_IQ2_M:   return GGML_TYPE_IQ2_XS;
        case LLAMA_FTYPE_MOSTLY_IQ2_S:
        case LLAMA_FTYPE_MOSTLY_IQ2_XS:  return GGML_TYPE_IQ2_XXS;
        case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return GGML_TYPE_IQ1_M;
        case LLAMA_FTYPE_MOSTLY_IQ1_M:
        case LLAMA_FTYPE_MOSTLY_IQ1_S:   return GGML_TYPE_IQ1_S;
        default:                         return new_type;
    }
}
// For higher-bpw ftypes, bump expert _exps tensors up one quality tier.
// K-quants are stepped up unconditionally; the IQ4 family is only bumped when
// no imatrix is available (with an imatrix the base type is kept).
// Other ftypes keep the caller-provided type unchanged.
static inline ggml_type get_exps_type_high_bpw_bump(llama_ftype ftype, ggml_type new_type, bool has_imatrix) {
    switch (ftype) {
        case LLAMA_FTYPE_MOSTLY_Q2_K:    return GGML_TYPE_Q3_K;
        case LLAMA_FTYPE_MOSTLY_Q3_K_M:  return GGML_TYPE_Q4_K;
        case LLAMA_FTYPE_MOSTLY_Q3_K_L:
        case LLAMA_FTYPE_MOSTLY_Q4_K_S:  return GGML_TYPE_Q5_K;
        case LLAMA_FTYPE_MOSTLY_Q4_K_M:
        case LLAMA_FTYPE_MOSTLY_Q5_K_M:  return GGML_TYPE_Q6_K;
        case LLAMA_FTYPE_MOSTLY_Q6_K:    return GGML_TYPE_Q8_0;
        // Bump I-quants
        case LLAMA_FTYPE_MOSTLY_IQ3_S:
        case LLAMA_FTYPE_MOSTLY_IQ3_M:   return GGML_TYPE_Q5_K;
        case LLAMA_FTYPE_MOSTLY_IQ3_XS:
        case LLAMA_FTYPE_MOSTLY_IQ3_XXS: return GGML_TYPE_Q4_K;
        case LLAMA_FTYPE_MOSTLY_IQ4_NL:
        case LLAMA_FTYPE_MOSTLY_IQ4_XS:  return has_imatrix ? new_type : GGML_TYPE_Q6_K;
        default:                         return new_type;
    }
}
// Selects the type for MoE shared-expert FFN (ffn_*_shexp) tensors: shared
// experts run on every token, so they are kept at a higher quality tier than
// the base ftype would suggest. Unlisted ftypes keep the caller-provided type.
static inline ggml_type get_ffn_shexp_ggml_type(llama_ftype ftype, ggml_type new_type) {
    switch (ftype) {
        case LLAMA_FTYPE_MOSTLY_IQ3_M:
        case LLAMA_FTYPE_MOSTLY_IQ3_S:
        case LLAMA_FTYPE_MOSTLY_IQ3_XS:
        case LLAMA_FTYPE_MOSTLY_IQ3_XXS:
        case LLAMA_FTYPE_MOSTLY_Q2_K:
        case LLAMA_FTYPE_MOSTLY_Q2_K_S:  return GGML_TYPE_Q4_K;
        case LLAMA_FTYPE_MOSTLY_Q4_0:    return GGML_TYPE_Q5_0;
        case LLAMA_FTYPE_MOSTLY_Q4_1:    return GGML_TYPE_Q5_1;
        case LLAMA_FTYPE_MOSTLY_Q4_K_S:
        case LLAMA_FTYPE_MOSTLY_Q3_K_L:
        case LLAMA_FTYPE_MOSTLY_Q3_K_M:
        case LLAMA_FTYPE_MOSTLY_Q3_K_S:  return GGML_TYPE_Q5_K;
        case LLAMA_FTYPE_MOSTLY_Q4_K_M:
        case LLAMA_FTYPE_MOSTLY_Q5_K_S:  return GGML_TYPE_Q6_K;
        case LLAMA_FTYPE_MOSTLY_Q5_K_M:
        case LLAMA_FTYPE_MOSTLY_Q6_K:
        case LLAMA_FTYPE_MOSTLY_Q8_0:    return GGML_TYPE_Q8_0;
        default:                         return new_type;
    }
}
// internal standard logic for selecting the target tensor type based on tensor category, ftype, and model arch
static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype, tensor_category category) {
const std::string name = ggml_get_name(tensor);
@ -453,6 +590,8 @@ static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type
new_type = GGML_TYPE_Q6_K;
}
}
} else if (category == tensor_category::SSM_BA) {
new_type = GGML_TYPE_F32;
} else if (ftype == LLAMA_FTYPE_MOSTLY_MXFP4_MOE) {
// MoE tensors -> MXFP4
// other tensors -> Q8_0
@ -482,33 +621,136 @@ static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type
} else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
if (category_is_attn_v(category)) {
if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
if (use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) {
new_type = GGML_TYPE_Q6_K;
}
else {
new_type = GGML_TYPE_Q4_K;
}
++qs.i_attention_wv;
}
else if (qs.model.hparams.n_expert == 8 && category == tensor_category::ATTENTION_K) {
else if (category == tensor_category::ATTENTION_QKV) {
if (qs.model.hparams.n_expert >= 8) {
if (use_more_bits(qs.i_attn_qkv, qs.n_attn_qkv)) {
new_type = GGML_TYPE_Q6_K;
}
else {
new_type = GGML_TYPE_Q4_K;
}
}
else if (use_more_bits(qs.i_attn_qkv, qs.n_attn_qkv)) {
new_type = GGML_TYPE_Q4_K;
}
++qs.i_attn_qkv;
}
else if (category == tensor_category::SSM_OUT) {
if (use_more_bits(qs.i_ssm_out, qs.n_ssm_out)) {
new_type = GGML_TYPE_Q4_K;
}
++qs.i_ssm_out;
}
else if (category == tensor_category::ATTENTION_K) {
new_type = GGML_TYPE_Q4_K;
}
else if (category == tensor_category::ATTENTION_Q) {
if (qs.model.hparams.n_expert >= 8 || use_more_bits(qs.i_attn_q, qs.n_attn_q)){
new_type = GGML_TYPE_Q4_K;
}
}
else if (qs.model.hparams.n_expert >= 8 && category == tensor_category::ATTENTION_KV_A_MQA) {
new_type = GGML_TYPE_Q4_K;
}
else if (qs.model.hparams.n_expert >= 8 && category == tensor_category::ATTENTION_KV_B) {
if (qs.i_attention_wv < qs.n_attention_wv/8) {
new_type = GGML_TYPE_Q4_K;
}
else if (use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) {
new_type = (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
}
++qs.i_attention_wv;
}
else if (category == tensor_category::ATTENTION_Q_A) {
new_type = GGML_TYPE_Q4_K;
}
else if (category == tensor_category::ATTENTION_Q_B) {
new_type = (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
}
else if (category == tensor_category::FFN_DOWN) {
if (qs.i_ffn_down < qs.n_ffn_down/8) {
new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
if (qs.i_ffn_down < qs.n_ffn_down/16) {
new_type = GGML_TYPE_Q6_K;
}
else if (qs.i_ffn_down < qs.n_ffn_down/8) {
new_type = (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
}
++qs.i_ffn_down;
}
else if (category == tensor_category::FFN_GATE) {
if (qs.i_ffn_gate < qs.n_ffn_gate/16) {
new_type = GGML_TYPE_Q4_K;
}
else if (qs.i_ffn_gate < qs.n_ffn_gate/8) {
new_type = (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
}
++qs.i_ffn_gate;
}
else if (category == tensor_category::FFN_UP) {
if (qs.i_ffn_up < qs.n_ffn_up/16) {
new_type = GGML_TYPE_Q4_K;
}
else if (qs.i_ffn_up < qs.n_ffn_up/8) {
new_type = (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
}
++qs.i_ffn_up;
}
else if (category == tensor_category::FFN_DOWN_EXPS) {
if (qs.i_ffn_down_exps < qs.n_ffn_down_exps/8 || qs.i_ffn_down_exps > 7*qs.n_ffn_down_exps/8) {
new_type = get_exps_type_low_bpw_bump(ftype, new_type);
}
++qs.i_ffn_down_exps;
}
else if (category == tensor_category::FFN_DOWN_SHEXP) {
if (use_more_bits(qs.i_ffn_down_shexp, qs.n_ffn_down_shexp)) {
new_type = GGML_TYPE_Q8_0;
}
else {
new_type = GGML_TYPE_Q4_K;
}
++qs.i_ffn_down_shexp;
}
else if (category == tensor_category::FFN_GATE_SHEXP) {
if (use_more_bits(qs.i_ffn_gate_shexp, qs.n_ffn_gate_shexp)) {
new_type = GGML_TYPE_Q8_0;
}
else {
new_type = GGML_TYPE_Q4_K;
}
++qs.i_ffn_gate_shexp;
}
else if (category == tensor_category::FFN_UP_SHEXP) {
if (use_more_bits(qs.i_ffn_up_shexp, qs.n_ffn_up_shexp)) {
new_type = GGML_TYPE_Q8_0;
}
else {
new_type = GGML_TYPE_Q4_K;
}
++qs.i_ffn_up_shexp;
}
else if (category == tensor_category::ATTENTION_OUTPUT) {
if (qs.model.hparams.n_expert == 8) {
new_type = GGML_TYPE_Q5_K;
if (qs.model.hparams.n_expert >= 8) {
new_type = (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
} else {
if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) new_type = GGML_TYPE_IQ2_XXS;
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) new_type = GGML_TYPE_IQ3_S;
}
}
} else if (category_is_attn_v(category)) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
} else if (category == tensor_category::SSM_OUT) {
if (use_more_bits(qs.i_ssm_out, qs.n_ssm_out)) {
new_type = GGML_TYPE_Q8_0;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
new_type = GGML_TYPE_Q4_K;
++qs.i_ssm_out;
} else if (category_is_attn_v(category)) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S) {
new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : !qs.has_imatrix ? GGML_TYPE_IQ3_S : GGML_TYPE_IQ3_XXS;
@ -517,49 +759,96 @@ static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type
new_type = GGML_TYPE_Q4_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
new_type = GGML_TYPE_Q4_K;
new_type = GGML_TYPE_Q5_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
if (use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) {
new_type = GGML_TYPE_Q5_K;
} else {
new_type = GGML_TYPE_Q4_K;
}
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && qs.model.hparams.n_gqa() >= 4) {
new_type = GGML_TYPE_Q5_K;
new_type = GGML_TYPE_Q6_K;
}
else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q5_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q6_K && use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q8_0;
if (qs.model.type == LLM_TYPE_70B) {
// In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
// 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
// nearly negligible increase in model size by quantizing this tensor with more bits:
if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
}
if (qs.model.hparams.n_expert == 8) {
if (qs.model.hparams.n_expert >= 8) {
// for the 8-expert model, bumping this to Q8_0 trades just ~128MB
// TODO: explore better strategies
new_type = GGML_TYPE_Q8_0;
}
++qs.i_attention_wv;
} else if (category == tensor_category::ATTENTION_Q) {
if (use_more_bits(qs.i_attn_q, qs.n_attn_q)) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q6_K || ftype == LLAMA_FTYPE_MOSTLY_Q8_0) {
new_type = GGML_TYPE_Q8_0;
}
else {
new_type = GGML_TYPE_Q6_K;
}
}
++qs.i_attn_q;
} else if (category == tensor_category::ATTENTION_K) {
if (qs.model.hparams.n_expert == 8) {
if (qs.model.hparams.n_expert >= 8) {
// for the 8-expert model, bumping this to Q8_0 trades just ~128MB
// TODO: explore better strategies
new_type = GGML_TYPE_Q8_0;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
new_type = GGML_TYPE_IQ3_XXS;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q6_K) {
new_type = GGML_TYPE_Q8_0;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
new_type = GGML_TYPE_IQ2_S;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) {
new_type = GGML_TYPE_Q6_K;
}
} else if (category == tensor_category::ATTENTION_Q) {
if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
new_type = GGML_TYPE_IQ3_XXS;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
new_type = GGML_TYPE_Q5_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
new_type = GGML_TYPE_IQ2_S;
} else if (category == tensor_category::FFN_DOWN_SHEXP) {
if (use_more_bits(qs.i_ffn_down_shexp, qs.n_ffn_down_shexp)) {
new_type = GGML_TYPE_Q8_0;
}
else {
new_type = get_ffn_shexp_ggml_type(ftype, new_type);
}
++qs.i_ffn_down_shexp;
} else if (category == tensor_category::FFN_GATE_SHEXP) {
if (use_more_bits(qs.i_ffn_gate_shexp, qs.n_ffn_gate_shexp)) {
new_type = GGML_TYPE_Q8_0;
}
else {
new_type = get_ffn_shexp_ggml_type(ftype, new_type);
}
++qs.i_ffn_gate_shexp;
} else if (category == tensor_category::FFN_UP_SHEXP) {
if (use_more_bits(qs.i_ffn_up_shexp, qs.n_ffn_up_shexp)) {
new_type = GGML_TYPE_Q8_0;
}
else {
new_type = get_ffn_shexp_ggml_type(ftype, new_type);
}
++qs.i_ffn_up_shexp;
} else if (category == tensor_category::FFN_DOWN_EXPS) {
if (use_more_bits(qs.i_ffn_down_exps, qs.n_ffn_down_exps)) {
if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || ftype == LLAMA_FTYPE_MOSTLY_Q5_0) && qs.has_imatrix) {
// Guard against craziness in the first few ffn_down layers that can happen even with imatrix for Q4_0/Q5_0.
// We only do it when an imatrix is provided because a) we want to make sure that one can always get the
// same quantization as before imatrix stuff, and b) Q4_1/Q5_1 do go crazy on ffn_down without an imatrix.
new_type = ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ? GGML_TYPE_Q4_1 : GGML_TYPE_Q5_1;
} else {
new_type = get_exps_type_high_bpw_bump(ftype, new_type, qs.has_imatrix);
}
}
++qs.i_ffn_down_exps;
} else if (category == tensor_category::FFN_DOWN) {
auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
int i_layer = info.first, n_layer = info.second;
@ -576,7 +865,7 @@ static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type
: GGML_TYPE_Q3_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M && (i_layer < n_layer/8 ||
(qs.model.hparams.n_expert == 8 && use_more_bits(i_layer, n_layer)))) {
(qs.model.hparams.n_expert >= 8 && use_more_bits(i_layer, n_layer)))) {
new_type = GGML_TYPE_Q4_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
@ -607,30 +896,52 @@ static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type
++qs.i_ffn_down;
} else if (category == tensor_category::ATTENTION_OUTPUT) {
if (arch != LLM_ARCH_FALCON) {
if (qs.model.hparams.n_expert == 8) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL ||
ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S ||
ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) {
if (qs.model.hparams.n_expert >= 8) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS ||
ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S ||
ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
new_type = GGML_TYPE_Q4_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M ||
ftype == LLAMA_FTYPE_MOSTLY_IQ3_S || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M ||
ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) {
new_type = GGML_TYPE_Q5_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) {
new_type = GGML_TYPE_Q6_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q6_K) {
new_type = GGML_TYPE_Q8_0;
}
} else {
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS ) new_type = GGML_TYPE_IQ3_S;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ) new_type = GGML_TYPE_Q4_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L ) new_type = GGML_TYPE_Q5_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L ) new_type = GGML_TYPE_Q4_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M ) new_type = GGML_TYPE_Q4_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M ) new_type = GGML_TYPE_Q5_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S ) new_type = GGML_TYPE_Q4_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M ) new_type = GGML_TYPE_Q6_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S ) new_type = GGML_TYPE_Q5_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q6_K ) new_type = GGML_TYPE_Q8_0;
}
} else {
if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
}
}
else if (category == tensor_category::ATTENTION_QKV) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
new_type = GGML_TYPE_Q4_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS ||
ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M ||
ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S ||
ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S) {
new_type = GGML_TYPE_Q5_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q6_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q6_K ) new_type = GGML_TYPE_Q8_0;
}
else if (category == tensor_category::FFN_GATE) {
auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
@ -648,6 +959,63 @@ static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type
}
++qs.i_ffn_up;
}
else if (category == tensor_category::ATTENTION_KV_A_MQA) {
new_type = GGML_TYPE_Q8_0;
}
else if (category == tensor_category::ATTENTION_K_B || category == tensor_category::ATTENTION_V_B) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S ||
ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ||
ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S) {
new_type = GGML_TYPE_Q5_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S ||
ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
new_type = GGML_TYPE_IQ4_XS;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) {
new_type = GGML_TYPE_Q6_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q6_K) {
new_type = GGML_TYPE_Q8_0;
}
}
else if (category == tensor_category::ATTENTION_Q_B) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S ||
ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ||
ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S) {
new_type = GGML_TYPE_Q4_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S ||
ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
new_type = GGML_TYPE_IQ4_XS;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) {
new_type = GGML_TYPE_Q5_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) {
new_type = GGML_TYPE_Q6_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q6_K) {
new_type = GGML_TYPE_Q8_0;
}
}
else if (category == tensor_category::ATTENTION_Q_A) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S ||
ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ||
ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S) {
new_type = GGML_TYPE_Q5_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S ||
ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
new_type = GGML_TYPE_IQ4_XS;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) {
new_type = GGML_TYPE_Q6_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q6_K) {
new_type = GGML_TYPE_Q8_0;
}
}
return new_type;
}
@ -983,6 +1351,33 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
if (category_is_attn_v(cat)) {
++qs.n_attention_wv;
}
else if (cat == tensor_category::ATTENTION_QKV) {
++qs.n_attn_qkv;
}
else if (cat == tensor_category::FFN_GATE_EXPS) {
++qs.n_ffn_gate_exps;
}
else if (cat == tensor_category::FFN_GATE_SHEXP) {
++qs.n_ffn_gate_shexp;
}
else if (cat == tensor_category::FFN_DOWN_EXPS) {
++qs.n_ffn_down_exps;
}
else if (cat == tensor_category::FFN_DOWN_SHEXP) {
++qs.n_ffn_down_shexp;
}
else if (cat == tensor_category::FFN_UP_EXPS) {
++qs.n_ffn_up_exps;
}
else if (cat == tensor_category::FFN_UP_SHEXP) {
++qs.n_ffn_up_shexp;
}
else if (cat == tensor_category::SSM_OUT) {
++qs.n_ssm_out;
}
else if (cat == tensor_category::ATTENTION_Q) {
++qs.n_attn_q;
}
if (cat == tensor_category::OUTPUT) {
qs.has_tied_embeddings = false;
}