ddh0 2026-02-16 13:59:13 -06:00
parent a26db356c9
commit ce0ad2986b
1 changed file with 26 additions and 7 deletions

@@ -40,7 +40,7 @@ struct quantize_state_impl {
// used to figure out if a model shares tok_embd with the output weight
bool has_output = false;
-// if this flag is false, the code will skip updating this struct
+// if this flag is false, the code will skip updating the per-tensor counters
bool do_count = false;
quantize_state_impl(const llama_model & model, const llama_model_quantize_params * params)
@@ -181,10 +181,10 @@ static void llama_tensor_dequantize_impl(
// internal standard logic for selecting the target tensor type for a specific
// quantization mixture & model architecture
static ggml_type llama_tensor_get_type_impl(
-quantize_state_impl * qs,
-ggml_type new_type,
-const ggml_tensor * tensor,
-const llama_ftype ftype
+quantize_state_impl * qs,
+ggml_type new_type,
+const ggml_tensor * tensor,
+llama_ftype ftype
) {
const std::string name = ggml_get_name(tensor);
@@ -267,6 +267,9 @@ static ggml_type llama_tensor_get_type_impl(
if (name.find("attn_v.weight") != std::string::npos) {
if (qs->model.hparams.n_gqa() >= 4 || qs->model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+if (qs->do_count) {
+    ++qs->i_attention_wv;
+}
}
else if (qs->model.hparams.n_expert == 8 && name.find("attn_k.weight") != std::string::npos) {
new_type = GGML_TYPE_Q4_K;
@@ -275,6 +278,9 @@ static ggml_type llama_tensor_get_type_impl(
if (qs->i_ffn_down < qs->n_ffn_down/8) {
new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
}
+if (qs->do_count) {
+    ++qs->i_ffn_down;
+}
}
else if (name.find("attn_output.weight") != std::string::npos) {
if (qs->model.hparams.n_expert == 8) {
@@ -321,6 +327,9 @@ static ggml_type llama_tensor_get_type_impl(
// TODO: explore better strategies
new_type = GGML_TYPE_Q8_0;
}
+if (qs->do_count) {
+    ++qs->i_attention_wv;
+}
} else if (name.find("attn_k.weight") != std::string::npos) {
if (qs->model.hparams.n_expert == 8) {
// for the 8-expert model, bumping this to Q8_0 trades just ~128MB
@@ -384,6 +393,9 @@ static ggml_type llama_tensor_get_type_impl(
// same quantization as before imatrix stuff, and b) Q4_1/Q5_1 do go crazy on ffn_down without an imatrix.
new_type = ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ? GGML_TYPE_Q4_1 : GGML_TYPE_Q5_1;
}
+if (qs->do_count) {
+    ++qs->i_ffn_down;
+}
} else if (name.find("attn_output.weight") != std::string::npos) {
if (arch != LLM_ARCH_FALCON) {
if (qs->model.hparams.n_expert == 8) {
@@ -417,6 +429,9 @@ static ggml_type llama_tensor_get_type_impl(
if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
new_type = GGML_TYPE_IQ3_XXS;
}
+if (qs->do_count) {
+    ++qs->i_ffn_gate;
+}
}
else if (name.find("ffn_up") != std::string::npos) {
auto info = layer_info(qs->i_ffn_up, qs->n_ffn_up, name.c_str());
@@ -424,6 +439,9 @@ static ggml_type llama_tensor_get_type_impl(
if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
new_type = GGML_TYPE_IQ3_XXS;
}
+if (qs->do_count) {
+    ++qs->i_ffn_up;
+}
}
return new_type;
}
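Each of the hunks above adds the same guard: a per-tensor counter (`i_attention_wv`, `i_ffn_down`, `i_ffn_gate`, `i_ffn_up`) is only advanced when `qs->do_count` is set, so the type-selection function can be called to preview a tensor's target type without mutating the counters. A minimal standalone sketch of that pattern, using illustrative names (`counter_state`, `pick_type`) that are not llama.cpp symbols:

```cpp
#include <cstdio>

// Illustrative stand-in for quantize_state_impl: a per-tensor counter plus
// the flag that decides whether a lookup should advance it.
struct counter_state {
    int  i_ffn_down = 0;
    bool do_count   = false;
};

// Illustrative stand-in for llama_tensor_get_type_impl: the chosen type can
// depend on how many tensors of this kind have been seen so far, but the
// counter only moves when do_count is set.
static int pick_type(counter_state * qs, int n_ffn_down) {
    int type = (qs->i_ffn_down < n_ffn_down / 8) ? 2 /* low-bit */ : 4 /* default */;
    if (qs->do_count) {
        ++qs->i_ffn_down;   // only a "counting" pass advances the counter
    }
    return type;
}

int main() {
    counter_state qs;

    // dry pass: query the type without consuming a counter slot
    int preview = pick_type(&qs, 32);

    // counting pass: same decision, but the counter advances this time
    qs.do_count = true;
    int actual = pick_type(&qs, 32);

    std::printf("preview=%d actual=%d i_ffn_down=%d\n", preview, actual, qs.i_ffn_down);
    return 0;
}
```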
@@ -714,6 +732,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
model.load_hparams(ml);
model.load_stats (ml);
// quantize_state_impl qs(model, params);
+auto qs = std::make_unique<quantize_state_impl>(model, params);
if (params->only_copy) {
@@ -843,7 +862,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
}
gguf_add_tensor(ctx_outs[i_split].get(), tensor);
-ggml_type target_type = llama_tensor_get_type(qs, params, tensor, default_type);
+ggml_type target_type = llama_tensor_get_type(qs.get(), params, tensor, default_type);
if (!params->imatrix &&
tensor_allows_quantization(params, model.arch, tensor) &&
@@ -960,7 +979,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
// if so, what will be the new type?
if (do_quantize) {
-new_type = llama_tensor_get_type(qs, params, tensor, default_type);
+new_type = llama_tensor_get_type(qs.get(), params, tensor, default_type);
// If we've decided to quantize to the same type the tensor is already
// in then there's nothing to do.
do_quantize = tensor->type != new_type;
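The last three hunks change how the state is owned and passed: `qs` is now created with `std::make_unique<quantize_state_impl>(model, params)` and the raw pointer from `qs.get()` is handed to `llama_tensor_get_type`, matching the `quantize_state_impl *` parameter shown earlier. A small sketch of that ownership pattern, assuming placeholder names (`state`, `get_type`) rather than the real llama.cpp declarations:

```cpp
#include <memory>

// Illustrative stand-in for quantize_state_impl.
struct state {
    int visited = 0;   // stand-in for the per-tensor counters
};

// Takes a non-owning raw pointer: the callee may read and update the state,
// but ownership stays with the caller's unique_ptr.
static int get_type(state * qs, int default_type) {
    ++qs->visited;
    return default_type;
}

int main() {
    // heap-allocated and uniquely owned; freed automatically at end of scope
    auto qs = std::make_unique<state>();

    // .get() yields the raw pointer without transferring ownership,
    // mirroring the llama_tensor_get_type(qs.get(), ...) calls in the diff
    int target_type = get_type(qs.get(), 4);
    int new_type    = get_type(qs.get(), 4);

    return (target_type == new_type && qs->visited == 2) ? 0 : 1;
}
```

Passing the raw pointer keeps the callee's signature free of any smart-pointer type while the caller retains ownership; where the `do_count` flag gets toggled between the two call sites is not visible in this diff.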