new function `llama_tensor_update_stats`
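Type selection (`llama_tensor_get_type` / `llama_tensor_get_type_impl`) no longer takes an `update_stats` flag; the per-category counters (`i_attention_wv`, `i_ffn_down`, `i_ffn_gate`, `i_ffn_up`) are now bumped by the new helper `llama_tensor_update_stats`, so the selector can be called as a pure query (e.g. for the dry-run pass with `target_type`). In `llama_model_quantize_impl` the helper is called for every tensor, after the quantize/no-quantize decision, so the counters advance whether or not the tensor ends up re-quantized. A minimal, self-contained sketch of the pattern (hypothetical stand-in names, not the actual llama.cpp structs):

    // sketch only: quant_counters stands in for the relevant fields of quantize_state_impl
    #include <cstdio>
    #include <string>

    struct quant_counters {
        int i_attention_wv = 0;
        int i_ffn_down     = 0;
        int i_ffn_gate     = 0;
        int i_ffn_up       = 0;
    };

    // mirrors the new llama_tensor_update_stats: bump the counter that matches the tensor name
    static void update_stats(quant_counters & qs, const std::string & name) {
        if (name.find("attn_v.weight") != std::string::npos ||
            name.find("attn_kv_b.weight") != std::string::npos) {
            ++qs.i_attention_wv;
        } else if (name.find("ffn_down") != std::string::npos) {
            ++qs.i_ffn_down;
        } else if (name.find("ffn_gate") != std::string::npos) {
            ++qs.i_ffn_gate;
        } else if (name.find("ffn_up") != std::string::npos) {
            ++qs.i_ffn_up;
        }
    }

    int main() {
        quant_counters qs;
        const char * names[] = { "blk.0.attn_v.weight", "blk.0.ffn_down.weight", "blk.0.ffn_up.weight" };
        for (const char * n : names) {
            // 1) select the target type (pure, can be repeated for a dry run)
            // 2) then record that this tensor was visited
            update_stats(qs, n);
        }
        printf("wv=%d down=%d gate=%d up=%d\n", qs.i_attention_wv, qs.i_ffn_down, qs.i_ffn_gate, qs.i_ffn_up);
        return 0;
    }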

This commit is contained in:
ddh0 2026-02-16 12:20:16 -06:00
parent 0c976fafd7
commit aaf010edeb
1 changed file with 29 additions and 35 deletions


@ -175,7 +175,14 @@ static void llama_tensor_dequantize_impl(
workers.clear();
}
static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype, bool update_stats) {
// internal logic for selecting the target tensor type for a given quantization
// and model arch
static ggml_type llama_tensor_get_type_impl(
quantize_state_impl & qs,
ggml_type new_type,
const ggml_tensor * tensor,
const llama_ftype ftype
) {
const std::string name = ggml_get_name(tensor);
// TODO: avoid hardcoded tensor names - use the TN_* constants
@ -257,9 +264,6 @@ static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type
if (name.find("attn_v.weight") != std::string::npos) { if (name.find("attn_v.weight") != std::string::npos) {
if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K; if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K; else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
if (update_stats) {
++qs.i_attention_wv;
}
}
else if (qs.model.hparams.n_expert == 8 && name.find("attn_k.weight") != std::string::npos) {
new_type = GGML_TYPE_Q4_K;
@ -268,9 +272,6 @@ static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type
if (qs.i_ffn_down < qs.n_ffn_down/8) {
new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
}
if (update_stats) {
++qs.i_ffn_down;
}
}
else if (name.find("attn_output.weight") != std::string::npos) {
if (qs.model.hparams.n_expert == 8) {
@ -317,9 +318,6 @@ static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type
// TODO: explore better strategies
new_type = GGML_TYPE_Q8_0;
}
if (update_stats) {
++qs.i_attention_wv;
}
} else if (name.find("attn_k.weight") != std::string::npos) { } else if (name.find("attn_k.weight") != std::string::npos) {
if (qs.model.hparams.n_expert == 8) { if (qs.model.hparams.n_expert == 8) {
// for the 8-expert model, bumping this to Q8_0 trades just ~128MB // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
@ -383,9 +381,6 @@ static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type
// same quantization as before imatrix stuff, and b) Q4_1/Q5_1 do go crazy on ffn_down without an imatrix.
new_type = ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ? GGML_TYPE_Q4_1 : GGML_TYPE_Q5_1;
}
if (update_stats) {
++qs.i_ffn_down;
}
} else if (name.find("attn_output.weight") != std::string::npos) { } else if (name.find("attn_output.weight") != std::string::npos) {
if (arch != LLM_ARCH_FALCON) { if (arch != LLM_ARCH_FALCON) {
if (qs.model.hparams.n_expert == 8) { if (qs.model.hparams.n_expert == 8) {
@ -419,9 +414,6 @@ static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type
if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
new_type = GGML_TYPE_IQ3_XXS;
}
if (update_stats) {
++qs.i_ffn_gate;
}
}
else if (name.find("ffn_up") != std::string::npos) {
auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
@ -429,23 +421,17 @@ static ggml_type llama_tensor_get_type_impl(quantize_state_impl & qs, ggml_type
if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
new_type = GGML_TYPE_IQ3_XXS;
}
if (update_stats) {
++qs.i_ffn_up;
}
}
return new_type;
}
// determine the ggml_type that this tensor should be quantized to.
//
// `qs` statistics will only be updated if the `update_stats` parameter is true.
// determine the ggml_type that this tensor should be quantized to
static ggml_type llama_tensor_get_type(
quantize_state_impl & qs,
const llama_model_quantize_params * params,
const ggml_tensor * tensor,
ggml_type default_type,
bool update_stats
const ggml_type default_type
) {
ggml_type new_type = default_type;
// get more optimal quantization type based on the tensor shape, layer, etc.
@ -470,7 +456,7 @@ static ggml_type llama_tensor_get_type(
// if not manual - use the standard logic for choosing the quantization type based on the selected mixture
if (!manual) {
new_type = llama_tensor_get_type_impl(qs, new_type, tensor, params->ftype, update_stats);
new_type = llama_tensor_get_type_impl(qs, new_type, tensor, params->ftype);
}
// incompatible tensor shapes are handled here - fallback to a compatible type
@ -484,10 +470,6 @@ static ggml_type llama_tensor_get_type(
if (nx % qk_k != 0) {
LLAMA_LOG_WARN("\n\n%s : tensor cols %" PRId64 " x %" PRId64 " are not divisible by %" PRId64 ", required for %s", __func__, nx, ny, qk_k, ggml_type_name(new_type));
convert_incompatible_tensor = true;
} else {
if (update_stats) {
++qs.n_k_quantized;
}
}
if (convert_incompatible_tensor) {
@ -512,10 +494,6 @@ static ggml_type llama_tensor_get_type(
if (tensor->ne[0] % ggml_blck_size(new_type) != 0) {
new_type = GGML_TYPE_F16;
}
if (update_stats) {
LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
++qs.n_fallback;
}
}
}
}
@ -528,6 +506,20 @@ static ggml_type llama_tensor_get_type(
return new_type;
}
// update internal quantization state statistics based on the tensor name
static void llama_tensor_update_stats(quantize_state_impl & qs, const std::string & name) {
if (name.find("attn_v.weight") != std::string::npos ||
name.find("attn_kv_b.weight") != std::string::npos) {
++qs.i_attention_wv;
} else if (name.find("ffn_down") != std::string::npos) {
++qs.i_ffn_down;
} else if (name.find("ffn_gate") != std::string::npos) {
++qs.i_ffn_gate;
} else if (name.find("ffn_up") != std::string::npos) {
++qs.i_ffn_up;
}
}
static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
if (nthread < 2) {
// single-thread
@ -869,7 +861,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
}
gguf_add_tensor(ctx_outs[i_split].get(), tensor);
ggml_type target_type = llama_tensor_get_type(qs, params, tensor, default_type, false);
ggml_type target_type = llama_tensor_get_type(qs, params, tensor, default_type);
if (!params->imatrix &&
tensor_allows_quantization(params, model.arch, tensor) &&
@ -975,12 +967,14 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
// if so, what will be the target type?
if (do_quantize) {
new_type = llama_tensor_get_type(qs, params, tensor, default_type, true);
new_type = llama_tensor_get_type(qs, params, tensor, default_type);
// If we've decided to quantize to the same type the tensor is already
// in then there's nothing to do.
do_quantize = tensor->type != new_type;
}
llama_tensor_update_stats(qs, name);
void * new_data;
size_t new_size;