Add --keep-bpw-state option

Ed Addario 2025-10-12 18:23:23 +01:00
parent b6094a97bf
commit ca282302b5
3 changed files with 10 additions and 12 deletions


@@ -366,6 +366,7 @@ extern "C" {
         void * tensor_types; // pointer to vector containing tensor types
         void * prune_layers; // pointer to vector containing layer indices to prune
         float target_bpw;    // target bits per weight (bpw)
+        bool keep_bpw_state; // keep bpw state file
     } llama_model_quantize_params;

     typedef struct llama_logit_bias {

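The struct above is the public quantization-parameter block in llama.h, so the new flag is reachable from the C API as well as from the CLI. A minimal sketch of a caller, assuming this branch's target_bpw and keep_bpw_state fields (file names are illustrative):

#include "llama.h"

int main() {
    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.target_bpw     = 4.5f; // aim for ~4.5 bits per weight
    params.keep_bpw_state = true; // keep the bpw checkpoint file after the run
    // llama_model_quantize() returns 0 on success, so the result doubles as the exit code
    return (int) llama_model_quantize("model-f32.gguf", "model-quant.gguf", &params);
}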

@@ -659,7 +659,6 @@ static std::unordered_map<std::string, ggml_type> target_bpw_type(
        GGML_TYPE_IQ2_S,
        GGML_TYPE_Q2_K,
        GGML_TYPE_IQ3_XXS,
        GGML_TYPE_IQ3_S,
        GGML_TYPE_Q3_K,
        GGML_TYPE_IQ4_XS,
        GGML_TYPE_IQ4_NL,
@ -773,11 +772,9 @@ static std::unordered_map<std::string, ggml_type> target_bpw_type(
auto save_bpw_state = [&](const std::vector<tensor_info> & all_vec) {
const std::string tmp = checkpoint_file + ".tmp";
std::ofstream ofs(tmp, std::ios::binary | std::ios::trunc);
if (!ofs) { return; } // best-effort
const float target_bpw = params->target_bpw;
if (!ofs) { return; }
ofs.write((const char *)&file_magic, sizeof(file_magic));
ofs.write((const char *)&model_id, sizeof(model_id));
ofs.write((const char *)&target_bpw, sizeof(target_bpw));
const uint64_t n = all_vec.size();
ofs.write((const char *)&n, sizeof(n));
for (const auto & ti : all_vec) {
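
Since target_bpw is no longer serialized, the checkpoint header shrinks to the magic and the model ID, followed by the tensor records, so a state file written under one bpw target can seed a run with a different one. A standalone sketch of the trimmed writer, with hypothetical stand-ins for file_magic and the per-tensor record (the real tensor_info serialization is longer, and the atomic .tmp-then-rename step is omitted):

#include <cstdint>
#include <fstream>
#include <string>
#include <vector>

static const uint32_t file_magic = 0xB1D0CAFE; // hypothetical magic value
struct record { uint64_t bytes; };             // hypothetical per-tensor record

static void write_state(const std::string & path, uint64_t model_id,
                        const std::vector<record> & recs) {
    std::ofstream ofs(path + ".tmp", std::ios::binary | std::ios::trunc);
    if (!ofs) { return; } // best-effort, like save_bpw_state above
    ofs.write((const char *)&file_magic, sizeof(file_magic));
    ofs.write((const char *)&model_id,   sizeof(model_id)); // no target_bpw any more
    const uint64_t n = recs.size();
    ofs.write((const char *)&n, sizeof(n));
    for (const auto & r : recs) {
        ofs.write((const char *)&r.bytes, sizeof(r.bytes));
    }
}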
@@ -817,19 +814,14 @@ static std::unordered_map<std::string, ggml_type> target_bpw_type(
        uint32_t magic = 0;
        uint64_t id = 0;
-       float bpw = 0.0f;
        ifs.read((char *)&magic, sizeof(magic));
        ifs.read((char *)&id, sizeof(id));
-       ifs.read((char *)&bpw, sizeof(bpw));
        if (magic != file_magic) {
            LLAMA_LOG_WARN("%s: invalid resume file, ignoring: %s\n", func, checkpoint_file.c_str());
            return out;
        } else if (id != model_id) {
            LLAMA_LOG_WARN("%s: model ID mismatch, ignoring: %s\n", func, checkpoint_file.c_str());
            return out;
-       } else if (bpw != params->target_bpw) {
-           LLAMA_LOG_WARN("%s: target bpw of %f does not match %f, ignoring: %s\n", func, params->target_bpw, bpw, checkpoint_file.c_str());
-           return out;
        } else {
            LLAMA_LOG_INFO("%s: resuming tensor quantization\n", func);
        }
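
The matching loader drops the bpw comparison and now rejects a state file only on a bad magic or a model ID mismatch. A sketch of the trimmed validation, under the same hypothetical stand-ins as above:

#include <cstdint>
#include <cstdio>
#include <fstream>

// returns true when the checkpoint header is usable (hypothetical helper)
static bool header_ok(std::ifstream & ifs, uint32_t expected_magic, uint64_t expected_id) {
    uint32_t magic = 0;
    uint64_t id    = 0;
    ifs.read((char *)&magic, sizeof(magic));
    ifs.read((char *)&id,    sizeof(id));
    if (magic != expected_magic) { fprintf(stderr, "invalid resume file, ignoring\n"); return false; }
    if (id != expected_id)       { fprintf(stderr, "model ID mismatch, ignoring\n");   return false; }
    return true; // note: no target-bpw check any more
}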
@@ -874,7 +866,7 @@ static std::unordered_map<std::string, ggml_type> target_bpw_type(
    auto delete_bpw_state = [&] {
        std::ifstream ifs(checkpoint_file);
-       if (ifs.good()) {
+       if (ifs.good() && !params->keep_bpw_state) {
            LLAMA_LOG_INFO("%s: deleting %s\n", func, checkpoint_file.c_str());
            std::remove(checkpoint_file.c_str());
        }
@@ -1489,6 +1481,7 @@ static std::unordered_map<std::string, ggml_type> target_bpw_type(
    }

    check_signal_handler(all);
+   if (params->keep_bpw_state) { save_bpw_state(all); }

    if (all.empty()) { return {}; }
@@ -2240,7 +2233,8 @@ llama_model_quantize_params llama_model_quantize_default_params() {
        /*.kv_overrides =*/ nullptr,
        /*.tensor_type =*/ nullptr,
        /*.prune_layers =*/ nullptr,
-       /*.target_bpw =*/ -1.0f
+       /*.target_bpw =*/ -1.0f,
+       /*.keep_bpw_state =*/ false
    };
    return result;


@@ -118,7 +118,7 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
[[noreturn]]
static void usage(const char * executable) {
    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights]\n", executable);
-   printf("       [--target-bpw n] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--prune-layers] [--keep-split] [--override-kv]\n");
+   printf("       [--target-bpw n] [--keep-bpw-state] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--prune-layers] [--keep-split] [--override-kv]\n");
    printf("       model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
    printf("  --allow-requantize: allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
    printf("  --leave-output-tensor: will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
@@ -134,6 +134,7 @@ static void usage(const char * executable) {
    printf("      Advanced option to remove all tensors from the given layers\n");
    printf("  --target-bpw: target bits per weight (bpw). Must be a positive number between 0.0 and 8.0\n");
    printf("      Advanced option to automatically select quantization types to achieve a total bits per weight (bpw) target\n");
+   printf("  --keep-bpw-state: preserve the bpw computations in a state file\n");
    printf("  --keep-split: will generate quantized model in the same shards as input\n");
    printf("  --override-kv KEY=TYPE:VALUE\n");
    printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
@@ -557,6 +558,8 @@ int main(int argc, char ** argv) {
            if (arg_idx == argc-1 || !parse_target_bpw(argv[++arg_idx], target_bpw)) {
                usage(argv[0]);
            }
+       } else if (strcmp(argv[arg_idx], "--keep-bpw-state") == 0) {
+           params.keep_bpw_state = true;
        } else if (strcmp(argv[arg_idx], "--prune-layers") == 0) {
            if (arg_idx == argc-1 || !parse_layer_prune(argv[++arg_idx], prune_layers)) {
                usage(argv[0]);
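
Putting the CLI side together, an illustrative invocation (binary and file names are assumptions; upstream llama.cpp builds this tool as llama-quantize, and whether --target-bpw additionally requires an imatrix or a fallback type is not visible in this diff):

./llama-quantize --imatrix imatrix.gguf --target-bpw 4.5 --keep-bpw-state model-f32.gguf model-quant.gguf Q4_K_M 8

With --keep-bpw-state set, the bpw search checkpoints its progress (see the hunk at line 1489 above) and the state file survives the run, so an interrupted or repeated quantization can resume instead of recomputing; without the flag, any leftover state file is deleted.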