Remove --no-bias option

This commit is contained in:
Ed Addario 2025-10-10 13:26:51 +01:00
parent 3a3d807fc3
commit c93131cef6
No known key found for this signature in database
GPG Key ID: E7875815A3230993
3 changed files with 2 additions and 8 deletions

View File

@@ -365,7 +365,6 @@ extern "C" {
     void * tensor_types; // pointer to vector containing tensor types
     void * prune_layers; // pointer to vector containing layer indices to prune
     float target_bpw;    // target bits per weight (bpw)
-    bool no_bias;        // use mean square error estimation only (no aligment bias)
 } llama_model_quantize_params;

 typedef struct llama_logit_bias {

View File

@@ -2180,8 +2180,7 @@ llama_model_quantize_params llama_model_quantize_default_params() {
         /*.kv_overrides =*/ nullptr,
         /*.tensor_type  =*/ nullptr,
         /*.prune_layers =*/ nullptr,
-        /*.target_bpw   =*/ -1.0f,
-        /*.no_bias      =*/ false
+        /*.target_bpw   =*/ -1.0f
     };

     return result;

View File

@@ -118,7 +118,7 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
 [[noreturn]]
 static void usage(const char * executable) {
     printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights]\n", executable);
-    printf("       [--target-bpw n] [--no-bias] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--prune-layers] [--keep-split] [--override-kv]\n");
+    printf("       [--target-bpw n] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--prune-layers] [--keep-split] [--override-kv]\n");
     printf("       model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
     printf("  --allow-requantize: allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     printf("  --leave-output-tensor: will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
@@ -134,8 +134,6 @@ static void usage(const char * executable) {
     printf("      Advanced option to remove all tensors from the given layers\n");
     printf("  --target-bpw: target bits per weight (bpw). Must be a positive number between 0.0 and 8.0\n");
     printf("      Advanced option to automatically select quantization types to achieve a total bits per weight (bpw) target\n");
-    printf("  --no-bias: use mean square error estimation only (no aligment bias)\n");
-    printf("      Advanced option use MSE only and disable aligment bias error estimation\n");
     printf("  --keep-split: will generate quantized model in the same shards as input\n");
     printf("  --override-kv KEY=TYPE:VALUE\n");
     printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
@@ -559,8 +557,6 @@ int main(int argc, char ** argv) {
             if (arg_idx == argc-1 || !parse_target_bpw(argv[++arg_idx], target_bpw)) {
                 usage(argv[0]);
             }
-        } else if (strcmp(argv[arg_idx], "--no-bias") == 0) {
-            params.no_bias = true;
         } else if (strcmp(argv[arg_idx], "--prune-layers") == 0) {
             if (arg_idx == argc-1 || !parse_layer_prune(argv[++arg_idx], prune_layers)) {
                 usage(argv[0]);