diff --git a/include/llama.h b/include/llama.h
index d0ca37dc65..ba6c185346 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -365,7 +365,7 @@ extern "C" {
         void * tensor_types;  // pointer to vector containing tensor types
         void * prune_layers;  // pointer to vector containing layer indices to prune
         float target_bpw;     // target bits per weight (bpw)
-        bool precise_lambda;  // use precise_lambda calculation - slow computation but very accurate
+        int32_t bpw_bias;     // type of error bias to use: 0 = no bias (MSE only), 1 = fast (default), 2 = precise (slow)
     } llama_model_quantize_params;

     typedef struct llama_logit_bias {
diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index db688fdf02..74ceb3de9c 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -902,26 +902,6 @@ static std::unordered_map target_bpw_type(
         return std::isfinite(total_err) ? total_err : infinity;
     };

-    // Scaling factor to increase lambda when activations are concentrated
-    auto directional_scale = [&](const float * values, const float * activations, int64_t n_per_row) {
-        if (!activations) { return 1.0f; }
-        double sum_v = 0.0;
-        double sum_aw2 = 0.0;
-        double sum_a2 = 0.0;
-        for (int64_t j = 0; j < n_per_row; ++j) {
-            const double v = values ? std::max(0.0f, values[j]) : 1.0;
-            const double a = activations[j];
-            sum_v += v;
-            sum_aw2 += v * a * a;
-            sum_a2 += a * a;
-        }
-        const double rms_a = std::sqrt(sum_a2 / std::max(1.0, (double)n_per_row));
-        const double denom = std::sqrt(std::max(epsilon, sum_v)) * std::max(epsilon, rms_a);
-        const double scale = denom > 0.0 ? std::sqrt(sum_aw2) / denom : 1.0;
-
-        return (float)std::clamp(scale, 0.5, 2.0);
-    };
-
     // Higher precision but much longer to compute
     auto precise_lambda = [&](const ggml_tensor * t,
                               const std::vector<float> & f32_sample,
@@ -979,11 +959,7 @@ static std::unordered_map target_bpw_type(
         if (ratios.empty()) { return 0.0f; }

         std::nth_element(ratios.begin(), ratios.begin() + ratios.size() / 2, ratios.end());
-        double lambda = ratios[ratios.size() / 2];
-
-        const float scale = directional_scale(values, activations, n_per_row);
-        lambda *= scale;
-        lambda = std::clamp(lambda, 0.0, 8.0);
+        const double lambda = std::clamp(ratios[ratios.size() / 2], 0.0, 8.0);

         return (float)lambda;
     };
@@ -1007,8 +983,7 @@ static std::unordered_map target_bpw_type(
         double base = 1.0 - s * s / (d * s2 + epsilon);
         base = std::clamp(base, 0.0, 1.0);

-        const double scale = directional_scale(values, activations, n_per_row);
-        const double lambda = std::clamp(base * scale, 0.0, 1.0) * 8.0;
+        const double lambda = std::clamp(base, 0.0, 1.0) * 8.0;

         return (float)lambda;
     };
@@ -1159,8 +1134,11 @@ static std::unordered_map target_bpw_type(
         {
             const float * values = values_sample.empty() ? nullptr : values_sample.data();
             const float * activations = activations_sample.empty() ? nullptr : activations_sample.data();
-            bias_lambda = params->precise_lambda ? precise_lambda(t, f32_sample, sample_rows_per_slice, values, activations, compatible_candidates) :
-                                                   fast_lambda(values, activations, n_per_row);
+            if (params->bpw_bias == 1) {
+                bias_lambda = fast_lambda(values, activations, n_per_row);
+            } else if (params->bpw_bias == 2) {
+                bias_lambda = precise_lambda(t, f32_sample, sample_rows_per_slice, values, activations, compatible_candidates);
+            }
         }

         // Now evaluate candidates
@@ -1656,7 +1634,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
             } else {
                 LLAMA_LOG_WARN("%s: imatrix without activations provided, target bpw quantization will be less accurate - ", __func__);
             }
-            LLAMA_LOG_INFO("using %s\n", params->precise_lambda ? "precise lambda (slow)" : "fast lambda");
+            const char* msg[] = {"no bias (MSE only)", "fast (default)", "precise (slow)"};
+            LLAMA_LOG_INFO("using %s error estimation\n", msg[params->bpw_bias]);
             LLAMA_LOG_INFO("%s: computing tensor quantization mix to achieve %.4f bpw\n", __func__, params->target_bpw);
             bpw_overrides = target_bpw_type(ml, read_data, model, tensors, mapped, values_data, activations_data, params, nthread);
         } else {
@@ -1967,7 +1946,7 @@ llama_model_quantize_params llama_model_quantize_default_params() {
         /*.tensor_type    =*/ nullptr,
         /*.prune_layers   =*/ nullptr,
         /*.target_bpw     =*/ -1.0f,
-        /*.precise_lambda =*/ false
+        /*.bpw_bias       =*/ 1
     };

     return result;
diff --git a/tools/quantize/quantize.cpp b/tools/quantize/quantize.cpp
index 0c9460513c..0fe65daea0 100644
--- a/tools/quantize/quantize.cpp
+++ b/tools/quantize/quantize.cpp
@@ -134,7 +134,7 @@ static void usage(const char * executable) {
     printf("      Advanced option to remove all tensors from the given layers\n");
     printf("  --target-bpw: target bits per weight (bpw). Must be a positive number between 0.0 and 16.0\n");
     printf("      Advanced option to automatically select quantization types to achieve a total bits per weight (bpw) target\n");
-    printf("  --precise-lambda: given a target bpw, use a high-precision error computation at the expense of longer processing times\n");
+    printf("  --bpw-bias: type of error bias to use: 0 = no bias (MSE only), 1 = fast (default), 2 = precise (slow)\n");
     printf("  --keep-split: will generate quantized model in the same shards as input\n");
     printf("  --override-kv KEY=TYPE:VALUE\n");
     printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
@@ -496,6 +496,27 @@ static bool parse_target_bpw(const char * data, float & target_bpw) {
     return true;
 }

+static bool parse_bpw_bias(const char * data, int & bpw_bias) {
+    if (!data) {
+        printf("\n%s: error bias type not provided\n\n", __func__);
+        return false;
+    }
+
+    try {
+        bpw_bias = std::stoi(data);
+        if (bpw_bias < 0 || bpw_bias > 2) {
+            printf("\n%s: error bias type must be one of 0 (no bias, MSE only), 1 (fast), or 2 (precise, but slow)\n\n", __func__);
+            return false;
+        }
+    }
+    catch (const std::exception & e) {
+        printf("\n%s: '%s' is not valid. Error bias type must be one of 0 (no bias, MSE only), 1 (fast), or 2 (precise, but slow)\n\n", __func__, data);
+        return false;
+    }
+
+    return true;
+}
+
 int main(int argc, char ** argv) {
     if (argc < 3) {
         usage(argv[0]);
@@ -510,6 +531,7 @@ int main(int argc, char ** argv) {
     std::vector<tensor_quantization> tensor_types;
     std::vector<int> prune_layers;
     float target_bpw = -1.0f;
+    int bpw_bias = 1;

     for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
         if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
@@ -540,8 +562,11 @@ int main(int argc, char ** argv) {
             if (arg_idx == argc-1 || !parse_target_bpw(argv[++arg_idx], target_bpw)) {
                 usage(argv[0]);
             }
-        } else if (strcmp(argv[arg_idx], "--precise-lambda") == 0) {
-            params.precise_lambda = true;
+        } else if (strcmp(argv[arg_idx], "--bpw-bias") == 0) {
+            if (arg_idx == argc-1 || !parse_bpw_bias(argv[++arg_idx], bpw_bias)) {
+                usage(argv[0]);
+            }
+            params.bpw_bias = bpw_bias;
         } else if (strcmp(argv[arg_idx], "--prune-layers") == 0) {
             if (arg_idx == argc-1 || !parse_layer_prune(argv[++arg_idx], prune_layers)) {
                 usage(argv[0]);
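
For reference, a minimal caller-side sketch (not part of the patch; the model paths and the base ftype are placeholders) of how the new bpw_bias field would be set alongside target_bpw through the existing llama_model_quantize() API:

#include "llama.h"

int main() {
    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.ftype      = LLAMA_FTYPE_MOSTLY_Q4_K_M;  // base type; the bpw search may override per-tensor types
    params.target_bpw = 5.5f;                       // request an automatic quantization mix at ~5.5 bits per weight
    params.bpw_bias   = 2;                          // 0 = no bias (MSE only), 1 = fast (default), 2 = precise (slow)
    // in practice the llama-quantize tool also wires up imatrix/activation data before this call
    return (int) llama_model_quantize("model-f16.gguf", "model-q.gguf", &params);
}

This corresponds to invoking the tool as: llama-quantize --target-bpw 5.5 --bpw-bias 2 <input.gguf> <output.gguf> <base type>.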