commit c0bcf2b962
Author: Ed Addario
Date:   2025-12-16 15:20:38 -08:00 (committed by GitHub)

3 changed files with 1446 additions and 118 deletions


@@ -378,9 +378,14 @@ extern "C" {
         bool pure;              // quantize all tensors to the default type
         bool keep_split;        // quantize to the same number of shards
         void * imatrix;         // pointer to importance matrix data
+        void * activations;     // pointer to activations data
         void * kv_overrides;    // pointer to vector containing overrides
         void * tensor_types;    // pointer to vector containing tensor types
         void * prune_layers;    // pointer to vector containing layer indices to prune
+        float target_bpw;       // target bits per weight (bpw)
+        bool keep_bpw_state;    // keep bpw state file
+        void * bpw_state;       // pointer to bpw state file
+        bool no_importance;     // allocate target bpw budget equitably across all tensors
     } llama_model_quantize_params;

     typedef struct llama_logit_bias {
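
A minimal sketch of how client code might opt into the new target-bpw path, assuming the defaults come from llama_model_quantize_default_params(); the values below are illustrative and the bpw_state semantics are inferred from the field comments above:

    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.target_bpw     = 4.5f;     // aim for ~4.5 bits per weight overall
    params.no_importance  = false;    // keep importance-weighted budget allocation
    params.keep_bpw_state = true;     // persist the bpw search results between runs
    params.bpw_state      = nullptr;  // assumed: null selects the default state file name
    llama_model_quantize("model-f32.gguf", "model-quant.gguf", &params);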

File diff suppressed because it is too large.


@@ -117,21 +117,27 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
 [[noreturn]]
 static void usage(const char * executable) {
-    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights]\n", executable);
-    printf("       [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--prune-layers] [--keep-split] [--override-kv]\n");
-    printf("       model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
-    printf("  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
-    printf("  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
-    printf("  --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
+    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights]\n", executable);
+    printf("       [--target-bpw n] [--no-importance] [--keep-bpw-state] [--bpw-state filename] [--output-tensor-type] [--token-embedding-type] [--tensor-type]\n");
+    printf("       [--prune-layers] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
+    printf("  --allow-requantize: allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
+    printf("  --leave-output-tensor: will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
+    printf("  --pure: disable k-quant mixtures and quantize all tensors to the same type\n");
     printf("  --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
     printf("  --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
     printf("  --exclude-weights tensor_name: do not use importance matrix for this/these tensor(s)\n");
     printf("  --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor\n");
     printf("  --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n");
-    printf("  --tensor-type TENSOR=TYPE: quantize this tensor to this ggml_type. example: --tensor-type attn_q=q8_0\n");
+    printf("  --tensor-type TENSOR=TYPE: quantize this tensor to this ggml_type. Example: --tensor-type attn_q=q8_0\n");
     printf("      Advanced option to selectively quantize tensors. May be specified multiple times.\n");
     printf("  --prune-layers L0,L1,L2... comma-separated list of layer numbers to prune from the model\n");
     printf("      Advanced option to remove all tensors from the given layers\n");
+    printf("  --target-bpw: target bits per weight (bpw). Must be a positive number between 0.0 and 16.0\n");
+    printf("      Advanced option to automatically select quantization types to achieve a total bits per weight (bpw) target\n");
+    printf("  --no-importance: distribute bpw budget equitably across all tensors\n");
+    printf("      Advanced option to disable assigning more bpw budget to important tensors. It may increase quality for some models\n");
+    printf("  --keep-bpw-state: save the bpw computations to <architecture>-<model hash>.bpw_state\n");
+    printf("  --bpw-state: file name to use instead of the default\n");
     printf("  --keep-split: will generate quantized model in the same shards as input\n");
     printf("  --override-kv KEY=TYPE:VALUE\n");
     printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
@@ -214,7 +220,10 @@ static int load_legacy_imatrix(const std::string & imatrix_file, std::vector<std
     return m_last_call;
 }

-static int load_imatrix(const std::string & imatrix_file, std::vector<std::string> & imatrix_datasets, std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
+static int load_imatrix(const std::string & imatrix_file,
+        std::vector<std::string> & imatrix_datasets,
+        std::unordered_map<std::string, std::vector<float>> & values_data,
+        std::unordered_map<std::string, std::vector<float>> & activations_data) {
     struct ggml_context * ctx = nullptr;

     struct gguf_init_params meta_gguf_params = {
@@ -224,7 +233,7 @@ static int load_imatrix(const std::string & imatrix_file, std::vector<std::strin
     struct gguf_context * ctx_gguf = gguf_init_from_file(imatrix_file.c_str(), meta_gguf_params);
     if (!ctx_gguf) {
         fprintf(stderr, "%s: imatrix file '%s' is using old format\n", __func__, imatrix_file.c_str());
-        return load_legacy_imatrix(imatrix_file, imatrix_datasets, imatrix_data);
+        return load_legacy_imatrix(imatrix_file, imatrix_datasets, values_data);
     }

     const int32_t n_entries = gguf_get_n_tensors(ctx_gguf);
     if (n_entries < 1) {
@@ -246,11 +255,12 @@ static int load_imatrix(const std::string & imatrix_file, std::vector<std::strin
     const uint32_t chunk_size = gguf_get_val_u32(ctx_gguf, chunk_size_idx);

-    const std::string sums_suffix{ ".in_sum2" };
+    const std::string sums_suffix{ ".in_sum" };
+    const std::string sums2_suffix{ ".in_sum2" };
     const std::string counts_suffix{ ".counts" };

     // Using an ordered map to get a deterministic iteration order.
-    std::map<std::string, std::pair<struct ggml_tensor *, struct ggml_tensor *>> sums_counts_for;
+    std::map<std::string, std::tuple<struct ggml_tensor *, struct ggml_tensor *, struct ggml_tensor *>> sums_counts_for;

     for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
         std::string name = cur->name;
@@ -258,11 +268,14 @@ static int load_imatrix(const std::string & imatrix_file, std::vector<std::strin
         if (name.empty()) { continue; }

         if (string_remove_suffix(name, sums_suffix)) {
+            // in_sum
+            std::get<0>(sums_counts_for[std::move(name)]) = cur;
+        } else if (string_remove_suffix(name, sums2_suffix)) {
             // in_sum2
-            sums_counts_for[std::move(name)].first = cur;
+            std::get<1>(sums_counts_for[std::move(name)]) = cur;
         } else if (string_remove_suffix(name, counts_suffix)) {
             // counts
-            sums_counts_for[std::move(name)].second = cur;
+            std::get<2>(sums_counts_for[std::move(name)]) = cur;
         } else {
             // ignore other tensors
         }
@@ -270,32 +283,40 @@ static int load_imatrix(const std::string & imatrix_file, std::vector<std::strin
     for (const auto & sc : sums_counts_for) {
         const std::string & name = sc.first;
-        const struct ggml_tensor * sums   = sc.second.first;
-        const struct ggml_tensor * counts = sc.second.second;
+        const struct ggml_tensor * sums   = std::get<0>(sc.second);
+        const struct ggml_tensor * sums2  = std::get<1>(sc.second);
+        const struct ggml_tensor * counts = std::get<2>(sc.second);

-        if (!sums || !counts) {
+        // check sums2 and counts are present, and that sums and sums2 have the same shape
+        if (!sums2 || !counts || (sums != nullptr && ggml_nelements(sums) != ggml_nelements(sums2))) {
             fprintf(stderr, "%s: mismatched sums and counts for %s\n", __func__, name.c_str());
             gguf_free(ctx_gguf);
             ggml_free(ctx);
             exit(1);
         }

-        const int64_t ne0 = sums->ne[0];
-        const int64_t ne1 = sums->ne[1];
+        const int64_t ne0 = sums2->ne[0];
+        const int64_t ne1 = sums2->ne[1];

-        auto & e = imatrix_data[name];
-        e.resize(ggml_nelements(sums));
+        auto & activations = activations_data[name];
+        auto & values = values_data[name];
+        if (sums) {
+            activations.resize(ggml_nelements(sums));
+        }
+        values.resize(ggml_nelements(sums2));
         float max_count = 0.0f;
         for (int64_t j = 0; j < ne1; ++j) {
             const float count = ((const float *) counts->data)[j];
             if (count > 0.0f) {
                 for (int64_t i = 0; i < ne0; ++i) {
-                    e[j*ne0 + i] = ((const float *) sums->data)[j*ne0 + i] / count;
+                    values[j*ne0 + i] = ((const float *) sums2->data)[j*ne0 + i] / count;
+                    if (sums) { activations[j*ne0 + i] = ((const float *) sums->data)[j*ne0 + i] / count; }
                 }
             } else {
                 // Partial imatrix data, this tensor never got any input during calibration
                 for (int64_t i = 0; i < ne0; ++i) {
-                    e[j*ne0 + i] = 1;
+                    values[j*ne0 + i] = 1;
+                    if (sums) { activations[j*ne0 + i] = 0; }
                 }
             }
             if (count > max_count) {
@@ -303,7 +324,8 @@ static int load_imatrix(const std::string & imatrix_file, std::vector<std::strin
             }
         }
         if (getenv("LLAMA_TRACE")) {
-            printf("%s: loaded data (size = %6d, n_tokens = %6d, n_chunks = %6d) for '%s'\n", __func__, int(e.size()), int(max_count), int(max_count / chunk_size), name.c_str());
+            printf("%s: loaded data (size = %6d, n_tokens = %6d, n_chunks = %6d) for '%s'\n",
+                __func__, int(values.size()), int(max_count), int(max_count / chunk_size), name.c_str());
         }
     }
@@ -320,7 +342,7 @@ static int load_imatrix(const std::string & imatrix_file, std::vector<std::strin
     }
     printf("]\n");

-    printf("%s: loaded %d importance matrix entries from %s computed on %d chunks\n", __func__, int(imatrix_data.size()), imatrix_file.c_str(), m_last_chunk);
+    printf("%s: loaded %d importance matrix entries from %s computed on %d chunks\n", __func__, int(values_data.size()), imatrix_file.c_str(), m_last_chunk);

     gguf_free(ctx_gguf);
     ggml_free(ctx);
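
In this layout each tracked tensor contributes up to three GGUF entries: <name>.in_sum2 (per-channel sums of squared activations), <name>.counts (how many rows were accumulated), and the new, optional <name>.in_sum (plain activation sums; older imatrix files lack it, hence the if (sums) guards above). Dividing by the count turns the sums into the per-channel means the loop stores, roughly:

    // illustrative: per-channel statistics recovered from the accumulated sums
    values[i]      = sum2[i] / count;  // mean squared activation, the importance weight
    activations[i] = sum[i]  / count;  // mean activation, present only when ".in_sum" exists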
@@ -332,41 +354,56 @@ static int prepare_imatrix(const std::string & imatrix_file,
         std::vector<std::string> & imatrix_dataset,
         const std::vector<std::string> & included_weights,
         const std::vector<std::string> & excluded_weights,
-        std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
+        std::unordered_map<std::string, std::vector<float>> & values_data,
+        std::unordered_map<std::string, std::vector<float>> & activations_data) {
     int m_last_call = -1;
     if (!imatrix_file.empty()) {
-        m_last_call = load_imatrix(imatrix_file, imatrix_dataset, imatrix_data);
+        m_last_call = load_imatrix(imatrix_file, imatrix_dataset, values_data, activations_data);
     }
-    if (imatrix_data.empty()) {
+    if (values_data.empty()) {
         return m_last_call;
     }
     if (!excluded_weights.empty()) {
         for (const auto & name : excluded_weights) {
-            for (auto it = imatrix_data.begin(); it != imatrix_data.end();) {
-                auto pos = it->first.find(name);
+            for (auto vt = values_data.begin(); vt != values_data.end();) {
+                auto pos = vt->first.find(name);
                 if (pos != std::string::npos) {
-                    it = imatrix_data.erase(it);
+                    vt = values_data.erase(vt);
                 } else {
-                    ++it;
+                    ++vt;
+                }
+            }
+            for (auto at = activations_data.begin(); at != activations_data.end();) {
+                auto pos = at->first.find(name);
+                if (pos != std::string::npos) {
+                    at = activations_data.erase(at);
+                } else {
+                    ++at;
                 }
             }
         }
     }
     if (!included_weights.empty()) {
-        std::unordered_map<std::string, std::vector<float>> tmp;
+        std::unordered_map<std::string, std::vector<float>> tmp_values;
+        std::unordered_map<std::string, std::vector<float>> tmp_activations;
         for (const auto & name : included_weights) {
-            for (auto & e : imatrix_data) {
+            for (auto & e : values_data) {
                 auto pos = e.first.find(name);
                 if (pos != std::string::npos) {
-                    tmp.emplace(std::move(e));
+                    tmp_values.emplace(std::move(e));
+                }
+            }
+            for (auto & a : activations_data) {
+                auto pos = a.first.find(name);
+                if (pos != std::string::npos) {
+                    tmp_activations.emplace(std::move(a));
                 }
             }
         }
-        imatrix_data = std::move(tmp);
+        values_data = std::move(tmp_values);
+        activations_data = std::move(tmp_activations);
     }
-    if (!imatrix_data.empty()) {
-        printf("%s: have %d importance matrix entries\n", __func__, int(imatrix_data.size()));
-    }
     return m_last_call;
 }
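
Note that --include-weights / --exclude-weights match by substring (std::string::find), and the two maps are filtered in lockstep: e.g. --exclude-weights attn_k removes every entry whose name contains "attn_k" from values_data and activations_data alike, so a tensor's importance weights and mean activations are always kept or dropped together.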
@@ -440,6 +477,52 @@ static bool parse_layer_prune(const char * data, std::vector<int> & prune_layers
     return true;
 }

+static bool parse_target_bpw(const char * data, float & target_bpw) {
+    if (!data) {
+        printf("\n%s: no target bits per weight (bpw) provided\n\n", __func__);
+        return false;
+    }
+
+    try {
+        target_bpw = std::stof(data);
+        if (target_bpw < 0.0f || target_bpw > 16.0f) {
+            printf("\n%s: target bits per weight (bpw) must be a positive number between 0.0 and 16.0\n\n", __func__);
+            return false;
+        }
+    }
+    catch (const std::exception & e) {
+        printf("\n%s: '%s' is not valid. Target bits per weight (bpw) must be a positive number between 0.0 and 16.0\n\n", __func__, data);
+        return false;
+    }
+
+    return true;
+}
+
+static const char * get_ftype(const float bpw) {
+    const std::map<float, const char *> quant_bpw = {
+        {  1.5625, "IQ1_S"   },
+        {  1.7500, "IQ1_M"   },
+        {  2.0625, "IQ2_XXS" },
+        {  2.3125, "IQ2_XS"  },
+        {  2.5625, "IQ2_S"   },
+        {  2.6250, "Q2_K"    },
+        {  3.0625, "IQ3_XXS" },
+        {  3.4375, "Q3_K"    },
+        {  4.2500, "IQ4_XS"  },
+        {  4.5000, "Q4_K"    },
+        {  5.5000, "Q5_K"    },
+        {  6.5625, "Q6_K"    },
+        {  8.5000, "Q8_0"    },
+#ifdef GGML_USE_METAL
+        { 16.0000, "F16"     }
+#else
+        { 16.0000, "BF16"    }
+#endif
+    };
+
+    return quant_bpw.lower_bound(bpw)->second;
+}
+
 int main(int argc, char ** argv) {
     if (argc < 3) {
         usage(argv[0]);
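
Because std::map keeps its keys sorted, quant_bpw.lower_bound(bpw) returns the first entry whose nominal bpw is not below the target, i.e. the smallest quantization type that meets the requested budget; since parse_target_bpw() caps the target at 16.0, which is the largest key, the iterator is never end(). A few worked lookups from the table above:

    get_ftype(4.30f);   // -> "Q4_K"    (first key >= 4.30 is 4.5000)
    get_ftype(2.0625f); // -> "IQ2_XXS" (exact key match)
    get_ftype(16.0f);   // -> "F16" with GGML_USE_METAL, otherwise "BF16"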
@@ -453,6 +536,7 @@ int main(int argc, char ** argv) {
     std::vector<llama_model_kv_override> kv_overrides;
     std::vector<tensor_quantization> tensor_types;
     std::vector<int> prune_layers;
+    float target_bpw = -1.0f;

     for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
         if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
@@ -479,6 +563,20 @@ int main(int argc, char ** argv) {
             if (arg_idx == argc-1 || !parse_tensor_type(argv[++arg_idx], tensor_types)) {
                 usage(argv[0]);
             }
+        } else if (strcmp(argv[arg_idx], "--target-bpw") == 0) {
+            if (arg_idx == argc-1 || !parse_target_bpw(argv[++arg_idx], target_bpw)) {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--no-importance") == 0) {
+            params.no_importance = true;
+        } else if (strcmp(argv[arg_idx], "--keep-bpw-state") == 0) {
+            params.keep_bpw_state = true;
+        } else if (strcmp(argv[arg_idx], "--bpw-state") == 0) {
+            if (arg_idx < argc-1) {
+                params.bpw_state = argv[++arg_idx];
+            } else {
+                usage(argv[0]);
+            }
         } else if (strcmp(argv[arg_idx], "--prune-layers") == 0) {
             if (arg_idx == argc-1 || !parse_layer_prune(argv[++arg_idx], prune_layers)) {
                 usage(argv[0]);
@@ -525,10 +623,11 @@ int main(int argc, char ** argv) {
     }

     std::vector<std::string> imatrix_datasets;
-    std::unordered_map<std::string, std::vector<float>> imatrix_data;
-    int m_last_call = prepare_imatrix(imatrix_file, imatrix_datasets, included_weights, excluded_weights, imatrix_data);
-    if (!imatrix_data.empty()) {
-        params.imatrix = &imatrix_data;
+    std::unordered_map<std::string, std::vector<float>> values_data;
+    std::unordered_map<std::string, std::vector<float>> activations_data;
+    int m_last_call = prepare_imatrix(imatrix_file, imatrix_datasets, included_weights, excluded_weights, values_data, activations_data);
+    if (!values_data.empty()) {
+        params.imatrix = &values_data;
         {
             llama_model_kv_override kvo;
             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_FILE);
@@ -551,7 +650,7 @@ int main(int argc, char ** argv) {
             llama_model_kv_override kvo;
             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES);
             kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
-            kvo.val_i64 = imatrix_data.size();
+            kvo.val_i64 = values_data.size();
             kv_overrides.emplace_back(std::move(kvo));
         }
@@ -563,6 +662,9 @@ int main(int argc, char ** argv) {
             kv_overrides.emplace_back(std::move(kvo));
         }
     }
+    if (!activations_data.empty()) {
+        params.activations = &activations_data;
+    }
     if (!kv_overrides.empty()) {
         kv_overrides.emplace_back();
         kv_overrides.back().key[0] = 0;
@@ -574,6 +676,9 @@ int main(int argc, char ** argv) {
     if (!prune_layers.empty()) {
         params.prune_layers = &prune_layers;
     }
+    if (target_bpw != -1.0f) {
+        params.target_bpw = target_bpw;
+    }

     llama_backend_init();
@@ -584,6 +689,7 @@ int main(int argc, char ** argv) {
     std::string ftype_str;
     std::string suffix = ".gguf";
+    std::vector<const char *> tmp_argv(argv, argv + argc);
     if (try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) {
         std::string fpath;
         const size_t pos = fname_inp.find_last_of("/\\");
@@ -607,7 +713,15 @@ int main(int argc, char ** argv) {
     }
     arg_idx++;

-    if (argc <= arg_idx) {
+    // select quantization type if target_bpw is set unless user specifies type and threads
+    if (argc - arg_idx <= 1 && params.target_bpw != -1.0f) {
+        auto * ftype = const_cast<char *>(get_ftype(params.target_bpw));
+        if (argc == arg_idx) { tmp_argv.push_back(ftype); }
+        else { tmp_argv.insert(tmp_argv.end() - 1, ftype); }
+        tmp_argv.push_back(nullptr);
+        argv = const_cast<char **>(tmp_argv.data());
+        argc++;
+    } else if (argc <= arg_idx) {
         fprintf(stderr, "%s: missing ftype\n", __func__);
         return 1;
     }
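
This splice lets the existing ftype/nthreads parsing below run unchanged when --target-bpw supplies the type: tmp_argv (declared earlier) copies argv, and the fallback type from get_ftype() is appended when nothing follows the output name, or inserted just before the final argument when that argument is the thread count. Illustrative effect, assuming a 6.0 bpw target:

    ./llama-quantize --target-bpw 6.0 in.gguf out.gguf 8
    // is processed as if invoked: ... in.gguf out.gguf Q6_K 8   (Q6_K = get_ftype(6.0))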
@@ -636,7 +750,7 @@ int main(int argc, char ** argv) {
         params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_S ||
         params.ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S ||
         params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
-        params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) && imatrix_data.empty()) {
+        params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) && values_data.empty()) {
         fprintf(stderr, "\n==========================================================================================================\n");
         fprintf(stderr, "Please do not use IQ1_S, IQ1_M, IQ2_S, IQ2_XXS, IQ2_XS or Q2_K_S quantization without an importance matrix\n");
         fprintf(stderr, "==========================================================================================================\n\n\n");