Remove activation_statistics() option

Ed Addario 2025-08-31 14:03:19 +01:00
parent 70dd25b229
commit 8f1aa7885e
2 changed files with 8 additions and 16 deletions

View File

@@ -2758,16 +2758,9 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.show_statistics = true;
         }
     ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
-    add_opt(common_arg(
-        {"--activation-statistics"},
-        string_format("generate data to compute activation-based statistics (default: %s)", params.show_statistics ? "true" : "false"),
-        [](common_params & params) {
-            params.activation_statistics = true;
-        }
-    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
     add_opt(common_arg(
         {"--parse-special"},
-        string_format("prase special tokens (chat, tool, etc) (default: %s)", params.parse_special ? "true" : "false"),
+        string_format("parse special tokens (chat, tool, etc) (default: %s)", params.parse_special ? "true" : "false"),
         [](common_params & params) {
             params.parse_special = true;
         }

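With the option gone, the imatrix-specific switches that remain are --show-statistics and --parse-special, both registered through the same pattern visible in the hunk above: a common_arg carrying the flag name, a help string, and a lambda that sets the matching common_params field, scoped to the tool with set_examples. A minimal sketch of the surviving --show-statistics registration, with the help text assumed for illustration (only the lambda tail of that option appears in this hunk):

    add_opt(common_arg(
        {"--show-statistics"},
        // Help text below is illustrative; the actual string is not part of this hunk.
        string_format("display imatrix statistics after computation (default: %s)", params.show_statistics ? "true" : "false"),
        [](common_params & params) {
            params.show_statistics = true;   // boolean switch, no value argument
        }
    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));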
View File

@@ -30,7 +30,7 @@ static void print_usage(int, char ** argv) {
         " -m model.gguf -f some-text.txt [-o imatrix.gguf] [--output-format {gguf,dat}] [--no-ppl] \\\n"
         " [--process-output] [--chunk 123] [--save-frequency 0] [--output-frequency 10] \\\n"
         " [--in-file imatrix-prev-0.gguf --in-file imatrix-prev-1.gguf ...] [--parse-special] \\\n"
-        " [--output-format gguf|dat] [--activation-statistics] [--show-statistics] [...]\n" , argv[0]);
+        " [--output-format gguf|dat] [--show-statistics] [...]\n" , argv[0]);
     LOG("\n");
 }
@@ -63,7 +63,6 @@ class IMatrixCollector {
 public:
     IMatrixCollector() = default;
     void set_params(common_params params) { m_params = std::move(params); }
-    bool activation_statistics() const { return m_params.activation_statistics; }
     bool collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data);
     void save_imatrix_legacy(int32_t ncall = -1) const;
     void save_imatrix(int32_t n_chunk = -1) const;
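collect_imatrix() keeps the (tensor, ask, user_data) signature of ggml's scheduler eval callback: the scheduler first calls it with ask == true to check whether the collector wants a given tensor, then again with ask == false once the data is available on the host. A sketch of the usual wiring; the wrapper name and the common_params field names follow the imatrix tool's conventional setup and are assumptions, not lines from this diff:

    // Matches ggml_backend_sched_eval_callback:
    //     bool (*)(struct ggml_tensor * t, bool ask, void * user_data)
    static IMatrixCollector g_collector;

    static bool ik_collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data) {
        return g_collector.collect_imatrix(t, ask, user_data);
    }

    // Installed before evaluation so every matmul's inputs pass through the collector:
    //     params.cb_eval           = ik_collect_imatrix;
    //     params.cb_eval_user_data = nullptr;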
@@ -434,7 +433,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
             e.counts.resize(n_as, e.counts[0]);
         }
         if (e.values.empty()) {
-            if (activation_statistics()) { e.activations.resize(src1->ne[0]*n_as, 0); }
+            e.activations.resize(src1->ne[0]*n_as, 0);
             e.values.resize(src1->ne[0]*n_as, 0);
             e.counts.resize(n_as, 0);
         }
@@ -466,7 +465,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
                 e.counts[ex]++;
                 for (int64_t j = 0; j < src1->ne[0]; ++j) {
-                    if (activation_statistics()) { e.activations[e_start + j] += x[j]; }
+                    e.activations[e_start + j] += x[j];
                     e.values[e_start + j] += x[j] * x[j];
                     if (!std::isfinite((float)e.values[e_start + j])) {
                         LOG_ERR("%f detected in %s\n", (float)e.values[e_start + j], wname.c_str());
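The two accumulators updated above are the whole of the activation statistics that this commit makes unconditional: per input column j, a running sum of activations (saved as in_sum) sits next to the running sum of squares (in_sum2) that the importance matrix itself needs. A condensed, self-contained sketch of that accumulation, using a simplified stats struct rather than the collector's real one:

    #include <cstdint>
    #include <vector>

    // Simplified per-tensor statistics: one slot per input column.
    struct ActivationStats {
        std::vector<float> activations; // running sum of x[j]    -> stored as "<name>.in_sum"
        std::vector<float> values;      // running sum of x[j]^2  -> stored as "<name>.in_sum2"
        int64_t            count = 0;   // number of rows accumulated
    };

    // Accumulate one row x[0..n_embd) into the running sums, sizing lazily on first use
    // just like the e.values.empty() checks in collect_imatrix().
    static void accumulate_row(ActivationStats & s, const float * x, int64_t n_embd) {
        if (s.values.empty()) {
            s.activations.resize(n_embd, 0.0f);
            s.values.resize(n_embd, 0.0f);
        }
        s.count++;
        for (int64_t j = 0; j < n_embd; ++j) {
            s.activations[j] += x[j];       // now always collected, no --activation-statistics gate
            s.values[j]      += x[j] * x[j];
        }
    }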
@@ -506,7 +505,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
             }
         }
         if (e.values.empty()) {
-            if (activation_statistics()) { e.activations.resize(src1->ne[0] * n_mat, 0); }
+            e.activations.resize(src1->ne[0] * n_mat, 0);
             e.values.resize(src1->ne[0] * n_mat, 0);
             e.counts.resize(1, 0);
         }
@@ -525,7 +524,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
             for (int64_t row = 0; row < src1->ne[1]; ++row) {
                 const float * x = (const float *) (data + row * src1->nb[1] + i2 * src1->nb[2] + i3 * src1->nb[3]);
                 for (int64_t j = 0; j < src1->ne[0]; ++j) {
-                    if (activation_statistics()) { e.activations[mat_start + j] += x[j]; }
+                    e.activations[mat_start + j] += x[j];
                     e.values[mat_start + j] += x[j] * x[j];
                     if (!std::isfinite((float)e.values[j])) {
                         LOG_ERR("%f detected in %s\n", (float)e.values[j], wname.c_str());
@@ -707,7 +706,7 @@ void IMatrixCollector::save_imatrix(int32_t n_chunk) const {
        }
         to_store.push_back(kv.first);
-        if (activation_statistics()) { data_size += GGML_PAD(ggml_tensor_overhead() + sizeof(float) * kv.second.activations.size(), GGML_MEM_ALIGN); }
+        data_size += GGML_PAD(ggml_tensor_overhead() + sizeof(float) * kv.second.activations.size(), GGML_MEM_ALIGN);
         data_size += GGML_PAD(ggml_tensor_overhead() + sizeof(float) * kv.second.values.size(), GGML_MEM_ALIGN);
         data_size += GGML_PAD(ggml_tensor_overhead() + sizeof(float) * kv.second.counts.size(), GGML_MEM_ALIGN);
     }
@@ -761,7 +760,7 @@ void IMatrixCollector::save_imatrix(int32_t n_chunk) const {
         gguf_add_tensor(ctx_gguf, in_sum2);
         gguf_add_tensor(ctx_gguf, counts);
-        if (!stat.activations.empty() && activation_statistics()) {
+        if (!stat.activations.empty()) {
             const int32_t nact = (int32_t) stat.activations.size();
             struct ggml_tensor * in_sum = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nact / nmat, nmat);
             ggml_format_name(in_sum, "%s.in_sum", name.c_str());
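On the save side, the collector sizes a single buffer for every tensor's metadata and data (the GGML_PAD / ggml_tensor_overhead accounting above), builds the tensors inside a ggml context backed by that buffer, and registers them with a gguf context before writing the file. A rough, self-contained sketch of that path for one activation-sum tensor; the helper name, the single-matrix shape, and the header layout are assumptions, not code from this file:

    #include <cstring>
    #include <string>
    #include <vector>
    #include "ggml.h"
    #include "gguf.h"

    // Hypothetical helper: write one per-column activation sum as "<name>.in_sum".
    static void save_activation_sums(const std::string & fname,
                                     const std::string & name,
                                     const std::vector<float> & in_sum_data) {
        // Metadata plus padded room for the tensor data, mirroring the accounting in save_imatrix().
        const size_t mem_size = GGML_PAD(ggml_tensor_overhead() + sizeof(float) * in_sum_data.size(), GGML_MEM_ALIGN);

        struct ggml_init_params ip = {
            /*.mem_size   =*/ mem_size,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,   // tensor data is allocated inside this context
        };
        struct ggml_context * ctx      = ggml_init(ip);
        struct gguf_context * ctx_gguf = gguf_init_empty();

        // One matrix for simplicity; the real code shapes this as (nact / nmat) x nmat.
        struct ggml_tensor * in_sum = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, (int64_t) in_sum_data.size(), 1);
        ggml_format_name(in_sum, "%s.in_sum", name.c_str());
        memcpy(in_sum->data, in_sum_data.data(), sizeof(float) * in_sum_data.size());

        gguf_add_tensor(ctx_gguf, in_sum);
        gguf_write_to_file(ctx_gguf, fname.c_str(), false);  // false = write tensor data, not just metadata

        gguf_free(ctx_gguf);
        ggml_free(ctx);
    }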