remove unused `std::vector<ggml_tensor*> tensors;`

ddh0 2026-02-12 18:24:16 -06:00
parent 6734e77662
commit d648629f56
1 changed file with 1 addition and 8 deletions


@@ -790,13 +790,6 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         weights.push_back(&it.second);
     }
-    // make a list of tensors (same pointers as from weights)
-    std::vector<ggml_tensor*> tensors;
-    tensors.reserve(weights.size());
-    for (size_t i = 0; i < weights.size(); ++i) {
-        tensors.push_back(weights[i]->tensor);
-    }
     if (!prune_list.empty()) {
         gguf_set_val_u32(ctx_out.get(), ml.llm_kv(LLM_KV_BLOCK_COUNT).c_str(), blk_id);
     }
@@ -885,7 +878,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         for (size_t i = 0; i < ctx_outs.size(); ++i) {
             gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i);
             gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split);
-            gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), (int32_t)tensors.size());
+            gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), (int32_t)weights.size());
         }
     }
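The swap from `tensors.size()` to `weights.size()` in the `LLM_KV_SPLIT_TENSORS_COUNT` call keeps the written value unchanged, because the deleted vector held exactly one `ggml_tensor*` per entry of `weights`. Below is a minimal standalone sketch of that invariant; the `weight_entry` struct and the dummy data are hypothetical stand-ins, not the real llama.cpp types.

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-ins for the real llama.cpp types in the diff above.
struct ggml_tensor {};
struct weight_entry { ggml_tensor * tensor; };

int main() {
    // Pretend the loader collected a few weights, as the surrounding code
    // does with weights.push_back(&it.second).
    std::vector<ggml_tensor> storage(3);
    std::vector<weight_entry> weights;
    for (auto & t : storage) {
        weights.push_back({ &t });
    }

    // The removed block built a parallel vector with exactly one entry per weight:
    std::vector<ggml_tensor *> tensors;
    tensors.reserve(weights.size());
    for (size_t i = 0; i < weights.size(); ++i) {
        tensors.push_back(weights[i].tensor);
    }

    // So replacing (int32_t)tensors.size() with (int32_t)weights.size()
    // writes the same split tensor count.
    assert(tensors.size() == weights.size());
    return 0;
}
```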